prepare the transition to #Plan.context

This change eases the transition from `dagger input` to `#Plan.context`.

In summary, the codebase now relies on a *context* for execution, with resources mapped to *IDs*.
In the future, the *context* will come from `#Plan.context`.
In the meantime, a bridge converts `dagger input` values into a plan context. This allows *old*- and *new*-style configurations to co-exist on the same underlying engine.
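
A minimal sketch of that bridge idea, using stand-in types rather than the real `plancontext` package (the `Register` method and the ID format here are assumptions; only the `Get`-by-ID accessor and the `Path`/`Include`/`Exclude` fields appear in the diff below):

```go
package main

import "fmt"

// ContextKey is the opaque ID handed out when data is registered.
type ContextKey string

// Directory mirrors the fields the pipeline reads (Path, Include, Exclude).
type Directory struct {
	Path    string
	Include []string
	Exclude []string
}

// directoryContext is a stand-in for the directories held by a plan context.
type directoryContext struct {
	store map[ContextKey]*Directory
}

// Register stores contextual data and returns its ID. Today the bridge calls
// this while processing `dagger input`; later `#Plan.context` would.
func (c *directoryContext) Register(d *Directory) ContextKey {
	id := ContextKey(fmt.Sprintf("directory:%d", len(c.store)))
	c.store[id] = d
	return id
}

// Get resolves an ID back to the registered directory, as the op.#Local
// handler does with pctx.Directories.Get in the pipeline diff.
func (c *directoryContext) Get(id ContextKey) *Directory {
	return c.store[id]
}

func main() {
	dirs := &directoryContext{store: map[ContextKey]*Directory{}}

	// "Old style": `dagger input dir ...` registers the directory once...
	id := dirs.Register(&Directory{Path: "./app", Exclude: []string{"**/.dagger/"}})

	// ...and the CUE side only ever carries the opaque id.
	if d := dirs.Get(id); d != nil {
		fmt.Println(d.Path, d.Exclude)
	}
}
```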

- Implement `plancontext`. `Context` holds the execution context for a plan: currently the platform, local directories, secrets, and services (e.g. unix sockets and Windows named pipes).
- Contextual data can be registered at any point. In the future, this will be done by `#Plan.context`.
- Migrate the `dagger input` codebase to register inputs in a `plancontext`.
- Migrate low-level types/operations to the *Context ID* pattern (see the sketch after this list):
  - `dagger.#Stream` now only includes an `id` (instead of a `unix` path)
  - `dagger.#Secret` still only includes an `id`, but it is now backed by `plancontext`
  - `op.#Local` now only includes an `id` (instead of `path`, `include`, `exclude`)
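
On the consumer side, the *Context ID* pattern means a low-level op carries only an `id` and resolves it through the context. A self-contained sketch modeled on the new `parseStringOrSecret` in the pipeline diff below (the store type and the example ID are made up for illustration):

```go
package main

import "fmt"

// ContextKey identifies a value registered in the plan context.
type ContextKey string

// Secret mirrors the PlainText field read by the pipeline.
type Secret struct {
	PlainText string
}

// secretStore is a stand-in for pctx.Secrets.
type secretStore map[ContextKey]*Secret

// resolveSecret turns the `id` carried by dagger.#Secret back into plain text,
// failing loudly if the ID was never registered.
func resolveSecret(secrets secretStore, id string) (string, error) {
	secret, ok := secrets[ContextKey(id)]
	if !ok {
		return "", fmt.Errorf("secret %q not found", id)
	}
	return secret.PlainText, nil
}

func main() {
	secrets := secretStore{"secret:dockerhub": {PlainText: "hunter2"}}

	// The CUE value only carries the id; the data lives in the context.
	plain, err := resolveSecret(secrets, "secret:dockerhub")
	if err != nil {
		panic(err)
	}
	fmt.Println(plain)
}
```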

Signed-off-by: Andrea Luzzardi <aluzzardi@gmail.com>

View File

@@ -3,13 +3,11 @@ package environment
import (
"context"
"fmt"
"path/filepath"
"cuelang.org/go/cue"
cueflow "cuelang.org/go/tools/flow"
"github.com/containerd/containerd/platforms"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"go.dagger.io/dagger/compiler"
"go.dagger.io/dagger/plancontext"
"go.dagger.io/dagger/solver"
"go.dagger.io/dagger/state"
@@ -74,59 +72,8 @@ func (e *Environment) Computed() *compiler.Value {
return e.computed
}
// Scan all scripts in the environment for references to local directories (do:"local"),
// and return all referenced directory names.
// This is used by clients to grant access to local directories when they are referenced
// by user-specified scripts.
func (e *Environment) LocalDirs() (map[string]string, error) {
dirs := map[string]string{}
localdirs := func(code *compiler.Value) error {
return Analyze(
func(op *compiler.Value) error {
do, err := op.Lookup("do").String()
if err != nil {
return err
}
if do != "local" {
return nil
}
dir, err := op.Lookup("dir").String()
if err != nil {
return err
}
abs, err := filepath.Abs(dir)
if err != nil {
return err
}
dirs[dir] = abs
return nil
},
code,
)
}
// 1. Scan the environment state
// FIXME: use a common `flow` instance to avoid rescanning the tree.
src := compiler.NewValue()
if err := src.FillPath(cue.MakePath(), e.plan); err != nil {
return nil, err
}
if err := src.FillPath(cue.MakePath(), e.input); err != nil {
return nil, err
}
flow := cueflow.New(
&cueflow.Config{},
src.Cue(),
newTaskFunc(noOpRunner),
)
for _, t := range flow.Tasks() {
if err := localdirs(compiler.Wrap(t.Value())); err != nil {
return nil, err
}
}
return dirs, nil
func (e *Environment) Context() *plancontext.Context {
return e.state.Context
}
// Up missing values in environment configuration, and write them to state.
@@ -139,7 +86,7 @@ func (e *Environment) Up(ctx context.Context, s solver.Solver) error {
flow := cueflow.New(
&cueflow.Config{},
e.src.Cue(),
newTaskFunc(newPipelineRunner(e.computed, s, e.state.Platform)),
NewTaskFunc(NewPipelineRunner(e.computed, s, e.state.Context)),
)
if err := flow.Run(ctx); err != nil {
return err
@@ -163,7 +110,7 @@ func (e *Environment) Down(ctx context.Context, _ *DownOpts) error {
type QueryOpts struct{}
func newTaskFunc(runner cueflow.RunnerFunc) cueflow.TaskFunc {
func NewTaskFunc(runner cueflow.RunnerFunc) cueflow.TaskFunc {
return func(flowVal cue.Value) (cueflow.Runner, error) {
v := compiler.Wrap(flowVal)
if !isComponent(v) {
@@ -174,11 +121,7 @@ func newTaskFunc(runner cueflow.RunnerFunc) cueflow.TaskFunc {
}
}
func noOpRunner(t *cueflow.Task) error {
return nil
}
func newPipelineRunner(computed *compiler.Value, s solver.Solver, platform string) cueflow.RunnerFunc {
func NewPipelineRunner(computed *compiler.Value, s solver.Solver, pctx *plancontext.Context) cueflow.RunnerFunc {
return cueflow.RunnerFunc(func(t *cueflow.Task) error {
ctx := t.Context()
lg := log.
@@ -200,23 +143,7 @@ func newPipelineRunner(computed *compiler.Value, s solver.Solver, platform strin
}
v := compiler.Wrap(t.Value())
var pipelinePlatform specs.Platform
if platform == "" {
pipelinePlatform = specs.Platform{OS: "linux", Architecture: "amd64"}
} else {
p, err := platforms.Parse(platform)
if err != nil {
// Record the error
span.AddEvent("command", trace.WithAttributes(
attribute.String("error", err.Error()),
))
return err
}
pipelinePlatform = p
}
p := NewPipeline(v, s, pipelinePlatform)
p := NewPipeline(v, s, pctx)
err := p.Run(ctx)
if err != nil {
// Record the error

View File

@@ -23,11 +23,11 @@ import (
bkgw "github.com/moby/buildkit/frontend/gateway/client"
bkpb "github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/rs/zerolog/log"
"gopkg.in/yaml.v3"
"go.dagger.io/dagger/compiler"
"go.dagger.io/dagger/plancontext"
"go.dagger.io/dagger/solver"
)
@@ -46,17 +46,17 @@ type Pipeline struct {
name string
s solver.Solver
state llb.State
platform specs.Platform // Platform constraint
pctx *plancontext.Context
result bkgw.Reference
image dockerfile2llb.Image
computed *compiler.Value
}
func NewPipeline(code *compiler.Value, s solver.Solver, platform specs.Platform) *Pipeline {
func NewPipeline(code *compiler.Value, s solver.Solver, pctx *plancontext.Context) *Pipeline {
return &Pipeline{
code: code,
name: code.Path().String(),
platform: platform,
pctx: pctx,
s: s,
state: llb.Scratch(),
computed: compiler.NewValue(),
@@ -233,7 +233,7 @@ func (p *Pipeline) run(ctx context.Context) error {
// so that errors map to the correct cue path.
// FIXME: might as well change FS to make every operation
// synchronous.
p.result, err = p.s.Solve(ctx, p.state, p.platform)
p.result, err = p.s.Solve(ctx, p.state, p.pctx.Platform.Get())
if err != nil {
return err
}
@@ -339,7 +339,7 @@ func (p *Pipeline) Copy(ctx context.Context, op *compiler.Value, st llb.State) (
return st, err
}
// Execute 'from' in a tmp pipeline, and use the resulting fs
from := NewPipeline(op.Lookup("from"), p.s, p.platform)
from := NewPipeline(op.Lookup("from"), p.s, p.pctx)
if err := from.Run(ctx); err != nil {
return st, err
}
@@ -361,50 +361,31 @@ func (p *Pipeline) Copy(ctx context.Context, op *compiler.Value, st llb.State) (
}
func (p *Pipeline) Local(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
dir, err := op.Lookup("dir").String()
id, err := op.Lookup("id").String()
if err != nil {
return st, err
}
dir := p.pctx.Directories.Get(plancontext.ContextKey(id))
if dir == nil {
return st, fmt.Errorf("directory %q not found", id)
}
opts := []llb.LocalOption{
llb.WithCustomName(p.vertexNamef("Local %s", dir)),
llb.WithCustomName(p.vertexNamef("Local %s", dir.Path)),
// Without hint, multiple `llb.Local` operations on the
// same path get a different digest.
llb.SessionID(p.s.SessionID()),
llb.SharedKeyHint(dir),
llb.SharedKeyHint(dir.Path),
}
includes, err := op.Lookup("include").List()
if err != nil {
return st, err
}
if len(includes) > 0 {
includePatterns := []string{}
for _, i := range includes {
pattern, err := i.String()
if err != nil {
return st, err
}
includePatterns = append(includePatterns, pattern)
}
opts = append(opts, llb.IncludePatterns(includePatterns))
}
excludes, err := op.Lookup("exclude").List()
if err != nil {
return st, err
if len(dir.Include) > 0 {
opts = append(opts, llb.IncludePatterns(dir.Include))
}
// Excludes .dagger directory by default
excludePatterns := []string{"**/.dagger/"}
if len(excludes) > 0 {
for _, i := range excludes {
pattern, err := i.String()
if err != nil {
return st, err
}
excludePatterns = append(excludePatterns, pattern)
}
if len(dir.Exclude) > 0 {
excludePatterns = dir.Exclude
}
opts = append(opts, llb.ExcludePatterns(excludePatterns))
@@ -415,13 +396,13 @@ func (p *Pipeline) Local(ctx context.Context, op *compiler.Value, st llb.State)
return st.File(
llb.Copy(
llb.Local(
dir,
dir.Path,
opts...,
),
"/",
"/",
),
llb.WithCustomName(p.vertexNamef("Local %s [copy]", dir)),
llb.WithCustomName(p.vertexNamef("Local %s [copy]", dir.Path)),
), nil
}
@@ -574,35 +555,15 @@ func (p *Pipeline) mount(ctx context.Context, dest string, mnt *compiler.Value)
return nil, fmt.Errorf("invalid stream %q: not a stream", stream.Path().String())
}
// Unix socket
unixValue := stream.Lookup("unix")
if unixValue.Exists() {
unix, err := unixValue.String()
if err != nil {
return nil, fmt.Errorf("invalid unix path id: %w", err)
}
return llb.AddSSHSocket(
llb.SSHID(fmt.Sprintf("unix=%s", unix)),
llb.SSHSocketTarget(dest),
), nil
id, err := stream.Lookup("id").String()
if err != nil {
return nil, fmt.Errorf("invalid stream %q: %w", stream.Path().String(), err)
}
// Windows named pipe
npipeValue := stream.Lookup("npipe")
if npipeValue.Exists() {
npipe, err := npipeValue.String()
if err != nil {
return nil, fmt.Errorf("invalid npipe path id: %w", err)
}
return llb.AddSSHSocket(
llb.SSHID(fmt.Sprintf("npipe=%s", npipe)),
llb.SSHSocketTarget(dest),
), nil
}
return nil, fmt.Errorf("invalid stream %q: not a valid stream", stream.Path().String())
return llb.AddSSHSocket(
llb.SSHID(id),
llb.SSHSocketTarget(dest),
), nil
}
// eg. mount: "/foo": { from: www.source }
@@ -610,7 +571,7 @@ func (p *Pipeline) mount(ctx context.Context, dest string, mnt *compiler.Value)
return nil, fmt.Errorf("invalid mount: should have %s structure",
"{from: _, path: string | *\"/\"}")
}
from := NewPipeline(mnt.Lookup("from"), p.s, p.platform)
from := NewPipeline(mnt.Lookup("from"), p.s, p.pctx)
if err := from.Run(ctx); err != nil {
return nil, err
}
@@ -736,7 +697,7 @@ func unmarshalAnything(data []byte, fn unmarshaller) (interface{}, error) {
}
// parseStringOrSecret retrieve secret as plain text or retrieve string
func parseStringOrSecret(ctx context.Context, ss solver.SecretsStore, v *compiler.Value) (string, error) {
func parseStringOrSecret(pctx *plancontext.Context, v *compiler.Value) (string, error) {
// Check if the value is a string, return as is
if value, err := v.String(); err == nil {
return value, nil
@@ -747,16 +708,16 @@ func parseStringOrSecret(ctx context.Context, ss solver.SecretsStore, v *compile
if err != nil {
return "", err
}
secretBytes, err := ss.GetSecret(ctx, id)
if err != nil {
return "", err
secret := pctx.Secrets.Get(plancontext.ContextKey(id))
if secret == nil {
return "", fmt.Errorf("secret %s not found", id)
}
return string(secretBytes), nil
return secret.PlainText, nil
}
func (p *Pipeline) Load(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
// Execute 'from' in a tmp pipeline, and use the resulting fs
from := NewPipeline(op.Lookup("from"), p.s, p.platform)
from := NewPipeline(op.Lookup("from"), p.s, p.pctx)
if err := from.Run(ctx); err != nil {
return st, err
}
@@ -774,7 +735,7 @@ func (p *Pipeline) DockerLogin(ctx context.Context, op *compiler.Value, st llb.S
// that function
// But currently it's not possible because ECR secret's is a string
// so we need to handle both options (string & secret)
secretValue, err := parseStringOrSecret(ctx, p.s.GetOptions().SecretsStore, op.Lookup("secret"))
secretValue, err := parseStringOrSecret(p.pctx, op.Lookup("secret"))
if err != nil {
return st, err
}
@@ -813,9 +774,10 @@ func (p *Pipeline) FetchContainer(ctx context.Context, op *compiler.Value, st ll
)
// Load image metadata and convert to to LLB.
platform := p.pctx.Platform.Get()
p.image, err = p.s.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{
LogName: p.vertexNamef("load metadata for %s", ref.String()),
Platform: &p.platform,
Platform: &platform,
})
if err != nil {
return st, err
@@ -869,7 +831,7 @@ func (p *Pipeline) PushContainer(ctx context.Context, op *compiler.Value, st llb
"name": ref.String(),
"push": "true",
},
}, p.platform)
}, p.pctx.Platform.Get())
if err != nil {
return st, err
@@ -928,7 +890,7 @@ func (p *Pipeline) SaveImage(ctx context.Context, op *compiler.Value, st llb.Sta
Output: func(_ map[string]string) (io.WriteCloser, error) {
return pipeW, nil
},
}, p.platform)
}, p.pctx.Platform.Get())
if err != nil {
return st, err
@@ -1088,7 +1050,7 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value, st llb.S
// docker build context. This can come from another component, so we need to
// compute it first.
if dockerContext.Exists() {
from := NewPipeline(op.Lookup("context"), p.s, p.platform)
from := NewPipeline(op.Lookup("context"), p.s, p.pctx)
if err := from.Run(ctx); err != nil {
return st, err
}
@@ -1118,7 +1080,7 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value, st llb.S
}
}
opts, err := dockerBuildOpts(ctx, op, p.s.GetOptions().SecretsStore)
opts, err := dockerBuildOpts(op, p.pctx)
if err != nil {
return st, err
}
@@ -1129,7 +1091,7 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value, st llb.S
// Set platform to configured one if no one is defined
if opts["platform"] == "" {
opts["platform"] = bkplatforms.Format(p.platform)
opts["platform"] = bkplatforms.Format(p.pctx.Platform.Get())
}
req := bkgw.SolveRequest{
@@ -1162,7 +1124,7 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value, st llb.S
return applyImageToState(p.image, st), nil
}
func dockerBuildOpts(ctx context.Context, op *compiler.Value, ss solver.SecretsStore) (map[string]string, error) {
func dockerBuildOpts(op *compiler.Value, pctx *plancontext.Context) (map[string]string, error) {
opts := map[string]string{}
if dockerfilePath := op.Lookup("dockerfilePath"); dockerfilePath.Exists() {
@@ -1205,7 +1167,7 @@ func dockerBuildOpts(ctx context.Context, op *compiler.Value, ss solver.SecretsS
return nil, err
}
for _, buildArg := range fields {
v, err := parseStringOrSecret(ctx, ss, buildArg.Value)
v, err := parseStringOrSecret(pctx, buildArg.Value)
if err != nil {
return nil, err
}