cleanup: solver/fs

- Solver: encapsulates all access to Buildkit. It can solve plain LLB, invoke external frontends (for DockerBuild), and export (for ContainerPush).
- FS (now BuildkitFS) implements the standard Go 1.16 io/fs.FS interface and provides a read-only filesystem on top of a Buildkit result. It works with standard-library helpers such as fs.WalkDir, so we no longer need our own Walk functions.
- CueBuild moved into compiler.Build, since it no longer depends on Buildkit. Instead it relies on the io/fs.FS interface, which is used for both the base config and the stdlib (go:embed also exposes an io/fs.FS). Overlaying the base config and the stdlib is now handled by the same code; see the sketch below.
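A rough sketch of how the pieces fit together (mirroring Env.Update in the diff below; buildConfig is a hypothetical helper name, ref is assumed to be an already-solved bkgw.Reference such as Pipeline.Result(), and the io/fs, compiler, stdlib and bkgw imports are implied):

// Sketch only: build the Cue config by overlaying the embedded stdlib
// (mounted at cue.mod/pkg/dagger.io, i.e. stdlib.Path) with the files
// read back from a buildkit result through the io/fs interface.
func buildConfig(ref bkgw.Reference) (*compiler.Value, error) {
	sources := map[string]fs.FS{
		stdlib.Path: stdlib.FS,          // embedded stdlib (go:embed)
		"/":         NewBuildkitFS(ref), // read-only view of the buildkit result
	}
	return compiler.Build(sources)
}

Because BuildkitFS is a plain io/fs.FS, the standard helpers (fs.WalkDir, fs.ReadFile) work on it directly; compiler.Build relies on exactly that to populate the CUE load overlay.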

Signed-off-by: Andrea Luzzardi <aluzzardi@gmail.com>
Andrea Luzzardi 2021-03-12 13:00:11 -08:00
parent c35eca99e1
commit c923e5042b
8 changed files with 365 additions and 536 deletions


@@ -1,67 +0,0 @@
package dagger
import (
"context"
"errors"
"fmt"
"path"
"path/filepath"
cueerrors "cuelang.org/go/cue/errors"
cueload "cuelang.org/go/cue/load"
"github.com/rs/zerolog/log"
"dagger.io/go/dagger/compiler"
"dagger.io/go/stdlib"
)
// Build a cue configuration tree from the files in fs.
func CueBuild(ctx context.Context, fs FS, args ...string) (*compiler.Value, error) {
var (
err error
lg = log.Ctx(ctx)
)
buildConfig := &cueload.Config{
// The CUE overlay needs to be prefixed by a non-conflicting path with the
// local filesystem, otherwise Cue will merge the Overlay with whatever Cue
// files it finds locally.
Dir: "/config",
}
// Start by creating an overlay with the stdlib
buildConfig.Overlay, err = stdlib.Overlay(buildConfig.Dir)
if err != nil {
return nil, err
}
// Add the config files on top of the overlay
err = fs.Walk(ctx, func(p string, f Stat) error {
lg.Debug().Str("path", p).Msg("load")
if f.IsDir() {
return nil
}
if filepath.Ext(p) != ".cue" {
return nil
}
contents, err := fs.ReadFile(ctx, p)
if err != nil {
return fmt.Errorf("%s: %w", p, err)
}
overlayPath := path.Join(buildConfig.Dir, p)
buildConfig.Overlay[overlayPath] = cueload.FromBytes(contents)
return nil
})
if err != nil {
return nil, err
}
instances := cueload.Instances(args, buildConfig)
if len(instances) != 1 {
return nil, errors.New("only one package is supported at a time")
}
inst, err := compiler.Cue().Build(instances[0])
if err != nil {
return nil, errors.New(cueerrors.Details(err, &cueerrors.Config{}))
}
return compiler.Wrap(inst.Value(), inst), nil
}


@@ -20,6 +20,7 @@ import (
// buildkit
bk "github.com/moby/buildkit/client"
_ "github.com/moby/buildkit/client/connhelper/dockercontainer" // import the container connection driver
"github.com/moby/buildkit/client/llb"
bkgw "github.com/moby/buildkit/frontend/gateway/client"
// docker output
@@ -142,14 +143,22 @@ func (c *Client) buildfn(ctx context.Context, env *Env, ch chan *bk.SolveStatus,
}
// Export env to a cue directory
// FIXME: this should be elsewhere
lg.Debug().Msg("exporting env")
outdir, err := env.Export(ctx, s.Scratch())
span, _ := opentracing.StartSpanFromContext(ctx, "Env.Export")
defer span.Finish()
st := llb.Scratch().File(
llb.Mkfile("state.cue", 0600, env.State().JSON()),
llb.WithCustomName("[internal] serializing state to JSON"),
)
ref, err := s.Solve(ctx, st)
if err != nil {
return nil, err
}
// Wrap cue directory in buildkit result
return outdir.Result(ctx)
res := bkgw.NewResult()
res.SetRef(ref)
return res, nil
}, ch)
if err != nil {
return fmt.Errorf("buildkit solve: %w", bkCleanError(err))

dagger/compiler/build.go (new file, 63 lines)

@@ -0,0 +1,63 @@
package compiler
import (
"errors"
"fmt"
"io/fs"
"path"
"path/filepath"
cueerrors "cuelang.org/go/cue/errors"
cueload "cuelang.org/go/cue/load"
)
// Build a cue configuration tree from the files in fs.
func Build(sources map[string]fs.FS, args ...string) (*Value, error) {
buildConfig := &cueload.Config{
// The CUE overlay needs to be prefixed by a non-conflicting path with the
// local filesystem, otherwise Cue will merge the Overlay with whatever Cue
// files it finds locally.
Dir: "/config",
Overlay: map[string]cueload.Source{},
}
// Map the source files into the overlay
for mnt, f := range sources {
f := f
mnt := mnt
err := fs.WalkDir(f, ".", func(p string, entry fs.DirEntry, err error) error {
if err != nil {
return err
}
if !entry.Type().IsRegular() {
return nil
}
if filepath.Ext(entry.Name()) != ".cue" {
return nil
}
contents, err := fs.ReadFile(f, p)
if err != nil {
return fmt.Errorf("%s: %w", p, err)
}
overlayPath := path.Join(buildConfig.Dir, mnt, p)
buildConfig.Overlay[overlayPath] = cueload.FromBytes(contents)
return nil
})
if err != nil {
return nil, err
}
}
instances := cueload.Instances(args, buildConfig)
if len(instances) != 1 {
return nil, errors.New("only one package is supported at a time")
}
inst, err := Cue().Build(instances[0])
if err != nil {
return nil, errors.New(cueerrors.Details(err, &cueerrors.Config{}))
}
return Wrap(inst.Value(), inst), nil
}


@@ -3,12 +3,14 @@ package dagger
import (
"context"
"fmt"
"io/fs"
"strings"
"time"
"cuelang.org/go/cue"
cueflow "cuelang.org/go/tools/flow"
"dagger.io/go/dagger/compiler"
"dagger.io/go/stdlib"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
@@ -98,9 +100,12 @@ func (env *Env) Update(ctx context.Context, s Solver) error {
return err
}
// load cue files produced by updater
// FIXME: BuildAll() to force all files (no required package..)
base, err := CueBuild(ctx, p.FS())
// Build a Cue config by overlaying the source with the stdlib
sources := map[string]fs.FS{
stdlib.Path: stdlib.FS,
"/": p.FS(),
}
base, err := compiler.Build(sources)
if err != nil {
return fmt.Errorf("base config: %w", err)
}
@@ -190,33 +195,6 @@ func (env *Env) mergeState() error {
return nil
}
// Export env to a directory of cue files
// (Use with FS.Change)
func (env *Env) Export(ctx context.Context, fs FS) (FS, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "Env.Export")
defer span.Finish()
// FIXME: we serialize as JSON to guarantee a self-contained file.
// compiler.Value.Save() leaks imports, so requires a shared cue.mod with
// client which is undesirable.
// Once compiler.Value.Save() resolves non-builtin imports with a tree shake,
// we can use it here.
// FIXME: Exporting base/input/output separately causes merge errors.
// For instance, `foo: string | *"default foo"` gets serialized as
// `{"foo":"default foo"}`, which will fail to merge if output contains
// a different definition of `foo`.
//
// fs = env.base.SaveJSON(fs, "base.cue")
// fs = env.input.SaveJSON(fs, "input.cue")
// if env.output != nil {
// fs = env.output.SaveJSON(fs, "output.cue")
// }
// For now, export a single `state.cue` containing the combined output.
fs = fs.WriteValueJSON("state.cue", env.state)
return fs, nil
}
// Compute missing values in env configuration, and write them to state.
func (env *Env) Compute(ctx context.Context, s Solver) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "Env.Compute")


@@ -3,228 +3,102 @@ package dagger
import (
"context"
"errors"
"os"
"path"
"strings"
"io/fs"
"time"
bk "github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
bkgw "github.com/moby/buildkit/frontend/gateway/client"
bkpb "github.com/moby/buildkit/solver/pb"
fstypes "github.com/tonistiigi/fsutil/types"
"dagger.io/go/dagger/compiler"
)
type Stat struct {
*fstypes.Stat
// BuildkitFS is an io/fs.FS adapter for Buildkit
// BuildkitFS implements the ReadFileFS, StatFS and ReadDirFS interfaces.
type BuildkitFS struct {
ref bkgw.Reference
}
type FS struct {
// Before last solve
input llb.State
// After last solve
output bkgw.Reference
// How to produce the output
s Solver
func NewBuildkitFS(ref bkgw.Reference) *BuildkitFS {
return &BuildkitFS{
ref: ref,
}
}
func (fs FS) WriteValueJSON(filename string, v *compiler.Value) FS {
return fs.Change(func(st llb.State) llb.State {
return st.File(
llb.Mkfile(filename, 0600, v.JSON()),
llb.WithCustomName("[internal] serializing state to JSON"),
)
})
// Open is not supported.
func (f *BuildkitFS) Open(name string) (fs.File, error) {
return nil, errors.New("not implemented")
}
func (fs FS) WriteValueCUE(filename string, v *compiler.Value) (FS, error) {
src, err := v.Source()
if err != nil {
return fs, err
}
return fs.Change(func(st llb.State) llb.State {
return st.File(
llb.Mkfile(filename, 0600, src),
llb.WithCustomName("[internal] serializing state to CUE"),
)
}), nil
}
func (fs FS) Solver() Solver {
return fs.s
}
// Compute output from input, if not done already.
// This method uses a pointer receiver to simplify
// calling it, since it is called in almost every
// other method.
func (fs *FS) solve(ctx context.Context) error {
if fs.output != nil {
return nil
}
output, err := fs.s.Solve(ctx, fs.input)
if err != nil {
return bkCleanError(err)
}
fs.output = output
return nil
}
func (fs FS) ReadFile(ctx context.Context, filename string) ([]byte, error) {
// Lazy solve
if err := (&fs).solve(ctx); err != nil {
return nil, err
}
// NOTE: llb.Scratch is represented by a `nil` reference. If solve result is
// Scratch, then `fs.output` is `nil`.
if fs.output == nil {
return nil, os.ErrNotExist
}
contents, err := fs.output.ReadFile(ctx, bkgw.ReadRequest{Filename: filename})
if err != nil {
return nil, bkCleanError(err)
}
return contents, nil
}
func (fs FS) ReadDir(ctx context.Context, dir string) ([]Stat, error) {
// Lazy solve
if err := (&fs).solve(ctx); err != nil {
return nil, err
}
// NOTE: llb.Scratch is represented by a `nil` reference. If solve result is
// Scratch, then `fs.output` is `nil`.
if fs.output == nil {
return []Stat{}, nil
}
st, err := fs.output.ReadDir(ctx, bkgw.ReadDirRequest{
Path: dir,
func (f *BuildkitFS) Stat(name string) (fs.FileInfo, error) {
st, err := f.ref.StatFile(context.TODO(), bkgw.StatRequest{
Path: name,
})
if err != nil {
return nil, bkCleanError(err)
return nil, err
}
out := make([]Stat, len(st))
for i := range st {
out[i] = Stat{
Stat: st[i],
}
}
return out, nil
return bkFileInfo{st}, nil
}
func (fs FS) walk(ctx context.Context, p string, fn WalkFunc) error {
files, err := fs.ReadDir(ctx, p)
if err != nil {
return err
}
for _, f := range files {
fPath := path.Join(p, f.GetPath())
if err := fn(fPath, f); err != nil {
return err
}
if f.IsDir() {
if err := fs.walk(ctx, fPath, fn); err != nil {
return err
}
}
}
return nil
}
type WalkFunc func(string, Stat) error
func (fs FS) Walk(ctx context.Context, fn WalkFunc) error {
return fs.walk(ctx, "/", fn)
}
type ChangeFunc func(llb.State) llb.State
func (fs FS) Change(changes ...ChangeFunc) FS {
for _, change := range changes {
fs = fs.Set(change(fs.input))
}
return fs
}
func (fs FS) Set(st llb.State) FS {
fs.input = st
fs.output = nil
return fs
}
func (fs FS) Solve(ctx context.Context) (FS, error) {
if err := (&fs).solve(ctx); err != nil {
return fs, err
}
return fs, nil
}
func (fs FS) LLB() llb.State {
return fs.input
}
func (fs FS) Def(ctx context.Context) (*bkpb.Definition, error) {
def, err := fs.LLB().Marshal(ctx, llb.LinuxAmd64)
func (f *BuildkitFS) ReadDir(name string) ([]fs.DirEntry, error) {
entries, err := f.ref.ReadDir(context.TODO(), bkgw.ReadDirRequest{
Path: name,
})
if err != nil {
return nil, err
}
return def.ToPB(), nil
}
func (fs FS) Ref(ctx context.Context) (bkgw.Reference, error) {
if err := (&fs).solve(ctx); err != nil {
return nil, err
res := make([]fs.DirEntry, 0, len(entries))
for _, st := range entries {
res = append(res, bkDirEntry{
bkFileInfo: bkFileInfo{
st: st,
},
})
}
return fs.output, nil
}
func (fs FS) Result(ctx context.Context) (*bkgw.Result, error) {
res := bkgw.NewResult()
ref, err := fs.Ref(ctx)
if err != nil {
return nil, err
}
res.SetRef(ref)
return res, nil
}
func (fs FS) Export(ctx context.Context, output bk.ExportEntry) (*bk.SolveResponse, error) {
// Lazy solve
if err := (&fs).solve(ctx); err != nil {
return nil, err
}
// NOTE: llb.Scratch is represented by a `nil` reference. If solve result is
// Scratch, then `fs.output` is `nil`.
if fs.output == nil {
return nil, os.ErrNotExist
}
st, err := fs.output.ToState()
if err != nil {
return nil, err
}
return fs.s.Export(ctx, st, output)
func (f *BuildkitFS) ReadFile(name string) ([]byte, error) {
return f.ref.ReadFile(context.TODO(), bkgw.ReadRequest{
Filename: name,
})
}
// A helper to remove noise from buildkit error messages.
// FIXME: Obviously a cleaner solution would be nice.
func bkCleanError(err error) error {
noise := []string{
"executor failed running ",
"buildkit-runc did not terminate successfully",
"rpc error: code = Unknown desc = ",
"failed to solve: ",
}
msg := err.Error()
for _, s := range noise {
msg = strings.ReplaceAll(msg, s, "")
}
return errors.New(msg)
// bkFileInfo is a fs.FileInfo adapter for fstypes.Stat
type bkFileInfo struct {
st *fstypes.Stat
}
func (s bkFileInfo) Name() string {
return s.st.GetPath()
}
func (s bkFileInfo) Size() int64 {
return s.st.GetSize_()
}
func (s bkFileInfo) IsDir() bool {
return s.st.IsDir()
}
func (s bkFileInfo) ModTime() time.Time {
return time.Unix(s.st.GetModTime(), 0)
}
func (s bkFileInfo) Mode() fs.FileMode {
return fs.FileMode(s.st.Mode)
}
func (s bkFileInfo) Sys() interface{} {
return s.st
}
// bkDirEntry is a fs.DirEntry adapter for fstypes.Stat
type bkDirEntry struct {
bkFileInfo
}
func (s bkDirEntry) Info() (fs.FileInfo, error) {
return s.bkFileInfo, nil
}
func (s bkDirEntry) Type() fs.FileMode {
return s.Mode()
}


@@ -22,23 +22,32 @@ import (
// An execution pipeline
type Pipeline struct {
name string
s Solver
fs FS
out *Fillable
name string
s Solver
state llb.State
result bkgw.Reference
out *Fillable
}
func NewPipeline(name string, s Solver, out *Fillable) *Pipeline {
return &Pipeline{
name: name,
s: s,
fs: s.Scratch(),
out: out,
name: name,
s: s,
state: llb.Scratch(),
out: out,
}
}
func (p *Pipeline) FS() FS {
return p.fs
func (p *Pipeline) State() llb.State {
return p.state
}
func (p *Pipeline) Result() bkgw.Reference {
return p.result
}
func (p *Pipeline) FS() fs.FS {
return NewBuildkitFS(p.result)
}
func isComponent(v *compiler.Value) bool {
@@ -129,54 +138,54 @@ func (p *Pipeline) Do(ctx context.Context, code ...*compiler.Value) error {
Msg("pipeline was partially executed because of missing inputs")
return nil
}
if err := p.doOp(ctx, op); err != nil {
p.state, err = p.doOp(ctx, op, p.state)
if err != nil {
return err
}
// Force a buildkit solve request at each operation,
// so that errors map to the correct cue path.
// FIXME: might as well change FS to make every operation
// synchronous.
fs, err := p.fs.Solve(ctx)
p.result, err = p.s.Solve(ctx, p.state)
if err != nil {
return err
}
p.fs = fs
}
return nil
}
func (p *Pipeline) doOp(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) doOp(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
do, err := op.Get("do").String()
if err != nil {
return err
return st, err
}
switch do {
case "copy":
return p.Copy(ctx, op)
return p.Copy(ctx, op, st)
case "exec":
return p.Exec(ctx, op)
return p.Exec(ctx, op, st)
case "export":
return p.Export(ctx, op)
return p.Export(ctx, op, st)
case "fetch-container":
return p.FetchContainer(ctx, op)
return p.FetchContainer(ctx, op, st)
case "push-container":
return p.PushContainer(ctx, op)
return p.PushContainer(ctx, op, st)
case "fetch-git":
return p.FetchGit(ctx, op)
return p.FetchGit(ctx, op, st)
case "local":
return p.Local(ctx, op)
return p.Local(ctx, op, st)
case "load":
return p.Load(ctx, op)
return p.Load(ctx, op, st)
case "subdir":
return p.Subdir(ctx, op)
return p.Subdir(ctx, op, st)
case "docker-build":
return p.DockerBuild(ctx, op)
return p.DockerBuild(ctx, op, st)
case "write-file":
return p.WriteFile(ctx, op)
return p.WriteFile(ctx, op, st)
case "mkdir":
return p.Mkdir(ctx, op)
return p.Mkdir(ctx, op, st)
default:
return fmt.Errorf("invalid operation: %s", op.JSON())
return st, fmt.Errorf("invalid operation: %s", op.JSON())
}
}
@@ -192,74 +201,68 @@ func (p *Pipeline) Tmp(name string) *Pipeline {
return NewPipeline(name, p.s, nil)
}
func (p *Pipeline) Subdir(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) Subdir(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
// FIXME: this could be more optimized by carrying subdir path as metadata,
// and using it in copy, load or mount.
dir, err := op.Get("dir").String()
if err != nil {
return err
return st, err
}
p.fs = p.fs.Change(func(st llb.State) llb.State {
return st.File(
llb.Copy(
p.fs.LLB(),
dir,
"/",
&llb.CopyInfo{
CopyDirContentsOnly: true,
},
),
llb.WithCustomName(p.vertexNamef("Subdir %s", dir)),
)
})
return nil
return st.File(
llb.Copy(
st,
dir,
"/",
&llb.CopyInfo{
CopyDirContentsOnly: true,
},
),
llb.WithCustomName(p.vertexNamef("Subdir %s", dir)),
), nil
}
func (p *Pipeline) Copy(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) Copy(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
// Decode copy options
src, err := op.Get("src").String()
if err != nil {
return err
return st, err
}
dest, err := op.Get("dest").String()
if err != nil {
return err
return st, err
}
// Execute 'from' in a tmp pipeline, and use the resulting fs
from := p.Tmp(op.Get("from").Path().String())
if err := from.Do(ctx, op.Get("from")); err != nil {
return err
return st, err
}
p.fs = p.fs.Change(func(st llb.State) llb.State {
return st.File(
llb.Copy(
from.FS().LLB(),
src,
dest,
// FIXME: allow more configurable llb options
// For now we define the following convenience presets:
&llb.CopyInfo{
CopyDirContentsOnly: true,
CreateDestPath: true,
AllowWildcard: true,
},
),
llb.WithCustomName(p.vertexNamef("Copy %s %s", src, dest)),
)
})
return nil
return st.File(
llb.Copy(
from.State(),
src,
dest,
// FIXME: allow more configurable llb options
// For now we define the following convenience presets:
&llb.CopyInfo{
CopyDirContentsOnly: true,
CreateDestPath: true,
AllowWildcard: true,
},
),
llb.WithCustomName(p.vertexNamef("Copy %s %s", src, dest)),
), nil
}
func (p *Pipeline) Local(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) Local(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
dir, err := op.Get("dir").String()
if err != nil {
return err
return st, err
}
var include []string
if inc := op.Get("include"); inc.Exists() {
if err := inc.Decode(&include); err != nil {
return err
return st, err
}
}
// FIXME: Remove the `Copy` and use `Local` directly.
@@ -270,30 +273,26 @@ func (p *Pipeline) Local(ctx context.Context, op *compiler.Value) error {
//
// By wrapping `llb.Local` inside `llb.Copy`, we get the same digest for
// the same content.
p.fs = p.fs.Change(func(st llb.State) llb.State {
return st.File(
llb.Copy(
llb.Local(
dir,
llb.FollowPaths(include),
llb.WithCustomName(p.vertexNamef("Local %s [transfer]", dir)),
return st.File(
llb.Copy(
llb.Local(
dir,
llb.FollowPaths(include),
llb.WithCustomName(p.vertexNamef("Local %s [transfer]", dir)),
// Without hint, multiple `llb.Local` operations on the
// same path get a different digest.
llb.SessionID(p.s.SessionID()),
llb.SharedKeyHint(dir),
),
"/",
"/",
// Without hint, multiple `llb.Local` operations on the
// same path get a different digest.
llb.SessionID(p.s.SessionID()),
llb.SharedKeyHint(dir),
),
llb.WithCustomName(p.vertexNamef("Local %s [copy]", dir)),
)
})
return nil
"/",
"/",
),
llb.WithCustomName(p.vertexNamef("Local %s [copy]", dir)),
), nil
}
func (p *Pipeline) Exec(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) Exec(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
opts := []llb.RunOption{}
var cmd struct {
Args []string
@@ -303,7 +302,7 @@ func (p *Pipeline) Exec(ctx context.Context, op *compiler.Value) error {
}
if err := op.Decode(&cmd); err != nil {
return err
return st, err
}
// args
opts = append(opts, llb.Args(cmd.Args))
@@ -318,7 +317,7 @@ func (p *Pipeline) Exec(ctx context.Context, op *compiler.Value) error {
if cmd.Always {
cacheBuster, err := randomID(8)
if err != nil {
return err
return st, err
}
opts = append(opts, llb.AddEnv("DAGGER_CACHEBUSTER", cacheBuster))
}
@@ -326,7 +325,7 @@ func (p *Pipeline) Exec(ctx context.Context, op *compiler.Value) error {
if mounts := op.Lookup("mount"); mounts.Exists() {
mntOpts, err := p.mountAll(ctx, mounts)
if err != nil {
return err
return st, err
}
opts = append(opts, mntOpts...)
}
@@ -340,10 +339,7 @@ func (p *Pipeline) Exec(ctx context.Context, op *compiler.Value) error {
opts = append(opts, llb.WithCustomName(p.vertexNamef("Exec [%s]", strings.Join(args, ", "))))
// --> Execute
p.fs = p.fs.Change(func(st llb.State) llb.State {
return st.Run(opts...).Root()
})
return nil
return st.Run(opts...).Root(), nil
}
func (p *Pipeline) mountAll(ctx context.Context, mounts *compiler.Value) ([]llb.RunOption, error) {
@@ -397,21 +393,21 @@ func (p *Pipeline) mount(ctx context.Context, dest string, mnt *compiler.Value)
}
mo = append(mo, llb.SourcePath(mps))
}
return llb.AddMount(dest, from.FS().LLB(), mo...), nil
return llb.AddMount(dest, from.State(), mo...), nil
}
func (p *Pipeline) Export(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) Export(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
source, err := op.Get("source").String()
if err != nil {
return err
return st, err
}
format, err := op.Get("format").String()
if err != nil {
return err
return st, err
}
contents, err := p.fs.ReadFile(ctx, source)
contents, err := fs.ReadFile(p.FS(), source)
if err != nil {
return fmt.Errorf("export %s: %w", source, err)
return st, fmt.Errorf("export %s: %w", source, err)
}
switch format {
case "string":
@@ -422,13 +418,13 @@ func (p *Pipeline) Export(ctx context.Context, op *compiler.Value) error {
Msg("exporting string")
if err := p.out.Fill(string(contents)); err != nil {
return err
return st, err
}
case "json":
var o interface{}
o, err := unmarshalAnything(contents, json.Unmarshal)
if err != nil {
return err
return st, err
}
log.
@@ -438,13 +434,13 @@ func (p *Pipeline) Export(ctx context.Context, op *compiler.Value) error {
Msg("exporting json")
if err := p.out.Fill(o); err != nil {
return err
return st, err
}
case "yaml":
var o interface{}
o, err := unmarshalAnything(contents, yaml.Unmarshal)
if err != nil {
return err
return st, err
}
log.
@@ -454,12 +450,12 @@ func (p *Pipeline) Export(ctx context.Context, op *compiler.Value) error {
Msg("exporting yaml")
if err := p.out.Fill(o); err != nil {
return err
return st, err
}
default:
return fmt.Errorf("unsupported export format: %q", format)
return st, fmt.Errorf("unsupported export format: %q", format)
}
return nil
return st, nil
}
type unmarshaller func([]byte, interface{}) error
@@ -481,31 +477,30 @@ func unmarshalAnything(data []byte, fn unmarshaller) (interface{}, error) {
return o, err
}
func (p *Pipeline) Load(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) Load(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
// Execute 'from' in a tmp pipeline, and use the resulting fs
from := p.Tmp(op.Get("from").Path().String())
if err := from.Do(ctx, op.Get("from")); err != nil {
return err
return st, err
}
p.fs = p.fs.Set(from.FS().LLB())
return nil
return from.State(), nil
}
func (p *Pipeline) FetchContainer(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) FetchContainer(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
rawRef, err := op.Get("ref").String()
if err != nil {
return err
return st, err
}
ref, err := reference.ParseNormalizedNamed(rawRef)
if err != nil {
return fmt.Errorf("failed to parse ref %s: %w", rawRef, err)
return st, fmt.Errorf("failed to parse ref %s: %w", rawRef, err)
}
// Add the default tag "latest" to a reference if it only has a repo name.
ref = reference.TagNameOnly(ref)
state := llb.Image(
st = llb.Image(
ref.String(),
llb.WithCustomName(p.vertexNamef("FetchContainer %s", rawRef)),
)
@@ -517,21 +512,20 @@ func (p *Pipeline) FetchContainer(ctx context.Context, op *compiler.Value) error
LogName: p.vertexNamef("load metadata for %s", ref.String()),
})
if err != nil {
return err
return st, err
}
for _, env := range image.Config.Env {
k, v := parseKeyValue(env)
state = state.AddEnv(k, v)
st = st.AddEnv(k, v)
}
if image.Config.WorkingDir != "" {
state = state.Dir(image.Config.WorkingDir)
st = st.Dir(image.Config.WorkingDir)
}
if image.Config.User != "" {
state = state.User(image.Config.User)
st = st.User(image.Config.User)
}
p.fs = p.fs.Set(state)
return nil
return st, nil
}
func parseKeyValue(env string) (string, string) {
@@ -544,45 +538,52 @@ func parseKeyValue(env string) (string, string) {
return parts[0], v
}
func (p *Pipeline) PushContainer(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) PushContainer(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
rawRef, err := op.Get("ref").String()
if err != nil {
return err
return st, err
}
ref, err := reference.ParseNormalizedNamed(rawRef)
if err != nil {
return fmt.Errorf("failed to parse ref %s: %w", rawRef, err)
return st, fmt.Errorf("failed to parse ref %s: %w", rawRef, err)
}
// Add the default tag "latest" to a reference if it only has a repo name.
ref = reference.TagNameOnly(ref)
_, err = p.fs.Export(ctx, bk.ExportEntry{
pushSt, err := p.result.ToState()
if err != nil {
return st, err
}
_, err = p.s.Export(ctx, pushSt, bk.ExportEntry{
Type: bk.ExporterImage,
Attrs: map[string]string{
"name": ref.String(),
"push": "true",
},
})
return err
return st, err
}
func (p *Pipeline) FetchGit(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) FetchGit(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
remote, err := op.Get("remote").String()
if err != nil {
return err
return st, err
}
ref, err := op.Get("ref").String()
if err != nil {
return err
return st, err
}
p.fs = p.fs.Set(
llb.Git(remote, ref, llb.WithCustomName(p.vertexNamef("FetchGit %s@%s", remote, ref))),
)
return nil
return llb.Git(
remote,
ref,
llb.WithCustomName(p.vertexNamef("FetchGit %s@%s", remote, ref)),
), nil
}
func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
var (
context = op.Lookup("context")
dockerfile = op.Lookup("dockerfile")
@@ -594,7 +595,7 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
)
if !context.Exists() && !dockerfile.Exists() {
return errors.New("context or dockerfile required")
return st, errors.New("context or dockerfile required")
}
// docker build context. This can come from another component, so we need to
@@ -602,11 +603,11 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
if context.Exists() {
from := p.Tmp(op.Lookup("context").Path().String())
if err := from.Do(ctx, context); err != nil {
return err
return st, err
}
contextDef, err = from.FS().Def(ctx)
contextDef, err = p.s.Marshal(ctx, from.State())
if err != nil {
return err
return st, err
}
dockerfileDef = contextDef
}
@@ -615,15 +616,15 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
if dockerfile.Exists() {
content, err := dockerfile.String()
if err != nil {
return err
return st, err
}
dockerfileDef, err = p.s.Scratch().Set(
dockerfileDef, err = p.s.Marshal(ctx,
llb.Scratch().File(
llb.Mkfile("/Dockerfile", 0644, []byte(content)),
),
).Def(ctx)
)
if err != nil {
return err
return st, err
}
if contextDef == nil {
contextDef = dockerfileDef
@@ -642,7 +643,7 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
if dockerfilePath := op.Lookup("dockerfilePath"); dockerfilePath.Exists() {
filename, err := dockerfilePath.String()
if err != nil {
return err
return st, err
}
req.FrontendOpt["filename"] = filename
}
@@ -657,7 +658,7 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
return nil
})
if err != nil {
return err
return st, err
}
}
@@ -671,7 +672,7 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
return nil
})
if err != nil {
return err
return st, err
}
}
@@ -679,13 +680,13 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
p := []string{}
list, err := platforms.List()
if err != nil {
return err
return st, err
}
for _, platform := range list {
s, err := platform.String()
if err != nil {
return err
return st, err
}
p = append(p, s)
}
@@ -700,65 +701,51 @@ func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value) error {
res, err := p.s.SolveRequest(ctx, req)
if err != nil {
return err
return st, err
}
st, err := res.ToState()
if err != nil {
return err
}
p.fs = p.fs.Set(st)
return nil
return res.ToState()
}
func (p *Pipeline) WriteFile(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) WriteFile(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
content, err := op.Get("content").String()
if err != nil {
return err
return st, err
}
dest, err := op.Get("dest").String()
if err != nil {
return err
return st, err
}
mode, err := op.Get("mode").Int64()
if err != nil {
return err
return st, err
}
p.fs = p.fs.Change(func(st llb.State) llb.State {
return st.File(
llb.Mkfile(dest, fs.FileMode(mode), []byte(content)),
llb.WithCustomName(p.vertexNamef("WriteFile %s", dest)),
)
})
return nil
return st.File(
llb.Mkfile(dest, fs.FileMode(mode), []byte(content)),
llb.WithCustomName(p.vertexNamef("WriteFile %s", dest)),
), nil
}
func (p *Pipeline) Mkdir(ctx context.Context, op *compiler.Value) error {
func (p *Pipeline) Mkdir(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
path, err := op.Get("path").String()
if err != nil {
return err
return st, err
}
dir, err := op.Get("dir").String()
if err != nil {
return err
return st, err
}
mode, err := op.Get("mode").Int64()
if err != nil {
return err
return st, err
}
p.fs = p.fs.Change(func(st llb.State) llb.State {
return st.Dir(dir).File(
llb.Mkdir(path, fs.FileMode(mode)),
llb.WithCustomName(p.vertexNamef("Mkdir %s", path)),
)
})
return nil
return st.Dir(dir).File(
llb.Mkdir(path, fs.FileMode(mode)),
llb.WithCustomName(p.vertexNamef("Mkdir %s", path)),
), nil
}


@@ -3,7 +3,9 @@ package dagger
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
bk "github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
@@ -16,8 +18,6 @@ import (
"github.com/rs/zerolog/log"
)
// Polyfill for buildkit gateway client
// Use instead of bkgw.Client
type Solver struct {
events chan *bk.SolveStatus
control *bk.Client
@@ -32,15 +32,13 @@ func NewSolver(control *bk.Client, gw bkgw.Client, events chan *bk.SolveStatus)
}
}
func (s Solver) FS(input llb.State) FS {
return FS{
s: s,
input: input,
func (s Solver) Marshal(ctx context.Context, st llb.State) (*bkpb.Definition, error) {
// FIXME: do not hardcode the platform
def, err := st.Marshal(ctx, llb.LinuxAmd64)
if err != nil {
return nil, err
}
}
func (s Solver) Scratch() FS {
return s.FS(llb.Scratch())
return def.ToPB(), nil
}
func (s Solver) SessionID() string {
@@ -78,7 +76,7 @@ func (s Solver) SolveRequest(ctx context.Context, req bkgw.SolveRequest) (bkgw.R
// Solve will block until the state is solved and returns a Reference.
func (s Solver) Solve(ctx context.Context, st llb.State) (bkgw.Reference, error) {
// marshal llb
def, err := st.Marshal(ctx, llb.LinuxAmd64)
def, err := s.Marshal(ctx, st)
if err != nil {
return nil, err
}
@@ -96,7 +94,7 @@ func (s Solver) Solve(ctx context.Context, st llb.State) (bkgw.Reference, error)
// call solve
return s.SolveRequest(ctx, bkgw.SolveRequest{
Definition: def.ToPB(),
Definition: def,
// makes Solve() to block until LLB graph is solved. otherwise it will
// return result (that you can for example use for next build) that
@@ -110,7 +108,7 @@ func (s Solver) Solve(ctx context.Context, st llb.State) (bkgw.Reference, error)
// within buildkit from the Control API. Ideally the Gateway API should allow to
// Export directly.
func (s Solver) Export(ctx context.Context, st llb.State, output bk.ExportEntry) (*bk.SolveResponse, error) {
def, err := st.Marshal(ctx, llb.LinuxAmd64)
def, err := s.Marshal(ctx, st)
if err != nil {
return nil, err
}
@@ -124,6 +122,8 @@ func (s Solver) Export(ctx context.Context, st llb.State, output bk.ExportEntry)
ch := make(chan *bk.SolveStatus)
// Forward this build session events to the main events channel, for logging
// purposes.
go func() {
for event := range ch {
s.events <- event
@@ -132,7 +132,7 @@ func (s Solver) Export(ctx context.Context, st llb.State, output bk.ExportEntry)
return s.control.Build(ctx, opts, "", func(ctx context.Context, c bkgw.Client) (*bkgw.Result, error) {
return c.Solve(ctx, bkgw.SolveRequest{
Definition: def.ToPB(),
Definition: def,
})
}, ch)
}
@@ -143,7 +143,7 @@ type llbOp struct {
OpMetadata bkpb.OpMetadata
}
func dumpLLB(def *llb.Definition) ([]byte, error) {
func dumpLLB(def *bkpb.Definition) ([]byte, error) {
ops := make([]llbOp, 0, len(def.Def))
for _, dt := range def.Def {
var op bkpb.Op
@@ -156,3 +156,22 @@ func dumpLLB(def *llb.Definition) ([]byte, error) {
}
return json.Marshal(ops)
}
// A helper to remove noise from buildkit error messages.
// FIXME: Obviously a cleaner solution would be nice.
func bkCleanError(err error) error {
noise := []string{
"executor failed running ",
"buildkit-runc did not terminate successfully",
"rpc error: code = Unknown desc = ",
"failed to solve: ",
}
msg := err.Error()
for _, s := range noise {
msg = strings.ReplaceAll(msg, s, "")
}
return errors.New(msg)
}


@@ -2,48 +2,14 @@ package stdlib
import (
"embed"
"fmt"
"io/fs"
"path"
"path/filepath"
cueload "cuelang.org/go/cue/load"
)
// FS contains the filesystem of the stdlib.
//go:embed **/*.cue **/*/*.cue
var FS embed.FS
var (
// FS contains the filesystem of the stdlib.
//go:embed **/*.cue **/*/*.cue
FS embed.FS
const (
stdlibPackageName = "dagger.io"
PackageName = "dagger.io"
Path = path.Join("cue.mod", "pkg", PackageName)
)
func Overlay(prefixPath string) (map[string]cueload.Source, error) {
overlay := map[string]cueload.Source{}
err := fs.WalkDir(FS, ".", func(p string, entry fs.DirEntry, err error) error {
if err != nil {
return err
}
if !entry.Type().IsRegular() {
return nil
}
if filepath.Ext(entry.Name()) != ".cue" {
return nil
}
contents, err := FS.ReadFile(p)
if err != nil {
return fmt.Errorf("%s: %w", p, err)
}
overlayPath := path.Join(prefixPath, "cue.mod", "pkg", stdlibPackageName, p)
overlay[overlayPath] = cueload.FromBytes(contents)
return nil
})
return overlay, err
}