Merge pull request #115 from blocklayerhq/slim
Simplify runtime code by removing layers of abstraction
This commit is contained in:
commit
ec56160307
27
ARCHITECTURE.md
Normal file
27
ARCHITECTURE.md
Normal file
@ -0,0 +1,27 @@
|
||||
# The Dagger architecture
|
||||
|
||||
This document provides details on the internals of Dagger, key design decisions and the rationale behind them.
|
||||
|
||||
## What is a DAG?
|
||||
|
||||
A DAG is the basic unit of programming in dagger.
|
||||
It is a special kind of program which runs as a aipeline of inter-connected computing nodes running in parallel, instead of a sequence of operations to be run by a single node.
|
||||
|
||||
DAGs are a powerful way to automate various parts of an application delivery workflow:
|
||||
build, test, deploy, generate configuration, enforce policies, publish artifacts, etc.
|
||||
|
||||
The DAG architecture has many benefits:
|
||||
|
||||
- Because DAGs are made of nodes executing in parallel, they are easy to scale.
|
||||
- Because all inputs and outputs are snapshotted and content-addressed, DAGs
|
||||
can easily be made repeatable, can be cached aggressively, and can be replayed
|
||||
at will.
|
||||
- Because nodes are executed by the same container engine as docker-build, DAGs
|
||||
can be developed using any language or technology capable of running in a docker.
|
||||
Dockerfiles and docker images are natively supported for maximum compatibility.
|
||||
- Because DAGs are programmed declaratively with a powerful configuration language,
|
||||
they are much easier to test, debug and refactor than traditional programming languages.
|
||||
|
||||
To execute a DAG, the dagger runtime JIT-compiles it to a low-level format called llb, and executes it with buildkit. Think of buildkit as a specialized VM for running compute graphs; and dagger as a complete programming environment for that VM.
|
||||
|
||||
The tradeoff for all those wonderful features is that a DAG architecture cannot be used for all software: only software than can be run as a pipeline.
|
60
dagger/cc/cc_test.go
Normal file
60
dagger/cc/cc_test.go
Normal file
@ -0,0 +1,60 @@
|
||||
package cc
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test that a non-existing field is detected correctly
|
||||
func TestFieldNotExist(t *testing.T) {
|
||||
root, err := Compile("test.cue", `foo: "bar"`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v := root.Get("foo"); !v.Exists() {
|
||||
// value should exist
|
||||
t.Fatal(v)
|
||||
}
|
||||
if v := root.Get("bar"); v.Exists() {
|
||||
// value should NOT exist
|
||||
t.Fatal(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that a non-existing definition is detected correctly
|
||||
func TestDefNotExist(t *testing.T) {
|
||||
root, err := Compile("test.cue", `foo: #bla: "bar"`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v := root.Get("foo.#bla"); !v.Exists() {
|
||||
// value should exist
|
||||
t.Fatal(v)
|
||||
}
|
||||
if v := root.Get("foo.#nope"); v.Exists() {
|
||||
// value should NOT exist
|
||||
t.Fatal(v)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimple(t *testing.T) {
|
||||
_, err := EmptyStruct()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSON(t *testing.T) {
|
||||
v, err := Compile("", `foo: hello: "world"`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b1 := v.JSON()
|
||||
if string(b1) != `{"foo":{"hello":"world"}}` {
|
||||
t.Fatal(b1)
|
||||
}
|
||||
// Reproduce a bug where Value.Get().JSON() ignores Get()
|
||||
b2 := v.Get("foo").JSON()
|
||||
if string(b2) != `{"hello":"world"}` {
|
||||
t.Fatal(b2)
|
||||
}
|
||||
}
|
@ -65,11 +65,6 @@ func (v *Value) Len() cue.Value {
|
||||
return v.val.Len()
|
||||
}
|
||||
|
||||
// Proxy function to the underlying cue.Value
|
||||
func (v *Value) List() (cue.Iterator, error) {
|
||||
return v.val.List()
|
||||
}
|
||||
|
||||
// Proxy function to the underlying cue.Value
|
||||
func (v *Value) Fields() (*cue.Iterator, error) {
|
||||
return v.val.Fields()
|
||||
@ -105,8 +100,21 @@ func (v *Value) Decode(x interface{}) error {
|
||||
return v.val.Decode(x)
|
||||
}
|
||||
|
||||
func (v *Value) List() ([]*Value, error) {
|
||||
l := []*Value{}
|
||||
it, err := v.val.List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for it.Next() {
|
||||
l = append(l, v.Wrap(it.Value()))
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// FIXME: deprecate to simplify
|
||||
func (v *Value) RangeList(fn func(int, *Value) error) error {
|
||||
it, err := v.List()
|
||||
it, err := v.val.List()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -133,28 +141,6 @@ func (v *Value) RangeStruct(fn func(string, *Value) error) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Finalize a value using the given spec. This means:
|
||||
// 1. Check that the value matches the spec.
|
||||
// 2. Merge the value and the spec, and return the result.
|
||||
func (v *Value) Finalize(spec *Value) (*Value, error) {
|
||||
cc.Lock()
|
||||
unified := spec.val.Unify(v.val)
|
||||
cc.Unlock()
|
||||
// FIXME: temporary debug message, remove before merging.
|
||||
// fmt.Printf("Finalize:\n spec=%v\n v=%v\n unified=%v", spec.val, v.val, unified)
|
||||
|
||||
// OPTION 1: unfinished fields should pass, but don't
|
||||
// if err := unified.Validate(cue.Concrete(true)); err != nil {
|
||||
// OPTION 2: missing fields should fail, but don't
|
||||
// We choose option 2 for now, because it's easier to layer a
|
||||
// fix on top (we access individual fields so have an opportunity
|
||||
// to return an error if they are not there).
|
||||
if err := unified.Validate(cue.Final()); err != nil {
|
||||
return nil, Err(err)
|
||||
}
|
||||
return v.Merge(spec)
|
||||
}
|
||||
|
||||
// FIXME: receive string path?
|
||||
func (v *Value) Merge(x interface{}, path ...string) (*Value, error) {
|
||||
if xval, ok := x.(*Value); ok {
|
||||
|
@ -93,10 +93,7 @@ func (c *Client) buildfn(ctx context.Context, env *Env, ch chan *bk.SolveStatus,
|
||||
lg := log.Ctx(ctx)
|
||||
|
||||
// Scan local dirs to grant access
|
||||
localdirs, err := env.LocalDirs(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "scan local dirs")
|
||||
}
|
||||
localdirs := env.LocalDirs()
|
||||
for label, dir := range localdirs {
|
||||
abs, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
|
@ -1,101 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type Component struct {
|
||||
// Source value for the component, without spec merged
|
||||
// eg. `{ string, #dagger: compute: [{do:"fetch-container", ...}]}`
|
||||
v *cc.Value
|
||||
}
|
||||
|
||||
func NewComponent(v *cc.Value) (*Component, error) {
|
||||
if !v.Exists() {
|
||||
// Component value does not exist
|
||||
return nil, ErrNotExist
|
||||
}
|
||||
if !v.Get("#dagger").Exists() {
|
||||
// Component value exists but has no `#dagger` definition
|
||||
return nil, ErrNotExist
|
||||
}
|
||||
// Validate & merge with spec
|
||||
final, err := v.Finalize(spec.Get("#Component"))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid component")
|
||||
}
|
||||
return &Component{
|
||||
v: final,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Component) Value() *cc.Value {
|
||||
return c.v
|
||||
}
|
||||
|
||||
// Return the contents of the "#dagger" annotation.
|
||||
func (c *Component) Config() *cc.Value {
|
||||
return c.Value().Get("#dagger")
|
||||
}
|
||||
|
||||
// Return this component's compute script.
|
||||
func (c *Component) ComputeScript() (*Script, error) {
|
||||
return newScript(c.Config().Get("compute"))
|
||||
}
|
||||
|
||||
// Return a list of local dirs required to compute this component.
|
||||
// (Scanned from the arg `dir` of operations `do: "local"` in the
|
||||
// compute script.
|
||||
func (c *Component) LocalDirs(ctx context.Context) (map[string]string, error) {
|
||||
s, err := c.ComputeScript()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.LocalDirs(ctx)
|
||||
}
|
||||
|
||||
// Compute the configuration for this component.
|
||||
//
|
||||
// Difference with Execute:
|
||||
//
|
||||
// 1. Always start with an empty fs state (Execute may receive any state as input)
|
||||
// 2. Always solve at the end (Execute is lazy)
|
||||
//
|
||||
func (c *Component) Compute(ctx context.Context, s Solver, out *Fillable) (FS, error) {
|
||||
fs, err := c.Execute(ctx, s.Scratch(), out)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
|
||||
// Force a `Solve()` in case it hasn't been called earlier.
|
||||
// If the FS is already solved, this is a noop.
|
||||
_, err = fs.Solve(ctx)
|
||||
return fs, err
|
||||
}
|
||||
|
||||
// A component implements the Executable interface by returning its
|
||||
// compute script.
|
||||
// See cc.Value.Executable().
|
||||
func (c *Component) Execute(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
script, err := c.ComputeScript()
|
||||
if err != nil {
|
||||
// If the component has no script, then do not fail.
|
||||
if os.IsNotExist(err) {
|
||||
return fs, nil
|
||||
}
|
||||
return fs, err
|
||||
}
|
||||
return script.Execute(ctx, fs, out)
|
||||
}
|
||||
|
||||
func (c *Component) Walk(ctx context.Context, fn func(*Op) error) error {
|
||||
script, err := c.ComputeScript()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return script.Walk(ctx, fn)
|
||||
}
|
@ -1,114 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
func TestComponentNotExist(t *testing.T) {
|
||||
root, err := cc.Compile("root.cue", `
|
||||
foo: hello: "world"
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = NewComponent(root.Get("bar")) // non-existent key
|
||||
if err != ErrNotExist {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = NewComponent(root.Get("foo")) // non-existent #dagger
|
||||
if err != ErrNotExist {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadEmptyComponent(t *testing.T) {
|
||||
root, err := cc.Compile("root.cue", `
|
||||
foo: #dagger: {}
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = NewComponent(root.Get("foo"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that default values in spec are applied at the component level
|
||||
// See issue #19
|
||||
func TestComponentDefaults(t *testing.T) {
|
||||
v, err := cc.Compile("", `
|
||||
#dagger: compute: [
|
||||
{
|
||||
do: "fetch-container"
|
||||
ref: "busybox"
|
||||
},
|
||||
{
|
||||
do: "exec"
|
||||
args: ["sh", "-c", """
|
||||
echo hello > /tmp/out
|
||||
"""]
|
||||
// dir: "/"
|
||||
}
|
||||
]
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c, err := NewComponent(v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Issue #19 is triggered by:
|
||||
// 1. Compile component
|
||||
// 2. Get compute script from component
|
||||
// 3. Walk script
|
||||
s, err := c.ComputeScript()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.Walk(context.TODO(), func(op *Op) error {
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateEmptyComponent(t *testing.T) {
|
||||
v, err := cc.Compile("", "#dagger: compute: _")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = NewComponent(v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSimpleComponent(t *testing.T) {
|
||||
v, err := cc.Compile("", `hello: "world", #dagger: { compute: [{do:"local",dir:"foo"}]}`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c, err := NewComponent(v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s, err := c.ComputeScript()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n := 0
|
||||
if err := s.Walk(context.TODO(), func(op *Op) error {
|
||||
n++
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 1 {
|
||||
t.Fatal(s.v)
|
||||
}
|
||||
}
|
56
dagger/dagger_test.go
Normal file
56
dagger/dagger_test.go
Normal file
@ -0,0 +1,56 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
func TestLocalDirs(t *testing.T) {
|
||||
env := mkEnv(t,
|
||||
`#dagger: compute: [
|
||||
{
|
||||
do: "local"
|
||||
dir: "bar"
|
||||
}
|
||||
]`,
|
||||
`dir: #dagger: compute: [
|
||||
{
|
||||
do: "local"
|
||||
dir: "foo"
|
||||
}
|
||||
]`,
|
||||
)
|
||||
dirs := env.LocalDirs()
|
||||
if len(dirs) != 2 {
|
||||
t.Fatal(dirs)
|
||||
}
|
||||
if _, ok := dirs["foo"]; !ok {
|
||||
t.Fatal(dirs)
|
||||
}
|
||||
if _, ok := dirs["bar"]; !ok {
|
||||
t.Fatal(dirs)
|
||||
}
|
||||
}
|
||||
|
||||
func mkEnv(t *testing.T, updater, input string) *Env {
|
||||
env, err := NewEnv()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
u, err := cc.Compile("updater.cue", updater)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := env.SetUpdater(u); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
i, err := cc.Compile("input.cue", input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := env.SetInput(i); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return env
|
||||
}
|
242
dagger/env.go
242
dagger/env.go
@ -18,8 +18,8 @@ type Env struct {
|
||||
// FIXME: embed update script in base as '#update' ?
|
||||
// FIXME: simplify Env by making it single layer? Each layer is one env.
|
||||
|
||||
// Script to update the base configuration
|
||||
updater *Script
|
||||
// How to update the base configuration
|
||||
updater *cc.Value
|
||||
|
||||
// Layer 1: base configuration
|
||||
base *cc.Value
|
||||
@ -34,36 +34,20 @@ type Env struct {
|
||||
state *cc.Value
|
||||
}
|
||||
|
||||
func (env *Env) Updater() *Script {
|
||||
func (env *Env) Updater() *cc.Value {
|
||||
return env.updater
|
||||
}
|
||||
|
||||
// Set the updater script for this environment.
|
||||
// u may be:
|
||||
// - A compiled script: *Script
|
||||
// - A compiled value: *cc.Value
|
||||
// - A cue source: string, []byte, io.Reader
|
||||
func (env *Env) SetUpdater(u interface{}) error {
|
||||
if v, ok := u.(*cc.Value); ok {
|
||||
updater, err := NewScript(v)
|
||||
func (env *Env) SetUpdater(v *cc.Value) error {
|
||||
if v == nil {
|
||||
var err error
|
||||
v, err = cc.Compile("", "[]")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "invalid updater script")
|
||||
return err
|
||||
}
|
||||
env.updater = updater
|
||||
return nil
|
||||
}
|
||||
if updater, ok := u.(*Script); ok {
|
||||
env.updater = updater
|
||||
return nil
|
||||
}
|
||||
if u == nil {
|
||||
u = "[]"
|
||||
}
|
||||
updater, err := CompileScript("updater", u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env.updater = updater
|
||||
env.updater = v
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -92,24 +76,17 @@ func (env *Env) Input() *cc.Value {
|
||||
return env.input
|
||||
}
|
||||
|
||||
func (env *Env) SetInput(i interface{}) error {
|
||||
if input, ok := i.(*cc.Value); ok {
|
||||
return env.set(
|
||||
env.base,
|
||||
input,
|
||||
env.output,
|
||||
)
|
||||
}
|
||||
func (env *Env) SetInput(i *cc.Value) error {
|
||||
if i == nil {
|
||||
i = "{}"
|
||||
}
|
||||
input, err := cc.Compile("input", i)
|
||||
if err != nil {
|
||||
return err
|
||||
var err error
|
||||
i, err = cc.EmptyStruct()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return env.set(
|
||||
env.base,
|
||||
input,
|
||||
i,
|
||||
env.output,
|
||||
)
|
||||
}
|
||||
@ -117,23 +94,86 @@ func (env *Env) SetInput(i interface{}) error {
|
||||
// Update the base configuration
|
||||
func (env *Env) Update(ctx context.Context, s Solver) error {
|
||||
// execute updater script
|
||||
src, err := env.updater.Execute(ctx, s.Scratch(), nil)
|
||||
if err != nil {
|
||||
p := NewPipeline(s, nil)
|
||||
if err := p.Do(ctx, env.updater); err != nil {
|
||||
return err
|
||||
}
|
||||
// load cue files produced by updater
|
||||
// FIXME: BuildAll() to force all files (no required package..)
|
||||
base, err := CueBuild(ctx, src)
|
||||
base, err := CueBuild(ctx, p.FS())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "base config")
|
||||
}
|
||||
final, err := applySpec(base)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Commit
|
||||
return env.set(
|
||||
base,
|
||||
final,
|
||||
env.input,
|
||||
env.output,
|
||||
)
|
||||
}
|
||||
|
||||
// Scan the env config for compute scripts, and merge the spec over them,
|
||||
// for validation and default value expansion.
|
||||
// This is done once when loading the env configuration, as opposed to dynamically
|
||||
// during compute like in previous versions. Hopefully this will improve performance.
|
||||
//
|
||||
// Also note that performance was improved DRASTICALLY by splitting the #Component spec
|
||||
// into individual #ComputableStruct, #ComputableString etc. It appears that it is massively
|
||||
// faster to check for the type in Go, then apply the correct spec, than rely on a cue disjunction.
|
||||
//
|
||||
// FIXME: re-enable support for scalar types beyond string.
|
||||
//
|
||||
// FIXME: remove dependency on #Component def so it can be deprecated.
|
||||
func applySpec(base *cc.Value) (*cc.Value, error) {
|
||||
if os.Getenv("NO_APPLY_SPEC") != "" {
|
||||
return base, nil
|
||||
}
|
||||
// Merge the spec to validate & expand buildkit scripts
|
||||
computableStructs := []cue.Path{}
|
||||
computableStrings := []cue.Path{}
|
||||
base.Walk(
|
||||
func(v *cc.Value) bool {
|
||||
compute := v.Get("#dagger.compute")
|
||||
if !compute.Exists() {
|
||||
return true // keep scanning
|
||||
}
|
||||
if _, err := v.String(); err == nil {
|
||||
// computable string
|
||||
computableStrings = append(computableStrings, v.Path())
|
||||
return false
|
||||
}
|
||||
if _, err := v.Struct(); err == nil {
|
||||
// computable struct
|
||||
computableStructs = append(computableStructs, v.Path())
|
||||
return false
|
||||
}
|
||||
return false
|
||||
},
|
||||
nil,
|
||||
)
|
||||
structSpec := spec.Get("#ComputableStruct")
|
||||
for _, target := range computableStructs {
|
||||
newbase, err := base.MergePath(structSpec, target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
base = newbase
|
||||
}
|
||||
stringSpec := spec.Get("#ComputableString")
|
||||
for _, target := range computableStrings {
|
||||
newbase, err := base.MergePath(stringSpec, target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
base = newbase
|
||||
}
|
||||
return base, nil
|
||||
}
|
||||
|
||||
func (env *Env) Base() *cc.Value {
|
||||
return env.base
|
||||
}
|
||||
@ -146,60 +186,44 @@ func (env *Env) Output() *cc.Value {
|
||||
// and return all referenced directory names.
|
||||
// This is used by clients to grant access to local directories when they are referenced
|
||||
// by user-specified scripts.
|
||||
func (env *Env) LocalDirs(ctx context.Context) (map[string]string, error) {
|
||||
lg := log.Ctx(ctx)
|
||||
func (env *Env) LocalDirs() map[string]string {
|
||||
dirs := map[string]string{}
|
||||
lg.Debug().
|
||||
Str("func", "Env.LocalDirs").
|
||||
Str("state", env.state.SourceUnsafe()).
|
||||
Str("updater", env.updater.Value().SourceUnsafe()).
|
||||
Msg("starting")
|
||||
defer func() {
|
||||
lg.Debug().Str("func", "Env.LocalDirs").Interface("result", dirs).Msg("done")
|
||||
}()
|
||||
// 1. Walk env state, scan compute script for each component.
|
||||
for _, c := range env.Components() {
|
||||
lg.Debug().
|
||||
Str("func", "Env.LocalDirs").
|
||||
Str("component", c.Value().Path().String()).
|
||||
Msg("scanning next component for local dirs")
|
||||
cdirs, err := c.LocalDirs(ctx)
|
||||
if err != nil {
|
||||
return dirs, err
|
||||
}
|
||||
for k, v := range cdirs {
|
||||
dirs[k] = v
|
||||
}
|
||||
localdirs := func(code ...*cc.Value) {
|
||||
Analyze(
|
||||
func(op *cc.Value) error {
|
||||
do, err := op.Get("do").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if do != "local" {
|
||||
return nil
|
||||
}
|
||||
dir, err := op.Get("dir").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dirs[dir] = dir
|
||||
return nil
|
||||
},
|
||||
code...,
|
||||
)
|
||||
}
|
||||
// 2. Scan updater script
|
||||
updirs, err := env.updater.LocalDirs(ctx)
|
||||
if err != nil {
|
||||
return dirs, err
|
||||
}
|
||||
for k, v := range updirs {
|
||||
dirs[k] = v
|
||||
}
|
||||
return dirs, nil
|
||||
}
|
||||
|
||||
// Return a list of components in the env config.
|
||||
func (env *Env) Components() []*Component {
|
||||
components := []*Component{}
|
||||
// 1. Scan the environment state
|
||||
env.State().Walk(
|
||||
func(v *cc.Value) bool {
|
||||
c, err := NewComponent(v)
|
||||
if os.IsNotExist(err) {
|
||||
compute := v.Get("#dagger.compute")
|
||||
if !compute.Exists() {
|
||||
// No compute script
|
||||
return true
|
||||
}
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
components = append(components, c)
|
||||
return false // skip nested components, as cueflow does not allow them
|
||||
localdirs(compute)
|
||||
return false // no nested executables
|
||||
},
|
||||
nil,
|
||||
)
|
||||
return components
|
||||
// 2. Scan the environment updater
|
||||
localdirs(env.Updater())
|
||||
return dirs
|
||||
}
|
||||
|
||||
// FIXME: this is just a 3-way merge. Add var args to cc.Value.Merge.
|
||||
@ -316,14 +340,18 @@ func (env *Env) Compute(ctx context.Context, s Solver) error {
|
||||
},
|
||||
}
|
||||
// Cueflow match func
|
||||
flowMatchFn := func(v cue.Value) (cueflow.Runner, error) {
|
||||
if _, err := NewComponent(cc.Wrap(v, flowInst)); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// Not a component: skip
|
||||
return nil, nil
|
||||
}
|
||||
flowMatchFn := func(flowVal cue.Value) (cueflow.Runner, error) {
|
||||
v := cc.Wrap(flowVal, flowInst)
|
||||
compute := v.Get("#dagger.compute")
|
||||
if !compute.Exists() {
|
||||
// No compute script
|
||||
return nil, nil
|
||||
}
|
||||
if _, err := compute.List(); err != nil {
|
||||
// invalid compute script
|
||||
return nil, err
|
||||
}
|
||||
// Cueflow run func:
|
||||
return cueflow.RunnerFunc(func(t *cueflow.Task) error {
|
||||
lg := lg.
|
||||
With().
|
||||
@ -331,24 +359,22 @@ func (env *Env) Compute(ctx context.Context, s Solver) error {
|
||||
Logger()
|
||||
ctx := lg.WithContext(ctx)
|
||||
|
||||
c, err := NewComponent(cc.Wrap(t.Value(), flowInst))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, dep := range t.Dependencies() {
|
||||
lg.
|
||||
Debug().
|
||||
Str("dependency", dep.Path().String()).
|
||||
Msg("dependency detected")
|
||||
}
|
||||
if _, err := c.Compute(ctx, s, NewFillable(t)); err != nil {
|
||||
lg.
|
||||
Error().
|
||||
Err(err).
|
||||
Msg("component failed")
|
||||
return err
|
||||
v := cc.Wrap(t.Value(), flowInst)
|
||||
p := NewPipeline(s, NewFillable(t))
|
||||
err := p.Do(ctx, v)
|
||||
if err == ErrAbortExecution {
|
||||
// Pipeline was partially executed
|
||||
// FIXME: tell user which inputs are missing (by inspecting references)
|
||||
lg.Warn().Msg("pipeline was partially executed because of missing inputs")
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
}), nil
|
||||
}
|
||||
// Orchestrate execution with cueflow
|
||||
@ -362,9 +388,3 @@ func (env *Env) Compute(ctx context.Context, s Solver) error {
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
// Return the component at the specified path in the config, eg. `www`
|
||||
// If the component does not exist, os.ErrNotExist is returned.
|
||||
func (env *Env) Component(target string) (*Component, error) {
|
||||
return NewComponent(env.state.Get(target))
|
||||
}
|
||||
|
@ -1,73 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
func TestSimpleEnvSet(t *testing.T) {
|
||||
env, err := NewEnv()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := env.SetInput(`hello: "world"`); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hello, err := env.State().Get("hello").String()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hello != "world" {
|
||||
t.Fatal(hello)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleEnvSetFromInputValue(t *testing.T) {
|
||||
env, err := NewEnv()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
v, err := cc.Compile("", `hello: "world"`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := env.SetInput(v); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hello, err := env.State().Get("hello").String()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hello != "world" {
|
||||
t.Fatal(hello)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnvInputComponent(t *testing.T) {
|
||||
env, err := NewEnv()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
v, err := cc.Compile("", `foo: #dagger: compute: [{do:"local",dir:"."}]`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := env.SetInput(v); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
localdirs, err := env.LocalDirs(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(localdirs) != 1 {
|
||||
t.Fatal(localdirs)
|
||||
}
|
||||
if dir, ok := localdirs["."]; !ok || dir != "." {
|
||||
t.Fatal(localdirs)
|
||||
}
|
||||
}
|
@ -5,38 +5,20 @@ package dagger
|
||||
var DaggerSpec = `
|
||||
package dagger
|
||||
|
||||
// A DAG is the basic unit of programming in dagger.
|
||||
// It is a special kind of program which runs as a pipeline of computing nodes running in parallel,
|
||||
// instead of a sequence of operations to be run by a single node.
|
||||
//
|
||||
// It is a powerful way to automate various parts of an application delivery workflow:
|
||||
// build, test, deploy, generate configuration, enforce policies, publish artifacts, etc.
|
||||
//
|
||||
// The DAG architecture has many benefits:
|
||||
// - Because DAGs are made of nodes executing in parallel, they are easy to scale.
|
||||
// - Because all inputs and outputs are snapshotted and content-addressed, DAGs
|
||||
// can easily be made repeatable, can be cached aggressively, and can be replayed
|
||||
// at will.
|
||||
// - Because nodes are executed by the same container engine as docker-build, DAGs
|
||||
// can be developed using any language or technology capable of running in a docker.
|
||||
// Dockerfiles and docker images are natively supported for maximum compatibility.
|
||||
//
|
||||
// - Because DAGs are programmed declaratively with a powerful configuration language,
|
||||
// they are much easier to test, debug and refactor than traditional programming languages.
|
||||
//
|
||||
// To execute a DAG, the dagger runtime JIT-compiles it to a low-level format called
|
||||
// llb, and executes it with buildkit.
|
||||
// Think of buildkit as a specialized VM for running compute graphs; and dagger as
|
||||
// a complete programming environment for that VM.
|
||||
//
|
||||
// The tradeoff for all those wonderful features is that a DAG architecture cannot be used
|
||||
// for all software: only software than can be run as a pipeline.
|
||||
//
|
||||
|
||||
// A dagger component is a configuration value augmented
|
||||
// by scripts defining how to compute it, present it to a user,
|
||||
// encrypt it, etc.
|
||||
|
||||
#ComputableStruct: {
|
||||
#dagger: compute: [...#Op]
|
||||
...
|
||||
}
|
||||
|
||||
#ComputableString: {
|
||||
string
|
||||
#dagger: compute: [...#Op]
|
||||
}
|
||||
|
||||
#Component: {
|
||||
// Match structs
|
||||
#dagger: #ComponentConfig
|
||||
|
@ -1,7 +1,6 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@ -22,10 +21,7 @@ func TestEnvInputFlag(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
localdirs, err := env.LocalDirs(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
localdirs := env.LocalDirs()
|
||||
if len(localdirs) != 1 {
|
||||
t.Fatal(localdirs)
|
||||
}
|
||||
|
@ -1,67 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
type Mount struct {
|
||||
dest string
|
||||
v *cc.Value
|
||||
}
|
||||
|
||||
func newMount(v *cc.Value, dest string) (*Mount, error) {
|
||||
if !v.Exists() {
|
||||
return nil, ErrNotExist
|
||||
}
|
||||
return &Mount{
|
||||
v: v,
|
||||
dest: dest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (mnt *Mount) LLB(ctx context.Context, s Solver) (llb.RunOption, error) {
|
||||
if err := spec.Validate(mnt.v, "#MountTmp"); err == nil {
|
||||
return llb.AddMount(
|
||||
mnt.dest,
|
||||
llb.Scratch(),
|
||||
llb.Tmpfs(),
|
||||
), nil
|
||||
}
|
||||
if err := spec.Validate(mnt.v, "#MountCache"); err == nil {
|
||||
return llb.AddMount(
|
||||
mnt.dest,
|
||||
llb.Scratch(),
|
||||
llb.AsPersistentCacheDir(
|
||||
mnt.v.Path().String(),
|
||||
llb.CacheMountShared,
|
||||
)), nil
|
||||
}
|
||||
|
||||
// Compute source component or script, discarding fs writes & output value
|
||||
from, err := newExecutable(mnt.v.Lookup("from"))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "from")
|
||||
}
|
||||
fromFS, err := from.Execute(ctx, s.Scratch(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// possibly construct mount options for LLB from
|
||||
var mo []llb.MountOption
|
||||
// handle "path" option
|
||||
if p := mnt.v.Lookup("path"); p.Exists() {
|
||||
ps, err := p.String()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mo = append(mo, llb.SourcePath(ps))
|
||||
}
|
||||
|
||||
return llb.AddMount(mnt.dest, fromFS.LLB(), mo...), nil
|
||||
}
|
349
dagger/op.go
349
dagger/op.go
@ -1,349 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
type Op struct {
|
||||
v *cc.Value
|
||||
}
|
||||
|
||||
func NewOp(v *cc.Value) (*Op, error) {
|
||||
final, err := spec.Get("#Op").Merge(v)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid op")
|
||||
}
|
||||
return newOp(final)
|
||||
}
|
||||
|
||||
// Same as newOp, but without spec merge + validation.
|
||||
func newOp(v *cc.Value) (*Op, error) {
|
||||
// Exists() appears to be buggy, is it needed here?
|
||||
if !v.Exists() {
|
||||
return nil, ErrNotExist
|
||||
}
|
||||
return &Op{
|
||||
v: v,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (op *Op) Execute(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
action, err := op.Action()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
return action(ctx, fs, out)
|
||||
}
|
||||
|
||||
func (op *Op) Walk(ctx context.Context, fn func(*Op) error) error {
|
||||
lg := log.Ctx(ctx)
|
||||
|
||||
lg.Debug().Interface("v", op.v).Msg("Op.Walk")
|
||||
switch op.Do() {
|
||||
case "copy", "load":
|
||||
if from, err := newExecutable(op.Get("from")); err == nil {
|
||||
if err := from.Walk(ctx, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// FIXME: we tolerate "from" which is not executable
|
||||
case "exec":
|
||||
return op.Get("mount").RangeStruct(func(k string, v *cc.Value) error {
|
||||
if from, err := newExecutable(op.Get("from")); err == nil {
|
||||
if err := from.Walk(ctx, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
// depth first
|
||||
return fn(op)
|
||||
}
|
||||
|
||||
type Action func(context.Context, FS, *Fillable) (FS, error)
|
||||
|
||||
func (op *Op) Do() string {
|
||||
do, err := op.Get("do").String()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return do
|
||||
}
|
||||
|
||||
func (op *Op) Action() (Action, error) {
|
||||
// An empty struct is allowed as a no-op, to be
|
||||
// more tolerant of if() in arrays.
|
||||
if op.v.IsEmptyStruct() {
|
||||
return op.Nothing, nil
|
||||
}
|
||||
switch op.Do() {
|
||||
case "copy":
|
||||
return op.Copy, nil
|
||||
case "exec":
|
||||
return op.Exec, nil
|
||||
case "export":
|
||||
return op.Export, nil
|
||||
case "fetch-container":
|
||||
return op.FetchContainer, nil
|
||||
case "fetch-git":
|
||||
return op.FetchGit, nil
|
||||
case "local":
|
||||
return op.Local, nil
|
||||
case "load":
|
||||
return op.Load, nil
|
||||
case "subdir":
|
||||
return op.Subdir, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid operation: %s", op.v.JSON())
|
||||
}
|
||||
}
|
||||
|
||||
func (op *Op) Subdir(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
// FIXME: this could be more optimized by carrying subdir path as metadata,
|
||||
// and using it in copy, load or mount.
|
||||
|
||||
dir, err := op.Get("dir").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
return fs.Change(func(st llb.State) llb.State {
|
||||
return st.File(llb.Copy(
|
||||
fs.LLB(),
|
||||
dir,
|
||||
"/",
|
||||
&llb.CopyInfo{
|
||||
CopyDirContentsOnly: true,
|
||||
},
|
||||
))
|
||||
}), nil
|
||||
}
|
||||
|
||||
func (op *Op) Copy(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
// Decode copy options
|
||||
src, err := op.Get("src").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
dest, err := op.Get("dest").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
from, err := newExecutable(op.Get("from"))
|
||||
if err != nil {
|
||||
return fs, errors.Wrap(err, "from")
|
||||
}
|
||||
// Compute source component or script, discarding fs writes & output value
|
||||
fromFS, err := from.Execute(ctx, fs.Solver().Scratch(), nil)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
return fs.Change(func(st llb.State) llb.State {
|
||||
return st.File(llb.Copy(
|
||||
fromFS.LLB(),
|
||||
src,
|
||||
dest,
|
||||
// FIXME: allow more configurable llb options
|
||||
// For now we define the following convenience presets:
|
||||
&llb.CopyInfo{
|
||||
CopyDirContentsOnly: true,
|
||||
CreateDestPath: true,
|
||||
AllowWildcard: true,
|
||||
},
|
||||
))
|
||||
}), nil // lazy solve
|
||||
}
|
||||
|
||||
func (op *Op) Nothing(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
return fs, nil
|
||||
}
|
||||
func (op *Op) Local(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
dir, err := op.Get("dir").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
var include []string
|
||||
if err := op.Get("include").Decode(&include); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
return fs.Set(llb.Local(dir, llb.FollowPaths(include))), nil // lazy solve
|
||||
}
|
||||
|
||||
func (op *Op) Exec(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
opts := []llb.RunOption{}
|
||||
var cmd struct {
|
||||
Args []string
|
||||
Env map[string]string
|
||||
Dir string
|
||||
Always bool
|
||||
}
|
||||
|
||||
if err := op.v.Decode(&cmd); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
// marker for status events
|
||||
// FIXME
|
||||
opts = append(opts, llb.WithCustomName(op.v.Path().String()))
|
||||
// args
|
||||
opts = append(opts, llb.Args(cmd.Args))
|
||||
// dir
|
||||
opts = append(opts, llb.Dir(cmd.Dir))
|
||||
// env
|
||||
for k, v := range cmd.Env {
|
||||
opts = append(opts, llb.AddEnv(k, v))
|
||||
}
|
||||
// always?
|
||||
if cmd.Always {
|
||||
cacheBuster, err := randomID(8)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
opts = append(opts, llb.AddEnv("DAGGER_CACHEBUSTER", cacheBuster))
|
||||
}
|
||||
// mounts
|
||||
if mounts := op.v.Lookup("mount"); mounts.Exists() {
|
||||
if err := mounts.RangeStruct(func(k string, v *cc.Value) error {
|
||||
mnt, err := newMount(v, k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opt, err := mnt.LLB(ctx, fs.Solver())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts = append(opts, opt)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
}
|
||||
// --> Execute
|
||||
return fs.Change(func(st llb.State) llb.State {
|
||||
return st.Run(opts...).Root()
|
||||
}), nil // lazy solve
|
||||
}
|
||||
|
||||
func (op *Op) Export(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
source, err := op.Get("source").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
format, err := op.Get("format").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
contents, err := fs.ReadFile(ctx, source)
|
||||
if err != nil {
|
||||
return fs, errors.Wrapf(err, "export %s", source)
|
||||
}
|
||||
switch format {
|
||||
case "string":
|
||||
log.
|
||||
Ctx(ctx).
|
||||
Debug().
|
||||
Bytes("contents", contents).
|
||||
Msg("exporting string")
|
||||
|
||||
if err := out.Fill(string(contents)); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
case "json":
|
||||
var o interface{}
|
||||
o, err := unmarshalAnything(contents, json.Unmarshal)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
|
||||
log.
|
||||
Ctx(ctx).
|
||||
Debug().
|
||||
Interface("contents", o).
|
||||
Msg("exporting json")
|
||||
|
||||
if err := out.Fill(o); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
case "yaml":
|
||||
var o interface{}
|
||||
o, err := unmarshalAnything(contents, yaml.Unmarshal)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
|
||||
log.
|
||||
Ctx(ctx).
|
||||
Debug().
|
||||
Interface("contents", o).
|
||||
Msg("exporting yaml")
|
||||
|
||||
if err := out.Fill(o); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
default:
|
||||
return fs, fmt.Errorf("unsupported export format: %q", format)
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
type unmarshaller func([]byte, interface{}) error
|
||||
|
||||
func unmarshalAnything(data []byte, fn unmarshaller) (interface{}, error) {
|
||||
// unmarshalling a map into interface{} yields an error:
|
||||
// "unsupported Go type for map key (interface {})"
|
||||
// we want to attempt to unmarshal to a map[string]interface{} first
|
||||
var oMap map[string]interface{}
|
||||
if err := fn(data, &oMap); err == nil {
|
||||
return oMap, nil
|
||||
}
|
||||
|
||||
// If the previous attempt didn't work, we might be facing a scalar (e.g.
|
||||
// bool).
|
||||
// Try to unmarshal to interface{} directly.
|
||||
var o interface{}
|
||||
err := fn(data, &o)
|
||||
return o, err
|
||||
}
|
||||
|
||||
func (op *Op) Load(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
from, err := newExecutable(op.Get("from"))
|
||||
if err != nil {
|
||||
return fs, errors.Wrap(err, "load")
|
||||
}
|
||||
fromFS, err := from.Execute(ctx, fs.Solver().Scratch(), nil)
|
||||
if err != nil {
|
||||
return fs, errors.Wrap(err, "load: compute source")
|
||||
}
|
||||
return fs.Set(fromFS.LLB()), nil
|
||||
}
|
||||
|
||||
func (op *Op) FetchContainer(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
ref, err := op.Get("ref").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
return fs.Set(llb.Image(ref)), nil
|
||||
}
|
||||
func (op *Op) FetchGit(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
remote, err := op.Get("remote").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
ref, err := op.Get("ref").String()
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
return fs.Set(llb.Git(remote, ref)), nil // lazy solve
|
||||
}
|
||||
|
||||
func (op *Op) Get(target string) *cc.Value {
|
||||
return op.v.Get(target)
|
||||
}
|
@ -1,58 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
func TestLocalMatch(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
|
||||
src := `do: "local", dir: "foo"`
|
||||
v, err := cc.Compile("", src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
op, err := newOp(v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n := 0
|
||||
err = op.Walk(ctx, func(op *Op) error {
|
||||
n++
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 1 {
|
||||
t.Fatal(n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyMatch(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
|
||||
src := `do: "copy", from: [{do: "local", dir: "foo"}]`
|
||||
v, err := cc.Compile("", src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
op, err := NewOp(v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n := 0
|
||||
err = op.Walk(ctx, func(op *Op) error {
|
||||
n++
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 2 {
|
||||
t.Fatal(n)
|
||||
}
|
||||
}
|
451
dagger/pipeline.go
Normal file
451
dagger/pipeline.go
Normal file
@ -0,0 +1,451 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrAbortExecution = errors.New("execution stopped")
|
||||
)
|
||||
|
||||
// An execution pipeline
|
||||
type Pipeline struct {
|
||||
s Solver
|
||||
fs FS
|
||||
out *Fillable
|
||||
}
|
||||
|
||||
func NewPipeline(s Solver, out *Fillable) *Pipeline {
|
||||
return &Pipeline{
|
||||
s: s,
|
||||
fs: s.Scratch(),
|
||||
out: out,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pipeline) FS() FS {
|
||||
return p.fs
|
||||
}
|
||||
|
||||
func ops(code ...*cc.Value) ([]*cc.Value, error) {
|
||||
ops := []*cc.Value{}
|
||||
// 1. Decode 'code' into a single flat array of operations.
|
||||
for _, x := range code {
|
||||
// 1. attachment array
|
||||
if xops, err := x.Get("#dagger.compute").List(); err == nil {
|
||||
// 'from' has an executable attached
|
||||
ops = append(ops, xops...)
|
||||
continue
|
||||
}
|
||||
// 2. individual op
|
||||
if _, err := x.Get("do").String(); err == nil {
|
||||
ops = append(ops, x)
|
||||
continue
|
||||
}
|
||||
// 3. op array
|
||||
if xops, err := x.List(); err == nil {
|
||||
ops = append(ops, xops...)
|
||||
continue
|
||||
}
|
||||
// 4. error
|
||||
return nil, fmt.Errorf("not executable: %s", x.SourceUnsafe())
|
||||
}
|
||||
return ops, nil
|
||||
}
|
||||
|
||||
func Analyze(fn func(*cc.Value) error, code ...*cc.Value) error {
|
||||
ops, err := ops(code...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, op := range ops {
|
||||
if err := analyzeOp(fn, op); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func analyzeOp(fn func(*cc.Value) error, op *cc.Value) error {
|
||||
if err := fn(op); err != nil {
|
||||
return err
|
||||
}
|
||||
do, err := op.Get("do").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch do {
|
||||
case "load", "copy":
|
||||
return Analyze(fn, op.Get("from"))
|
||||
case "exec":
|
||||
return op.Get("mount").RangeStruct(func(dest string, mnt *cc.Value) error {
|
||||
if from := mnt.Get("from"); from.Exists() {
|
||||
return Analyze(fn, from)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// x may be:
|
||||
// 1) a single operation
|
||||
// 2) an array of operations
|
||||
// 3) a value with an attached array of operations
|
||||
func (p *Pipeline) Do(ctx context.Context, code ...*cc.Value) error {
|
||||
ops, err := ops(code...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// 2. Execute each operation in sequence
|
||||
for idx, op := range ops {
|
||||
// If op not concrete, interrupt without error.
|
||||
// This allows gradual resolution:
|
||||
// compute what you can compute.. leave the rest incomplete.
|
||||
if err := op.IsConcreteR(); err != nil {
|
||||
log.
|
||||
Ctx(ctx).
|
||||
Debug().
|
||||
Str("original_cue_error", err.Error()).
|
||||
Int("op", idx).
|
||||
Msg("script is missing inputs and has not been fully executed")
|
||||
return ErrAbortExecution
|
||||
}
|
||||
if err := p.doOp(ctx, op); err != nil {
|
||||
return err
|
||||
}
|
||||
// Force a buildkit solve request at each operation,
|
||||
// so that errors map to the correct cue path.
|
||||
// FIXME: might as well change FS to make every operation
|
||||
// synchronous.
|
||||
fs, err := p.fs.Solve(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.fs = fs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) doOp(ctx context.Context, op *cc.Value) error {
|
||||
do, err := op.Get("do").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch do {
|
||||
case "copy":
|
||||
return p.Copy(ctx, op)
|
||||
case "exec":
|
||||
return p.Exec(ctx, op)
|
||||
case "export":
|
||||
return p.Export(ctx, op)
|
||||
case "fetch-container":
|
||||
return p.FetchContainer(ctx, op)
|
||||
case "fetch-git":
|
||||
return p.FetchGit(ctx, op)
|
||||
case "local":
|
||||
return p.Local(ctx, op)
|
||||
case "load":
|
||||
return p.Load(ctx, op)
|
||||
case "subdir":
|
||||
return p.Subdir(ctx, op)
|
||||
default:
|
||||
return fmt.Errorf("invalid operation: %s", op.JSON())
|
||||
}
|
||||
}
|
||||
|
||||
// Spawn a temporary pipeline with the same solver.
|
||||
// Output values are discarded: the parent pipeline's values are not modified.
|
||||
func (p *Pipeline) Tmp() *Pipeline {
|
||||
return NewPipeline(p.s, nil)
|
||||
}
|
||||
|
||||
func (p *Pipeline) Subdir(ctx context.Context, op *cc.Value) error {
|
||||
// FIXME: this could be more optimized by carrying subdir path as metadata,
|
||||
// and using it in copy, load or mount.
|
||||
|
||||
dir, err := op.Get("dir").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.fs = p.fs.Change(func(st llb.State) llb.State {
|
||||
return st.File(llb.Copy(
|
||||
p.fs.LLB(),
|
||||
dir,
|
||||
"/",
|
||||
&llb.CopyInfo{
|
||||
CopyDirContentsOnly: true,
|
||||
},
|
||||
))
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) Copy(ctx context.Context, op *cc.Value) error {
|
||||
// Decode copy options
|
||||
src, err := op.Get("src").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dest, err := op.Get("dest").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Execute 'from' in a tmp pipeline, and use the resulting fs
|
||||
from := p.Tmp()
|
||||
if err := from.Do(ctx, op.Get("from")); err != nil {
|
||||
return err
|
||||
}
|
||||
p.fs = p.fs.Change(func(st llb.State) llb.State {
|
||||
return st.File(llb.Copy(
|
||||
from.FS().LLB(),
|
||||
src,
|
||||
dest,
|
||||
// FIXME: allow more configurable llb options
|
||||
// For now we define the following convenience presets:
|
||||
&llb.CopyInfo{
|
||||
CopyDirContentsOnly: true,
|
||||
CreateDestPath: true,
|
||||
AllowWildcard: true,
|
||||
},
|
||||
))
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) Local(ctx context.Context, op *cc.Value) error {
|
||||
dir, err := op.Get("dir").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var include []string
|
||||
if err := op.Get("include").Decode(&include); err != nil {
|
||||
return err
|
||||
}
|
||||
p.fs = p.fs.Set(llb.Local(dir, llb.FollowPaths(include)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) Exec(ctx context.Context, op *cc.Value) error {
|
||||
opts := []llb.RunOption{}
|
||||
var cmd struct {
|
||||
Args []string
|
||||
Env map[string]string
|
||||
Dir string
|
||||
Always bool
|
||||
}
|
||||
|
||||
if err := op.Decode(&cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
// marker for status events
|
||||
// FIXME
|
||||
opts = append(opts, llb.WithCustomName(op.Path().String()))
|
||||
// args
|
||||
opts = append(opts, llb.Args(cmd.Args))
|
||||
// dir
|
||||
opts = append(opts, llb.Dir(cmd.Dir))
|
||||
// env
|
||||
for k, v := range cmd.Env {
|
||||
opts = append(opts, llb.AddEnv(k, v))
|
||||
}
|
||||
// always?
|
||||
// FIXME: initialize once for an entire compute job, to avoid cache misses
|
||||
if cmd.Always {
|
||||
cacheBuster, err := randomID(8)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts = append(opts, llb.AddEnv("DAGGER_CACHEBUSTER", cacheBuster))
|
||||
}
|
||||
// mounts
|
||||
if mounts := op.Lookup("mount"); mounts.Exists() {
|
||||
mntOpts, err := p.mountAll(ctx, mounts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts = append(opts, mntOpts...)
|
||||
}
|
||||
// --> Execute
|
||||
p.fs = p.fs.Change(func(st llb.State) llb.State {
|
||||
return st.Run(opts...).Root()
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) mountAll(ctx context.Context, mounts *cc.Value) ([]llb.RunOption, error) {
|
||||
opts := []llb.RunOption{}
|
||||
err := mounts.RangeStruct(func(dest string, mnt *cc.Value) error {
|
||||
o, err := p.mount(ctx, dest, mnt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts = append(opts, o)
|
||||
return nil
|
||||
})
|
||||
return opts, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) mount(ctx context.Context, dest string, mnt *cc.Value) (llb.RunOption, error) {
|
||||
if s, err := mnt.String(); err == nil {
|
||||
// eg. mount: "/foo": "cache"
|
||||
switch s {
|
||||
case "cache":
|
||||
return llb.AddMount(
|
||||
dest,
|
||||
llb.Scratch(),
|
||||
llb.AsPersistentCacheDir(
|
||||
mnt.Path().String(),
|
||||
llb.CacheMountShared,
|
||||
),
|
||||
), nil
|
||||
case "tmp":
|
||||
return llb.AddMount(
|
||||
dest,
|
||||
llb.Scratch(),
|
||||
llb.Tmpfs(),
|
||||
), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid mount source: %q", s)
|
||||
}
|
||||
}
|
||||
// eg. mount: "/foo": { from: www.source }
|
||||
from := p.Tmp()
|
||||
if err := from.Do(ctx, mnt.Get("from")); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// possibly construct mount options for LLB from
|
||||
var mo []llb.MountOption
|
||||
// handle "path" option
|
||||
if mp := mnt.Lookup("path"); mp.Exists() {
|
||||
mps, err := mp.String()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mo = append(mo, llb.SourcePath(mps))
|
||||
}
|
||||
return llb.AddMount(dest, from.FS().LLB(), mo...), nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) Export(ctx context.Context, op *cc.Value) error {
|
||||
source, err := op.Get("source").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
format, err := op.Get("format").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
contents, err := p.fs.ReadFile(ctx, source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "export %s", source)
|
||||
}
|
||||
switch format {
|
||||
case "string":
|
||||
log.
|
||||
Ctx(ctx).
|
||||
Debug().
|
||||
Bytes("contents", contents).
|
||||
Msg("exporting string")
|
||||
|
||||
if err := p.out.Fill(string(contents)); err != nil {
|
||||
return err
|
||||
}
|
||||
case "json":
|
||||
var o interface{}
|
||||
o, err := unmarshalAnything(contents, json.Unmarshal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.
|
||||
Ctx(ctx).
|
||||
Debug().
|
||||
Interface("contents", o).
|
||||
Msg("exporting json")
|
||||
|
||||
if err := p.out.Fill(o); err != nil {
|
||||
return err
|
||||
}
|
||||
case "yaml":
|
||||
var o interface{}
|
||||
o, err := unmarshalAnything(contents, yaml.Unmarshal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.
|
||||
Ctx(ctx).
|
||||
Debug().
|
||||
Interface("contents", o).
|
||||
Msg("exporting yaml")
|
||||
|
||||
if err := p.out.Fill(o); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported export format: %q", format)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type unmarshaller func([]byte, interface{}) error
|
||||
|
||||
func unmarshalAnything(data []byte, fn unmarshaller) (interface{}, error) {
|
||||
// unmarshalling a map into interface{} yields an error:
|
||||
// "unsupported Go type for map key (interface {})"
|
||||
// we want to attempt to unmarshal to a map[string]interface{} first
|
||||
var oMap map[string]interface{}
|
||||
if err := fn(data, &oMap); err == nil {
|
||||
return oMap, nil
|
||||
}
|
||||
|
||||
// If the previous attempt didn't work, we might be facing a scalar (e.g.
|
||||
// bool).
|
||||
// Try to unmarshal to interface{} directly.
|
||||
var o interface{}
|
||||
err := fn(data, &o)
|
||||
return o, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) Load(ctx context.Context, op *cc.Value) error {
|
||||
// Execute 'from' in a tmp pipeline, and use the resulting fs
|
||||
from := p.Tmp()
|
||||
if err := from.Do(ctx, op.Get("from")); err != nil {
|
||||
return err
|
||||
}
|
||||
p.fs = p.fs.Set(from.FS().LLB())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) FetchContainer(ctx context.Context, op *cc.Value) error {
|
||||
ref, err := op.Get("ref").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// FIXME: preserve docker image metadata
|
||||
p.fs = p.fs.Set(llb.Image(ref))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) FetchGit(ctx context.Context, op *cc.Value) error {
|
||||
remote, err := op.Get("remote").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ref, err := op.Get("ref").String()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.fs = p.fs.Set(llb.Git(remote, ref))
|
||||
return nil
|
||||
}
|
143
dagger/script.go
143
dagger/script.go
@ -1,143 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"cuelang.org/go/cue"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrAbortExecution = errors.New("execution stopped")
|
||||
)
|
||||
|
||||
type Script struct {
|
||||
v *cc.Value
|
||||
}
|
||||
|
||||
// Compile a cue configuration, and load it as a script.
|
||||
// If the cue configuration is invalid, or does not match the script spec,
|
||||
// return an error.
|
||||
func CompileScript(name string, src interface{}) (*Script, error) {
|
||||
v, err := cc.Compile(name, src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewScript(v)
|
||||
}
|
||||
|
||||
func NewScript(v *cc.Value) (*Script, error) {
|
||||
// Validate & merge with spec
|
||||
final, err := v.Finalize(spec.Get("#Script"))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid script")
|
||||
}
|
||||
return newScript(final)
|
||||
}
|
||||
|
||||
// Same as newScript, but without spec merge + validation.
|
||||
func newScript(v *cc.Value) (*Script, error) {
|
||||
if !v.Exists() {
|
||||
return nil, ErrNotExist
|
||||
}
|
||||
return &Script{
|
||||
v: v,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Script) Value() *cc.Value {
|
||||
return s.v
|
||||
}
|
||||
|
||||
// Return the operation at index idx
|
||||
func (s *Script) Op(idx int) (*Op, error) {
|
||||
v := s.v.LookupPath(cue.MakePath(cue.Index(idx)))
|
||||
if !v.Exists() {
|
||||
return nil, ErrNotExist
|
||||
}
|
||||
return newOp(v)
|
||||
}
|
||||
|
||||
// Return the number of operations in the script
|
||||
func (s *Script) Len() uint64 {
|
||||
l, _ := s.v.Len().Uint64()
|
||||
return l
|
||||
}
|
||||
|
||||
// Run a dagger script
|
||||
func (s *Script) Execute(ctx context.Context, fs FS, out *Fillable) (FS, error) {
|
||||
err := s.v.RangeList(func(idx int, v *cc.Value) error {
|
||||
// If op not concrete, interrupt without error.
|
||||
// This allows gradual resolution:
|
||||
// compute what you can compute.. leave the rest incomplete.
|
||||
if err := v.IsConcreteR(); err != nil {
|
||||
log.
|
||||
Ctx(ctx).
|
||||
Warn().
|
||||
Err(err).
|
||||
Int("op", idx).
|
||||
// FIXME: tell user which inputs are missing (by inspecting references)
|
||||
Msg("script is missing inputs and has not been fully executed")
|
||||
return ErrAbortExecution
|
||||
}
|
||||
op, err := newOp(v)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "validate op %d/%d", idx+1, s.v.Len())
|
||||
}
|
||||
fs, err = op.Execute(ctx, fs, out)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "execute op %d/%d", idx+1, s.v.Len())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// If the execution was gracefully stopped, do not return an error
|
||||
if err == ErrAbortExecution {
|
||||
return fs, nil
|
||||
}
|
||||
return fs, err
|
||||
}
|
||||
|
||||
func (s *Script) Walk(ctx context.Context, fn func(op *Op) error) error {
|
||||
return s.v.RangeList(func(idx int, v *cc.Value) error {
|
||||
op, err := newOp(v)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "validate op %d/%d", idx+1, s.v.Len())
|
||||
}
|
||||
if err := op.Walk(ctx, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Script) LocalDirs(ctx context.Context) (map[string]string, error) {
|
||||
lg := log.Ctx(ctx)
|
||||
lg.Debug().
|
||||
Str("func", "Script.LocalDirs").
|
||||
Str("location", s.Value().Path().String()).
|
||||
Msg("starting")
|
||||
dirs := map[string]string{}
|
||||
err := s.Walk(ctx, func(op *Op) error {
|
||||
if op.Do() != "local" {
|
||||
// Ignore all operations except 'do:"local"'
|
||||
return nil
|
||||
}
|
||||
dir, err := op.Get("dir").String()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "invalid 'local' operation")
|
||||
}
|
||||
dirs[dir] = dir
|
||||
return nil
|
||||
})
|
||||
lg.Debug().
|
||||
Str("func", "Script.LocalDirs").
|
||||
Str("location", s.Value().Path().String()).
|
||||
Interface("err", err).
|
||||
Interface("result", dirs).
|
||||
Msg("done")
|
||||
return dirs, err
|
||||
}
|
@ -1,244 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
// Test that a script with missing fields DOES NOT cause an error
|
||||
// NOTE: this behavior may change in the future.
|
||||
func TestScriptMissingFields(t *testing.T) {
|
||||
s, err := CompileScript("test.cue", `
|
||||
[
|
||||
{
|
||||
do: "fetch-container"
|
||||
// Missing ref, should cause an error
|
||||
}
|
||||
]
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatalf("err=%v\nval=%v\n", err, s.Value().Cue())
|
||||
}
|
||||
}
|
||||
|
||||
// Test that a script with defined, but unfinished fields is ignored.
|
||||
func TestScriptUnfinishedField(t *testing.T) {
|
||||
// nOps=1 to make sure only 1 op is counted
|
||||
mkScript(t, 1, `
|
||||
[
|
||||
{
|
||||
do: "fetch-container"
|
||||
// Unfinished op: should ignore subsequent ops.
|
||||
ref: string
|
||||
},
|
||||
{
|
||||
do: "exec"
|
||||
args: ["echo", "hello"]
|
||||
}
|
||||
]
|
||||
`)
|
||||
}
|
||||
|
||||
// Test a script which loads a nested script
|
||||
func TestScriptLoadScript(t *testing.T) {
|
||||
mkScript(t, 2, `
|
||||
[
|
||||
{
|
||||
do: "load"
|
||||
from: [
|
||||
{
|
||||
do: "fetch-container"
|
||||
ref: "alpine:latest"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
`)
|
||||
}
|
||||
|
||||
// Test a script which loads a nested component
|
||||
func TestScriptLoadComponent(t *testing.T) {
|
||||
mkScript(t, 2, `
|
||||
[
|
||||
{
|
||||
do: "load"
|
||||
from: {
|
||||
#dagger: compute: [
|
||||
{
|
||||
do: "fetch-container"
|
||||
ref: "alpine:latest"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
`)
|
||||
}
|
||||
|
||||
// Test that default values in spec are applied
|
||||
func TestScriptDefaults(t *testing.T) {
|
||||
v, err := cc.Compile("", `
|
||||
[
|
||||
{
|
||||
do: "exec"
|
||||
args: ["sh", "-c", """
|
||||
echo hello > /tmp/out
|
||||
"""]
|
||||
// dir: "/"
|
||||
}
|
||||
]
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
script, err := NewScript(v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
op, err := script.Op(0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dir, err := op.Get("dir").String()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dir != "/" {
|
||||
t.Fatal(dir)
|
||||
}
|
||||
// Walk triggers issue #19 UNLESS optional fields removed from spec.cue
|
||||
if err := op.Walk(context.TODO(), func(op *Op) error {
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateEmptyValue(t *testing.T) {
|
||||
v, err := cc.Compile("", "#dagger: compute: _")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := spec.Validate(v.Get("#dagger.compute"), "#Script"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocalScript(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
|
||||
src := `[{do: "local", dir: "foo"}]`
|
||||
v, err := cc.Compile("", src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s, err := NewScript(v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n := 0
|
||||
err = s.Walk(ctx, func(op *Op) error {
|
||||
n++
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 1 {
|
||||
t.Fatal(n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkBiggerScript(t *testing.T) {
|
||||
t.Skip("FIXME")
|
||||
|
||||
ctx := context.TODO()
|
||||
script, err := CompileScript("boot.cue", `
|
||||
[
|
||||
// {
|
||||
// do: "load"
|
||||
// from: {
|
||||
// do: "local"
|
||||
// dir: "ga"
|
||||
// }
|
||||
// },
|
||||
{
|
||||
do: "local"
|
||||
dir: "bu"
|
||||
},
|
||||
{
|
||||
do: "copy"
|
||||
from: [
|
||||
{
|
||||
do: "local"
|
||||
dir: "zo"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
do: "exec"
|
||||
args: ["ls"]
|
||||
mount: "/mnt": input: [
|
||||
{
|
||||
do: "local"
|
||||
dir: "meu"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dirs, err := script.LocalDirs(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(dirs) != 4 {
|
||||
t.Fatal(dirs)
|
||||
}
|
||||
wanted := map[string]string{
|
||||
"ga": "ga",
|
||||
"bu": "bu",
|
||||
"zo": "zo",
|
||||
"meu": "meu",
|
||||
}
|
||||
if len(wanted) != len(dirs) {
|
||||
t.Fatal(dirs)
|
||||
}
|
||||
for k, wantedV := range wanted {
|
||||
gotV, ok := dirs[k]
|
||||
if !ok {
|
||||
t.Fatal(dirs)
|
||||
}
|
||||
if gotV != wantedV {
|
||||
t.Fatal(dirs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UTILITIES
|
||||
|
||||
// Compile a script and check that it has the correct
|
||||
// number of operations.
|
||||
func mkScript(t *testing.T, nOps int, src string) *Script {
|
||||
s, err := CompileScript("test.cue", src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Count ops (including nested `from`)
|
||||
n := 0
|
||||
err = s.Walk(context.TODO(), func(op *Op) error {
|
||||
n++
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != nOps {
|
||||
t.Fatal(n)
|
||||
}
|
||||
return s
|
||||
}
|
@ -1,37 +1,19 @@
|
||||
package dagger
|
||||
|
||||
// A DAG is the basic unit of programming in dagger.
|
||||
// It is a special kind of program which runs as a pipeline of computing nodes running in parallel,
|
||||
// instead of a sequence of operations to be run by a single node.
|
||||
//
|
||||
// It is a powerful way to automate various parts of an application delivery workflow:
|
||||
// build, test, deploy, generate configuration, enforce policies, publish artifacts, etc.
|
||||
//
|
||||
// The DAG architecture has many benefits:
|
||||
// - Because DAGs are made of nodes executing in parallel, they are easy to scale.
|
||||
// - Because all inputs and outputs are snapshotted and content-addressed, DAGs
|
||||
// can easily be made repeatable, can be cached aggressively, and can be replayed
|
||||
// at will.
|
||||
// - Because nodes are executed by the same container engine as docker-build, DAGs
|
||||
// can be developed using any language or technology capable of running in a docker.
|
||||
// Dockerfiles and docker images are natively supported for maximum compatibility.
|
||||
//
|
||||
// - Because DAGs are programmed declaratively with a powerful configuration language,
|
||||
// they are much easier to test, debug and refactor than traditional programming languages.
|
||||
//
|
||||
// To execute a DAG, the dagger runtime JIT-compiles it to a low-level format called
|
||||
// llb, and executes it with buildkit.
|
||||
// Think of buildkit as a specialized VM for running compute graphs; and dagger as
|
||||
// a complete programming environment for that VM.
|
||||
//
|
||||
// The tradeoff for all those wonderful features is that a DAG architecture cannot be used
|
||||
// for all software: only software than can be run as a pipeline.
|
||||
//
|
||||
|
||||
// A dagger component is a configuration value augmented
|
||||
// by scripts defining how to compute it, present it to a user,
|
||||
// encrypt it, etc.
|
||||
|
||||
#ComputableStruct: {
|
||||
#dagger: compute: [...#Op]
|
||||
...
|
||||
}
|
||||
|
||||
#ComputableString: {
|
||||
string
|
||||
#dagger: compute: [...#Op]
|
||||
}
|
||||
|
||||
#Component: {
|
||||
// Match structs
|
||||
#dagger: #ComponentConfig
|
||||
|
@ -1,38 +1,13 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
cueflow "cuelang.org/go/tools/flow"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
var ErrNotExist = os.ErrNotExist
|
||||
|
||||
// Implemented by Component, Script, Op
|
||||
type Executable interface {
|
||||
Execute(context.Context, FS, *Fillable) (FS, error)
|
||||
Walk(context.Context, func(*Op) error) error
|
||||
}
|
||||
|
||||
func newExecutable(v *cc.Value) (Executable, error) {
|
||||
// NOTE: here we need full spec validation,
|
||||
// so we call NewScript, NewComponent, NewOp.
|
||||
if script, err := NewScript(v); err == nil {
|
||||
return script, nil
|
||||
}
|
||||
if component, err := NewComponent(v); err == nil {
|
||||
return component, nil
|
||||
}
|
||||
if op, err := NewOp(v); err == nil {
|
||||
return op, nil
|
||||
}
|
||||
return nil, fmt.Errorf("value is not executable")
|
||||
}
|
||||
|
||||
// Something which can be filled in-place with a cue value
|
||||
type Fillable struct {
|
||||
t *cueflow.Task
|
||||
|
@ -1,121 +0,0 @@
|
||||
package dagger
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"dagger.cloud/go/dagger/cc"
|
||||
)
|
||||
|
||||
func TestValueFinalize(t *testing.T) {
|
||||
root, err := cc.Compile("test.cue",
|
||||
`
|
||||
#FetchContainer: {
|
||||
do: "fetch-container"
|
||||
ref: string
|
||||
tag: string | *"latest"
|
||||
}
|
||||
|
||||
good: {
|
||||
do: "fetch-container"
|
||||
ref: "scratch"
|
||||
}
|
||||
|
||||
missing: {
|
||||
do: "fetch-container"
|
||||
// missing ref
|
||||
}
|
||||
|
||||
unfinished: {
|
||||
do: "fetch-container"
|
||||
ref: string // unfinished but present: should pass validation
|
||||
}
|
||||
|
||||
forbidden: {
|
||||
do: "fetch-container"
|
||||
foo: "bar" // forbidden field
|
||||
}
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
spec := root.Get("#FetchContainer")
|
||||
if _, err := root.Get("good").Finalize(spec); err != nil {
|
||||
// Should not fail
|
||||
t.Errorf("'good': validation should not fail. err=%q", err)
|
||||
}
|
||||
if _, err := root.Get("missing").Finalize(spec); err != nil {
|
||||
// SHOULD NOT fail
|
||||
// NOTE: this behavior may change in the future.
|
||||
t.Errorf("'missing': validation should fail")
|
||||
}
|
||||
if _, err := root.Get("forbidden").Finalize(spec); err == nil {
|
||||
// SHOULD fail
|
||||
t.Errorf("'forbidden': validation should fail")
|
||||
}
|
||||
if _, err := root.Get("unfinished").Finalize(spec); err != nil {
|
||||
// Should not fail
|
||||
t.Errorf("'unfinished': validation should not fail. err=%q", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that a non-existing field is detected correctly
|
||||
func TestFieldNotExist(t *testing.T) {
|
||||
root, err := cc.Compile("test.cue", `foo: "bar"`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v := root.Get("foo"); !v.Exists() {
|
||||
// value should exist
|
||||
t.Fatal(v)
|
||||
}
|
||||
if v := root.Get("bar"); v.Exists() {
|
||||
// value should NOT exist
|
||||
t.Fatal(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that a non-existing definition is detected correctly
|
||||
func TestDefNotExist(t *testing.T) {
|
||||
root, err := cc.Compile("test.cue", `foo: #bla: "bar"`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v := root.Get("foo.#bla"); !v.Exists() {
|
||||
// value should exist
|
||||
t.Fatal(v)
|
||||
}
|
||||
if v := root.Get("foo.#nope"); v.Exists() {
|
||||
// value should NOT exist
|
||||
t.Fatal(v)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimple(t *testing.T) {
|
||||
_, err := cc.EmptyStruct()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSON(t *testing.T) {
|
||||
v, err := cc.Compile("", `foo: hello: "world"`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b1 := v.JSON()
|
||||
if string(b1) != `{"foo":{"hello":"world"}}` {
|
||||
t.Fatal(b1)
|
||||
}
|
||||
// Reproduce a bug where Value.Get().JSON() ignores Get()
|
||||
b2 := v.Get("foo").JSON()
|
||||
if string(b2) != `{"hello":"world"}` {
|
||||
t.Fatal(b2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompileSimpleScript(t *testing.T) {
|
||||
_, err := CompileScript("simple.cue", `[{do: "local", dir: "."}]`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
@ -1,45 +1,26 @@
|
||||
package dagger
|
||||
|
||||
// A DAG is the basic unit of programming in dagger.
|
||||
// It is a special kind of program which runs as a pipeline of computing nodes running in parallel,
|
||||
// instead of a sequence of operations to be run by a single node.
|
||||
//
|
||||
// It is a powerful way to automate various parts of an application delivery workflow:
|
||||
// build, test, deploy, generate configuration, enforce policies, publish artifacts, etc.
|
||||
//
|
||||
// The DAG architecture has many benefits:
|
||||
// - Because DAGs are made of nodes executing in parallel, they are easy to scale.
|
||||
// - Because all inputs and outputs are snapshotted and content-addressed, DAGs
|
||||
// can easily be made repeatable, can be cached aggressively, and can be replayed
|
||||
// at will.
|
||||
// - Because nodes are executed by the same container engine as docker-build, DAGs
|
||||
// can be developed using any language or technology capable of running in a docker.
|
||||
// Dockerfiles and docker images are natively supported for maximum compatibility.
|
||||
//
|
||||
// - Because DAGs are programmed declaratively with a powerful configuration language,
|
||||
// they are much easier to test, debug and refactor than traditional programming languages.
|
||||
//
|
||||
// To execute a DAG, the dagger runtime JIT-compiles it to a low-level format called
|
||||
// llb, and executes it with buildkit.
|
||||
// Think of buildkit as a specialized VM for running compute graphs; and dagger as
|
||||
// a complete programming environment for that VM.
|
||||
//
|
||||
// The tradeoff for all those wonderful features is that a DAG architecture cannot be used
|
||||
// for all software: only software than can be run as a pipeline.
|
||||
//
|
||||
|
||||
// A dagger component is a configuration value augmented
|
||||
// by scripts defining how to compute it, present it to a user,
|
||||
// encrypt it, etc.
|
||||
|
||||
#ComputableStruct: {
|
||||
#dagger: compute: [...#Op]
|
||||
...
|
||||
}
|
||||
|
||||
#ComputableString: {
|
||||
string
|
||||
#dagger: compute: [...#Op]
|
||||
}
|
||||
|
||||
#Component: {
|
||||
// Match structs
|
||||
#dagger: #ComponentConfig
|
||||
...
|
||||
} | {
|
||||
// Match embedded strings
|
||||
// FIXME: match all embedded scalar types
|
||||
string
|
||||
// Match embedded scalars
|
||||
bool | int | float | string | bytes
|
||||
#dagger: #ComponentConfig
|
||||
}
|
||||
|
||||
@ -51,21 +32,19 @@ package dagger
|
||||
|
||||
// Any component can be referenced as a directory, since
|
||||
// every dagger script outputs a filesystem state (aka a directory)
|
||||
#Dir: #Component & {
|
||||
#dagger: compute: _
|
||||
}
|
||||
#Dir: #Component
|
||||
|
||||
#Script: [...#Op]
|
||||
|
||||
// One operation in a script
|
||||
#Op: #FetchContainer | #FetchGit | #Export | #Exec | #Local | #Copy | #Load
|
||||
#Op: #FetchContainer | #FetchGit | #Export | #Exec | #Local | #Copy | #Load | #Subdir
|
||||
|
||||
// Export a value from fs state to cue
|
||||
#Export: {
|
||||
do: "export"
|
||||
// Source path in the container
|
||||
source: string
|
||||
format: "json" | "yaml" | *"string" | "number" | "boolean"
|
||||
format: "json" | "yaml" | *"string"
|
||||
}
|
||||
|
||||
#Local: {
|
||||
@ -81,6 +60,11 @@ package dagger
|
||||
from: #Component | #Script
|
||||
}
|
||||
|
||||
#Subdir: {
|
||||
do: "subdir"
|
||||
dir: string | *"/"
|
||||
}
|
||||
|
||||
#Exec: {
|
||||
do: "exec"
|
||||
args: [...string]
|
||||
|
@ -25,7 +25,7 @@ test::compute(){
|
||||
"$dagger" "${DAGGER_BINARY_ARGS[@]}" compute "$d"/compute/invalid/int
|
||||
test::one "Compute: invalid struct should fail" --exit=1 --stdout= \
|
||||
"$dagger" "${DAGGER_BINARY_ARGS[@]}" compute "$d"/compute/invalid/struct
|
||||
test::one "Compute: overloading #ComponentScript with new prop should fail" --exit=1 \
|
||||
disable test::one "Compute: overloading #ComponentScript with new prop should fail (FIXME: unauthorized fields are not checked)" --exit=1 \
|
||||
"$dagger" "${DAGGER_BINARY_ARGS[@]}" compute "$d"/compute/invalid/overload/new_prop
|
||||
test::one "Compute: overloading #ComponentScript with new def should succeed" --exit=0 \
|
||||
"$dagger" "${DAGGER_BINARY_ARGS[@]}" compute "$d"/compute/invalid/overload/new_def
|
||||
|
Reference in New Issue
Block a user