Merge pull request #117 from dagger/slim3

dagger compute: automatically fetch stdlib
This commit is contained in:
Andrea Luzzardi 2021-02-16 14:41:10 -08:00 committed by GitHub
commit b6f5e8a098
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 42 additions and 876 deletions

View File

@ -15,7 +15,7 @@ test:
.PHONY: cuefmt
cuefmt:
@(cue fmt -s ./... && cue trim -s ./...)
@(cue fmt -s ./...)
.PHONY: lint
lint: cuefmt

View File

@ -2,6 +2,7 @@ package dagger
import (
"context"
"os"
"cuelang.org/go/cue"
cueflow "cuelang.org/go/tools/flow"
@ -90,13 +91,41 @@ func (env *Env) SetInput(i *cc.Value) error {
)
}
// stdlibLoader builds the pipeline operation that makes the dagger
// standard library available (it is injected under cue.mod/pkg by the
// caller before the updater script runs).
//
// Two modes:
//   - If DAGGER_DEV_STDLIB is set, the stdlib is loaded from that local
//     directory (developer mode).
//   - Otherwise, the stdlib is fetched from the pinned git commit of the
//     upstream dagger-stdlib repository.
func stdlibLoader() (*cc.Value, error) {
	devDir := os.Getenv("DAGGER_DEV_STDLIB")
	if devDir == "" {
		// Default: fetch the stdlib pinned to an exact commit so builds
		// are reproducible.
		return cc.Compile("stdlib.cue", `
do: "fetch-git"
remote: "https://github.com/blocklayerhq/dagger-stdlib"
ref: "0625677b5aec1162621ad18fbd7b90dc9d7d54e5"
`)
	}
	// Dev mode: compile a "local" op with an abstract dir, then merge the
	// concrete directory path from the environment into the "dir" field.
	op, err := cc.Compile("stdlib.cue", `
do: "local"
dir: string
include: ["cue.mod/pkg"]
`)
	if err != nil {
		return nil, err
	}
	return op.MergeTarget(devDir, "dir")
}
// Update the base configuration
func (env *Env) Update(ctx context.Context, s Solver) error {
// execute updater script
p := NewPipeline(s, nil)
// always inject stdlib in cue.mod/pkg
stdlib, err := stdlibLoader()
if err != nil {
return err
}
if err := p.Do(ctx, stdlib); err != nil {
return err
}
// execute updater script
if err := p.Do(ctx, env.updater); err != nil {
return err
}
// load cue files produced by updater
// FIXME: BuildAll() to force all files (no required package..)
base, err := CueBuild(ctx, p.FS())
@ -160,6 +189,10 @@ func (env *Env) LocalDirs() map[string]string {
)
// 2. Scan the environment updater
localdirs(env.Updater())
// 3. In dev mode, always include dev stdlib directory
if dev := os.Getenv("DAGGER_DEV_STDLIB"); dev != "" {
dirs[dev] = dev
}
return dirs
}

View File

@ -231,7 +231,13 @@ func (p *Pipeline) Local(ctx context.Context, op *cc.Value) error {
if err := op.Get("include").Decode(&include); err != nil {
return err
}
p.fs = p.fs.Set(llb.Local(dir, llb.FollowPaths(include)))
p.fs = p.fs.Change(func(st llb.State) llb.State {
return st.File(llb.Copy(
llb.Local(dir, llb.FollowPaths(include)),
"/",
"/",
))
})
return nil
}

View File

@ -1,23 +0,0 @@
# ACME platform
Welcome to the acme-platform repository. It contains everything you need to start developing and shipping improvements
to the ACME Clothing Store.
For information or support, contact the ACME Platform team: platform@acme.infralabs.io
# Things you can do with ACME platform
## Pre-merge or post-merge QA
## Create a personal dev environment
## Cross-team integration testing
## Sales demos
## End-to-end product reviews
## Testing infrastructure changes
## Deploying to production (REQUIRES SPECIAL PRIVILEGES, talk to your SRE)

View File

@ -1,38 +0,0 @@
// ACME platform: everything you need to develop and ship improvements to
// the ACME clothing store.
package acme
import (
"dagger.cloud/dagger"
"dagger.cloud/netlify"
"dagger.cloud/aws/ecs"
"dagger.cloud/microstaging"
)
// Website on netlify
www: netlify & {
domain: string | *defaultDomain
// By default, use a generated microstaging.io domain
// for easy environments on demand.
let defaultDomain=microstaging.#Domain & {
token: _
prefix: "www.acme"
}
}
// API deployed on ECS
api: ecs & {
domain: _ | *defaultDomain
let defaultDomain = microstaging.#Domain & {
token: _
prefix: "api.acme"
}
}
// Database on RDS
db: rds & {
engine: "postgresql"
}

View File

@ -1,24 +0,0 @@
package alpine
#Image: {
version: string | *"latest"
packages: [...string]
#dag: {
do: [
{
//
//
// fetch alpine
},
{
for _, pkg in packages {
}
}
]
}
}

View File

@ -1 +0,0 @@
module: "acme.infralabs.io/acme"

View File

@ -1,119 +0,0 @@
package dagger
// A DAG is the basic unit of programming in dagger.
// It is a special kind of program which runs as a pipeline of computing nodes running in parallel,
// instead of a sequence of operations to be run by a single node.
//
// It is a powerful way to automate various parts of an application delivery workflow:
// build, test, deploy, generate configuration, enforce policies, publish artifacts, etc.
//
// The DAG architecture has many benefits:
// - Because DAGs are made of nodes executing in parallel, they are easy to scale.
// - Because all inputs and outputs are snapshotted and content-addressed, DAGs
// can easily be made repeatable, can be cached aggressively, and can be replayed
// at will.
// - Because nodes are executed by the same container engine as docker-build, DAGs
// can be developed using any language or technology capable of running in a docker.
// Dockerfiles and docker images are natively supported for maximum compatibility.
//
// - Because DAGs are programmed declaratively with a powerful configuration language,
// they are much easier to test, debug and refactor than traditional programming languages.
//
// To execute a DAG, the dagger runtime JIT-compiles it to a low-level format called
// llb, and executes it with buildkit.
// Think of buildkit as a specialized VM for running compute graphs; and dagger as
// a complete programming environment for that VM.
//
// The tradeoff for all those wonderful features is that a DAG architecture cannot be used
// for all software: only software that can be run as a pipeline.
//
// A dagger component is a configuration value augmented
// by scripts defining how to compute it, present it to a user,
// encrypt it, etc.
// FIXME: #Component will not match embedded scalars.
// use Runtime.isComponent() for a reliable check
#Component: {
#dagger: #ComponentConfig
...
}
// The contents of a #dagger annotation
#ComponentConfig: {
// script to compute the value
compute?: #Script
}
// Any component can be referenced as a directory, since
// every dagger script outputs a filesystem state (aka a directory)
#Dir: #Component
#Script: [...#Op]
// One operation in a script
#Op: #FetchContainer | #FetchGit | #Export | #Exec | #Local | #Copy | #Load
// Export a value from fs state to cue
#Export: {
do: "export"
// Source path in the container
source: string
format: "json" | "yaml" | *"string" | "number" | "boolean"
}
#Local: {
do: "local"
dir: string
include?: [...string] | *[]
}
// FIXME: bring back load (more efficient than copy)
#Load: {
do: "load"
from: #Component | #Script
}
#Exec: {
do: "exec"
args: [...string]
env?: [string]: string
always?: true | *false
dir: string | *"/"
mount?: [string]: #MountTmp | #MountCache | #MountComponent | #MountScript
}
#MountTmp: "tmpfs"
#MountCache: "cache"
#MountComponent: {
input: #Component
path: string | *"/"
}
#MountScript: {
input: #Script
path: string | *"/"
}
#FetchContainer: {
do: "fetch-container"
ref: string
}
#FetchGit: {
do: "fetch-git"
remote: string
ref: string
}
#Copy: {
do: "copy"
from: #Script | #Component
src: string | *"/"
dest: string | *"/"
}
#TestScript: #Script & [
{do: "fetch-container", ref: "alpine:latest"},
{do: "exec", args: ["echo", "hello", "world"]},
]

View File

@ -1,63 +0,0 @@
package netlify
import (
".../alpine"
)
auth: {
#dag: {
encrypted: true
do: [
{
action: "fetch"
type: "docker"
source: "alpine"
},
{
action: "push"
}
]
}
{
username: string
password: string
} | {
// FIXME: enrypted!
token: string
}
}
name: string
domain?: string
// FIXME: directory!
source: bl.#Dir
let base = alpine.#Image & {
version: "foo"
packages: ["rsync", "npm", "openssh"]
}
// Netlify site ID
id: {
info1: string
info2: string
#dag: {
// run code to fetch id from netlify API
from: base
do: [
{
action: "run"
command: ["netlify-get-id", name, "-o", "/netlify-id.json"]
}
]
export: json: "/netlify-id.json"
}
}
url: string

View File

@ -1,216 +0,0 @@
// Custom netlify package
// ACME platform team <platform@acme.infralabs.io>
//
// TODO: upstream to dagger standard library.
package netlify
import (
"dagger.cloud/dagger"
)
// Netlify API token
token: {
#dag: {
encrypt: cipher: "..."
}
string
}
// Netlify site name
name?: string
// Source directory to deploy
source: dagger.#Dir
let apply={
#dag: {
from: alpine.#Base
do: [
["run", "npm", "install", "netlify-cli", "-g"],
[
"copy",
[
"fetch", "git", "https://github.com/shykes/tests", "netlify-scripts",
], "/", "/src",
]
// 2. fetch custom netlify scripts & install
// 3. get ID from name; create if doesn't exist
// 4. deploy (via builder)
]
command: {
debug: {
from: base
do: ["run", "sh", "-c", """
env && find /netlify
"""]
}
}
}
}
apply
deployedDir: {
#dag: {
from: apply
export: dir: "/netlify/content"
}
}
// Netlify site ID
ID: {
string
#dag: {
from: apply
export: string: "/netlify/site-id"
}
}
url: {
string
#dag: {
from: apply
export: string: "/netlify/url"
}
}
// Example of short-form cuellb pipeline
// 1. single-op pipeline can omit the array
// 2. action encoded in first key, instead of `action: ` field
// 3. op may implement short-form,
// in this case: `run: [...string]` instead of `run: { command: [...string] }`
do: run: ["ntlfy-get-site-id", name, "-o", "/netlify/site-id"]
// Declarative export from container, instead of awkward `readFile` pseudo-op
export: string: "/netlify/site-id"
}
}
// Configuration presets
preset: {
*"html" | "react" | "custom"
#dag: {
settings: {
markup: select: {
"Static HTML site (no build)": "html"
"ReactJS app built with npm": "react"
"Custom builder": "custom"
}
}
}
}
// Custom builder
// Default: no build, deploy as-is.
builder: {
in: dagger.#Dir & source
out: dagger.#Dir
if preset == "html" {
// Pass-through builder that does nothing
out: in
}
if preset == "react" {
let app = reactjs.#App & {
source: in
}
out: app.build
}
...
}
scripts: {
dagger.#Directory | *latestScripts
let latestScripts = {
#dag: {
do: {
action: "fetch"
type: "git"
source: "https://github.com/shykes/tests"
ref: "netlify-scripts"
}
}
export: dir: "/"
}
}
// This is configurable for dev mode, but hide it from end users.
#dag: settings: hidden: true
}
// Version of the netlify CLI to use
cliVersion: string | *latestCLIVersion
let latestCLIVersion = {
string
#dag: {
from: base
do: run: ["sh", "-c", "npm show netlify-cli dist-tags.latest > /latest-cli-version"]
export: string: "/latest-cli-version"
}
}
// Custom container to run netlify commands + wrappers
let base=alpine.#Base & {
package: {
npm: true
curl: true
}
}
let runner = {
#dag: {
from: base
do: [
{
run: "npm", "install
action: "run"
command: ["npm", "install", "-g", "netlify-cli@" + cliVersion]
},
{
// YOU ARE HERE
// incorporate "netify scripts from personal github" pattern from other POC
}
}
}
url: {
string
#dag: {
from: runner
do: run: {
command: #"""
netlify deploy
--dir="$(pwd)" \
--auth="$(cat /netlify/token)" \
--site="${NETLIFY_SITE_ID}" \
--message="Blocklayer 'netlify deploy'" \
--prod \
| tee /tmp/stdout
curl \
-i -X POST \
-H "Authorization: Bearer $(cat /netlify/token)" \
"https://api.netlify.com/api/v1/sites/${NETLIFY_SITE_ID}/ssl"
"""#
mount: {
"/netlify/token": token
"/netlify/source": builder.out
}
dir: "/netlify/source"
env: {
NETLIFY_SITE_ID: ID
}
}
}
}

View File

@ -1,89 +0,0 @@
package netlify
#dag: {
do: [
{
action: "fetch"
type: "container"
repository: "alpine"
tag: "latest"
},
{
action: "run"
command: "apk add ..."
},
{
action: "copy"
from: [
{
action: "fetch"
type: "git"
repo: "https://github.com/shykes/stuff"
}
]
source: "/"
dest: "/src"
},
]
}
// Name of the netlify site
name: {
string
#dag: {
}
}
// ID of the netlify site
// FIXME: compute
id: {
string
#dag: {
from: ...
do: [
action: "run"
command: ["netlify-get-id", name, "-o", "/netlify-id.txt"]
]
export: string: "/netlify-id.txt"
}
}
// API token
// FIXME: encrypt secret!
token: {
#encrypt: {
pubkey: _
cipher: _
}
string
}
// FIXME: how to receive a directory?
source: bl.#Dir
// Domain of the Netlify site
domain?: string
// FIXME: compute
url: {
#dag: {
do: [
// ...
{
action: "run"
command: "netlify deploy"
dir: "/src"
mount: "/src": source
}
]
}
string
}

View File

@ -1,3 +0,0 @@

View File

@ -1,50 +0,0 @@
package alpine
// Default version pinned to digest. Manually updated.
let defaultDigest="sha256:3c7497bf0c7af93428242d6176e8f7905f2201d8fc5861f45be7a346b5f23436"
ref: string
// Match a combination of inputs 'version' and 'digest':
*{
// no version, no digest:
ref: "index.docker.io/alpine@\(defaultDigest)"
} | {
// version, no digest
version: string
ref: "alpine:\(version)"
} | {
// digest, no version
digest: string
ref: "alpine@\(digest)"
} | {
// version and digest
version: string
digest: string
ref: "alpine:\(version)@\(digest)"
}
// Packages to install
package: [string]: true | false | string
#dagger: compute: [
{
do: "fetch-container"
"ref": ref
},
for pkg, info in package {
if (info & true) != _|_ {
do: "exec"
args: ["apk", "add", "-U", "--no-cache", pkg]
// https://github.com/blocklayerhq/dagger/issues/6
mount: foo: {}
}
if (info & string) != _|_ {
do: "exec"
args: ["apk", "add", "-U", "--no-cache", "\(pkg)\(info)"]
// https://github.com/blocklayerhq/dagger/issues/6
mount: foo: {}
}
},
]

View File

@ -1,119 +0,0 @@
package dagger
// A DAG is the basic unit of programming in dagger.
// It is a special kind of program which runs as a pipeline of computing nodes running in parallel,
// instead of a sequence of operations to be run by a single node.
//
// It is a powerful way to automate various parts of an application delivery workflow:
// build, test, deploy, generate configuration, enforce policies, publish artifacts, etc.
//
// The DAG architecture has many benefits:
// - Because DAGs are made of nodes executing in parallel, they are easy to scale.
// - Because all inputs and outputs are snapshotted and content-addressed, DAGs
// can easily be made repeatable, can be cached aggressively, and can be replayed
// at will.
// - Because nodes are executed by the same container engine as docker-build, DAGs
// can be developed using any language or technology capable of running in a docker.
// Dockerfiles and docker images are natively supported for maximum compatibility.
//
// - Because DAGs are programmed declaratively with a powerful configuration language,
// they are much easier to test, debug and refactor than traditional programming languages.
//
// To execute a DAG, the dagger runtime JIT-compiles it to a low-level format called
// llb, and executes it with buildkit.
// Think of buildkit as a specialized VM for running compute graphs; and dagger as
// a complete programming environment for that VM.
//
// The tradeoff for all those wonderful features is that a DAG architecture cannot be used
// for all software: only software that can be run as a pipeline.
//
// A dagger component is a configuration value augmented
// by scripts defining how to compute it, present it to a user,
// encrypt it, etc.
// FIXME: #Component will not match embedded scalars.
// use Runtime.isComponent() for a reliable check
#Component: {
#dagger: #ComponentConfig
...
}
// The contents of a #dagger annotation
#ComponentConfig: {
// script to compute the value
compute?: #Script
}
// Any component can be referenced as a directory, since
// every dagger script outputs a filesystem state (aka a directory)
#Dir: #Component
#Script: [...#Op]
// One operation in a script
#Op: #FetchContainer | #FetchGit | #Export | #Exec | #Local | #Copy | #Load
// Export a value from fs state to cue
#Export: {
do: "export"
// Source path in the container
source: string
format: "json" | "yaml" | *"string" | "number" | "boolean"
}
#Local: {
do: "local"
dir: string
include?: [...string] | *[]
}
// FIXME: bring back load (more efficient than copy)
#Load: {
do: "load"
from: #Component | #Script
}
#Exec: {
do: "exec"
args: [...string]
env?: [string]: string
always?: true | *false
dir: string | *"/"
mount?: [string]: #MountTmp | #MountCache | #MountComponent | #MountScript
}
#MountTmp: "tmpfs"
#MountCache: "cache"
#MountComponent: {
input: #Component
path: string | *"/"
}
#MountScript: {
input: #Script
path: string | *"/"
}
#FetchContainer: {
do: "fetch-container"
ref: string
}
#FetchGit: {
do: "fetch-git"
remote: string
ref: string
}
#Copy: {
do: "copy"
from: #Script | #Component
src: string | *"/"
dest: string | *"/"
}
#TestScript: #Script & [
{do: "fetch-container", ref: "alpine:latest"},
{do: "exec", args: ["echo", "hello", "world"]},
]

View File

@ -1,63 +0,0 @@
package example
import (
"dagger.cloud/alpine"
"dagger.cloud/dagger"
)
test: {
string
#dagger: compute: [
dagger.#Load & { from: alpine },
dagger.#Copy & {
from: [
dagger.#FetchContainer & { ref: alpine.ref },
]
dest: "/src"
// https://github.com/blocklayerhq/dagger/issues/9
src: "/"
},
dagger.#Exec & {
dir: "/src"
args: ["sh", "-c", """
ls -l > /tmp/out
"""
]
// https://github.com/blocklayerhq/dagger/issues/6
mount: foo: {}
// mount: dagger.#Mount
},
dagger.#Export & {
// https://github.com/blocklayerhq/dagger/issues/8
// source: "/tmp/out"
},
]
}
www: {
// Domain where the site will be deployed (user input)
domain: string
url: {
string & =~ "https://.*"
// https://github.com/blocklayerhq/dagger/issues/10
#dagger2: compute: [
dagger.#Load & { from: alpine },
dagger.#Exec & {
args: ["sh", "-c",
"""
echo 'deploying to netlify (not really)'
echo 'https://\(domain)/foo' > /tmp/out
"""
]
// https://github.com/blocklayerhq/dagger/issues/6
mount: foo: {}
},
dagger.#Export & {
// https://github.com/blocklayerhq/dagger/issues/8
// source: "/tmp/out"
}
]
}
}

View File

@ -1,3 +0,0 @@
package example
www: domain: "www.foobar.com"

View File

@ -1,62 +0,0 @@
package dagger
// Any component can be referenced as a directory, since
// every dagger script outputs a filesystem state (aka a directory)
#Dir: #dagger: compute: [...#Op]
// One operation in a script
#Op: #FetchContainer | #FetchGit | #Export | #Exec | #Local | #Copy | #Load | #Subdir
// Export a value from fs state to cue
#Export: {
do: "export"
// Source path in the container
source: string
format: "json" | "yaml" | *"string"
}
#Local: {
do: "local"
dir: string
include: [...string] | *[]
}
// FIXME: bring back load (more efficient than copy)
#Load: {
do: "load"
from: _
}
#Subdir: {
do: "subdir"
dir: string | *"/"
}
#Exec: {
do: "exec"
args: [...string]
env?: [string]: string
always?: true | *false
dir: string | *"/"
mount: [string]: "tmp" | "cache" | { from: _, path: string | *"/" }
}
#FetchContainer: {
do: "fetch-container"
ref: string
}
#FetchGit: {
do: "fetch-git"
remote: string
ref: string
}
#Copy: {
do: "copy"
from: _
src: string | *"/"
dest: string | *"/"
}