Merge pull request #643 from samalba/stdlib-docs

docs: Stdlib consistency and cleanup
This commit is contained in:
Sam Alba 2021-06-16 11:25:26 +02:00 committed by GitHub
commit 693a3470fb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 110 additions and 421 deletions

View File

@ -19,10 +19,13 @@ AWS CloudFormation Stack
|*config.secretKey* | `dagger.#Secret` |AWS secret key |
|*source* | `string` |Source is the Cloudformation template (JSON/YAML string) |
|*stackName* | `string` |Name of the CloudFormation stack |
|*parameters* | `struct` |Stack parameters |
|*onFailure* | `*"DO_NOTHING" \| "ROLLBACK" \| "DELETE"` |Behavior when failure to create/update the Stack |
|*timeout* | `*10 \| \>=0 & int` |Maximum waiting time until stack creation/update (in minutes) |
|*neverUpdate* | `*false \| bool` |Never update the stack if already exists |
|*neverUpdate* | `*false \| true` |Never update the stack if already exists |
### #Stack Outputs
_No output._
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*outputs* | `struct` |- |

View File

@ -8,7 +8,7 @@ Amazon Elastic Container Registry (ECR)
## #Credentials
Convert AWS credentials to Docker Registry credentials for ECR
Convert ECR credentials to Docker Login format
### #Credentials Inputs
@ -23,4 +23,7 @@ Convert AWS credentials to Docker Registry credentials for ECR
### #Credentials Outputs
_No output._
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*username* | `"AWS"` |ECR registry |
|*secret* | `string` |ECR registry secret |

View File

@ -6,11 +6,11 @@ sidebar_label: rds
AWS Relational Database Service (RDS)
## #CreateDB
## #Database
Creates a new Database on an existing RDS Instance
### #CreateDB Inputs
### #Database Inputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
@ -22,36 +22,12 @@ Creates a new Database on an existing RDS Instance
|*secretArn* | `string` |ARN of the database secret (for connecting via rds api) |
|*dbType* | `string` |Database type MySQL or PostgreSQL (Aurora Serverless only) |
### #CreateDB Outputs
### #Database Outputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*out* | `string` |Name of the DB created |
## #CreateUser
Creates new user credentials on an existing RDS Instance
### #CreateUser Inputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*config.region* | `string` |AWS region |
|*config.accessKey* | `dagger.#Secret` |AWS access key |
|*config.secretKey* | `dagger.#Secret` |AWS secret key |
|*username* | `string` |Username |
|*password* | `string` |Password |
|*dbArn* | `string` |ARN of the database instance |
|*secretArn* | `string` |ARN of the database secret (for connecting via rds api) |
|*grantDatabase* | `*"" \| string` |Name of the database to grant access to |
|*dbType* | `string` |Database type MySQL or PostgreSQL (Aurora Serverless only) |
### #CreateUser Outputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*out* | `string` |Outputted username |
## #Instance
Fetches information on an existing RDS Instance
@ -72,3 +48,27 @@ Fetches information on an existing RDS Instance
|*hostname* | `_\|_` |DB hostname |
|*port* | `_\|_` |DB port |
|*info* | `_\|_` |- |
## #User
Creates new user credentials on an existing RDS Instance
### #User Inputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*config.region* | `string` |AWS region |
|*config.accessKey* | `dagger.#Secret` |AWS access key |
|*config.secretKey* | `dagger.#Secret` |AWS secret key |
|*username* | `string` |Username |
|*password* | `string` |Password |
|*dbArn* | `string` |ARN of the database instance |
|*secretArn* | `string` |ARN of the database secret (for connecting via rds api) |
|*grantDatabase* | `*"" \| string` |Name of the database to grant access to |
|*dbType* | `string` |Database type MySQL or PostgreSQL (Aurora Serverless only) |
### #User Outputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*out* | `string` |Outputted username |

View File

@ -6,32 +6,11 @@ sidebar_label: s3
AWS Simple Storage Service
## #Put
## #Object
S3 Bucket upload (file or directory)
S3 Bucket object(s) sync
### #Put Inputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*config.region* | `string` |AWS region |
|*config.accessKey* | `dagger.#Secret` |AWS access key |
|*config.secretKey* | `dagger.#Secret` |AWS secret key |
|*target* | `string` |Target S3 URL (eg. s3://\<bucket-name\>/\<path\>/\<sub-path\>) |
|*contentType* | `*"" \| string` |Object content type |
|*always* | `*true \| bool` |Always write the object to S3 |
### #Put Outputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*url* | `string` |URL of the uploaded S3 object |
## #Sync
S3 Bucket sync
### #Sync Inputs
### #Object Inputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
@ -40,11 +19,11 @@ S3 Bucket sync
|*config.secretKey* | `dagger.#Secret` |AWS secret key |
|*source* | `dagger.#Artifact` |Source Artifact to upload to S3 |
|*target* | `string` |Target S3 URL (eg. s3://\<bucket-name\>/\<path\>/\<sub-path\>) |
|*delete* | `*false \| bool` |Files that exist in the destination but not in the source are deleted during sync. |
|*delete* | `*false \| true` |Delete files that already exist on remote destination |
|*contentType* | `*"" \| string` |Object content type |
|*always* | `*true \| bool` |Always write the object to S3 |
|*always* | `*true \| false` |Always write the object to S3 |
### #Sync Outputs
### #Object Outputs
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |

View File

@ -22,4 +22,5 @@ Credentials retriever for GCR
| Name | Type | Description |
| ------------- |:-------------: |:-------------: |
|*secret* | `string` |- |
|*username* | `"oauth2accesstoken"` |GCR registry username |
|*secret* | `string` |GCR registry secret |

View File

@ -4,6 +4,8 @@ sidebar_label: helm
# dagger.io/kubernetes/helm
Helm package manager
## #Chart
Install a Helm chart

View File

@ -8,13 +8,14 @@ import (
// Backend configuration
backend: {
// Source code to build this container
source: git.#Repository | dagger.#Artifact @dagger(input)
// Container environment variables
environment: {
[string]: string @dagger(input)
}
[string]: string
} @dagger(input)
// Public hostname (need to match the master domain configures on the loadbalancer)
hostname: string @dagger(input)
@ -39,16 +40,16 @@ backend: {
dockerfilePath: *"" | string @dagger(input)
// docker build args
dockerBuildArgs: {
[string]: string @dagger(input)
}
[string]: string
} @dagger(input)
}
// Init container runs only once when the main container starts
initContainer: {
command: [...string] @dagger(input)
environment: {
[string]: string @dagger(input)
}
[string]: string
} @dagger(input)
}
}

View File

@ -9,7 +9,7 @@ database: {
let slug = name
dbType: "mysql" | "postgresql" @dagger(input)
db: rds.#CreateDB & {
db: rds.#Database & {
config: infra.awsConfig
name: slug
dbArn: infra.rdsInstanceArn
@ -17,7 +17,7 @@ database: {
secretArn: infra.rdsAdminSecretArn
}
user: rds.#CreateUser & {
user: rds.#User & {
config: infra.awsConfig
dbArn: infra.rdsInstanceArn
"dbType": dbType

View File

@ -20,7 +20,7 @@ source: dagger.#Artifact @dagger(input)
// Deployed URL
url: "\(deploy.url)index.html" @dagger(output)
deploy: s3.#Put & {
deploy: s3.#Object & {
always: true
config: awsConfig
"source": source

View File

@ -14,44 +14,11 @@ bucket: "dagger-ci"
content: "A simple test sentence"
TestS3UploadFile: {
deploy: s3.#Put & {
config: TestConfig.awsConfig
sourceInline: content
target: "s3://\(bucket)/test.txt"
}
verify: #VerifyS3 & {
config: TestConfig.awsConfig
target: deploy.target
file: "test.txt"
}
}
TestDirectory: dagger.#Artifact
TestS3UploadDir: {
deploy: s3.#Put & {
config: TestConfig.awsConfig
source: TestDirectory
target: "s3://\(bucket)/"
}
verifyFile: #VerifyS3 & {
config: TestConfig.awsConfig
target: deploy.target
file: "dirFile.txt"
}
verifyDir: #VerifyS3 & {
config: TestConfig.awsConfig
target: deploy.target
file: "foo.txt"
}
}
TestS3Sync: {
deploy: s3.#Sync & {
TestS3Object: {
deploy: s3.#Object & {
always: true
config: TestConfig.awsConfig
source: TestDirectory
target: "s3://\(bucket)/"

View File

@ -5,13 +5,17 @@ import (
"dagger.io/dagger/op"
)
// Default Alpine version
let defaultVersion = "3.13.5@sha256:69e70a79f2d41ab5d637de98c1e0b055206ba40a8145e7bddb55ccc04e13cf8f"
// Base image for Alpine Linux
#Image: {
// List of packages to install
package: [string]: true | false | string
// Alpine version to install
version: string | *defaultVersion
// Use of os package not possible : alpine is a low level component
#up: [
op.#FetchContainer & {
ref: "index.docker.io/alpine:\(version)"

View File

@ -23,7 +23,7 @@ import (
// Stack parameters
parameters: {
...
}
} @dagger(input)
// Behavior when failure to create/update the Stack
onFailure: *"DO_NOTHING" | "ROLLBACK" | "DELETE" @dagger(input)
@ -32,7 +32,7 @@ import (
timeout: *10 | uint @dagger(input)
// Never update the stack if already exists
neverUpdate: *false | bool @dagger(input)
neverUpdate: *false | true @dagger(input)
#files: {
"/entrypoint.sh": #Code
@ -48,8 +48,8 @@ import (
}
outputs: {
[string]: string @dagger(output)
}
[string]: string
} @dagger(output)
outputs: #up: [
op.#Load & {

View File

@ -6,13 +6,13 @@ import (
"dagger.io/os"
)
// Convert AWS credentials to Docker Registry credentials for ECR
// Convert ECR credentials to Docker Login format
#Credentials: {
// AWS Config
config: aws.#Config
// ECR credentials
username: "AWS"
// ECR registry
username: "AWS" @dagger(output)
ctr: os.#Container & {
image: aws.#CLI & {
@ -22,10 +22,11 @@ import (
command: "aws ecr get-login-password > /out"
}
// ECR registry secret
secret: {
os.#File & {
from: ctr
path: "/out"
}
}.read.data
}.read.data @dagger(output)
}

View File

@ -5,8 +5,8 @@ import (
"dagger.io/aws"
)
// RunTask implements ecs run-task for running a single container on ECS
#RunTask: {
// Task implements ecs run-task for running a single container on ECS
#Task: {
// AWS Config
config: aws.#Config

View File

@ -19,7 +19,6 @@ import (
// kubeconfig is the generated kube configuration file
kubeconfig: {
@dagger(output)
string
#up: [
@ -58,5 +57,5 @@ import (
format: "string"
},
]
}
} @dagger(output)
}

View File

@ -8,6 +8,7 @@ import (
// Returns an unused rule priority (randomized in available range)
#RandomRulePriority: {
// AWS Config
config: aws.#Config

View File

@ -8,7 +8,8 @@ import (
)
// Creates a new Database on an existing RDS Instance
#CreateDB: {
#Database: {
// AWS Config
config: aws.#Config
@ -83,7 +84,8 @@ import (
}
// Creates new user credentials on an existing RDS Instance
#CreateUser: {
#User: {
// AWS Config
config: aws.#Config
@ -107,7 +109,6 @@ import (
// Outputted username
out: {
@dagger(output)
string
#up: [
@ -188,11 +189,12 @@ import (
format: "string"
},
]
}
} @dagger(output)
}
// Fetches information on an existing RDS Instance
#Instance: {
// AWS Config
config: aws.#Config

View File

@ -7,115 +7,29 @@ import (
"dagger.io/aws"
)
// S3 Bucket upload (file or directory)
#Put: {
// S3 Bucket object(s) sync
#Object: {
// AWS Config
config: aws.#Config
// Source Artifact to upload to S3
source?: dagger.#Artifact @dagger(input)
// Source inlined as a string to upload to S3
sourceInline?: string @dagger(input)
// Target S3 URL (eg. s3://<bucket-name>/<path>/<sub-path>)
target: string @dagger(input)
// Object content type
contentType: string | *"" @dagger(input)
// Always write the object to S3
always: bool | *true @dagger(input)
// URL of the uploaded S3 object
url: {
@dagger(output)
string
#up: [
op.#Load & {
from: aws.#CLI & {
"config": config
}
},
if sourceInline != _|_ {
op.#WriteFile & {
dest: "/source"
content: sourceInline
}
},
op.#Exec & {
if always != _|_ {
"always": always
}
env: {
TARGET: target
CONTENT_TYPE: contentType
}
if sourceInline == _|_ {
mount: "/source": from: source
}
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"-c",
#"""
opts=""
op=cp
if [ -d /source ]; then
op=sync
fi
if [ -n "$CONTENT_TYPE" ]; then
opts="--content-type $CONTENT_TYPE"
fi
aws s3 $op $opts /source "$TARGET"
echo -n "$TARGET" \
| sed -E 's=^s3://([^/]*)/=https://\1.s3.amazonaws.com/=' \
> /url
"""#,
]
},
op.#Export & {
source: "/url"
format: "string"
},
]
}
}
// S3 Bucket sync
#Sync: {
// AWS Config
config: aws.#Config
// Source Artifact to upload to S3
source: dagger.#Artifact @dagger(input)
// Target S3 URL (eg. s3://<bucket-name>/<path>/<sub-path>)
target: string @dagger(input)
// Files that exist in the destination but not in the
// source are deleted during sync.
delete: *false | bool @dagger(input)
// Delete files that already exist on remote destination
delete: *false | true @dagger(input)
// Object content type
contentType: string | *"" @dagger(input)
// Always write the object to S3
always: bool | *true @dagger(input)
always: *true | false @dagger(input)
// URL of the uploaded S3 object
url: {
@dagger(output)
string
#up: [
@ -166,5 +80,5 @@ import (
format: "string"
},
]
}
} @dagger(output)
}

View File

@ -1,74 +0,0 @@
// DEPRECATED: see dagger.io/os
package file
import (
"strings"
"dagger.io/dagger"
"dagger.io/dagger/op"
)
// Create writes a new file with the given contents and permissions.
#Create: {
// Path of the file to create (must be a non-empty string)
filename: !="" @dagger(input)
// Unix file mode of the created file; defaults to 0o644
permissions: int | *0o644 @dagger(input)
// Data to write into the file
contents: string | bytes @dagger(input)
#up: [
op.#WriteFile & {dest: filename, content: contents, mode: permissions},
]
}
// Append adds contents to the end of an existing file in an artifact.
#Append: {
// Path of the file to append to (must be a non-empty string)
filename: !="" @dagger(input)
// Unix file mode of the rewritten file; defaults to 0o644
permissions: int | *0o644 @dagger(input)
// Data to append after the existing contents
contents: string | bytes @dagger(input)
// Artifact containing the file to append to
from: dagger.#Artifact @dagger(input)
// Original file contents, read via the private #read helper
orig: (#read & {path: filename, "from": from}).data @dagger(output)
#up: [
// Rewrite the whole file as original contents followed by the new contents
op.#WriteFile & {dest: filename, content: "\(orig)\(contents)", mode: permissions},
]
}
// Read returns the contents of a file from an artifact.
#Read: {
// Path of the file to read (must be a non-empty string)
filename: !="" @dagger(input)
// Artifact to read the file from
from: dagger.#Artifact @dagger(input)
// File contents, produced by the private #read helper
contents: (#read & {path: filename, "from": from}).data @dagger(output)
}
// #read is the low-level helper: loads an artifact and exports one file's data.
#read: {
// Path of the file to export (must be a non-empty string)
path: !="" @dagger(input)
// Artifact to load before exporting
from: dagger.#Artifact @dagger(input)
// Exported file contents as a string
data: {
string
#up: [
op.#Load & {"from": from},
op.#Export & {source: path},
]
} @dagger(output)
}
// Glob lists the file names in an artifact that match a shell glob pattern.
#Glob: {
// Glob pattern to match (must be a non-empty string)
glob: !="" @dagger(input)
// Matching file names, one per list entry
filenames: [...string] @dagger(input)
// Artifact to list files from
from: dagger.#Artifact @dagger(input)
// Raw `ls` output from the private _#glob helper
files: (_#glob & {"glob": glob, "from": from}).data @dagger(output)
// trim suffix because ls always ends with newline
filenames: strings.Split(strings.TrimSuffix(files, "\n"), "\n") @dagger(output)
}
// _#glob is the private helper backing #Glob: runs `ls <glob>` inside the
// loaded artifact and exports the captured listing.
_#glob: {
// Glob pattern passed to `ls`
glob: !=""
// Artifact to run the listing in
from: dagger.#Artifact
// Raw newline-separated listing
data: {
string
// Scratch file that captures the `ls` output before export
_tmppath: "/tmp/ls.out"
#up: [
op.#Load & {"from": from},
op.#Exec & {
args: ["sh", "-c", "ls \(glob) > \(_tmppath)"]
},
op.#Export & {source: _tmppath},
]
}
}

View File

@ -11,8 +11,10 @@ import (
// GCP Config
config: gcp.#Config
// GCR credentials
username: "oauth2accesstoken"
// GCR registry username
username: "oauth2accesstoken" @dagger(output)
// GCR registry secret
secret: {
string

View File

@ -1,3 +1,4 @@
// Helm package manager
package helm
import (
@ -10,6 +11,7 @@ import (
// Install a Helm chart
#Chart: {
// Helm deployment name
name: string @dagger(input)

View File

@ -10,12 +10,6 @@ setup() {
"$DAGGER" compute "$TESTDIR"/stdlib/go --input-dir TestData="$TESTDIR"/stdlib/go/testdata
}
# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: file" {
"$DAGGER" compute "$TESTDIR"/stdlib/file
}
# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: kubernetes" {

View File

@ -1,112 +0,0 @@
package f
import (
"dagger.io/dagger/op"
"dagger.io/alpine"
"dagger.io/file"
)
// TestCreate: file.#Create writes the given contents, verified by mounting
// the created file into an Alpine container and comparing with `cat`.
TestCreate: {
_content: "hello world"
write: file.#Create & {
filename: "/file.txt"
contents: _content
}
test: #up: [
op.#Load & {from: alpine.#Image},
op.#Exec & {
args: [
"sh",
"-ec",
"""
test "$(cat /file.txt)" = "hello world"
""",
]
// Mount the artifact produced by #Create so the shell can inspect it
mount: "/file.txt": {
from: write
path: "/file.txt"
}
},
]
}
// TestRead: file.#Read returns the contents of a file taken from a pinned
// Alpine image (/etc/alpine-release of version 3.10.6).
TestRead: {
read: file.#Read & {
filename: "/etc/alpine-release"
from: alpine.#Image & {version: "3.10.6"}
}
test: #up: [
op.#Load & {from: alpine.#Image},
op.#Exec & {
args: [
"sh",
"-ec",
"""
test "\(read.contents)" = "3.10.6\n"
""",
]
},
]
}
// TestRead2: round-trip — file.#Read returns exactly what file.#Create wrote.
TestRead2: {
write: file.#Create & {
_content: "hello world"
filename: "/file.txt"
contents: _content
}
// Read back from the artifact produced by the write above
read: file.#Read & {
filename: "/file.txt"
from: write
}
test: #up: [
op.#Load & {from: alpine.#Image},
op.#Exec & {
args: [
"sh",
"-ec",
"""
test "\(read.contents)" = "hello world"
""",
]
},
]
}
// TestAppend: file.#Append concatenates new contents after the original,
// and exposes the pre-append contents via its `orig` output.
TestAppend: {
content1: "hello world"
content2: "foo bar"
write: file.#Create & {
filename: "/file.txt"
contents: content1
}
append: file.#Append & {
filename: "/file.txt"
contents: content2
from: write
}
// Contents of the file before the append
orig: append.orig
read: file.#Read & {
filename: "/file.txt"
from: append
}
new: read.contents
// Unification fails unless the file is exactly the two parts concatenated
test: new & "hello worldfoo bar"
}
// TestGlob: file.#Glob matches /etc/r* inside the Alpine image and the
// result unifies with the single expected entry.
TestGlob: {
list: file.#Glob & {
glob: "/etc/r*"
from: alpine.#Image
}
test: list.filenames & ["/etc/resolv.conf"]
}