commit
1460e13bf0
@@ -29,14 +29,14 @@ dagger new
```

3. Configure the deployment with your Netlify access token.
You can create new tokens from the [Netlify dashboard](https://app.netlify.com/user/applications/personal).

```sh
dagger input text www.account.token MY_TOKEN
```

*NOTE: there is a dedicated command for encrypted secret inputs, but it is
not yet implemented. Coming soon!*
_NOTE: there is a dedicated command for encrypted secret inputs, but it is
not yet implemented. Coming soon!_

4. Deploy!

@@ -44,6 +44,53 @@ not yet implemented. Coming soon!*
dagger up
```

## Deploy a complete JAMstack app

This example shows how to deploy a complete app with a backend, a database and a frontend.

This app assumes the following infrastructure is available:

- AWS ECS Cluster
- AWS ALB with a TLS certificate
- AWS RDS Instance (MySQL or PostgreSQL)
- AWS ECR repository

1. Prepare the app configuration

Edit the file `./examples/jamstack/app_config.cue` and review all values to match your own needs.

2. Log in your local Docker daemon to ECR

_This step is temporary and will be removed soon (gh issue #301)._

```sh
AWS_REGION="<REPLACE_WITH_AWS_REGION>"
AWS_ID="<REPLACE_WITH_AWS_ACCOUNT_ID>"
aws ecr get-login-password --region "$AWS_REGION" | docker login --username AWS --password-stdin "${AWS_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com"
```

3. Deploy!

```sh
cd ./examples/jamstack
dagger new
dagger up
```

The example `app_config.cue` from the `./examples/jamstack` directory takes the source code from a remote git repository,
but you can remove this from the file and instead point to local source code:

```sh
dagger input dir backend.source ./my/local/backend/code
```

And the same mechanism applies for every single key in this file.
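For instance (the key names below come from this example's `app_config.cue`; the values are hypothetical), you could remove the corresponding keys from the file and then point the frontend at a local checkout, or set the application name, from the command line:

```sh
dagger input dir frontend.source ./my/local/frontend/code
dagger input text name my-app
```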
4. Get the App URL

```sh
dagger query url
```
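The `url` key is defined in `main.cue` (added later in this commit) with `frontendURL` and `backendURL` fields, so, assuming `dagger query` accepts a nested CUE path, you could also fetch a single value:

```sh
dagger query url.backendURL
```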
## Provision a Kubernetes cluster on AWS

@@ -79,7 +126,6 @@ dagger input text awsConfig.accessKey MY_AWS_ACCESS_KEY
dagger input text awsConfig.secretKey MY_AWS_SECRET_KEY
```


4. Deploy!

```sh
@@ -103,7 +149,6 @@ Components:
- [Amazon Cloudwatch Synthetics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries.html) for hosting the monitoring scripts
- [Amazon CloudFormation](https://aws.amazon.com/cloudformation) for infrastructure provisioning


1. Change the current directory to the example deployment plan

```sh
@@ -142,7 +187,6 @@ dagger input text email my_email@my_domain.tld
dagger up
```


## Deploy an application to your Kubernetes cluster

This example shows two different ways to deploy an application to an existing Kubernetes cluster: with and without a Helm chart. Read [the deployment plan](https://github.com/dagger/dagger/tree/main/examples/kubernetes-app)

@@ -157,7 +201,6 @@ Components:

How to run:


1. Change the current directory to the example deployment plan

```sh
examples/jamstack/app_config.cue (Normal file, 58 lines)
@@ -0,0 +1,58 @@
package main

import (
    "dagger.io/git"
)

name: "my-app"

// DISCLAIMER: all values below are fake and are provided as examples

infra: {
    awsConfig: {
        accessKey: "<REPLACE WITH AWS ACCESS KEY>"
        secretKey: "<REPLACE WITH AWS SECRET KEY>"
        region: "us-east-1"
    }
    vpcId: "vpc-020ctgv0bcde4242"
    ecrRepository: "8563296674124.dkr.ecr.us-east-1.amazonaws.com/apps"
    ecsClusterName: "bl-ecs-acme-764-ECSCluster-lRIVVg09G4HX"
    elbListenerArn: "arn:aws:elasticloadbalancing:us-east-1:8563296674124:listener/app/bl-ec-ECSAL-OSYI03K07BCO/3c2d3e78347bde5b/d02ac88cc007e24e"
    rdsAdminSecretArn: "arn:aws:secretsmanager:us-east-1:8563296674124:secret:AdminPassword-NQbBi7oU4CYS9-IGgS3B"
    rdsInstanceArn: "arn:aws:rds:us-east-1:8563296674124:cluster:bl-rds-acme-764-rdscluster-8eg3xbfjggkfdg"
    netlifyAccount: token: "<REPLACE WITH NETLIFY TOKEN>"
}

database: dbType: "mysql"

backend: {
    source: git.#Repository & {
        remote: "https://github.com/blocklayerhq/acme-clothing.git"
        ref: "HEAD"
        subdir: "./crate/code/api"
    }

    // DNS needs to be already configured to the ALB load-balancer
    // and a valid certificate needs to be configured for that listener
    hostname: "\(name).acme-764-api.microstaging.io"

    container: {
        healthCheckPath: "/health-check"
        healthCheckTimeout: 40
    }
}

frontend: {
    source: git.#Repository & {
        remote: "https://github.com/blocklayerhq/acme-clothing.git"
        ref: "HEAD"
        subdir: "./crate/code/web"
    }

    writeEnvFile: ".env"

    yarn: {
        buildDir: "public"
        script: "build:client"
    }
}
examples/jamstack/backend.cue (Normal file, 104 lines)
@@ -0,0 +1,104 @@
package main

import (
    "dagger.io/dagger"
    "dagger.io/aws/ecs"
)

// Backend configuration
backend: {
    // Source code to build this container
    source: dagger.#Artifact

    // Container environment variables
    environment: [string]: string

    // Public hostname (needs to match the master domain configured on the load balancer)
    hostname: string

    // Container configuration
    container: {
        // Desired number of running containers
        desiredCount: *1 | int
        // Time to wait for the HTTP health check to complete
        healthCheckTimeout: *10 | int
        // HTTP path to perform the health check request (HTTP GET)
        healthCheckPath: *"/" | string
        // Number of times the health check needs to fail before recycling the container
        healthCheckUnhealthyThreshold: *2 | int
        // Port used by the process inside the container
        port: *80 | int
        // Memory to allocate
        memory: *1024 | int
        // Override the default container command
        command: [...string]
        // Custom Dockerfile path
        dockerfilePath: *"" | string
        // Docker build args
        dockerBuildArgs: [string]: string
    }

    // Init container runs only once when the main container starts
    initContainer: {
        command: [...string]
        environment: [string]: string
    }
}

// Backend deployment logic
backend: {
    let slug = name

    // Docker image built from source, pushed to ECR
    image: #ECRImage & {
        source: backend.source
        repository: infra.ecrRepository
        tag: slug
        awsConfig: infra.awsConfig
        if backend.container.dockerfilePath != "" {
            dockerfilePath: backend.container.dockerfilePath
        }
        buildArgs: backend.container.dockerBuildArgs
    }

    // Creates an ECS Task + Service, deployed via CloudFormation
    app: #ECSApp & {
        awsConfig: infra.awsConfig
        "slug": slug
        clusterName: infra.ecsClusterName
        vpcId: infra.vpcId
        elbListenerArn: infra.elbListenerArn
        if infra.ecsTaskRoleArn != _|_ {
            taskRoleArn: infra.ecsTaskRoleArn
        }
        hostname: backend.hostname
        healthCheck: {
            timeout: backend.container.healthCheckTimeout
            path: backend.container.healthCheckPath
            unhealthyThresholdCount: backend.container.healthCheckUnhealthyThreshold
        }
        desiredCount: backend.container.desiredCount
        container: {
            command: backend.container.command
            environment: backend.environment
            port: backend.container.port
            memory: backend.container.memory
            "image": image.ref
        }
    }

    // Optional container to run once during the deploy (e.g. a db migration)
    if len(backend.initContainer.command) > 0 {
        initContainer: ecs.#RunTask & {
            config: infra.awsConfig
            containerName: slug
            cluster: infra.ecsClusterName
            if infra.ecsTaskRoleArn != _|_ {
                roleArn: infra.ecsTaskRoleArn
            }
            containerEnvironment: backend.initContainer.environment
            containerCommand: backend.initContainer.command
            taskArn: app.taskArn
        }
    }
}
examples/jamstack/database.cue (Normal file, 41 lines)
@@ -0,0 +1,41 @@
package main

import (
    "encoding/base64"
    "dagger.io/aws/rds"
)

database: {
    let slug = name
    dbType: "mysql" | "postgresql"

    db: rds.#CreateDB & {
        config: infra.awsConfig
        name: slug
        dbArn: infra.rdsInstanceArn
        "dbType": dbType
        secretArn: infra.rdsAdminSecretArn
    }

    user: rds.#CreateUser & {
        config: infra.awsConfig
        dbArn: infra.rdsInstanceArn
        "dbType": dbType
        secretArn: infra.rdsAdminSecretArn
        username: slug
        // FIXME: make it secure (generate infra side?)
        password: base64.Encode(null, "pwd-\(slug)")
        grantDatabase: db.out
    }

    instance: rds.#Instance & {
        config: infra.awsConfig
        dbArn: infra.rdsInstanceArn
    }

    hostname: instance.hostname
    port: instance.port
    dbName: db.out
    username: user.out
    password: user.password
}
examples/jamstack/ecr_image.cue (Normal file, 52 lines)
@@ -0,0 +1,52 @@
package main

import (
    "dagger.io/dagger"
    "dagger.io/dagger/op"
    "dagger.io/aws"
    "dagger.io/aws/ecr"
)

// Build an image and push it to ECR
#ECRImage: {
    source: dagger.#Artifact
    // Path of the Dockerfile
    dockerfilePath?: string
    repository: string
    tag: string
    awsConfig: aws.#Config
    buildArgs: [string]: string

    pushTarget: "\(repository):\(tag)"

    // Build the image
    buildImage: op.#DockerBuild & {
        context: source
        if dockerfilePath != _|_ {
            "dockerfilePath": dockerfilePath
        }
        buildArg: buildArgs
    }

    // Use these credentials to push
    ecrCreds: ecr.#Credentials & {
        config: awsConfig
        target: pushTarget
    }

    push: #up: [
        op.#DockerBuild & {
            context: source
            if dockerfilePath != _|_ {
                "dockerfilePath": dockerfilePath
            }
            buildArg: buildArgs
        },
        op.#PushContainer & {
            ref: pushTarget
        },
    ]

    // FIXME: ref does not include the sha256: https://github.com/dagger/dagger/issues/303
    ref: pushTarget
}
examples/jamstack/ecs.cue (Normal file, 152 lines)
@@ -0,0 +1,152 @@
package main

import (
    "encoding/json"

    "dagger.io/aws"
    "dagger.io/aws/elb"
    "dagger.io/aws/cloudformation"
)

#ECSApp: {
    awsConfig: aws.#Config
    slug: string
    clusterName: string
    vpcId: string
    elbListenerArn: string
    taskRoleArn: *"" | string
    hostname: string
    healthCheck: {
        timeout: *10 | int
        path: *"/" | string
        unhealthyThresholdCount: *2 | int
    }
    desiredCount: int
    container: {
        command: [...string]
        environment: [string]: string
        port: *80 | int
        cpu: *256 | int
        memory: *1024 | int
        image: string
    }

    taskArn: cfnStack.outputs.TaskArn

    elbRulePriority: elb.#RandomRulePriority & {
        config: awsConfig
        listenerArn: elbListenerArn
        vhost: hostname
    }

    cfnStack: cloudformation.#Stack & {
        config: awsConfig
        stackName: slug
        onFailure: "DO_NOTHING"
        parameters: {
            ELBRulePriority: elbRulePriority.out
            ImageRef: container.image
            ELBListenerArn: elbListenerArn
        }
        source: json.Marshal(template)
    }

    template: {
        AWSTemplateFormatVersion: "2010-09-09"
        Description: "Dagger deployed app"
        Parameters: {
            ELBRulePriority: Type: "Number"
            ImageRef: Type: "String"
            ELBListenerArn: Type: "String"
        }
        Resources: {
            ECSTaskDefinition: {
                Type: "AWS::ECS::TaskDefinition"
                Properties: {
                    Cpu: "\(container.cpu)"
                    Memory: "\(container.memory)"
                    if taskRoleArn != "" {
                        TaskRoleArn: taskRoleArn
                    }
                    NetworkMode: "bridge"
                    ContainerDefinitions: [{
                        if len(container.command) > 0 {
                            Command: container.command
                        }
                        Name: slug
                        Image: Ref: "ImageRef"
                        Essential: true
                        Environment: [ for k, v in container.environment {
                            Name: k
                            Value: v
                        }]
                        PortMappings: [{
                            ContainerPort: container.port
                        }]
                        StopTimeout: 5
                        LogConfiguration: {
                            LogDriver: "awslogs"
                            Options: {
                                "awslogs-group": "bl/provider/ecs/\(clusterName)"
                                "awslogs-region": Ref: "AWS::Region"
                                "awslogs-create-group": "true"
                                "awslogs-stream-prefix": slug
                            }
                        }
                    }]
                }
            }
            ECSListenerRule: {
                Type: "AWS::ElasticLoadBalancingV2::ListenerRule"
                Properties: {
                    ListenerArn: Ref: "ELBListenerArn"
                    Priority: Ref: "ELBRulePriority"
                    Conditions: [{
                        Field: "host-header"
                        Values: [hostname]}]
                    Actions: [{
                        Type: "forward"
                        TargetGroupArn: Ref: "ECSTargetGroup"
                    }]}}
            ECSTargetGroup: {
                Type: "AWS::ElasticLoadBalancingV2::TargetGroup"
                Properties: {
                    Protocol: "HTTP"
                    VpcId: vpcId
                    Port: 80
                    HealthCheckPath: healthCheck.path
                    UnhealthyThresholdCount: healthCheck.unhealthyThresholdCount
                    HealthCheckTimeoutSeconds: healthCheck.timeout
                    HealthCheckIntervalSeconds: healthCheck.timeout + 1
                    HealthyThresholdCount: 3
                    TargetGroupAttributes: [{
                        Value: "10"
                        Key: "deregistration_delay.timeout_seconds"
                    }]}}
            ECSService: {
                Type: "AWS::ECS::Service"
                Properties: {
                    Cluster: clusterName
                    DesiredCount: desiredCount
                    LaunchType: "EC2"
                    LoadBalancers: [{
                        ContainerPort: container.port
                        TargetGroupArn: Ref: "ECSTargetGroup"
                        ContainerName: slug
                    }]
                    ServiceName: slug
                    TaskDefinition: Ref: "ECSTaskDefinition"
                    DeploymentConfiguration: {
                        DeploymentCircuitBreaker: {
                            Enable: true
                            Rollback: true
                        }
                        MaximumPercent: 200
                        MinimumHealthyPercent: 100
                    }}
                DependsOn: "ECSListenerRule"
            }
        }
        Outputs: TaskArn: Value: Ref: "ECSTaskDefinition"
    }
}
examples/jamstack/frontend.cue (Normal file, 52 lines)
@@ -0,0 +1,52 @@
package main

import (
    "dagger.io/dagger"
    "dagger.io/netlify"
    "dagger.io/js/react"
)

frontend: {
    // Source code to build the app
    source: dagger.#Artifact

    writeEnvFile?: string

    // Yarn Build
    yarn: {
        // Run this yarn script
        script: string | *"build"

        // Read build output from this directory
        // (path must be relative to working directory).
        buildDir: string | *"build"
    }

    // Build environment variables
    environment: [string]: string
    environment: NODE_ENV: string | *"production"
    environment: APP_URL: "https://\(name).netlify.app/"
}

frontend: {
    app: react.#App & {
        source: frontend.source
        env: frontend.environment

        if frontend.writeEnvFile != _|_ {
            writeEnvFile: frontend.writeEnvFile
        }

        yarn: {
            script: frontend.yarn.script
            buildDir: frontend.yarn.buildDir
        }
    }

    // Host the application with Netlify
    site: netlify.#Site & {
        "name": name
        account: infra.netlifyAccount
        contents: app.build
    }
}
examples/jamstack/infra.cue (Normal file, 35 lines)
@@ -0,0 +1,35 @@
package main

import (
    "dagger.io/aws"
    "dagger.io/netlify"
)

infra: {
    // AWS auth & default region
    awsConfig: aws.#Config

    // VPC Id
    vpcId: string

    // ECR Image repository
    ecrRepository: string

    // ECS cluster name
    ecsClusterName: string

    // Execution Role ARN used for all tasks running on the cluster
    ecsTaskRoleArn?: string

    // ELB listener ARN
    elbListenerArn: string

    // Secret ARN for the admin password of the RDS Instance
    rdsAdminSecretArn: string

    // ARN of the RDS Instance
    rdsInstanceArn: string

    // Netlify credentials
    netlifyAccount: netlify.#Account
}
examples/jamstack/main.cue (Normal file, 22 lines)
@@ -0,0 +1,22 @@
package main

// Name of the application
name: string & =~"[a-z0-9-]+"

// Inject db info in the container environment
backend: environment: {
    DB_USERNAME: database.username
    DB_HOSTNAME: database.hostname
    DB_PASSWORD: database.password
    DB_DBNAME: database.dbName
    DB_PORT: "\(database.port)"
    DB_TYPE: database.dbType
}

// Configure the frontend with the API URL
frontend: environment: APP_URL_API: url.backendURL

url: {
    frontendURL: frontend.site.url
    backendURL: "https://\(backend.hostname)/"
}
@@ -18,14 +18,94 @@ import (

// Re-usable aws-cli component
#CLI: {
    package: [string]: string

    #up: [
        op.#Load & {
            from: alpine.#Image & {
                package: bash: "=5.1.0-r0"
                package: jq: "=1.6-r1"
                package: curl: "=7.74.0-r1"
                package: "aws-cli": "=1.18.177-r0"
                "package": package
                "package": bash: "=5.1.0-r0"
                "package": jq: "=1.6-r1"
                "package": curl: "=7.74.0-r1"
                "package": "aws-cli": "=1.18.177-r0"
            }
        },
    ]
}

// Helper for writing scripts based on AWS CLI
#Script: {
    // AWS config
    config: #Config

    // Script code (bash)
    code: string

    // Extra pkgs to install
    package: [string]: string

    // Files to mount
    files: [string]: string

    // Env variables
    env: [string]: string

    // Export file
    export: string

    // Always execute the script?
    always?: bool

    out: {
        string

        #up: [
            op.#Load & {
                from: #CLI & {
                    "package": package
                }
            },
            op.#Mkdir & {
                path: "/inputs"
            },
            for k, v in files {
                op.#WriteFile & {
                    dest: k
                    content: v
                }
            },
            op.#WriteFile & {
                dest: "/entrypoint.sh"
                content: code
            },
            op.#Exec & {
                if always != _|_ {
                    "always": always
                }
                args: [
                    "/bin/bash",
                    "--noprofile",
                    "--norc",
                    "-eo",
                    "pipefail",
                    "/entrypoint.sh",
                ]
                "env": env
                "env": {
                    AWS_CONFIG_FILE: "/cache/aws/config"
                    AWS_ACCESS_KEY_ID: config.accessKey
                    AWS_SECRET_ACCESS_KEY: config.secretKey
                    AWS_DEFAULT_REGION: config.region
                    AWS_REGION: config.region
                    AWS_DEFAULT_OUTPUT: "json"
                    AWS_PAGER: ""
                }
                mount: "/cache/aws": "cache"
            },
            op.#Export & {
                source: export
                format: "string"
            },
        ]
    }
}
@@ -60,6 +60,7 @@ import (
            }
        },
        op.#Exec & {
            always: true
            args: [
                "/bin/bash",
                "--noprofile",
stdlib/aws/ecr/ecr.cue (Normal file, 36 lines)
@@ -0,0 +1,36 @@
package ecr

import (
    "dagger.io/dagger"
    "dagger.io/aws"
)

// Credentials retriever for ECR
#Credentials: {

    // AWS Config
    config: aws.#Config

    // Target is the ECR image
    target: string

    out: dagger.#Secret

    // ECR credentials
    credentials: dagger.#RegistryCredentials & {
        username: "AWS"
        secret: out
    }

    aws.#Script & {
        "config": config
        export: "/out"
        code: """
            aws ecr get-login-password > /out
            """
    }

    // Authentication for ECR Registries
    auth: dagger.#RegistryAuth
    auth: "\(target)": credentials
}
stdlib/aws/ecs/run-task.cue (Normal file, 67 lines)
@@ -0,0 +1,67 @@
package ecs

import (
    "dagger.io/aws"
)

// RunTask implements ecs run-task
#RunTask: {

    // AWS Config
    config: aws.#Config

    // ECS cluster name
    cluster: string

    // ARN of the task to run
    taskArn: string

    // Environment variables of the task
    containerEnvironment: [string]: string

    // Container name
    containerName: string

    // Container command to run
    containerCommand: [...string]

    // Task role ARN
    roleArn: string | *""

    containerOverrides: {
        containerOverrides: [{
            name: containerName
            if len(containerCommand) > 0 {
                command: containerCommand
            }
            if len(containerEnvironment) > 0 {
                environment: [ for k, v in containerEnvironment {
                    name: k
                    value: v
                }]
            }
        }]
        if roleArn != "" {
            taskRoleArn: roleArn
        }
    }

    aws.#Script & {
        "config": config
        export: "/out"
        files: {
            "/inputs/cluster": cluster
            "/inputs/task_arn": taskArn
            "/inputs/container_overrides": containerOverrides
        }
        code: #"""
            cat /inputs/container_overrides | jq

            aws ecs run-task \
                --cluster "$(cat /inputs/cluster)" \
                --task-definition "$(cat /inputs/task_arn)" \
                --overrides "$(cat /inputs/container_overrides)" \
                > /out
            """#
    }
}
stdlib/aws/elb/elb.cue (Normal file, 71 lines)
@@ -0,0 +1,71 @@
package elb

import (
    "dagger.io/aws"
)

// Returns a non-taken rule priority (randomized)
#RandomRulePriority: {
    // AWS Config
    config: aws.#Config

    // ListenerArn
    listenerArn: string

    // Optional vhost for reusing priorities
    vhost?: string

    // exported priority
    out: string

    aws.#Script & {
        // FIXME: we should not rely on the cache for this but it's being
        // executed several times if enabled: https://github.com/dagger/dagger/issues/42
        // always: true

        files: {
            "/inputs/listenerArn": listenerArn
            if vhost != _|_ {
                "/inputs/vhost": vhost
            }
        }

        export: "/priority"

        // FIXME: the code below can end up not finding an available priority.
        // Better to exclude the existing allocated priorities from the random sequence.
        code: #"""
            if [ -s /inputs/vhost ]; then
                # We passed a vhost as input, try to recycle priority from previously allocated vhost
                vhost="$(cat /inputs/vhost)"

                priority=$(aws elbv2 describe-rules \
                    --listener-arn "$(cat /inputs/listenerArn)" | \
                    jq -r --arg vhost "$vhost" '.Rules[] | select(.Conditions[].HostHeaderConfig.Values[] == $vhost) | .Priority')

                if [ -n "${priority}" ]; then
                    echo -n "${priority}" > /priority
                    exit 0
                fi
            fi

            # Grab a random priority from 1-50k and check if it is available, retry 10 times if none available
            priority=0
            for i in {1..10}
            do
                p=$(shuf -i 1-50000 -n 1)
                # Find the next priority available that we can allocate
                aws elbv2 describe-rules \
                    --listener-arn "$(cat /inputs/listenerArn)" \
                    | jq -e "select(.Rules[].Priority == \"${p}\") | true" && continue
                priority="${p}"
                break
            done
            if [ "${priority}" -lt 1 ]; then
                echo "Error: cannot determine a Rule priority"
                exit 1
            fi
            echo -n "${priority}" > /priority
            """#
    }
}
stdlib/aws/rds/rds.cue (Normal file, 196 lines)
@@ -0,0 +1,196 @@
package rds

import (
    "encoding/json"
    "dagger.io/dagger"
    "dagger.io/aws"
)

#CreateDB: {
    // AWS Config
    config: aws.#Config

    // DB name
    name: string

    // ARN of the database instance
    dbArn: string

    // ARN of the database secret (for connecting via rds api)
    secretArn: string

    dbType: "mysql" | "postgres"

    // Name of the DB created
    out: string

    aws.#Script & {
        "config": config

        files: {
            "/inputs/name": name
            "/inputs/db_arn": dbArn
            "/inputs/secret_arn": secretArn
            "/inputs/db_type": dbType
        }

        export: "/db_created"

        code: #"""
            set +o pipefail

            dbType="$(cat /inputs/db_type)"
            echo "dbType: $dbType"

            sql="CREATE DATABASE \`$(cat /inputs/name)\`"
            if [ "$dbType" = postgres ]; then
                sql="CREATE DATABASE \"$(cat /inputs/name)\""
            fi

            aws rds-data execute-statement \
                --resource-arn "$(cat /inputs/db_arn)" \
                --secret-arn "$(cat /inputs/secret_arn)" \
                --sql "$sql" \
                --database "$dbType" \
                --no-include-result-metadata \
                |& tee /tmp/out
            exit_code=${PIPESTATUS[0]}
            if [ $exit_code -ne 0 ]; then
                cat /tmp/out
                grep -q "database exists\|already exists" /tmp/out
                [ $? -ne 0 ] && exit $exit_code
            fi
            cp /inputs/name /db_created
            """#
    }
}

#CreateUser: {
    // AWS Config
    config: aws.#Config

    // Username
    username: dagger.#Secret

    // Password
    password: dagger.#Secret

    // ARN of the database instance
    dbArn: string

    // ARN of the database secret (for connecting via rds api)
    secretArn: string

    grantDatabase: string | *""

    dbType: "mysql" | "postgres"

    // Output username
    out: string

    aws.#Script & {
        "config": config

        files: {
            "/inputs/username": username
            "/inputs/password": password
            "/inputs/db_arn": dbArn
            "/inputs/secret_arn": secretArn
            "/inputs/grant_database": grantDatabase
            "/inputs/db_type": dbType
        }

        export: "/username"

        code: #"""
            set +o pipefail

            dbType="$(cat /inputs/db_type)"
            echo "dbType: $dbType"

            sql="CREATE USER '$(cat /inputs/username)'@'%' IDENTIFIED BY '$(cat /inputs/password)'"
            if [ "$dbType" = postgres ]; then
                sql="CREATE USER \"$(cat /inputs/username)\" WITH PASSWORD '$(cat /inputs/password)'"
            fi

            aws rds-data execute-statement \
                --resource-arn "$(cat /inputs/db_arn)" \
                --secret-arn "$(cat /inputs/secret_arn)" \
                --sql "$sql" \
                --database "$dbType" \
                --no-include-result-metadata \
                |& tee tmp/out
            exit_code=${PIPESTATUS[0]}
            if [ $exit_code -ne 0 ]; then
                cat tmp/out
                grep -q "Operation CREATE USER failed for\|ERROR" tmp/out
                [ $? -ne 0 ] && exit $exit_code
            fi
            cp /inputs/username /username

            sql="SET PASSWORD FOR '$(cat /inputs/username)'@'%' = PASSWORD('$(cat /inputs/password)')"
            if [ "$dbType" = postgres ]; then
                sql="ALTER ROLE \"$(cat /inputs/username)\" WITH PASSWORD '$(cat /inputs/password)'"
            fi

            aws rds-data execute-statement \
                --resource-arn "$(cat /inputs/db_arn)" \
                --secret-arn "$(cat /inputs/secret_arn)" \
                --sql "$sql" \
                --database "$dbType" \
                --no-include-result-metadata

            sql="GRANT ALL ON \`$(cat /inputs/grant_database)\`.* to '$(cat /inputs/username)'@'%'"
            if [ "$dbType" = postgres ]; then
                sql="GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO \"$(cat /inputs/username)\"; GRANT ALL PRIVILEGES ON DATABASE \"$(cat /inputs/grant_database)\" to \"$(cat /inputs/username)\"; GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO \"$(cat /inputs/username)\"; ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON TABLES TO \"$(cat /inputs/username)\"; ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON SEQUENCES TO \"$(cat /inputs/username)\"; GRANT USAGE ON SCHEMA public TO \"$(cat /inputs/username)\";"
            fi

            if [ -s /inputs/grant_database ]; then
                aws rds-data execute-statement \
                    --resource-arn "$(cat /inputs/db_arn)" \
                    --secret-arn "$(cat /inputs/secret_arn)" \
                    --sql "$sql" \
                    --database "$dbType" \
                    --no-include-result-metadata
            fi
            """#
    }
}

#Instance: {
    // AWS Config
    config: aws.#Config

    // ARN of the database instance
    dbArn: string

    // DB hostname
    hostname: info.hostname

    // DB port
    port: info.port

    info: {
        hostname: string
        port: int
    }

    info: json.Unmarshal(out)
    out: string

    aws.#Script & {
        "config": config

        files: "/inputs/db_arn": dbArn

        export: "/out"

        code: #"""
            db_arn="$(cat /inputs/db_arn)"
            data=$(aws rds describe-db-clusters --filters "Name=db-cluster-id,Values=$db_arn" )
            echo "$data" | jq -r '.DBClusters[].Endpoint' > /tmp/out
            echo "$data" | jq -r '.DBClusters[].Port' >> /tmp/out
            cat /tmp/out | jq -sR 'split("\n") | {hostname: .[0], port: (.[1] | tonumber)}' > /out
            """#
    }
}
@@ -9,11 +9,17 @@ import (

    remote: string
    ref: string
    subdir: string | *""

    #up: [
        op.#FetchGit & {
            "remote": remote
            "ref": ref
        },
        if subdir != "" {
            op.#Subdir & {
                dir: subdir
            }
        },
    ]
}
@@ -1,9 +1,11 @@
package react

import (
"strings"

"dagger.io/dagger"
"dagger.io/dagger/op"
"dagger.io/alpine"
"dagger.io/docker"
)

// A ReactJS application
@@ -11,6 +13,13 @@ import (
// Application source code
source: dagger.#Artifact

// Environment variables
env: [string]: string

// Write the contents of `environment` to this file,
// in the "envfile" format.
writeEnvFile: string | *""

// Yarn-specific settings
yarn: {
// Read build output from this directory
@@ -20,47 +29,49 @@ import (
// Run this yarn script
script: string | *"build"
}
setup: [
"mkdir -p /cache/yarn",
]

// Build the application in a container, using yarn
build: docker.#Container & {
image: alpine.#Image & {
build: #up: [
op.#Load & {
from: alpine.#Image & {
package: bash: "=~5.1"
package: yarn: "=~1.22"
}
dir: "/src"
command: """
yarn install --production false
yarn run "$YARN_BUILD_SCRIPT"
mv "$YARN_BUILD_DIRECTORY" \(outputDir)
"""
volume: {
src: {
from: source
dest: "/src"
}
// yarnCache: {
//  type: "cache"
//  dest: "/cache/yarn"
// }
}
outputDir: "/build"
shell: {
path: "/bin/bash"
},
op.#Exec & {
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo", "pipefail",
"-eo",
"pipefail",
"-c",
"""
[ -n "$ENVFILE_NAME" ] && echo "$ENVFILE" > "$ENVFILE_NAME"
yarn install --production false
yarn run "$YARN_BUILD_SCRIPT"
mv "$YARN_BUILD_DIRECTORY" /build
""",
]
if env != _|_ {
"env": env
}
env: {
"env": {
YARN_BUILD_SCRIPT: yarn.script
YARN_CACHE_FOLDER: "/cache/yarn"
YARN_BUILD_DIRECTORY: yarn.buildDir
if writeEnvFile != "" {
ENVFILE_NAME: writeEnvFile
ENVFILE: strings.Join([ for k, v in env {"\(k)=\(v)"}], "\n")
}
}

dir: "/src"
mount: {
"/src": from: source
"/cache/yarn": "cache"
}
},
op.#Subdir & {
dir: "/build"
},
]
}
@@ -3,7 +3,7 @@ package netlify
import (
"dagger.io/dagger"
"dagger.io/alpine"
"dagger.io/docker"
"dagger.io/dagger/op"
)

// A Netlify account
@@ -42,34 +42,28 @@ import (
// Logs URL for this deployment
logsUrl: string

// Deployment container
#deploy: docker.#Container & {
image: alpine.#Image & {
package: {
bash: "=~5.1"
jq: "=~1.6"
curl: "=~7.74"
yarn: "=~1.22"
#up: [
op.#Load & {
from: alpine.#Image & {
package: bash: "=~5.1"
package: jq: "=~1.6"
package: curl: "=~7.74"
package: yarn: "=~1.22"
}
}
setup: [
"yarn global add netlify-cli@2.47.0",
]
shell: {
path: "/bin/bash"
},
op.#Exec & {
args: ["yarn", "global", "add", "netlify-cli@2.47.0"]
},
op.#Exec & {
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"-c",
#code,
]
}
dir: "/src"
volume: "contents": {
dest: "/src"
from: contents
}
env: {
NETLIFY_SITE_NAME: name
if (create) {
@@ -81,13 +75,12 @@ import (
NETLIFY_ACCOUNT: account.name
NETLIFY_AUTH_TOKEN: account.token
}
export: {
dir: "/src"
mount: "/src": from: contents
},
op.#Export & {
source: "/output.json"
format: "json"
}
}

// FIXME: this is a hack to use docker.#Container while exporting
// values.
#up: #deploy.#up
},
]
}
@@ -1,6 +1,6 @@
package netlify

#Site: #deploy: command: #"""
#code: #"""
create_site() {
url="https://api.netlify.com/api/v1/${NETLIFY_ACCOUNT:-}/sites"