Merge branch 'main' into cloudrun-support
This commit is contained in:
commit
1b6187181d
15
.dagger/env/test-core/plan/test-core.cue
vendored
Normal file
15
.dagger/env/test-core/plan/test-core.cue
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
package testcore
|
||||||
|
|
||||||
|
import (
|
||||||
|
"dagger.io/dagger"
|
||||||
|
)
|
||||||
|
|
||||||
|
name: dagger.#Input & {
|
||||||
|
string | *"world"
|
||||||
|
}
|
||||||
|
|
||||||
|
message: dagger.#Output & "Hello, \(name)!"
|
||||||
|
|
||||||
|
dir: dagger.#Input & dagger.#Artifact
|
||||||
|
|
||||||
|
samedir: dagger.#Output & dir
|
27
.dagger/env/test-core/values.yaml
vendored
Normal file
27
.dagger/env/test-core/values.yaml
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
plan:
|
||||||
|
module: .dagger/env/test-core/plan
|
||||||
|
name: test-core
|
||||||
|
inputs:
|
||||||
|
dir:
|
||||||
|
dir:
|
||||||
|
path: ./tests
|
||||||
|
sops:
|
||||||
|
kms: []
|
||||||
|
gcp_kms: []
|
||||||
|
azure_kv: []
|
||||||
|
hc_vault: []
|
||||||
|
age:
|
||||||
|
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBhOW1rUy9vNFBDaXRDUHJu
|
||||||
|
ZVZYM1FucVorRDdUcmRaYmg3eTlTNzhhYWdjCndWZWxhZnhCZG4xZU9JQ1VyMXdR
|
||||||
|
OHY0TS81bk9FL2JuUEhuTmcxa29ORGcKLS0tIGxJUzNrZmRBNHZGRFY2Z01QK2JP
|
||||||
|
MlM1Ukdqbi9SQ0pqTi9FZ3MxN2E2QmsKHwd7P6KHPVdynOoto1jf3G4+5+vf87wU
|
||||||
|
HX1KD7Od5wRdBwn7r3OS8mdvuNIYpJDUb5YDrfjQypt020ohLocNiA==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2021-06-17T14:07:53Z"
|
||||||
|
mac: ENC[AES256_GCM,data:afYut7wdvsJQgxlBg6NEV6DWk8vPi81mZgQAuWb4oe4WJeI1T9cYdtjOHPlmIpjqb86VQHJ29YTzektei2+k+VBawQxcsvefK7X1QboJTfMKLfsiug4qzNWjc7JZDvTb6dsDFM1U96gjSoAIVwdLMnucbu3681Fd7qSQgqNS61Q=,iv:ZQDHzXp0RJcUI4RtOVjdepV8zTa2kIHQhAltLkudDck=,tag:AnSFi1mKrEuXSqE4R+g7dw==,type:str]
|
||||||
|
pgp: []
|
||||||
|
encrypted_suffix: secret
|
||||||
|
version: 3.7.1
|
8
.github/workflows/docs.yml
vendored
8
.github/workflows/docs.yml
vendored
@ -14,10 +14,4 @@ jobs:
|
|||||||
deploy:
|
deploy:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- run: curl -X POST "https://api.netlify.com/build_hooks/${{ secrets.DOCS_NETLIFY_BUILD_HOOK }}"
|
||||||
with:
|
|
||||||
fetch-depth: "0"
|
|
||||||
- uses: dagger/dagger-action@v1
|
|
||||||
with:
|
|
||||||
age-key: ${{ secrets.DAGGER_AGE_KEY }}
|
|
||||||
args: up -e docs
|
|
||||||
|
@ -140,10 +140,10 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Source code of the sample application
|
// Source code of the sample application
|
||||||
src: dagger.#Artifact @dagger(input)
|
src: dagger.#Artifact & dagger.#Input
|
||||||
```
|
```
|
||||||
|
|
||||||
This defines a component at the key `src`, of type `dagger.#Artifact`, annotated as an user input.
|
This defines a component at the key `src`, and specifies that it is both an artifact and an input.
|
||||||
|
|
||||||
### Component 2: yarn package
|
### Component 2: yarn package
|
||||||
|
|
||||||
|
@ -329,8 +329,10 @@ func (p *Pipeline) Local(ctx context.Context, op *compiler.Value, st llb.State)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return st, err
|
return st, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Excludes .dagger directory by default
|
||||||
|
excludePatterns := []string{"**/.dagger/"}
|
||||||
if len(excludes) > 0 {
|
if len(excludes) > 0 {
|
||||||
excludePatterns := []string{}
|
|
||||||
for _, i := range excludes {
|
for _, i := range excludes {
|
||||||
pattern, err := i.String()
|
pattern, err := i.String()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -338,13 +340,23 @@ func (p *Pipeline) Local(ctx context.Context, op *compiler.Value, st llb.State)
|
|||||||
}
|
}
|
||||||
excludePatterns = append(excludePatterns, pattern)
|
excludePatterns = append(excludePatterns, pattern)
|
||||||
}
|
}
|
||||||
|
|
||||||
opts = append(opts, llb.ExcludePatterns(excludePatterns))
|
|
||||||
}
|
}
|
||||||
|
opts = append(opts, llb.ExcludePatterns(excludePatterns))
|
||||||
|
|
||||||
return llb.Local(
|
// FIXME: Remove the `Copy` and use `Local` directly.
|
||||||
|
//
|
||||||
|
// Copy'ing is a costly operation which should be unnecessary.
|
||||||
|
// However, using llb.Local directly breaks caching sometimes for unknown reasons.
|
||||||
|
return st.File(
|
||||||
|
llb.Copy(
|
||||||
|
llb.Local(
|
||||||
dir,
|
dir,
|
||||||
opts...,
|
opts...,
|
||||||
|
),
|
||||||
|
"/",
|
||||||
|
"/",
|
||||||
|
),
|
||||||
|
llb.WithCustomName(p.vertexNamef("Local %s [copy]", dir)),
|
||||||
), nil
|
), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,112 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"dagger.io/dagger"
|
|
||||||
"dagger.io/aws/ecs"
|
|
||||||
"dagger.io/git"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Backend configuration
|
|
||||||
backend: {
|
|
||||||
|
|
||||||
// Source code to build this container
|
|
||||||
source: git.#Repository | dagger.#Artifact @dagger(input)
|
|
||||||
|
|
||||||
// Container environment variables
|
|
||||||
environment: {
|
|
||||||
[string]: string
|
|
||||||
} @dagger(input)
|
|
||||||
|
|
||||||
// Public hostname (need to match the master domain configures on the loadbalancer)
|
|
||||||
hostname: string @dagger(input)
|
|
||||||
|
|
||||||
// Container configuration
|
|
||||||
container: {
|
|
||||||
// Desired number of running containers
|
|
||||||
desiredCount: *1 | int @dagger(input)
|
|
||||||
// Time to wait for the HTTP timeout to complete
|
|
||||||
healthCheckTimeout: *10 | int @dagger(input)
|
|
||||||
// HTTP Path to perform the healthcheck request (HTTP Get)
|
|
||||||
healthCheckPath: *"/" | string @dagger(input)
|
|
||||||
// Number of times the health check needs to fail before recycling the container
|
|
||||||
healthCheckUnhealthyThreshold: *2 | int @dagger(input)
|
|
||||||
// Port used by the process inside the container
|
|
||||||
port: *80 | int @dagger(input)
|
|
||||||
// Memory to allocate
|
|
||||||
memory: *1024 | int @dagger(input)
|
|
||||||
// Override the default container command
|
|
||||||
command: [...string] @dagger(input)
|
|
||||||
// Custom dockerfile path
|
|
||||||
dockerfilePath: *"" | string @dagger(input)
|
|
||||||
// docker build args
|
|
||||||
dockerBuildArgs: {
|
|
||||||
[string]: string
|
|
||||||
} @dagger(input)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init container runs only once when the main container starts
|
|
||||||
initContainer: {
|
|
||||||
command: [...string] @dagger(input)
|
|
||||||
environment: {
|
|
||||||
[string]: string
|
|
||||||
} @dagger(input)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Backend deployment logic
|
|
||||||
backend: {
|
|
||||||
let slug = name
|
|
||||||
|
|
||||||
// Docker image built from source, pushed to ECR
|
|
||||||
image: #ECRImage & {
|
|
||||||
source: backend.source
|
|
||||||
repository: infra.ecrRepository
|
|
||||||
tag: slug
|
|
||||||
awsConfig: infra.awsConfig
|
|
||||||
if backend.container.dockerfilePath != "" {
|
|
||||||
dockerfilePath: backend.container.dockerfilePath
|
|
||||||
}
|
|
||||||
buildArgs: backend.container.dockerBuildArgs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creates an ECS Task + Service + deploy via Cloudformation
|
|
||||||
app: #ECSApp & {
|
|
||||||
awsConfig: infra.awsConfig
|
|
||||||
"slug": slug
|
|
||||||
clusterName: infra.ecsClusterName
|
|
||||||
vpcId: infra.vpcId
|
|
||||||
elbListenerArn: infra.elbListenerArn
|
|
||||||
if infra.ecsTaskRoleArn != _|_ {
|
|
||||||
taskRoleArn: infra.ecsTaskRoleArn
|
|
||||||
}
|
|
||||||
hostname: backend.hostname
|
|
||||||
healthCheck: {
|
|
||||||
timeout: backend.container.healthCheckTimeout
|
|
||||||
path: backend.container.healthCheckPath
|
|
||||||
unhealthyThresholdCount: backend.container.healthCheckUnhealthyThreshold
|
|
||||||
}
|
|
||||||
desiredCount: backend.container.desiredCount
|
|
||||||
container: {
|
|
||||||
command: backend.container.command
|
|
||||||
environment: backend.environment
|
|
||||||
port: backend.container.port
|
|
||||||
memory: backend.container.memory
|
|
||||||
"image": image.ref
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Optional container to run one-time during the deploy (eg. db migration)
|
|
||||||
if len(backend.initContainer.command) > 0 {
|
|
||||||
initContainer: ecs.#RunTask & {
|
|
||||||
config: infra.awsConfig
|
|
||||||
containerName: slug
|
|
||||||
cluster: infra.ecsClusterName
|
|
||||||
if infra.ecsTaskRoleArn != _|_ {
|
|
||||||
roleArn: infra.ecsTaskRoleArn
|
|
||||||
}
|
|
||||||
containerEnvironment: backend.initContainer.environment
|
|
||||||
containerCommand: backend.initContainer.command
|
|
||||||
taskArn: app.taskArn
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,41 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/base64"
|
|
||||||
"dagger.io/aws/rds"
|
|
||||||
)
|
|
||||||
|
|
||||||
database: {
|
|
||||||
let slug = name
|
|
||||||
dbType: "mysql" | "postgresql" @dagger(input)
|
|
||||||
|
|
||||||
db: rds.#Database & {
|
|
||||||
config: infra.awsConfig
|
|
||||||
name: slug
|
|
||||||
dbArn: infra.rdsInstanceArn
|
|
||||||
"dbType": dbType
|
|
||||||
secretArn: infra.rdsAdminSecretArn
|
|
||||||
}
|
|
||||||
|
|
||||||
user: rds.#User & {
|
|
||||||
config: infra.awsConfig
|
|
||||||
dbArn: infra.rdsInstanceArn
|
|
||||||
"dbType": dbType
|
|
||||||
secretArn: infra.rdsAdminSecretArn
|
|
||||||
username: slug
|
|
||||||
// FIXME: make it secure (generate infra side?)
|
|
||||||
password: base64.Encode(null, "pwd-\(slug)")
|
|
||||||
grantDatabase: db.out
|
|
||||||
}
|
|
||||||
|
|
||||||
instance: rds.#Instance & {
|
|
||||||
config: infra.awsConfig
|
|
||||||
dbArn: infra.rdsInstanceArn
|
|
||||||
}
|
|
||||||
|
|
||||||
hostname: instance.hostname
|
|
||||||
port: instance.port
|
|
||||||
dbName: db.out
|
|
||||||
username: user.out
|
|
||||||
password: user.password
|
|
||||||
}
|
|
@ -1,53 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"dagger.io/dagger"
|
|
||||||
"dagger.io/dagger/op"
|
|
||||||
"dagger.io/aws"
|
|
||||||
"dagger.io/aws/ecr"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Build an image and push it to ECR
|
|
||||||
#ECRImage: {
|
|
||||||
source: dagger.#Artifact
|
|
||||||
// Path of the Dockerfile
|
|
||||||
dockerfilePath?: string
|
|
||||||
repository: string
|
|
||||||
tag: string
|
|
||||||
awsConfig: aws.#Config
|
|
||||||
buildArgs: [string]: string
|
|
||||||
|
|
||||||
// Use these credentials to push
|
|
||||||
ecrCreds: ecr.#Credentials & {
|
|
||||||
config: awsConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
ref: {
|
|
||||||
string
|
|
||||||
|
|
||||||
#up: [
|
|
||||||
// Build the docker image
|
|
||||||
op.#DockerBuild & {
|
|
||||||
context: source
|
|
||||||
if dockerfilePath != _|_ {
|
|
||||||
"dockerfilePath": dockerfilePath
|
|
||||||
}
|
|
||||||
buildArg: buildArgs
|
|
||||||
},
|
|
||||||
// Login to Registry
|
|
||||||
op.#DockerLogin & {
|
|
||||||
target: repository
|
|
||||||
username: ecrCreds.username
|
|
||||||
secret: ecrCreds.secret
|
|
||||||
},
|
|
||||||
// Push the image to the registry
|
|
||||||
op.#PushContainer & {
|
|
||||||
ref: "\(repository):\(tag)"
|
|
||||||
},
|
|
||||||
op.#Export & {
|
|
||||||
source: "/dagger/image_ref"
|
|
||||||
format: "string"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,152 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
|
|
||||||
"dagger.io/aws"
|
|
||||||
"dagger.io/aws/elb"
|
|
||||||
"dagger.io/aws/cloudformation"
|
|
||||||
)
|
|
||||||
|
|
||||||
#ECSApp: {
|
|
||||||
awsConfig: aws.#Config
|
|
||||||
slug: string
|
|
||||||
clusterName: string
|
|
||||||
vpcId: string
|
|
||||||
elbListenerArn: string
|
|
||||||
taskRoleArn: *"" | string
|
|
||||||
hostname: string
|
|
||||||
healthCheck: {
|
|
||||||
timeout: *10 | int
|
|
||||||
path: *"/" | string
|
|
||||||
unhealthyThresholdCount: *2 | int
|
|
||||||
}
|
|
||||||
desiredCount: int
|
|
||||||
container: {
|
|
||||||
command: [...string]
|
|
||||||
environment: [string]: string
|
|
||||||
port: *80 | int
|
|
||||||
cpu: *256 | int
|
|
||||||
memory: *1024 | int
|
|
||||||
image: string
|
|
||||||
}
|
|
||||||
|
|
||||||
taskArn: cfnStack.outputs.TaskArn
|
|
||||||
|
|
||||||
elbRulePriority: elb.#RandomRulePriority & {
|
|
||||||
config: awsConfig
|
|
||||||
listenerArn: elbListenerArn
|
|
||||||
vhost: hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
cfnStack: cloudformation.#Stack & {
|
|
||||||
config: awsConfig
|
|
||||||
stackName: slug
|
|
||||||
onFailure: "DO_NOTHING"
|
|
||||||
parameters: {
|
|
||||||
ELBRulePriority: elbRulePriority.out
|
|
||||||
ImageRef: container.image
|
|
||||||
ELBListenerArn: elbListenerArn
|
|
||||||
}
|
|
||||||
source: json.Marshal(template)
|
|
||||||
}
|
|
||||||
|
|
||||||
template: {
|
|
||||||
AWSTemplateFormatVersion: "2010-09-09"
|
|
||||||
Description: "Dagger deployed app"
|
|
||||||
Parameters: {
|
|
||||||
ELBRulePriority: Type: "Number"
|
|
||||||
ImageRef: Type: "String"
|
|
||||||
ELBListenerArn: Type: "String"
|
|
||||||
}
|
|
||||||
Resources: {
|
|
||||||
ECSTaskDefinition: {
|
|
||||||
Type: "AWS::ECS::TaskDefinition"
|
|
||||||
Properties: {
|
|
||||||
Cpu: "\(container.cpu)"
|
|
||||||
Memory: "\(container.memory)"
|
|
||||||
if taskRoleArn != "" {
|
|
||||||
TaskRoleArn: taskRoleArn
|
|
||||||
}
|
|
||||||
NetworkMode: "bridge"
|
|
||||||
ContainerDefinitions: [{
|
|
||||||
if len(container.command) > 0 {
|
|
||||||
Command: container.command
|
|
||||||
}
|
|
||||||
Name: slug
|
|
||||||
Image: Ref: "ImageRef"
|
|
||||||
Essential: true
|
|
||||||
Environment: [ for k, v in container.environment {
|
|
||||||
Name: k
|
|
||||||
Value: v
|
|
||||||
}]
|
|
||||||
PortMappings: [{
|
|
||||||
ContainerPort: container.port
|
|
||||||
}]
|
|
||||||
StopTimeout: 5
|
|
||||||
LogConfiguration: {
|
|
||||||
LogDriver: "awslogs"
|
|
||||||
Options: {
|
|
||||||
"awslogs-group": "bl/provider/ecs/\(clusterName)"
|
|
||||||
"awslogs-region": Ref: "AWS::Region"
|
|
||||||
"awslogs-create-group": "true"
|
|
||||||
"awslogs-stream-prefix": slug
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ECSListenerRule: {
|
|
||||||
Type: "AWS::ElasticLoadBalancingV2::ListenerRule"
|
|
||||||
Properties: {
|
|
||||||
ListenerArn: Ref: "ELBListenerArn"
|
|
||||||
Priority: Ref: "ELBRulePriority"
|
|
||||||
Conditions: [{
|
|
||||||
Field: "host-header"
|
|
||||||
Values: [hostname]}]
|
|
||||||
Actions: [{
|
|
||||||
Type: "forward"
|
|
||||||
TargetGroupArn: Ref: "ECSTargetGroup"
|
|
||||||
}]}}
|
|
||||||
ECSTargetGroup: {
|
|
||||||
Type: "AWS::ElasticLoadBalancingV2::TargetGroup"
|
|
||||||
Properties: {
|
|
||||||
Protocol: "HTTP"
|
|
||||||
VpcId: vpcId
|
|
||||||
Port: 80
|
|
||||||
HealthCheckPath: healthCheck.path
|
|
||||||
UnhealthyThresholdCount: healthCheck.unhealthyThresholdCount
|
|
||||||
HealthCheckTimeoutSeconds: healthCheck.timeout
|
|
||||||
HealthCheckIntervalSeconds: healthCheck.timeout + 1
|
|
||||||
HealthyThresholdCount: 3
|
|
||||||
TargetGroupAttributes: [{
|
|
||||||
Value: "10"
|
|
||||||
Key: "deregistration_delay.timeout_seconds"
|
|
||||||
}]}}
|
|
||||||
ECSService: {
|
|
||||||
Type: "AWS::ECS::Service"
|
|
||||||
Properties: {
|
|
||||||
Cluster: clusterName
|
|
||||||
DesiredCount: desiredCount
|
|
||||||
LaunchType: "EC2"
|
|
||||||
LoadBalancers: [{
|
|
||||||
ContainerPort: container.port
|
|
||||||
TargetGroupArn: Ref: "ECSTargetGroup"
|
|
||||||
ContainerName: slug
|
|
||||||
}]
|
|
||||||
ServiceName: slug
|
|
||||||
TaskDefinition: Ref: "ECSTaskDefinition"
|
|
||||||
DeploymentConfiguration: {
|
|
||||||
DeploymentCircuitBreaker: {
|
|
||||||
Enable: true
|
|
||||||
Rollback: true
|
|
||||||
}
|
|
||||||
MaximumPercent: 200
|
|
||||||
MinimumHealthyPercent: 100
|
|
||||||
}}
|
|
||||||
DependsOn: "ECSListenerRule"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Outputs: TaskArn: Value: Ref: "ECSTaskDefinition"
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,57 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"dagger.io/dagger"
|
|
||||||
"dagger.io/netlify"
|
|
||||||
"dagger.io/js/yarn"
|
|
||||||
"dagger.io/git"
|
|
||||||
)
|
|
||||||
|
|
||||||
frontend: {
|
|
||||||
// Source code to build the app
|
|
||||||
source: git.#Repository | dagger.#Artifact @dagger(input)
|
|
||||||
|
|
||||||
writeEnvFile?: string @dagger(input)
|
|
||||||
|
|
||||||
// Yarn Build
|
|
||||||
yarn: {
|
|
||||||
// Run this yarn script
|
|
||||||
script: string | *"build" @dagger(input)
|
|
||||||
|
|
||||||
// Read build output from this directory
|
|
||||||
// (path must be relative to working directory).
|
|
||||||
buildDir: string | *"build" @dagger(input)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build environment variables
|
|
||||||
environment: {
|
|
||||||
[string]: string @dagger(input)
|
|
||||||
}
|
|
||||||
environment: {
|
|
||||||
NODE_ENV: string | *"production" @dagger(input)
|
|
||||||
}
|
|
||||||
environment: {
|
|
||||||
APP_URL: "https://\(name).netlify.app/" @dagger(input)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
frontend: {
|
|
||||||
app: yarn.#Package & {
|
|
||||||
source: frontend.source
|
|
||||||
env: frontend.environment
|
|
||||||
|
|
||||||
if frontend.writeEnvFile != _|_ {
|
|
||||||
writeEnvFile: frontend.writeEnvFile
|
|
||||||
}
|
|
||||||
|
|
||||||
script: frontend.yarn.script
|
|
||||||
buildDir: frontend.yarn.buildDir
|
|
||||||
}
|
|
||||||
|
|
||||||
// Host the application with Netlify
|
|
||||||
site: netlify.#Site & {
|
|
||||||
"name": name
|
|
||||||
account: infra.netlifyAccount
|
|
||||||
contents: app.build
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,35 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"dagger.io/aws"
|
|
||||||
"dagger.io/netlify"
|
|
||||||
)
|
|
||||||
|
|
||||||
infra: {
|
|
||||||
// AWS auth & default region
|
|
||||||
awsConfig: aws.#Config
|
|
||||||
|
|
||||||
// VPC Id
|
|
||||||
vpcId: string @dagger(input)
|
|
||||||
|
|
||||||
// ECR Image repository
|
|
||||||
ecrRepository: string @dagger(input)
|
|
||||||
|
|
||||||
// ECS cluster name
|
|
||||||
ecsClusterName: string @dagger(input)
|
|
||||||
|
|
||||||
// Execution Role ARN used for all tasks running on the cluster
|
|
||||||
ecsTaskRoleArn?: string @dagger(input)
|
|
||||||
|
|
||||||
// ELB listener ARN
|
|
||||||
elbListenerArn: string @dagger(input)
|
|
||||||
|
|
||||||
// Secret ARN for the admin password of the RDS Instance
|
|
||||||
rdsAdminSecretArn: string @dagger(input)
|
|
||||||
|
|
||||||
// ARN of the RDS Instance
|
|
||||||
rdsInstanceArn: string @dagger(input)
|
|
||||||
|
|
||||||
// Netlify credentials
|
|
||||||
netlifyAccount: netlify.#Account @dagger(input)
|
|
||||||
}
|
|
@ -1,34 +0,0 @@
|
|||||||
name: my-app
|
|
||||||
infra:
|
|
||||||
awsConfig:
|
|
||||||
accessKey: <REPLACE WITH AWS ACCESS KEY>
|
|
||||||
secretKey: <REPLACE WITH AWS SECRET KEY>
|
|
||||||
region: us-east-1
|
|
||||||
vpcId: vpc-020ctgv0bcde4242
|
|
||||||
ecrRepository: 8563296674124.dkr.ecr.us-east-1.amazonaws.com/apps
|
|
||||||
ecsClusterName: bl-ecs-acme-764-ECSCluster-lRIVVg09G4HX
|
|
||||||
elbListenerArn: arn:aws:elasticloadbalancing:us-east-1:8563296674124:listener/app/bl-ec-ECSAL-OSYI03K07BCO/3c2d3e78347bde5b/d02ac88cc007e24e
|
|
||||||
rdsAdminSecretArn: arn:aws:secretsmanager:us-east-1:8563296674124:secret:AdminPassword-NQbBi7oU4CYS9-IGgS3B
|
|
||||||
rdsInstanceArn: arn:aws:rds:us-east-1:8563296674124:cluster:bl-rds-acme-764-rdscluster-8eg3xbfjggkfdg
|
|
||||||
netlifyAccount:
|
|
||||||
token: <REPLACE WITH NETLIFY TOKEN>
|
|
||||||
database:
|
|
||||||
dbType: mysql
|
|
||||||
backend:
|
|
||||||
source:
|
|
||||||
remote: https://github.com/blocklayerhq/acme-clothing.git
|
|
||||||
ref: HEAD
|
|
||||||
subdir: ./crate/code/api
|
|
||||||
hostname: my-app.acme-764-api.microstaging.io
|
|
||||||
container:
|
|
||||||
healthCheckPath: /health-check
|
|
||||||
healthCheckTimeout: 40
|
|
||||||
frontend:
|
|
||||||
source:
|
|
||||||
remote: https://github.com/blocklayerhq/acme-clothing.git
|
|
||||||
ref: HEAD
|
|
||||||
subdir: ./crate/code/web
|
|
||||||
writeEnvFile: .env
|
|
||||||
yarn:
|
|
||||||
buildDir: public
|
|
||||||
script: build:client
|
|
@ -1,22 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
// Name of the application
|
|
||||||
name: string & =~"[a-z0-9-]+" @dagger(input)
|
|
||||||
|
|
||||||
// Inject db info in the container environment
|
|
||||||
backend: environment: {
|
|
||||||
DB_USERNAME: database.username
|
|
||||||
DB_HOSTNAME: database.hostname
|
|
||||||
DB_PASSWORD: database.password
|
|
||||||
DB_DBNAME: database.dbName
|
|
||||||
DB_PORT: "\(database.port)"
|
|
||||||
DB_TYPE: database.dbType
|
|
||||||
}
|
|
||||||
|
|
||||||
// Configure the frontend with the API URL
|
|
||||||
frontend: environment: APP_URL_API: url.backendURL
|
|
||||||
|
|
||||||
url: {
|
|
||||||
frontendURL: frontend.site.url @dagger(output)
|
|
||||||
backendURL: "https://\(backend.hostname)/" @dagger(output)
|
|
||||||
}
|
|
@ -1,53 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/yaml"
|
|
||||||
"dagger.io/dagger"
|
|
||||||
"dagger.io/aws"
|
|
||||||
"dagger.io/aws/eks"
|
|
||||||
"dagger.io/kubernetes"
|
|
||||||
"dagger.io/kubernetes/helm"
|
|
||||||
)
|
|
||||||
|
|
||||||
kubeSrc: {
|
|
||||||
apiVersion: "v1"
|
|
||||||
kind: "Pod"
|
|
||||||
metadata: name: "kube-test"
|
|
||||||
spec: {
|
|
||||||
restartPolicy: "Never"
|
|
||||||
containers: [{
|
|
||||||
name: "test"
|
|
||||||
image: "hello-world"
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill using:
|
|
||||||
// --input-string awsConfig.accessKey=XXX
|
|
||||||
// --input-string awsConfig.secretKey=XXX
|
|
||||||
awsConfig: aws.#Config & {
|
|
||||||
region: *"us-east-2" | string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Take the kubeconfig from the EKS cluster
|
|
||||||
cluster: eks.#KubeConfig & {
|
|
||||||
config: awsConfig
|
|
||||||
clusterName: *"dagger-example-eks-cluster" | string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example of a simple `kubectl apply` using a simple config
|
|
||||||
kubeApply: kubernetes.#Resources & {
|
|
||||||
manifest: yaml.Marshal(kubeSrc)
|
|
||||||
namespace: "test"
|
|
||||||
kubeconfig: cluster.kubeconfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example of a `helm install` using a local chart
|
|
||||||
// Fill using:
|
|
||||||
// --input-dir helmChart.chartSource=./testdata/mychart
|
|
||||||
helmChart: helm.#Chart & {
|
|
||||||
name: "test-helm"
|
|
||||||
namespace: "test"
|
|
||||||
kubeconfig: cluster.kubeconfig
|
|
||||||
chartSource: dagger.#Artifact
|
|
||||||
}
|
|
@ -1,447 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
#CFNTemplate: eksControlPlane: {
|
|
||||||
AWSTemplateFormatVersion: "2010-09-09"
|
|
||||||
Description: "Amazon EKS Sample VPC - Private and Public subnets"
|
|
||||||
Parameters: {
|
|
||||||
VpcBlock: {
|
|
||||||
Type: "String"
|
|
||||||
Default: "192.168.0.0/16"
|
|
||||||
Description: "The CIDR range for the VPC. This should be a valid private (RFC 1918) CIDR range."
|
|
||||||
}
|
|
||||||
PublicSubnet01Block: {
|
|
||||||
Type: "String"
|
|
||||||
Default: "192.168.0.0/18"
|
|
||||||
Description: "CidrBlock for public subnet 01 within the VPC"
|
|
||||||
}
|
|
||||||
PublicSubnet02Block: {
|
|
||||||
Type: "String"
|
|
||||||
Default: "192.168.64.0/18"
|
|
||||||
Description: "CidrBlock for public subnet 02 within the VPC"
|
|
||||||
}
|
|
||||||
PrivateSubnet01Block: {
|
|
||||||
Type: "String"
|
|
||||||
Default: "192.168.128.0/18"
|
|
||||||
Description: "CidrBlock for private subnet 01 within the VPC"
|
|
||||||
}
|
|
||||||
PrivateSubnet02Block: {
|
|
||||||
Type: "String"
|
|
||||||
Default: "192.168.192.0/18"
|
|
||||||
Description: "CidrBlock for private subnet 02 within the VPC"
|
|
||||||
}
|
|
||||||
ClusterName: {
|
|
||||||
Type: "String"
|
|
||||||
Description: "The EKS cluster name"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Metadata: "AWS::CloudFormation::Interface": ParameterGroups: [
|
|
||||||
{
|
|
||||||
Label: default: "Worker Network Configuration"
|
|
||||||
Parameters: [
|
|
||||||
"VpcBlock",
|
|
||||||
"PublicSubnet01Block",
|
|
||||||
"PublicSubnet02Block",
|
|
||||||
"PrivateSubnet01Block",
|
|
||||||
"PrivateSubnet02Block",
|
|
||||||
]
|
|
||||||
},
|
|
||||||
]
|
|
||||||
Resources: {
|
|
||||||
VPC: {
|
|
||||||
Type: "AWS::EC2::VPC"
|
|
||||||
Properties: {
|
|
||||||
CidrBlock: Ref: "VpcBlock"
|
|
||||||
EnableDnsSupport: true
|
|
||||||
EnableDnsHostnames: true
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Fn::Sub": "${AWS::StackName}-VPC"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
InternetGateway: Type: "AWS::EC2::InternetGateway"
|
|
||||||
VPCGatewayAttachment: {
|
|
||||||
Type: "AWS::EC2::VPCGatewayAttachment"
|
|
||||||
Properties: {
|
|
||||||
InternetGatewayId: Ref: "InternetGateway"
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PublicRouteTable: {
|
|
||||||
Type: "AWS::EC2::RouteTable"
|
|
||||||
Properties: {
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Public Subnets"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: "Network"
|
|
||||||
Value: "Public"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PrivateRouteTable01: {
|
|
||||||
Type: "AWS::EC2::RouteTable"
|
|
||||||
Properties: {
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Private Subnet AZ1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: "Network"
|
|
||||||
Value: "Private01"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PrivateRouteTable02: {
|
|
||||||
Type: "AWS::EC2::RouteTable"
|
|
||||||
Properties: {
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Private Subnet AZ2"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: "Network"
|
|
||||||
Value: "Private02"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PublicRoute: {
|
|
||||||
DependsOn: "VPCGatewayAttachment"
|
|
||||||
Type: "AWS::EC2::Route"
|
|
||||||
Properties: {
|
|
||||||
RouteTableId: Ref: "PublicRouteTable"
|
|
||||||
DestinationCidrBlock: "0.0.0.0/0"
|
|
||||||
GatewayId: Ref: "InternetGateway"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PrivateRoute01: {
|
|
||||||
DependsOn: [
|
|
||||||
"VPCGatewayAttachment",
|
|
||||||
"NatGateway01",
|
|
||||||
]
|
|
||||||
Type: "AWS::EC2::Route"
|
|
||||||
Properties: {
|
|
||||||
RouteTableId: Ref: "PrivateRouteTable01"
|
|
||||||
DestinationCidrBlock: "0.0.0.0/0"
|
|
||||||
NatGatewayId: Ref: "NatGateway01"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PrivateRoute02: {
|
|
||||||
DependsOn: [
|
|
||||||
"VPCGatewayAttachment",
|
|
||||||
"NatGateway02",
|
|
||||||
]
|
|
||||||
Type: "AWS::EC2::Route"
|
|
||||||
Properties: {
|
|
||||||
RouteTableId: Ref: "PrivateRouteTable02"
|
|
||||||
DestinationCidrBlock: "0.0.0.0/0"
|
|
||||||
NatGatewayId: Ref: "NatGateway02"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
NatGateway01: {
|
|
||||||
DependsOn: [
|
|
||||||
"NatGatewayEIP1",
|
|
||||||
"PublicSubnet01",
|
|
||||||
"VPCGatewayAttachment",
|
|
||||||
]
|
|
||||||
Type: "AWS::EC2::NatGateway"
|
|
||||||
Properties: {
|
|
||||||
AllocationId: "Fn::GetAtt": [
|
|
||||||
"NatGatewayEIP1",
|
|
||||||
"AllocationId",
|
|
||||||
]
|
|
||||||
SubnetId: Ref: "PublicSubnet01"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Fn::Sub": "${AWS::StackName}-NatGatewayAZ1"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
NatGateway02: {
|
|
||||||
DependsOn: [
|
|
||||||
"NatGatewayEIP2",
|
|
||||||
"PublicSubnet02",
|
|
||||||
"VPCGatewayAttachment",
|
|
||||||
]
|
|
||||||
Type: "AWS::EC2::NatGateway"
|
|
||||||
Properties: {
|
|
||||||
AllocationId: "Fn::GetAtt": [
|
|
||||||
"NatGatewayEIP2",
|
|
||||||
"AllocationId",
|
|
||||||
]
|
|
||||||
SubnetId: Ref: "PublicSubnet02"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Fn::Sub": "${AWS::StackName}-NatGatewayAZ2"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
NatGatewayEIP1: {
|
|
||||||
DependsOn: [
|
|
||||||
"VPCGatewayAttachment",
|
|
||||||
]
|
|
||||||
Type: "AWS::EC2::EIP"
|
|
||||||
Properties: Domain: "vpc"
|
|
||||||
}
|
|
||||||
NatGatewayEIP2: {
|
|
||||||
DependsOn: [
|
|
||||||
"VPCGatewayAttachment",
|
|
||||||
]
|
|
||||||
Type: "AWS::EC2::EIP"
|
|
||||||
Properties: Domain: "vpc"
|
|
||||||
}
|
|
||||||
PublicSubnet01: {
|
|
||||||
Type: "AWS::EC2::Subnet"
|
|
||||||
Metadata: Comment: "Subnet 01"
|
|
||||||
Properties: {
|
|
||||||
MapPublicIpOnLaunch: true
|
|
||||||
AvailabilityZone: "Fn::Select": [
|
|
||||||
"0",
|
|
||||||
{
|
|
||||||
"Fn::GetAZs": Ref: "AWS::Region"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
CidrBlock: Ref: "PublicSubnet01Block"
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet01"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
|
|
||||||
Value: "shared"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PublicSubnet02: {
|
|
||||||
Type: "AWS::EC2::Subnet"
|
|
||||||
Metadata: Comment: "Subnet 02"
|
|
||||||
Properties: {
|
|
||||||
MapPublicIpOnLaunch: true
|
|
||||||
AvailabilityZone: "Fn::Select": [
|
|
||||||
"1",
|
|
||||||
{
|
|
||||||
"Fn::GetAZs": Ref: "AWS::Region"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
CidrBlock: Ref: "PublicSubnet02Block"
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet02"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
|
|
||||||
Value: "shared"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PrivateSubnet01: {
|
|
||||||
Type: "AWS::EC2::Subnet"
|
|
||||||
Metadata: Comment: "Subnet 03"
|
|
||||||
Properties: {
|
|
||||||
AvailabilityZone: "Fn::Select": [
|
|
||||||
"0",
|
|
||||||
{
|
|
||||||
"Fn::GetAZs": Ref: "AWS::Region"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
CidrBlock: Ref: "PrivateSubnet01Block"
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet01"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
|
|
||||||
Value: "shared"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PrivateSubnet02: {
|
|
||||||
Type: "AWS::EC2::Subnet"
|
|
||||||
Metadata: Comment: "Private Subnet 02"
|
|
||||||
Properties: {
|
|
||||||
AvailabilityZone: "Fn::Select": [
|
|
||||||
"1",
|
|
||||||
{
|
|
||||||
"Fn::GetAZs": Ref: "AWS::Region"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
CidrBlock: Ref: "PrivateSubnet02Block"
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
Tags: [
|
|
||||||
{
|
|
||||||
Key: "Name"
|
|
||||||
Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet02"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
|
|
||||||
Value: "shared"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PublicSubnet01RouteTableAssociation: {
|
|
||||||
Type: "AWS::EC2::SubnetRouteTableAssociation"
|
|
||||||
Properties: {
|
|
||||||
SubnetId: Ref: "PublicSubnet01"
|
|
||||||
RouteTableId: Ref: "PublicRouteTable"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PublicSubnet02RouteTableAssociation: {
|
|
||||||
Type: "AWS::EC2::SubnetRouteTableAssociation"
|
|
||||||
Properties: {
|
|
||||||
SubnetId: Ref: "PublicSubnet02"
|
|
||||||
RouteTableId: Ref: "PublicRouteTable"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PrivateSubnet01RouteTableAssociation: {
|
|
||||||
Type: "AWS::EC2::SubnetRouteTableAssociation"
|
|
||||||
Properties: {
|
|
||||||
SubnetId: Ref: "PrivateSubnet01"
|
|
||||||
RouteTableId: Ref: "PrivateRouteTable01"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
PrivateSubnet02RouteTableAssociation: {
|
|
||||||
Type: "AWS::EC2::SubnetRouteTableAssociation"
|
|
||||||
Properties: {
|
|
||||||
SubnetId: Ref: "PrivateSubnet02"
|
|
||||||
RouteTableId: Ref: "PrivateRouteTable02"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ControlPlaneSecurityGroup: {
|
|
||||||
Type: "AWS::EC2::SecurityGroup"
|
|
||||||
Properties: {
|
|
||||||
GroupDescription: "Cluster communication with worker nodes"
|
|
||||||
VpcId: Ref: "VPC"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EKSIAMRole: {
|
|
||||||
Type: "AWS::IAM::Role"
|
|
||||||
Properties: {
|
|
||||||
AssumeRolePolicyDocument: Statement: [
|
|
||||||
{
|
|
||||||
Effect: "Allow"
|
|
||||||
Principal: Service: [
|
|
||||||
"eks.amazonaws.com",
|
|
||||||
]
|
|
||||||
Action: [
|
|
||||||
"sts:AssumeRole",
|
|
||||||
]
|
|
||||||
|
|
||||||
},
|
|
||||||
]
|
|
||||||
ManagedPolicyArns: [
|
|
||||||
"arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
|
|
||||||
"arn:aws:iam::aws:policy/AmazonEKSServicePolicy",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EKSCluster: {
|
|
||||||
Type: "AWS::EKS::Cluster"
|
|
||||||
Properties: {
|
|
||||||
Name: Ref: "ClusterName"
|
|
||||||
Version: "1.19"
|
|
||||||
RoleArn: "Fn::GetAtt": ["EKSIAMRole", "Arn"]
|
|
||||||
ResourcesVpcConfig: {
|
|
||||||
SecurityGroupIds: [{Ref: "ControlPlaneSecurityGroup"}]
|
|
||||||
SubnetIds: [
|
|
||||||
{Ref: "PublicSubnet01"},
|
|
||||||
{Ref: "PublicSubnet02"},
|
|
||||||
{Ref: "PrivateSubnet01"},
|
|
||||||
{Ref: "PrivateSubnet02"},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
DependsOn: ["EKSIAMRole", "PublicSubnet01", "PublicSubnet02", "PrivateSubnet01", "PrivateSubnet02", "ControlPlaneSecurityGroup"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Outputs: {
|
|
||||||
SubnetIds: {
|
|
||||||
Description: "Subnets IDs in the VPC"
|
|
||||||
Value: "Fn::Join": [
|
|
||||||
",",
|
|
||||||
[
|
|
||||||
{
|
|
||||||
Ref: "PublicSubnet01"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Ref: "PublicSubnet02"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Ref: "PrivateSubnet01"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Ref: "PrivateSubnet02"
|
|
||||||
},
|
|
||||||
],
|
|
||||||
]
|
|
||||||
}
|
|
||||||
PublicSubnets: {
|
|
||||||
Description: "List of the public subnets"
|
|
||||||
Value: "Fn::Join": [
|
|
||||||
",",
|
|
||||||
[
|
|
||||||
{
|
|
||||||
Ref: "PublicSubnet01"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Ref: "PublicSubnet02"
|
|
||||||
},
|
|
||||||
],
|
|
||||||
]
|
|
||||||
}
|
|
||||||
PrivateSubnets: {
|
|
||||||
Description: "List of the private subnets"
|
|
||||||
Value: "Fn::Join": [
|
|
||||||
",",
|
|
||||||
[
|
|
||||||
{
|
|
||||||
Ref: "PrivateSubnet01"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Ref: "PrivateSubnet02"
|
|
||||||
},
|
|
||||||
],
|
|
||||||
]
|
|
||||||
}
|
|
||||||
DefaultSecurityGroup: {
|
|
||||||
Description: "Security group for the cluster control plane communication with worker nodes"
|
|
||||||
Value: "Fn::Join": [
|
|
||||||
",",
|
|
||||||
[
|
|
||||||
{
|
|
||||||
Ref: "ControlPlaneSecurityGroup"
|
|
||||||
},
|
|
||||||
],
|
|
||||||
]
|
|
||||||
}
|
|
||||||
VPC: {
|
|
||||||
Description: "The VPC Id"
|
|
||||||
Value: Ref: "VPC"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,89 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
#CFNTemplate: eksNodeGroup: {
|
|
||||||
AWSTemplateFormatVersion: "2010-09-09"
|
|
||||||
Description: "Amazon EKS - Node Group"
|
|
||||||
Parameters: {
|
|
||||||
ClusterName: {
|
|
||||||
Type: "String"
|
|
||||||
Description: "The cluster name provided when the cluster was created. If it is incorrect, nodes will not be able to join the cluster."
|
|
||||||
}
|
|
||||||
NodeAutoScalingGroupDesiredCapacity: {
|
|
||||||
Type: "Number"
|
|
||||||
Default: 3
|
|
||||||
Description: "Desired capacity of Node Group ASG."
|
|
||||||
}
|
|
||||||
NodeAutoScalingGroupMaxSize: {
|
|
||||||
Type: "Number"
|
|
||||||
Default: 4
|
|
||||||
Description: "Maximum size of Node Group ASG. Set to at least 1 greater than NodeAutoScalingGroupDesiredCapacity."
|
|
||||||
}
|
|
||||||
NodeAutoScalingGroupMinSize: {
|
|
||||||
Type: "Number"
|
|
||||||
Default: 1
|
|
||||||
Description: "Minimum size of Node Group ASG."
|
|
||||||
}
|
|
||||||
NodeInstanceType: {
|
|
||||||
Type: "String"
|
|
||||||
Default: "t3.medium"
|
|
||||||
ConstraintDescription: "Must be a valid EC2 instance type"
|
|
||||||
Description: "EC2 instance type for the node instances"
|
|
||||||
}
|
|
||||||
Subnets: {
|
|
||||||
Type: "List<AWS::EC2::Subnet::Id>"
|
|
||||||
Description: "The subnets where workers can be created."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Resources: {
|
|
||||||
NodeInstanceRole: {
|
|
||||||
Type: "AWS::IAM::Role"
|
|
||||||
Properties: {
|
|
||||||
AssumeRolePolicyDocument: {
|
|
||||||
Version: "2012-10-17"
|
|
||||||
Statement: [
|
|
||||||
{
|
|
||||||
Effect: "Allow"
|
|
||||||
Principal: Service: [
|
|
||||||
"ec2.amazonaws.com",
|
|
||||||
]
|
|
||||||
Action: [
|
|
||||||
"sts:AssumeRole",
|
|
||||||
]
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
ManagedPolicyArns: [
|
|
||||||
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
|
|
||||||
"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
|
|
||||||
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
|
|
||||||
]
|
|
||||||
Path: "/"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Nodegroup: {
|
|
||||||
Type: "AWS::EKS::Nodegroup"
|
|
||||||
Properties: {
|
|
||||||
ClusterName: Ref: "ClusterName"
|
|
||||||
NodeRole: "Fn::GetAtt": [
|
|
||||||
"NodeInstanceRole",
|
|
||||||
"Arn",
|
|
||||||
]
|
|
||||||
ScalingConfig: {
|
|
||||||
MaxSize: Ref: "NodeAutoScalingGroupMaxSize"
|
|
||||||
MinSize: Ref: "NodeAutoScalingGroupMinSize"
|
|
||||||
DesiredSize: Ref: "NodeAutoScalingGroupDesiredCapacity"
|
|
||||||
}
|
|
||||||
InstanceTypes: [{Ref: "NodeInstanceType"}]
|
|
||||||
AmiType: "AL2_x86_64"
|
|
||||||
Subnets: Ref: "Subnets"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Outputs: NodeInstanceRole: {
|
|
||||||
Description: "The node instance role"
|
|
||||||
Value: "Fn::GetAtt": [
|
|
||||||
"NodeInstanceRole",
|
|
||||||
"Arn",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,41 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
|
|
||||||
"dagger.io/aws"
|
|
||||||
"dagger.io/aws/cloudformation"
|
|
||||||
)
|
|
||||||
|
|
||||||
#Infrastructure: {
|
|
||||||
awsConfig: aws.#Config
|
|
||||||
namePrefix: *"" | string
|
|
||||||
workerNodeCapacity: *3 | >=1
|
|
||||||
workerNodeInstanceType: *"t3.medium" | string
|
|
||||||
|
|
||||||
clusterName: "\(namePrefix)eks-cluster"
|
|
||||||
|
|
||||||
eksControlPlane: cloudformation.#Stack & {
|
|
||||||
config: awsConfig
|
|
||||||
source: json.Marshal(#CFNTemplate.eksControlPlane)
|
|
||||||
stackName: "\(namePrefix)eks-controlplane"
|
|
||||||
neverUpdate: true
|
|
||||||
timeout: 30
|
|
||||||
parameters: ClusterName: clusterName
|
|
||||||
}
|
|
||||||
|
|
||||||
eksNodeGroup: cloudformation.#Stack & {
|
|
||||||
config: awsConfig
|
|
||||||
source: json.Marshal(#CFNTemplate.eksNodeGroup)
|
|
||||||
stackName: "\(namePrefix)eks-nodegroup"
|
|
||||||
neverUpdate: true
|
|
||||||
timeout: 30
|
|
||||||
parameters: {
|
|
||||||
ClusterName: clusterName
|
|
||||||
NodeAutoScalingGroupDesiredCapacity: 1
|
|
||||||
NodeAutoScalingGroupMaxSize: NodeAutoScalingGroupDesiredCapacity + 1
|
|
||||||
NodeInstanceType: workerNodeInstanceType
|
|
||||||
Subnets: eksControlPlane.outputs.SubnetIds
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,29 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"dagger.io/aws"
|
|
||||||
"dagger.io/aws/eks"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AWS account: credentials and region
|
|
||||||
awsConfig: aws.#Config & {
|
|
||||||
region: *"us-east-2" | string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Auto-provision an EKS cluster:
|
|
||||||
// - VPC, Nat Gateways, Subnets, Security Group
|
|
||||||
// - EKS Cluster
|
|
||||||
// - Instance Node Group: auto-scaling-group, ec2 instances, etc...
|
|
||||||
// base config can be changed (number of EC2 instances, types, etc...)
|
|
||||||
infra: #Infrastructure & {
|
|
||||||
"awsConfig": awsConfig
|
|
||||||
namePrefix: "dagger-example-"
|
|
||||||
workerNodeCapacity: int | *1
|
|
||||||
workerNodeInstanceType: "t3.small"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Client configuration for kubectl
|
|
||||||
kubeconfig: eks.#KubeConfig & {
|
|
||||||
config: awsConfig
|
|
||||||
clusterName: infra.clusterName
|
|
||||||
}
|
|
@ -1,269 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"regexp"
|
|
||||||
"encoding/json"
|
|
||||||
|
|
||||||
"dagger.io/aws"
|
|
||||||
"dagger.io/aws/cloudformation"
|
|
||||||
)
|
|
||||||
|
|
||||||
#Notification: {
|
|
||||||
protocol: string
|
|
||||||
endpoint: string
|
|
||||||
}
|
|
||||||
|
|
||||||
#Canary: {
|
|
||||||
name: =~"^[0-9a-z_-]{1,21}$"
|
|
||||||
slug: strings.Join(regexp.FindAll("[0-9a-zA-Z]*", name, -1), "")
|
|
||||||
url: string
|
|
||||||
expectedHTTPCode: *200 | int
|
|
||||||
timeoutInSeconds: *30 | int
|
|
||||||
intervalExpression: *"1 minute" | string
|
|
||||||
}
|
|
||||||
|
|
||||||
#HTTPMonitor: {
|
|
||||||
|
|
||||||
// For sending notifications
|
|
||||||
notifications: [...#Notification]
|
|
||||||
// Canaries (tests)
|
|
||||||
canaries: [...#Canary]
|
|
||||||
// Name of the Cloudformation stack
|
|
||||||
cfnStackName: string
|
|
||||||
// AWS Config
|
|
||||||
awsConfig: aws.#Config
|
|
||||||
|
|
||||||
cfnStack: cloudformation.#Stack & {
|
|
||||||
config: awsConfig
|
|
||||||
source: json.Marshal(#cfnTemplate)
|
|
||||||
stackName: cfnStackName
|
|
||||||
onFailure: "DO_NOTHING"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Function handler
|
|
||||||
#lambdaHandler: {
|
|
||||||
url: string
|
|
||||||
expectedHTTPCode: int
|
|
||||||
|
|
||||||
script: #"""
|
|
||||||
var synthetics = require('Synthetics');
|
|
||||||
const log = require('SyntheticsLogger');
|
|
||||||
|
|
||||||
const pageLoadBlueprint = async function () {
|
|
||||||
|
|
||||||
// INSERT URL here
|
|
||||||
const URL = "\#(url)";
|
|
||||||
|
|
||||||
let page = await synthetics.getPage();
|
|
||||||
const response = await page.goto(URL, {waitUntil: 'domcontentloaded', timeout: 30000});
|
|
||||||
//Wait for page to render.
|
|
||||||
//Increase or decrease wait time based on endpoint being monitored.
|
|
||||||
await page.waitFor(15000);
|
|
||||||
// This will take a screenshot that will be included in test output artifacts
|
|
||||||
await synthetics.takeScreenshot('loaded', 'loaded');
|
|
||||||
let pageTitle = await page.title();
|
|
||||||
log.info('Page title: ' + pageTitle);
|
|
||||||
if (response.status() !== \#(expectedHTTPCode)) {
|
|
||||||
throw "Failed to load page!";
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
exports.handler = async () => {
|
|
||||||
return await pageLoadBlueprint();
|
|
||||||
};
|
|
||||||
"""#
|
|
||||||
}
|
|
||||||
|
|
||||||
#cfnTemplate: {
|
|
||||||
AWSTemplateFormatVersion: "2010-09-09"
|
|
||||||
Description: "CloudWatch Synthetics website monitoring"
|
|
||||||
Resources: {
|
|
||||||
Topic: {
|
|
||||||
Type: "AWS::SNS::Topic"
|
|
||||||
Properties: Subscription: [
|
|
||||||
for e in notifications {
|
|
||||||
Endpoint: e.endpoint
|
|
||||||
Protocol: e.protocol
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
TopicPolicy: {
|
|
||||||
Type: "AWS::SNS::TopicPolicy"
|
|
||||||
Properties: {
|
|
||||||
PolicyDocument: {
|
|
||||||
Id: "Id1"
|
|
||||||
Version: "2012-10-17"
|
|
||||||
Statement: [
|
|
||||||
{
|
|
||||||
Sid: "Sid1"
|
|
||||||
Effect: "Allow"
|
|
||||||
Principal: AWS: "*"
|
|
||||||
Action: "sns:Publish"
|
|
||||||
Resource: Ref: "Topic"
|
|
||||||
Condition: StringEquals: "AWS:SourceOwner": Ref: "AWS::AccountId"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
Topics: [
|
|
||||||
{
|
|
||||||
Ref: "Topic"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
CanaryBucket: {
|
|
||||||
Type: "AWS::S3::Bucket"
|
|
||||||
Properties: {}
|
|
||||||
}
|
|
||||||
CanaryRole: {
|
|
||||||
Type: "AWS::IAM::Role"
|
|
||||||
Properties: {
|
|
||||||
AssumeRolePolicyDocument: {
|
|
||||||
Version: "2012-10-17"
|
|
||||||
Statement: [
|
|
||||||
{
|
|
||||||
Effect: "Allow"
|
|
||||||
Principal: Service: "lambda.amazonaws.com"
|
|
||||||
Action: "sts:AssumeRole"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
Policies: [
|
|
||||||
{
|
|
||||||
PolicyName: "execution"
|
|
||||||
PolicyDocument: {
|
|
||||||
Version: "2012-10-17"
|
|
||||||
Statement: [
|
|
||||||
{
|
|
||||||
Effect: "Allow"
|
|
||||||
Action: "s3:ListAllMyBuckets"
|
|
||||||
Resource: "*"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Effect: "Allow"
|
|
||||||
Action: "s3:PutObject"
|
|
||||||
Resource: "Fn::Sub": "${CanaryBucket.Arn}/*"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Effect: "Allow"
|
|
||||||
Action: "s3:GetBucketLocation"
|
|
||||||
Resource: "Fn::GetAtt": [
|
|
||||||
"CanaryBucket",
|
|
||||||
"Arn",
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Effect: "Allow"
|
|
||||||
Action: "cloudwatch:PutMetricData"
|
|
||||||
Resource: "*"
|
|
||||||
Condition: StringEquals: "cloudwatch:namespace": "CloudWatchSynthetics"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
CanaryLogGroup: {
|
|
||||||
Type: "AWS::Logs::LogGroup"
|
|
||||||
Properties: {
|
|
||||||
LogGroupName: "Fn::Sub": "/aws/lambda/cwsyn-\(cfnStackName)"
|
|
||||||
RetentionInDays: 14
|
|
||||||
}
|
|
||||||
}
|
|
||||||
CanaryPolicy: {
|
|
||||||
Type: "AWS::IAM::Policy"
|
|
||||||
Properties: {
|
|
||||||
PolicyDocument: Statement: [
|
|
||||||
{
|
|
||||||
Effect: "Allow"
|
|
||||||
Action: [
|
|
||||||
"logs:CreateLogStream",
|
|
||||||
"logs:PutLogEvents",
|
|
||||||
]
|
|
||||||
Resource: "Fn::GetAtt": [
|
|
||||||
"CanaryLogGroup",
|
|
||||||
"Arn",
|
|
||||||
]
|
|
||||||
},
|
|
||||||
]
|
|
||||||
PolicyName: "logs"
|
|
||||||
Roles: [
|
|
||||||
{
|
|
||||||
Ref: "CanaryRole"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for canary in canaries {
|
|
||||||
"Canary\(canary.slug)": {
|
|
||||||
Type: "AWS::Synthetics::Canary"
|
|
||||||
Properties: {
|
|
||||||
ArtifactS3Location: "Fn::Sub": "s3://${CanaryBucket}"
|
|
||||||
Code: {
|
|
||||||
#handler: #lambdaHandler & {
|
|
||||||
url: canary.url
|
|
||||||
expectedHTTPCode: canary.expectedHTTPCode
|
|
||||||
}
|
|
||||||
Handler: "index.handler"
|
|
||||||
Script: #handler.script
|
|
||||||
}
|
|
||||||
ExecutionRoleArn: "Fn::GetAtt": [
|
|
||||||
"CanaryRole",
|
|
||||||
"Arn",
|
|
||||||
]
|
|
||||||
FailureRetentionPeriod: 30
|
|
||||||
Name: canary.name
|
|
||||||
RunConfig: TimeoutInSeconds: canary.timeoutInSeconds
|
|
||||||
RuntimeVersion: "syn-1.0"
|
|
||||||
Schedule: {
|
|
||||||
DurationInSeconds: "0"
|
|
||||||
Expression: "rate(\(canary.intervalExpression))"
|
|
||||||
}
|
|
||||||
StartCanaryAfterCreation: true
|
|
||||||
SuccessRetentionPeriod: 30
|
|
||||||
}
|
|
||||||
}
|
|
||||||
"SuccessPercentAlarm\(canary.slug)": {
|
|
||||||
DependsOn: "TopicPolicy"
|
|
||||||
Type: "AWS::CloudWatch::Alarm"
|
|
||||||
Properties: {
|
|
||||||
AlarmActions: [
|
|
||||||
{
|
|
||||||
Ref: "Topic"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
AlarmDescription: "Canary is failing."
|
|
||||||
ComparisonOperator: "LessThanThreshold"
|
|
||||||
Dimensions: [
|
|
||||||
{
|
|
||||||
Name: "CanaryName"
|
|
||||||
Value: Ref: "Canary\(canary.slug)"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
EvaluationPeriods: 1
|
|
||||||
MetricName: "SuccessPercent"
|
|
||||||
Namespace: "CloudWatchSynthetics"
|
|
||||||
OKActions: [
|
|
||||||
{
|
|
||||||
Ref: "Topic"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
Period: 300
|
|
||||||
Statistic: "Minimum"
|
|
||||||
Threshold: 90
|
|
||||||
TreatMissingData: "notBreaching"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Outputs: {
|
|
||||||
for canary in canaries {
|
|
||||||
"\(canary.slug)Canary": Value: Ref: "Canary\(canary.slug)"
|
|
||||||
"\(canary.slug)URL": Value: canary.url
|
|
||||||
}
|
|
||||||
NumberCanaries: Value: len(canaries)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,34 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"dagger.io/aws"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AWS account: credentials and region
|
|
||||||
awsConfig: aws.#Config & {
|
|
||||||
region: *"us-east-1" | string @dagger(input)
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL of the website to monitor
|
|
||||||
website: string | *"https://www.google.com" @dagger(input)
|
|
||||||
|
|
||||||
// Email address to notify of monitoring alerts
|
|
||||||
email: string @dagger(input)
|
|
||||||
|
|
||||||
// The monitoring service running on AWS Cloudwatch
|
|
||||||
monitor: #HTTPMonitor & {
|
|
||||||
notifications: [
|
|
||||||
#Notification & {
|
|
||||||
endpoint: email
|
|
||||||
protocol: "email"
|
|
||||||
},
|
|
||||||
]
|
|
||||||
canaries: [
|
|
||||||
#Canary & {
|
|
||||||
name: "default"
|
|
||||||
url: website
|
|
||||||
},
|
|
||||||
]
|
|
||||||
cfnStackName: "my-monitor"
|
|
||||||
"awsConfig": awsConfig
|
|
||||||
}
|
|
28
examples/react/.dagger/env/default/values.yaml
vendored
28
examples/react/.dagger/env/default/values.yaml
vendored
@ -1,28 +0,0 @@
|
|||||||
plan:
|
|
||||||
module: .
|
|
||||||
name: default
|
|
||||||
inputs:
|
|
||||||
www.account.name:
|
|
||||||
text: blocklayer
|
|
||||||
www.account.token:
|
|
||||||
secret: ENC[AES256_GCM,data:AGeCt/UJzWJ4UnzS/+t21GYz5wXPUoplYXTi1USXdi72wZemhzZncR2a+A==,iv:07DgGFL0oKgQsSZnp9s/Zz+6rdLShtHfStJZ9tHpsI4=,tag:jkY6TMrf7DaJMAc8/kJcAw==,type:str]
|
|
||||||
sops:
|
|
||||||
kms: []
|
|
||||||
gcp_kms: []
|
|
||||||
azure_kv: []
|
|
||||||
hc_vault: []
|
|
||||||
age:
|
|
||||||
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
|
|
||||||
enc: |
|
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBudkl4d2czaHZkSGt0SnVm
|
|
||||||
dm8xRTdaRE1WenpOczYxRFdMcDRkWDNmL1VzCjRHeWt3dnBITjlSNDZteWJhRmha
|
|
||||||
NWsrTThTZCt2eDJkRjgyOTFJeHdBMzgKLS0tIE9jOTFWMTRQei9iUkcvZDFrbmxn
|
|
||||||
ZnFaRWtEM241cDVCTStnK25BcDYyWlUKT2U8IFC21xMigjaTHHgkdUxIXKshxTmg
|
|
||||||
Q8254/qEWk+mJfsGxPf54d1RtqNqDX17kK/LeooSYAz7aqBjVLfG6w==
|
|
||||||
-----END AGE ENCRYPTED FILE-----
|
|
||||||
lastmodified: "2021-06-15T16:43:58Z"
|
|
||||||
mac: ENC[AES256_GCM,data:ihY1s1/ngxPrL940WkhRiVdmA+zPqcx9bVqlsWyAPfM/E5thIylNn7qMBDyFG6tHJAQFdqpbwCbQuKn6MVs+d+IgsoBwIcN1y4xQn2LhC53ludL2tG4CYyZM5EKx43EE/whzTuyNrPl9ykfx/u+KeQD5CNbaB9PrDjrtc+rNrPQ=,iv:7T2NHDXWrPJAsOVLPVhqFYnYcZjoBtE5x8R4CDUD+yM=,tag:yaXlTdSH7uvQiimKVPrvFg==,type:str]
|
|
||||||
pgp: []
|
|
||||||
encrypted_suffix: secret
|
|
||||||
version: 3.7.1
|
|
@ -1 +0,0 @@
|
|||||||
../../../../stdlib
|
|
@ -1,27 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"dagger.io/netlify"
|
|
||||||
"dagger.io/js/yarn"
|
|
||||||
"dagger.io/git"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Source code of the sample application
|
|
||||||
repo: git.#Repository & {
|
|
||||||
remote: "https://github.com/kabirbaidhya/react-todo-app.git"
|
|
||||||
ref: "624041b17bd62292143f99bce474a0e3c2d2dd61"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Host the application with Netlify
|
|
||||||
www: netlify.#Site & {
|
|
||||||
// Site name can be overridden
|
|
||||||
name: string | *"dagger-examples-react" @dagger(input)
|
|
||||||
|
|
||||||
// Deploy the output of yarn build
|
|
||||||
// (Netlify build feature is not used, to avoid extra cost).
|
|
||||||
contents: app.build
|
|
||||||
}
|
|
||||||
|
|
||||||
app: yarn.#Package & {
|
|
||||||
source: repo
|
|
||||||
}
|
|
@ -1,29 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"dagger.io/aws"
|
|
||||||
"dagger.io/aws/s3"
|
|
||||||
"dagger.io/dagger"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AWS Config for credentials and default region
|
|
||||||
awsConfig: aws.#Config & {
|
|
||||||
region: *"us-east-1" | string @dagger(input)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the S3 bucket to use
|
|
||||||
bucket: *"dagger-io-examples" | string @dagger(input)
|
|
||||||
|
|
||||||
// Source code to deploy
|
|
||||||
source: dagger.#Artifact @dagger(input)
|
|
||||||
|
|
||||||
// Deployed URL
|
|
||||||
url: "\(deploy.url)index.html" @dagger(output)
|
|
||||||
|
|
||||||
deploy: s3.#Object & {
|
|
||||||
always: true
|
|
||||||
config: awsConfig
|
|
||||||
"source": source
|
|
||||||
contentType: "text/html"
|
|
||||||
target: "s3://\(bucket)/"
|
|
||||||
}
|
|
@ -1,9 +0,0 @@
|
|||||||
<html>
|
|
||||||
</head>
|
|
||||||
<title>My Simple Website</title>
|
|
||||||
</head>
|
|
||||||
<h1>Shopping list</h1>
|
|
||||||
<li>Salad</li>
|
|
||||||
<li>Eggs</li>
|
|
||||||
<li>Potatoes</li>
|
|
||||||
</html>
|
|
28
stdlib/.dagger/env/aws-eks/values.yaml
vendored
Normal file
28
stdlib/.dagger/env/aws-eks/values.yaml
vendored
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
plan:
|
||||||
|
module: .dagger/env/aws-eks/plan
|
||||||
|
name: aws-eks
|
||||||
|
inputs:
|
||||||
|
TestConfig.awsConfig.accessKey:
|
||||||
|
secret: ENC[AES256_GCM,data:ZiNdgkTZlOyWht2CDpmYKN+ViTE=,iv:wdRiBw65BgSia9z//tUDirkkhw9O29ZoerX6eZnYx9k=,tag:S/0i/fRtQJg4Qp7tmUK4ag==,type:str]
|
||||||
|
TestConfig.awsConfig.secretKey:
|
||||||
|
secret: ENC[AES256_GCM,data:ywvQiDE4gmM6KasYWOvX1FY/Lerg5TghgoYTq1AlXDRHNGzZtY3ClQ==,iv:HCXweaSKHLwEA8Mq4up/TUaV7YDtsRpBpwYD19Jh4iw=,tag:l2hmI9BsGiRyulh4yDn/hw==,type:str]
|
||||||
|
sops:
|
||||||
|
kms: []
|
||||||
|
gcp_kms: []
|
||||||
|
azure_kv: []
|
||||||
|
hc_vault: []
|
||||||
|
age:
|
||||||
|
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVdWE4VlRMWGJ3WDExWkNl
|
||||||
|
S1dqU21uVXFjaXU5eWdKRmRCUXFCd2ZaTjNrCjlkNXI3WUdnRGVibmZkbXJYaEV4
|
||||||
|
SXIveGNDNnZ6dDM4SjdrMmZIZVhyVzAKLS0tIGkzK0tMTTdHU2lacmtvakUwbGFE
|
||||||
|
M3U4UFV5REQzYko3QjlXVE02Z0J4WUkK8uHC67Mutls4drXbCi8AwuFqbRXeb69P
|
||||||
|
ZnOFZEB4NoayoOojr1mY9ssDTywHF4KwR4E9ZmJ3V3hlEAgMkqfvSA==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2021-06-17T18:46:34Z"
|
||||||
|
mac: ENC[AES256_GCM,data:B+WtEMOKXy8AT/YTUaKZ9aA8fQRt2pJp3IaABpj0oYI1vCG953MnDCIxj0j2bTQN5gyaFPF8UQ1o/pRJzCKhm26wbCByUrVdHxHTwoJ7arDqQGwcNKYAuQjLtMG7gsl0BqjCg0oKO5YEa24BqHVf1dEo9AcXd6LBwqvxVjmd98g=,iv:aWxj1Oq6wmUYuWnGOc2zIpzOYJVyXV9qSzBgF+iGsHI=,tag:Bx1A8UxghYq97wEdUxbmdg==,type:str]
|
||||||
|
pgp: []
|
||||||
|
encrypted_suffix: secret
|
||||||
|
version: 3.7.1
|
2
stdlib/.dagger/env/docker-build/.gitignore
vendored
Normal file
2
stdlib/.dagger/env/docker-build/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
# dagger state
|
||||||
|
state/**
|
@ -6,15 +6,38 @@ import (
|
|||||||
"dagger.io/docker"
|
"dagger.io/docker"
|
||||||
)
|
)
|
||||||
|
|
||||||
source: dagger.#Artifact
|
TestSourceBuild: dagger.#Artifact @dagger(input)
|
||||||
|
|
||||||
TestImageFromDockerfile: {
|
TestBuild: {
|
||||||
image: docker.#ImageFromDockerfile & {
|
image: docker.#Build & {
|
||||||
dockerfile: """
|
source: TestSourceBuild
|
||||||
FROM alpine
|
}
|
||||||
COPY test.txt /test.txt
|
|
||||||
"""
|
verify: #up: [
|
||||||
context: source
|
op.#Load & {
|
||||||
|
from: image
|
||||||
|
},
|
||||||
|
|
||||||
|
op.#Exec & {
|
||||||
|
always: true
|
||||||
|
args: [
|
||||||
|
"sh", "-c", """
|
||||||
|
grep -q "test" /test.txt
|
||||||
|
""",
|
||||||
|
]
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
TestSourceImageFromDockerfile: dagger.#Artifact @dagger(input)
|
||||||
|
|
||||||
|
TestImageFromDockerfile: {
|
||||||
|
image: docker.#ImageFromDockerfile & {
|
||||||
|
dockerfile: """
|
||||||
|
FROM alpine
|
||||||
|
COPY test.txt /test.txt
|
||||||
|
"""
|
||||||
|
context: TestSourceImageFromDockerfile
|
||||||
}
|
}
|
||||||
|
|
||||||
verify: #up: [
|
verify: #up: [
|
30
stdlib/.dagger/env/docker-build/values.yaml
vendored
Normal file
30
stdlib/.dagger/env/docker-build/values.yaml
vendored
Normal file
@ -0,0 +1,30 @@
plan:
module: .dagger/env/docker-build/plan
name: docker-build
inputs:
TestSourceBuild:
dir:
path: ./docker/testdata/build
TestSourceImageFromDockerfile:
dir:
path: ./docker/testdata/dockerfile
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA0TXlFYWNvUldMdlRtWkEz
SlBEYUY1Q0thbFdSZGpaT00xQ2Jkc1J2WkdJClBXUGVTamdmZU1KNUdjam9HN0Zl
RjRQbVRHVjR6S3RCWlJLaElaM2ZWVG8KLS0tIDJJejFkQkxYeDdHcWdPS0p0QmJ0
Mm5vT1dHbFViK2ZIakNnVkZTd2lhUHMK63jJsJVLJMbQE2NkAB8qv8JnPHpvcNes
z17EJgl0lCLqeNHtfrTfSiIP4wq8gNLK4avCKK+WGDOIMsXPzK6RNw==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2021-06-17T20:14:11Z"
mac: ENC[AES256_GCM,data:hlc0Bnfeoor/WKMbQRgTalkxngL0YXTwHAys/moXZ4ZMGd2lt+j4l4EkKSjb3QrJfPllCeqroohLKtN+lP4K9fSCMcfYzic2DTEP68rPwufmrgxys1snOHHgIEfqogL8p55fJdXn91x+WHhPNkbWaaH0WcboYsy0zemUIkjb+xc=,iv:8oUeR1dfT4lrVWyJpGPPFa/jlPgWA/ld3UM9Cw2znxk=,tag:59RyiXwzJ5j+c5faxs9U3w==,type:str]
pgp: []
encrypted_suffix: secret
version: 3.7.1
6
stdlib/.dagger/env/git/values.yaml
vendored
6
stdlib/.dagger/env/git/values.yaml
vendored
@ -1,3 +1,5 @@
plan:
module: .dagger/env/git/plan
name: git
sops:
kms: []
@ -14,8 +16,8 @@ sops:
TmhJNisyamw3d244aGVJSEVFVUVLZGsKvd+nowA0CLXQbdvyI4J0lBjs9vdISWlo
gGvR49uul3Z8raVWXFUzsyQ8xTvYNg0ovynFG2KdagSKr1DlhKMBEQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2021-06-15T14:34:51Z"
lastmodified: "2021-06-18T16:23:23Z"
mac: ENC[AES256_GCM,data:phQpRQlHv9c3VRcqZ7OkSfW4a9oPnTD2ucsB8TJatgzLrbP1+erj9x2jrPex0T2MExIFFzNynSAiWwueLYqCzUvuG2DfIokvM9erNfdpbIBTtJeWO9+hVIkzoQ6xeKg1wLb0q3U7Cbbe6GBFA3oabPN2kyzGbgS2LO2Ou77NMLk=,iv:sS0MRNEGBWos6XNAQEYK2UmaK9g0rd+Nx1xBNeh6w+M=,tag:DIcqzBvChde/C7T/yAhn+w==,type:str]
mac: ENC[AES256_GCM,data:AdTUEx0RIrJU6aZZNn9iIrl0eM2eParknCVIQL7k1arLRfYH4WyMf9lUa03+Qy83r4miNh4a9kFpNWyodbOR/j7OiLgAxWGXc08XAnIU51F2H7b55cSW9yNJj5kfos2e1pS356MoSaswg4fH8EYVUNgWC6mdBcXzC1m7uiqTS0E=,iv:mK9sjOCd7ePWR4xe5qNwmPuIyNR1nE3Ql65cF15SovI=,tag:DPUTnGTF+Ve+A7ShACNrnQ==,type:str]
pgp: []
encrypted_suffix: secret
version: 3.7.1
2
stdlib/.dagger/env/kubernetes-deployment/.gitignore
vendored
Normal file
2
stdlib/.dagger/env/kubernetes-deployment/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
# dagger state
state/**
@ -8,7 +8,7 @@ import (

// We assume that a kinD cluster is running locally
// To deploy a local KinD cluster, follow this link : https://kind.sigs.k8s.io/docs/user/quick-start/
kubeconfig: string @dagger(input)
TestKubeconfig: string @dagger(input)

TestKubeApply: {
suffix: random.#String & {
@ -31,7 +31,7 @@ TestKubeApply: {

// Apply deployment
apply: kubernetes.#Resources & {
"kubeconfig": kubeconfig
kubeconfig: TestKubeconfig
namespace: "dagger-test"
manifest: yaml.Marshal(kubeSrc)
}
@ -29,7 +29,7 @@ import (

op.#WriteFile & {
dest: "/kubeconfig"
content: kubeconfig
content: TestKubeconfig
mode: 0o600
},
23
stdlib/.dagger/env/kubernetes-deployment/values.yaml
vendored
Normal file
23
stdlib/.dagger/env/kubernetes-deployment/values.yaml
vendored
Normal file
@ -0,0 +1,23 @@
plan:
module: .dagger/env/kubernetes-deployment/plan
name: kubernetes-deployment
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBIQ2hZZ3ZRVzRseXZyVW1h
L01WYkNrTzdHSEN5WTlyQzVDTm9FbzRENzFFCjdrcVhKa1ZwaGNyYmo3ditDR1hC
cStzcmVjUXY3V3FUZElRNUIzQlQzL0UKLS0tIHlmWTlUdFVOczM0TTF5RHFTUXps
SVVkOUtuWTJneE45em5iQ3JvbnIwWlkKgdJC5IzvVDxbWSfU41Xg/UGPxuVBSOGY
eqenr07uWppNaHuLuo9A+znQa2RQ0L2clcB2d+ka+6z5tQyHOfx1nA==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2021-06-18T15:41:02Z"
mac: ENC[AES256_GCM,data:R3DuHLEyfehKe1nCWHdKB9jyOs5TXI+r2BmQDMiwI8v0xfZdOZWfwGw3NAFGDZHbaLNTajQkzviDsMhaXg5bxvmK7P8PiJOOmnm/LnDRfnJirGRGpWA7bmsHH/QZL1lb75+cwUrwRZflkKoPy2bQyoC5Rze6/oNhPIUTCwQWaMo=,iv:73ZjXAcazCND3JhC94TjUOlcMbwfTz8YDFP1BPo8yUw=,tag:wUVcfyjtf4KzpU0jDrxleQ==,type:str]
pgp: []
encrypted_suffix: secret
version: 3.7.1
2
stdlib/.dagger/env/kubernetes-helm/.gitignore
vendored
Normal file
2
stdlib/.dagger/env/kubernetes-helm/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
# dagger state
state/**
@ -1,14 +1,16 @@
package main

import (
"dagger.io/dagger"
"dagger.io/kubernetes/helm"
"dagger.io/random"
"dagger.io/dagger"
)

// We assume that a kinD cluster is running locally
// To deploy a local KinD cluster, follow this link : https://kind.sigs.k8s.io/docs/user/quick-start/
kubeconfig: string @dagger(input)
TestKubeconfig: string @dagger(input)

TestChartSource: dagger.#Artifact @dagger(input)

// Deploy user local chart
TestHelmSimpleChart: {
@ -18,10 +20,10 @@ TestHelmSimpleChart: {

// Deploy chart
deploy: helm.#Chart & {
name: "dagger-test-helm-simple-chart-\(suffix.out)"
name: "dagger-test-inline-chart-\(suffix.out)"
namespace: "dagger-test"
"kubeconfig": kubeconfig
kubeconfig: TestKubeconfig
chartSource: dagger.#Artifact
chartSource: TestChartSource
}

// Verify deployment
@ -37,11 +39,11 @@ TestHelmRepoChart: {
seed: "repo"
}

// Deploy chart
// Deploy remote chart
deploy: helm.#Chart & {
name: "dagger-test-helm-repository-\(suffix.out)"
name: "dagger-test-repository-\(suffix.out)"
namespace: "dagger-test"
"kubeconfig": kubeconfig
kubeconfig: TestKubeconfig
repository: "https://charts.bitnami.com/bitnami"
chart: "redis"
}
@ -28,7 +28,7 @@ import (

op.#WriteFile & {
dest: "/kubeconfig"
content: kubeconfig
content: TestKubeconfig
mode: 0o600
},
27
stdlib/.dagger/env/kubernetes-helm/values.yaml
vendored
Normal file
27
stdlib/.dagger/env/kubernetes-helm/values.yaml
vendored
Normal file
@ -0,0 +1,27 @@
plan:
module: .dagger/env/kubernetes-helm/plan
name: kubernetes-helm
inputs:
TestChartSource:
dir:
path: ./kubernetes/helm/testdata/mychart
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBud1hMc0dTZTlIQ3lZVEQ5
WjA2UlAvTm15ZEgycXlKKzhjTmU0Ui9xZHcwCkRxclFUTUE0aXRvaElkc3diV2Ix
N2VZZVIzS2t3cVl3UmtXOC9PY1VObzAKLS0tIG9ydkFzak1SaUo2NGxET3ZiNklZ
VHlGUExaMzcwM0pOM2VDY280UWZXSzQKAm7ZV1agxbla3Yrc7vrwJosSjQtWhdac
ZFyQ6Gi+9H7qHZM89yVjAaIg1lwr68HcjYgDzpvvhJO9YPfzwoLyHw==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2021-06-18T15:42:52Z"
mac: ENC[AES256_GCM,data:SzHFZpgiv+h1vRjq0GP+4nzj9az6pAwQwstxYz10yBGPQXnZv/VtJm071oouiK7pgD4i7cTvTKgIOaX9K74PiWSiTjWI5F9sGHvt9ZoGyU08OHM6zwGMDiYygBN2+5dd5jBvT4Xy6efa0IOMxSqhp69+VoJRWesAFsN6IfDcIEY=,iv:Af2WeB2eVk5hnWFWaQij7hz2wjXgNWDJTWDm13iKNvA=,tag:uvR1ruMc69ZhDJRtYCFQBw==,type:str]
pgp: []
encrypted_suffix: secret
version: 3.7.1
2
stdlib/.dagger/env/kubernetes-kustomize/.gitignore
vendored
Normal file
2
stdlib/.dagger/env/kubernetes-kustomize/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
# dagger state
state/**
27
stdlib/.dagger/env/kubernetes-kustomize/values.yaml
vendored
Normal file
27
stdlib/.dagger/env/kubernetes-kustomize/values.yaml
vendored
Normal file
@ -0,0 +1,27 @@
plan:
module: .dagger/env/kubernetes-kustomize/plan
name: kubernetes-kustomize
inputs:
TestKustomize.testdata:
dir:
path: ./kubernetes/kustomize/testdata
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBTL2Jyczk3QXZiUVkyd3cw
TlYzQ2NyR2ZGZnhSRmowSGRmbFBodFRPc2dnCnJpYjdCdUpEUE10d3I4clh1eDVV
MmVqbmxiNmRvSUNqZEY3clZnci9pRkkKLS0tIGVLSVFwTy9TSElFUkdjOVlWb3Yy
OFllMEh3cVJZZnFxbW4xS1RtcFQzcFUKo/1WcYp4nPBXba8wQBe3DMt6pYQJGoSu
ja5BiCffN5wOoW9WT0j8Clx21w7BXcl46+T5GYpXDQDcqf6nCv1kYQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2021-06-17T20:48:00Z"
mac: ENC[AES256_GCM,data:SCWiSiDccPkZApOcc8RsYP7WPZUqUyYVB0UgivLhIsNSY5q3kCdenPLTUp2zLOcwaWzTPGmj++QtZjoNobcIhdVt1aJ9uXLLKRUXaRGIO3Jmhg3wj7kSPNjbDLZEB6uyA9h3edQGVVivNlNGpo91tg35QcFPPSG7UiowFnsD0zM=,iv:44hkujM/ZWjtYHau8BFMdOIeBj5jF/WnW4OOK7oSw1Y=,tag:mtJdUR+sA0tjIyAWDpXQlA==,type:str]
pgp: []
encrypted_suffix: secret
version: 3.7.1
2
stdlib/.dagger/env/terraform/.gitignore
vendored
Normal file
2
stdlib/.dagger/env/terraform/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
# dagger state
state/**
74
stdlib/.dagger/env/terraform/plan/terraform.cue
vendored
Normal file
74
stdlib/.dagger/env/terraform/plan/terraform.cue
vendored
Normal file
@ -0,0 +1,74 @@
package terraform

import (
"dagger.io/dagger"
"dagger.io/dagger/op"
"dagger.io/alpine"
"dagger.io/terraform"
)

TestData: dagger.#Artifact @dagger(input)

TestConfig: awsConfig: {
accessKey: dagger.#Secret @dagger(input)
secretKey: dagger.#Secret @dagger(input)
region: "us-east-2"
}

#TestGetConfig: {
accessKey: dagger.#Secret

secretKey: dagger.#Secret

visibleAccessKey: string

visibleSecretKey: string

#up: [
op.#Load & {from: alpine.#Image & {
package: {
bash: true
jq: true
}
}},

op.#Exec & {
always: true
args: ["/bin/bash", "-c", #"""
export ACCESS_KEY=$(cat /accessKey)
export SECRET_KEY=$(cat /secretKey)

jq --arg key0 'visibleAccessKey' --arg value0 "$ACCESS_KEY" \
--arg key1 'visibleSecretKey' --arg value1 "$SECRET_KEY" \
'. | .[$key0]=$value0 | .[$key1]=$value1' <<< '{}' > /out
"""#,
]
mount: {
"/accessKey": secret: accessKey
"/secretKey": secret: secretKey
}
},

op.#Export & {
source: "/out"
format: "json"
},
]
}

TestTerraform: {
config: #TestGetConfig & {
accessKey: TestConfig.awsConfig.accessKey
secretKey: TestConfig.awsConfig.secretKey
}

apply: terraform.#Configuration & {
source: TestData
env: {
AWS_ACCESS_KEY_ID: config.visibleAccessKey
AWS_SECRET_ACCESS_KEY: config.visibleSecretKey
AWS_DEFAULT_REGION: TestConfig.awsConfig.region
AWS_REGION: TestConfig.awsConfig.region
}
}
}
31
stdlib/.dagger/env/terraform/values.yaml
vendored
Normal file
31
stdlib/.dagger/env/terraform/values.yaml
vendored
Normal file
@ -0,0 +1,31 @@
plan:
module: .dagger/env/terraform/plan
name: terraform
inputs:
TestConfig.awsConfig.accessKey:
secret: ENC[AES256_GCM,data:V/p84nLbgjrytefnsfItiY71ikQ=,iv:i1x3UYP+sctwY9LrRp/rfeJ8/JPWOfiiJSG0NWUiXW0=,tag:IynKh1fQEhExmmR3qGx/zQ==,type:str]
TestConfig.awsConfig.secretKey:
secret: ENC[AES256_GCM,data:cBYaVhbeV9D6acJWNU7uL8AsEtpnY0wHM8td9ZAJ9ebGB+BY4iBZLQ==,iv:SDkRKQQKBSz/cRQlW65sIjF0PhHhhKkGUEgZe9CV7Ek=,tag:OCUQmgjP2p57YoLts9Dh4w==,type:str]
TestData:
dir:
path: ./terraform/testdata
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBkOTJpQWJWY3dnM2hDdXZG
bVJEN0dNUzQ0VGhneWtHVG1hUHA5ZjdhdHdvClR0ZVpKb1RYRnc3dy9wSjVuSHBn
c2RMbzE0Y2EzN2FVak9CMk9CK0hOTFkKLS0tIG9Fdi9xWWc0TU5WY1ZsVUdZM2lw
cC9LSiswbFRKaTNXUGNIWVZVbGJqV1UK3/wsgPwR5P2fzs80wcz1dM/8sbBWMR+B
dmhP99OQisIgcwGATy0nh726pYKtosDpSLIJkLZDAUq9qRKm9bch1w==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2021-06-18T16:01:11Z"
mac: ENC[AES256_GCM,data:XznDGqfZkC6vsv696qWVxbBCUgsyU/zPZg0NCULCXAfO08Hsteb0c93Y8DA3CV8flQW3cgn5XLugNnQADJ6luTXHbqIVMVMUSe1q41Kxl7exr/dn0robqaRm5MnloG823s9X3sAOcPzyTSxy1YVZfYaYbG23w9IeNmVTyaUttkU=,iv:kEQs7+bx+7j2v5b6Bx0r+ZVtp7rj/8mgX4oRUP7cruc=,tag:oQEfCPO/0V11rmkc0yaz3Q==,type:str]
pgp: []
encrypted_suffix: secret
version: 3.7.1
@ -19,3 +19,15 @@ import (

id: string
}

#Input: {
@dagger(input)
_
...
}

#Output: {
@dagger(output)
_
...
}
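As a rough, hypothetical sketch (not part of this commit), a plan can attach these new definitions to ordinary CUE values; the package name, field names and registry URL below are invented for the example, and the `dagger input`/`dagger output` commands referenced are the ones exercised by the tests added in this commit:

```cue
package example

import (
	"dagger.io/dagger"
)

// string input with a default, settable via `dagger input text tag v2`
tag: dagger.#Input & {string | *"latest"}

// directory input, provided with `dagger input dir src ./app`
src: dagger.#Input & dagger.#Artifact

// value surfaced by `dagger output list` after `dagger up`
ref: dagger.#Output & "registry.example.com/app:\(tag)"
```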
@ -39,10 +39,33 @@ dagger() {
# copy_to_sandbox myenv
# dagger input secret -w "$DAGGER_SANDBOX" -e myenv "temporary change"
# dagger up -w "$DAGGER_SANDBOX" -e myenv
#
# To use a testdata directory in tests, add the package name as a second argument.
# Usage:
# copy_to_sandbox myenv mypackage
copy_to_sandbox() {
local name="$1"
local source="$DAGGER_WORKSPACE"/.dagger/env/"$name"
local target="$DAGGER_SANDBOX"/.dagger/env/"$name"

cp -a "$source" "$target"

if [ -d "$2" ]; then
local package="$2"
local source_package="$DAGGER_WORKSPACE"/"$package"
local target_package="$DAGGER_SANDBOX"/

cp -a "$source_package" "$target_package"
fi
}

# Check if there is a local kubernetes cluster.
#
# This is needed to run the kubernetes tests in CI.
skip_unless_local_kube() {
if [ -f ~/.kube/config ] && grep -q "user: kind-kind" ~/.kube/config &> /dev/null && grep -q "127.0.0.1" ~/.kube/config &> /dev/null; then
echo "Kubernetes available"
else
skip "local kubernetes cluster not available"
fi
}
@ -32,10 +32,18 @@ setup() {
dagger -e aws-s3 up
}

@test "aws: eks" {
dagger -e aws-eks up
}

@test "docker run: local" {
dagger -e docker-run-local up
}

@test "docker build" {
dagger -e docker-build up
}

@test "docker command: ssh" {
dagger -e docker-command-ssh up
}
@ -53,6 +61,40 @@ setup() {
dagger -e docker-run-ssh up
}

@test "kubernetes: deployment" {
skip_unless_local_kube

# Copy deployment to sandbox
copy_to_sandbox kubernetes-deployment

# Set kubeconfig
dagger -w "$DAGGER_SANDBOX" -e kubernetes-deployment input text TestKubeconfig -f "$HOME"/.kube/config

dagger -w "$DAGGER_SANDBOX" -e kubernetes-deployment up

# Unset kubeconfig
dagger -w "$DAGGER_SANDBOX" -e kubernetes-deployment input unset TestKubeconfig
}

@test "kubernetes: kustomize" {
dagger -e kubernetes-kustomize up
}

@test "kubernetes: helm" {
skip_unless_local_kube

# Copy deployment to sandbox
copy_to_sandbox kubernetes-helm kubernetes

# Set kubeconfig
dagger -w "$DAGGER_SANDBOX" -e kubernetes-helm input text TestKubeconfig -f "$HOME"/.kube/config

dagger -w "$DAGGER_SANDBOX" -e kubernetes-helm up

# Unset kubeconfig
dagger -w "$DAGGER_SANDBOX" -e kubernetes-helm input unset TestKubeconfig
}

@test "google cloud: gcr" {
dagger -e google-gcr up
}
@ -64,3 +106,32 @@ setup() {
@test "google cloud: cloudrun" {
dagger -e google-cloudrun up
}

@test "terraform" {
# it must fail because of a missing var
run dagger -e terraform up
assert_failure

# Copy deployment to sandbox
copy_to_sandbox terraform terraform

# Add the var and try again
run dagger -w "$DAGGER_SANDBOX" -e terraform input text TestTerraform.apply.tfvars.input "42"
run dagger -w "$DAGGER_SANDBOX" -e terraform up
assert_success

# ensure the tfvar was passed correctly
run dagger -w "$DAGGER_SANDBOX" query -e terraform TestTerraform.apply.output.input.value -f text
assert_success
assert_output "42"

# ensure the random value is always the same
# this proves we're effectively using the s3 backend
run dagger -w "$DAGGER_SANDBOX" query -e terraform TestTerraform.apply.output.random.value -f json
assert_success
assert_output "36"

# Unset input
run dagger -w "$DAGGER_SANDBOX" -e terraform input unset TestTerraform.apply.tfvars.input
assert_success
}
@ -308,6 +308,20 @@ setup() {
}'
}

@test "dagger input dir: ignore .dagger" {
"$DAGGER" init

dagger_new_with_plan input "$TESTDIR"/cli/input/ignore

run [ -d "$TESTDIR"/cli/input/ignore/testdata/.dagger ]
assert_success

cp -R "$TESTDIR"/cli/input/ignore/testdata/ "$DAGGER_WORKSPACE"/testdata
"$DAGGER" input -e "input" dir "source" "$DAGGER_WORKSPACE"/testdata
"$DAGGER" up -e "input"
assert_success
}

@test "dagger input git" {
"$DAGGER" init
20
tests/cli/input/ignore/main.cue
Normal file
20
tests/cli/input/ignore/main.cue
Normal file
@ -0,0 +1,20 @@
package testing

import (
"dagger.io/dagger/op"
"dagger.io/dagger"
)

source: dagger.#Artifact

#up: [
op.#FetchContainer & {ref: "busybox"},
op.#Exec & {
args: ["sh", "-c", """
set -exu
[ -f /source/testfile ]
[ ! -d /source/.dagger ]
"""]
mount: "/source": from: source
},
]
0
tests/cli/input/ignore/testdata/.dagger/foo
vendored
Normal file
0
tests/cli/input/ignore/testdata/.dagger/foo
vendored
Normal file
1
tests/cli/input/ignore/testdata/testfile
vendored
Normal file
1
tests/cli/input/ignore/testdata/testfile
vendored
Normal file
@ -0,0 +1 @@
thisisatest
35
tests/core.bats
Normal file
35
tests/core.bats
Normal file
@ -0,0 +1,35 @@
# Test core Dagger features & types

setup() {
load 'helpers'

common_setup

# Use native Dagger environment here
unset DAGGER_WORKSPACE
}

@test "core: inputs & outputs" {
# List available inputs
run dagger -e test-core input list
assert_success
assert_output --partial 'name'
assert_output --partial 'dir'

# Set text input
dagger -e test-core input text name Bob
run dagger -e test-core up
assert_success
assert_output --partial 'Hello, Bob!'

run dagger -e test-core output list
assert_success
assert_output --partial 'message "Hello, Bob!"'

# Unset text input
dagger -e test-core input unset name
run dagger -e test-core up
assert_success
assert_output --partial 'Hello, world!'
}
@ -1,18 +0,0 @@
setup() {
load 'helpers'

common_setup
}

@test "example: react" {
DAGGER_WORKSPACE="$TESTDIR"/../examples/react
export DAGGER_WORKSPACE

"$DAGGER" up

# curl the URL we just deployed to check if it worked
deployUrl=$("$DAGGER" query -l error -f text www.deployUrl)
run curl -sS "$deployUrl"
assert_success
assert_output --partial "Todo App"
}
@ -27,6 +27,11 @@ dagger_new_with_plan() {
}

# dagger helper to execute the right binary
dagger() {
"${DAGGER}" "$@"
}

skip_unless_secrets_available() {
local inputFile="$1"
sops exec-file "$inputFile" echo > /dev/null 2>&1 || skip "$inputFile cannot be decrypted"
@ -6,59 +6,7 @@ setup() {

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: kubernetes" {
# Changes in https://github.com/dagger/dagger/pull/628
skip_unless_local_kube

"$DAGGER" init
dagger_new_with_plan kubernetes "$TESTDIR"/stdlib/kubernetes/

run "$DAGGER" input -e "kubernetes" text kubeconfig -f ~/.kube/config
assert_success

run "$DAGGER" up -e "kubernetes"
assert_success
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: kustomize" {
"$DAGGER" compute "$TESTDIR"/stdlib/kubernetes/kustomize --input-dir TestKustomize.kustom.source="$TESTDIR"/stdlib/kubernetes/kustomize/testdata
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: helm" {
skip "helm is broken"
skip_unless_local_kube

"$DAGGER" init
dagger_new_with_plan helm "$TESTDIR"/stdlib/kubernetes/helm

run "$DAGGER" input -e "helm" text kubeconfig -f ~/.kube/config
assert_success

cp -R "$TESTDIR"/stdlib/kubernetes/helm/testdata/mychart "$DAGGER_WORKSPACE"/testdata
run "$DAGGER" input -e "helm" dir TestHelmSimpleChart.deploy.chartSource "$DAGGER_WORKSPACE"/testdata
assert_success

run "$DAGGER" up -e "helm"
assert_success
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: docker: build" {
"$DAGGER" compute "$TESTDIR"/stdlib/docker/build/ --input-dir source="$TESTDIR"/stdlib/docker/build
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: docker: dockerfile" {
"$DAGGER" compute "$TESTDIR"/stdlib/docker/dockerfile/ --input-dir source="$TESTDIR"/stdlib/docker/dockerfile/testdata
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: docker: push-and-pull" {
skip_unless_secrets_available "$TESTDIR"/stdlib/docker/push-pull/inputs.yaml
|
|||||||
run "$DAGGER" compute --input-yaml "$TESTDIR"/stdlib/docker/push-pull/inputs.yaml --input-dir source="$TESTDIR"/stdlib/docker/push-pull/testdata "$TESTDIR"/stdlib/docker/push-pull/
|
run "$DAGGER" compute --input-yaml "$TESTDIR"/stdlib/docker/push-pull/inputs.yaml --input-dir source="$TESTDIR"/stdlib/docker/push-pull/testdata "$TESTDIR"/stdlib/docker/push-pull/
|
||||||
assert_success
|
assert_success
|
||||||
}
|
}
|
||||||
|
|
||||||
# FIXME: move to universe/universe.bats
|
|
||||||
# Assigned to: <ADD YOUR NAME HERE>
|
|
||||||
@test "stdlib: terraform" {
|
|
||||||
skip_unless_secrets_available "$TESTDIR"/stdlib/terraform/s3/inputs.yaml
|
|
||||||
|
|
||||||
"$DAGGER" init
|
|
||||||
dagger_new_with_plan terraform "$TESTDIR"/stdlib/terraform/s3
|
|
||||||
|
|
||||||
cp -R "$TESTDIR"/stdlib/terraform/s3/testdata "$DAGGER_WORKSPACE"/testdata
|
|
||||||
"$DAGGER" -e terraform input dir TestData "$DAGGER_WORKSPACE"/testdata
|
|
||||||
sops -d "$TESTDIR"/stdlib/terraform/s3/inputs.yaml | "$DAGGER" -e "terraform" input yaml "" -f -
|
|
||||||
|
|
||||||
# it must fail because of a missing var
|
|
||||||
run "$DAGGER" up -e terraform
|
|
||||||
assert_failure
|
|
||||||
|
|
||||||
# add the var and try again
|
|
||||||
"$DAGGER" -e terraform input text TestTerraform.apply.tfvars.input "42"
|
|
||||||
run "$DAGGER" up -e terraform
|
|
||||||
assert_success
|
|
||||||
|
|
||||||
# ensure the tfvar was passed correctly
|
|
||||||
run "$DAGGER" query -e terraform \
|
|
||||||
TestTerraform.apply.output.input.value -f text
|
|
||||||
assert_success
|
|
||||||
assert_output "42"
|
|
||||||
|
|
||||||
# ensure the random value is always the same
|
|
||||||
# this proves we're effectively using the s3 backend
|
|
||||||
run "$DAGGER" query -e terraform \
|
|
||||||
TestTerraform.apply.output.random.value -f json
|
|
||||||
assert_success
|
|
||||||
assert_output "36"
|
|
||||||
}
|
|
||||||
|
@ -1,26 +0,0 @@
name: default
inputs:
TestConfig.awsConfig.accessKey:
secret: ENC[AES256_GCM,data:dzhlip9kKU8mMEycFjq6MobD5BA=,iv:LKeYUbXpnWIZneGs7DCLVKxv1W2aa/3EVGO4jnDlOgc=,tag:+TcxQahxFTweyoPaROTJSQ==,type:str]
TestConfig.awsConfig.secretKey:
secret: ENC[AES256_GCM,data:bu3AI5jODWv4ePvRKw2l/1UOuH07Z0/oB2hiY4QqrhTcfjdSbr6kBg==,iv:BqddzzXqvAv0cAj2SVhoFx/kUOnRsoevqMRujCINVv0=,tag:u0KjVnbN8h54CLFARJmJ0g==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzV0ZXNW5qaGNJMjF5bnBO
d1Z1RXFhSnNRM1Vwa3lyWFJ6VVFDZTQ3cUhZClh0N1lxZ3dwSFhHTjRyS092OVVj
Tkw4ZlU4S3g0T1VGS1RYYnB1dGlzbVkKLS0tIEc4T1Z3SEU2NUNhd2FkSXlIUERM
UE5Cd2VwYkd1MHlTOXNJVEU3RVpqU2sK86kXU6ZaaVHTg9BuCEcOxnDrrW00+bwu
AHttbzqYVuC3YxXjOTzAZL8aYTStk14wGdI6TirZ9pX0fyaKAfzBUQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2021-05-27T16:01:59Z"
mac: ENC[AES256_GCM,data:T+0rcT9Xi/kJ8+EzCd7ewenDmc1cH/t2MxCpf+QXkILUC/uE8OgROizDMAiUYI2HpeBfZrmUgLMVzlTZirIbC51eWLAf6itbSIGKkVuz0uSNwhRpKGAROg6U1h39Scg6RpAvpzSTZvYOx5SwP78Uc6NQdp5yTDEb+0e9Wqzu+jU=,iv:INAN+EPwBv5dWWHQnaMr4QOBQWx3WCcohORvIPrBZN8=,tag:N4vtDowFKTDSHmMob5HgCw==,type:str]
pgp: []
encrypted_suffix: secret
version: 3.7.1
@ -1,53 +0,0 @@
package eks

import (
"dagger.io/aws"
"dagger.io/aws/eks"
"dagger.io/kubernetes"
"dagger.io/dagger/op"
)

TestConfig: awsConfig: aws.#Config & {
region: "us-east-2"
}

TestCluster: eks.#KubeConfig & {
config: TestConfig.awsConfig
clusterName: *"dagger-example-eks-cluster" | string
}

TestEks: {
#GetPods:
"""
kubectl get pods -A
"""

#up: [
op.#Load & {
from: kubernetes.#Kubectl
},

op.#WriteFile & {
dest: "/kubeconfig"
content: TestCluster.kubeconfig
},

op.#WriteFile & {
dest: "/getPods.sh"
content: #GetPods
},

op.#Exec & {
always: true
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"/getPods.sh",
]
env: KUBECONFIG: "/kubeconfig"
},
]
}
@ -1,31 +0,0 @@
package docker

import (
"dagger.io/dagger"
"dagger.io/dagger/op"
"dagger.io/docker"
)

// Build a Docker image from source, using included Dockerfile
source: dagger.#Artifact

TestBuild: {
image: docker.#Build & {
"source": source
}

verify: #up: [
op.#Load & {
from: image
},

op.#Exec & {
always: true
args: [
"sh", "-c", """
grep -q "test" /test.txt
""",
]
},
]
}
@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@ -1,21 +0,0 @@
apiVersion: v2
name: mychart
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.16.0
@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mychart.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "mychart.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mychart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "mychart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}
@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "mychart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "mychart.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "mychart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "mychart.labels" -}}
helm.sh/chart: {{ include "mychart.chart" . }}
{{ include "mychart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "mychart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "mychart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "mychart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "mychart.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
@ -1,55 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "mychart.fullname" . }}
labels:
{{- include "mychart.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "mychart.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "mychart.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "mychart.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "mychart.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "mychart.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "mychart.fullname" . }}
labels:
{{- include "mychart.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "mychart.selectorLabels" . | nindent 4 }}
@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "mychart.serviceAccountName" . }}
labels:
{{- include "mychart.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}
@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "mychart.fullname" . }}-test-connection"
labels:
{{- include "mychart.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "mychart.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never
@ -1,68 +0,0 @@
# Default values for mychart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
repository: nginx
pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:

podSecurityContext: {}
# fsGroup: 2000

securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000

service:
type: ClusterIP
port: 80

ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local

resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
@ -1,25 +0,0 @@
package testing

import (
"dagger.io/dagger"

"dagger.io/terraform"
)

TestData: dagger.#Artifact

TestConfig: awsConfig: {
accessKey: string
secretkey: string
region: "us-east-2"
}

TestTerraform: apply: terraform.#Configuration & {
source: TestData
env: {
AWS_ACCESS_KEY_ID: TestConfig.awsConfig.accessKey
AWS_SECRET_ACCESS_KEY: TestConfig.awsConfig.secretKey
AWS_DEFAULT_REGION: TestConfig.awsConfig.region
AWS_REGION: TestConfig.awsConfig.region
}
}
@ -1,5 +1,4 @@
[build]
base = "website/"
publish = "build/"
command = "yarn build"

@ -10,12 +9,12 @@
# compare the current commit to the previous.
# We can't use `$CACHED_COMMIT_REF` because that points to the PR preview
# that was built before merging.
ignore = "git diff --quiet HEAD^ HEAD -- . ../docs/ ../netlify.toml"
ignore = "git diff --quiet HEAD^ HEAD -- . ../docs/"

[context.deploy-preview]
# Do not build the site if there's no site-related changes since the last
# deploy.
ignore = "git diff --quiet $CACHED_COMMIT_REF $COMMIT_REF . ../docs/ ../netlify.toml"
ignore = "git diff --quiet $CACHED_COMMIT_REF $COMMIT_REF . ../docs/"
command = "yarn build:withoutAuth"

[[redirects]]