Merge branch 'main' into cloudrun-support

Tihomir Jovicic
2021-06-22 12:30:48 +02:00
89 changed files with 553 additions and 2184 deletions

@@ -1,112 +0,0 @@
package main
import (
"dagger.io/dagger"
"dagger.io/aws/ecs"
"dagger.io/git"
)
// Backend configuration
backend: {
// Source code to build this container
source: git.#Repository | dagger.#Artifact @dagger(input)
// Container environment variables
environment: {
[string]: string
} @dagger(input)
// Public hostname (needs to match the master domain configured on the load balancer)
hostname: string @dagger(input)
// Container configuration
container: {
// Desired number of running containers
desiredCount: *1 | int @dagger(input)
// Time (in seconds) to wait for the health check HTTP request to complete
healthCheckTimeout: *10 | int @dagger(input)
// HTTP path used for the health check request (HTTP GET)
healthCheckPath: *"/" | string @dagger(input)
// Number of times the health check needs to fail before recycling the container
healthCheckUnhealthyThreshold: *2 | int @dagger(input)
// Port used by the process inside the container
port: *80 | int @dagger(input)
// Memory to allocate
memory: *1024 | int @dagger(input)
// Override the default container command
command: [...string] @dagger(input)
// Custom dockerfile path
dockerfilePath: *"" | string @dagger(input)
// Docker build args
dockerBuildArgs: {
[string]: string
} @dagger(input)
}
// Init container runs only once when the main container starts
initContainer: {
command: [...string] @dagger(input)
environment: {
[string]: string
} @dagger(input)
}
}
// Backend deployment logic
backend: {
let slug = name
// Docker image built from source, pushed to ECR
image: #ECRImage & {
source: backend.source
repository: infra.ecrRepository
tag: slug
awsConfig: infra.awsConfig
if backend.container.dockerfilePath != "" {
dockerfilePath: backend.container.dockerfilePath
}
buildArgs: backend.container.dockerBuildArgs
}
// Creates an ECS Task + Service + deploy via Cloudformation
app: #ECSApp & {
awsConfig: infra.awsConfig
"slug": slug
clusterName: infra.ecsClusterName
vpcId: infra.vpcId
elbListenerArn: infra.elbListenerArn
if infra.ecsTaskRoleArn != _|_ {
taskRoleArn: infra.ecsTaskRoleArn
}
hostname: backend.hostname
healthCheck: {
timeout: backend.container.healthCheckTimeout
path: backend.container.healthCheckPath
unhealthyThresholdCount: backend.container.healthCheckUnhealthyThreshold
}
desiredCount: backend.container.desiredCount
container: {
command: backend.container.command
environment: backend.environment
port: backend.container.port
memory: backend.container.memory
"image": image.ref
}
}
// Optional container to run once during the deploy (e.g. a db migration)
if len(backend.initContainer.command) > 0 {
initContainer: ecs.#RunTask & {
config: infra.awsConfig
containerName: slug
cluster: infra.ecsClusterName
if infra.ecsTaskRoleArn != _|_ {
roleArn: infra.ecsTaskRoleArn
}
containerEnvironment: backend.initContainer.environment
containerCommand: backend.initContainer.command
taskArn: app.taskArn
}
}
}

@@ -1,41 +0,0 @@
package main
import (
"encoding/base64"
"dagger.io/aws/rds"
)
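// Provision a database and a dedicated user for this app on the shared RDS instance,
// and expose the resulting connection settings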
database: {
let slug = name
dbType: "mysql" | "postgresql" @dagger(input)
db: rds.#Database & {
config: infra.awsConfig
name: slug
dbArn: infra.rdsInstanceArn
"dbType": dbType
secretArn: infra.rdsAdminSecretArn
}
user: rds.#User & {
config: infra.awsConfig
dbArn: infra.rdsInstanceArn
"dbType": dbType
secretArn: infra.rdsAdminSecretArn
username: slug
// FIXME: make it secure (generate infra side?)
password: base64.Encode(null, "pwd-\(slug)")
grantDatabase: db.out
}
instance: rds.#Instance & {
config: infra.awsConfig
dbArn: infra.rdsInstanceArn
}
hostname: instance.hostname
port: instance.port
dbName: db.out
username: user.out
password: user.password
}

@@ -1,53 +0,0 @@
package main
import (
"dagger.io/dagger"
"dagger.io/dagger/op"
"dagger.io/aws"
"dagger.io/aws/ecr"
)
// Build an image and push it to ECR
#ECRImage: {
source: dagger.#Artifact
// Path of the Dockerfile
dockerfilePath?: string
repository: string
tag: string
awsConfig: aws.#Config
buildArgs: [string]: string
// Use these credentials to push
ecrCreds: ecr.#Credentials & {
config: awsConfig
}
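// Image reference ("repository:tag") produced by the build & push pipeline below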
ref: {
string
#up: [
// Build the docker image
op.#DockerBuild & {
context: source
if dockerfilePath != _|_ {
"dockerfilePath": dockerfilePath
}
buildArg: buildArgs
},
// Login to Registry
op.#DockerLogin & {
target: repository
username: ecrCreds.username
secret: ecrCreds.secret
},
// Push the image to the registry
op.#PushContainer & {
ref: "\(repository):\(tag)"
},
op.#Export & {
source: "/dagger/image_ref"
format: "string"
},
]
}
}

@@ -1,152 +0,0 @@
package main
import (
"encoding/json"
"dagger.io/aws"
"dagger.io/aws/elb"
"dagger.io/aws/cloudformation"
)
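// #ECSApp deploys a single container behind the shared ALB listener:
// an ECS task definition, target group, listener rule and service, managed as one CloudFormation stack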
#ECSApp: {
awsConfig: aws.#Config
slug: string
clusterName: string
vpcId: string
elbListenerArn: string
taskRoleArn: *"" | string
hostname: string
healthCheck: {
timeout: *10 | int
path: *"/" | string
unhealthyThresholdCount: *2 | int
}
desiredCount: int
container: {
command: [...string]
environment: [string]: string
port: *80 | int
cpu: *256 | int
memory: *1024 | int
image: string
}
taskArn: cfnStack.outputs.TaskArn
elbRulePriority: elb.#RandomRulePriority & {
config: awsConfig
listenerArn: elbListenerArn
vhost: hostname
}
cfnStack: cloudformation.#Stack & {
config: awsConfig
stackName: slug
onFailure: "DO_NOTHING"
parameters: {
ELBRulePriority: elbRulePriority.out
ImageRef: container.image
ELBListenerArn: elbListenerArn
}
source: json.Marshal(template)
}
template: {
AWSTemplateFormatVersion: "2010-09-09"
Description: "Dagger deployed app"
Parameters: {
ELBRulePriority: Type: "Number"
ImageRef: Type: "String"
ELBListenerArn: Type: "String"
}
Resources: {
ECSTaskDefinition: {
Type: "AWS::ECS::TaskDefinition"
Properties: {
Cpu: "\(container.cpu)"
Memory: "\(container.memory)"
if taskRoleArn != "" {
TaskRoleArn: taskRoleArn
}
NetworkMode: "bridge"
ContainerDefinitions: [{
if len(container.command) > 0 {
Command: container.command
}
Name: slug
Image: Ref: "ImageRef"
Essential: true
Environment: [ for k, v in container.environment {
Name: k
Value: v
}]
PortMappings: [{
ContainerPort: container.port
}]
StopTimeout: 5
LogConfiguration: {
LogDriver: "awslogs"
Options: {
"awslogs-group": "bl/provider/ecs/\(clusterName)"
"awslogs-region": Ref: "AWS::Region"
"awslogs-create-group": "true"
"awslogs-stream-prefix": slug
}
}
}]
}
}
ECSListenerRule: {
Type: "AWS::ElasticLoadBalancingV2::ListenerRule"
Properties: {
ListenerArn: Ref: "ELBListenerArn"
Priority: Ref: "ELBRulePriority"
Conditions: [{
Field: "host-header"
Values: [hostname]}]
Actions: [{
Type: "forward"
TargetGroupArn: Ref: "ECSTargetGroup"
}]}}
ECSTargetGroup: {
Type: "AWS::ElasticLoadBalancingV2::TargetGroup"
Properties: {
Protocol: "HTTP"
VpcId: vpcId
Port: 80
HealthCheckPath: healthCheck.path
UnhealthyThresholdCount: healthCheck.unhealthyThresholdCount
HealthCheckTimeoutSeconds: healthCheck.timeout
HealthCheckIntervalSeconds: healthCheck.timeout + 1
HealthyThresholdCount: 3
TargetGroupAttributes: [{
Value: "10"
Key: "deregistration_delay.timeout_seconds"
}]}}
ECSService: {
Type: "AWS::ECS::Service"
Properties: {
Cluster: clusterName
DesiredCount: desiredCount
LaunchType: "EC2"
LoadBalancers: [{
ContainerPort: container.port
TargetGroupArn: Ref: "ECSTargetGroup"
ContainerName: slug
}]
ServiceName: slug
TaskDefinition: Ref: "ECSTaskDefinition"
DeploymentConfiguration: {
DeploymentCircuitBreaker: {
Enable: true
Rollback: true
}
MaximumPercent: 200
MinimumHealthyPercent: 100
}}
DependsOn: "ECSListenerRule"
}
}
Outputs: TaskArn: Value: Ref: "ECSTaskDefinition"
}
}

@@ -1,57 +0,0 @@
package main
import (
"dagger.io/dagger"
"dagger.io/netlify"
"dagger.io/js/yarn"
"dagger.io/git"
)
frontend: {
// Source code to build the app
source: git.#Repository | dagger.#Artifact @dagger(input)
writeEnvFile?: string @dagger(input)
// Yarn Build
yarn: {
// Run this yarn script
script: string | *"build" @dagger(input)
// Read build output from this directory
// (path must be relative to working directory).
buildDir: string | *"build" @dagger(input)
}
// Build environment variables
environment: {
[string]: string @dagger(input)
}
environment: {
NODE_ENV: string | *"production" @dagger(input)
}
environment: {
APP_URL: "https://\(name).netlify.app/" @dagger(input)
}
}
frontend: {
app: yarn.#Package & {
source: frontend.source
env: frontend.environment
if frontend.writeEnvFile != _|_ {
writeEnvFile: frontend.writeEnvFile
}
script: frontend.yarn.script
buildDir: frontend.yarn.buildDir
}
// Host the application with Netlify
site: netlify.#Site & {
"name": name
account: infra.netlifyAccount
contents: app.build
}
}

@@ -1,35 +0,0 @@
package main
import (
"dagger.io/aws"
"dagger.io/netlify"
)
infra: {
// AWS auth & default region
awsConfig: aws.#Config
// VPC Id
vpcId: string @dagger(input)
// ECR Image repository
ecrRepository: string @dagger(input)
// ECS cluster name
ecsClusterName: string @dagger(input)
// Execution Role ARN used for all tasks running on the cluster
ecsTaskRoleArn?: string @dagger(input)
// ELB listener ARN
elbListenerArn: string @dagger(input)
// Secret ARN for the admin password of the RDS Instance
rdsAdminSecretArn: string @dagger(input)
// ARN of the RDS Instance
rdsInstanceArn: string @dagger(input)
// Netlify credentials
netlifyAccount: netlify.#Account @dagger(input)
}

@@ -1,34 +0,0 @@
name: my-app
infra:
awsConfig:
accessKey: <REPLACE WITH AWS ACCESS KEY>
secretKey: <REPLACE WITH AWS SECRET KEY>
region: us-east-1
vpcId: vpc-020ctgv0bcde4242
ecrRepository: 8563296674124.dkr.ecr.us-east-1.amazonaws.com/apps
ecsClusterName: bl-ecs-acme-764-ECSCluster-lRIVVg09G4HX
elbListenerArn: arn:aws:elasticloadbalancing:us-east-1:8563296674124:listener/app/bl-ec-ECSAL-OSYI03K07BCO/3c2d3e78347bde5b/d02ac88cc007e24e
rdsAdminSecretArn: arn:aws:secretsmanager:us-east-1:8563296674124:secret:AdminPassword-NQbBi7oU4CYS9-IGgS3B
rdsInstanceArn: arn:aws:rds:us-east-1:8563296674124:cluster:bl-rds-acme-764-rdscluster-8eg3xbfjggkfdg
netlifyAccount:
token: <REPLACE WITH NETLIFY TOKEN>
database:
dbType: mysql
backend:
source:
remote: https://github.com/blocklayerhq/acme-clothing.git
ref: HEAD
subdir: ./crate/code/api
hostname: my-app.acme-764-api.microstaging.io
container:
healthCheckPath: /health-check
healthCheckTimeout: 40
frontend:
source:
remote: https://github.com/blocklayerhq/acme-clothing.git
ref: HEAD
subdir: ./crate/code/web
writeEnvFile: .env
yarn:
buildDir: public
script: build:client

@@ -1,22 +0,0 @@
package main
// Name of the application
name: string & =~"[a-z0-9-]+" @dagger(input)
// Inject db info in the container environment
backend: environment: {
DB_USERNAME: database.username
DB_HOSTNAME: database.hostname
DB_PASSWORD: database.password
DB_DBNAME: database.dbName
DB_PORT: "\(database.port)"
DB_TYPE: database.dbType
}
// Configure the frontend with the API URL
frontend: environment: APP_URL_API: url.backendURL
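// URLs of the deployed application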
url: {
frontendURL: frontend.site.url @dagger(output)
backendURL: "https://\(backend.hostname)/" @dagger(output)
}

@@ -1,53 +0,0 @@
package main
import (
"encoding/yaml"
"dagger.io/dagger"
"dagger.io/aws"
"dagger.io/aws/eks"
"dagger.io/kubernetes"
"dagger.io/kubernetes/helm"
)
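// Inline Kubernetes manifest: a single test Pod, applied below with kubernetes.#Resources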
kubeSrc: {
apiVersion: "v1"
kind: "Pod"
metadata: name: "kube-test"
spec: {
restartPolicy: "Never"
containers: [{
name: "test"
image: "hello-world"
}]
}
}
// Fill using:
// --input-string awsConfig.accessKey=XXX
// --input-string awsConfig.secretKey=XXX
awsConfig: aws.#Config & {
region: *"us-east-2" | string
}
// Take the kubeconfig from the EKS cluster
cluster: eks.#KubeConfig & {
config: awsConfig
clusterName: *"dagger-example-eks-cluster" | string
}
// Example of a simple `kubectl apply` using an inline manifest
kubeApply: kubernetes.#Resources & {
manifest: yaml.Marshal(kubeSrc)
namespace: "test"
kubeconfig: cluster.kubeconfig
}
// Example of a `helm install` using a local chart
// Fill using:
// --input-dir helmChart.chartSource=./testdata/mychart
helmChart: helm.#Chart & {
name: "test-helm"
namespace: "test"
kubeconfig: cluster.kubeconfig
chartSource: dagger.#Artifact
}

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,21 +0,0 @@
apiVersion: v2
name: mychart
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.16.0

@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mychart.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "mychart.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mychart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "mychart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}

@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "mychart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "mychart.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "mychart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "mychart.labels" -}}
helm.sh/chart: {{ include "mychart.chart" . }}
{{ include "mychart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "mychart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "mychart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "mychart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "mychart.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

@@ -1,55 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "mychart.fullname" . }}
labels:
{{- include "mychart.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "mychart.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "mychart.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "mychart.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "mychart.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "mychart.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "mychart.fullname" . }}
labels:
{{- include "mychart.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "mychart.selectorLabels" . | nindent 4 }}

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "mychart.serviceAccountName" . }}
labels:
{{- include "mychart.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "mychart.fullname" . }}-test-connection"
labels:
{{- include "mychart.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "mychart.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

@@ -1,68 +0,0 @@
# Default values for mychart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

@@ -1,447 +0,0 @@
package main
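// CloudFormation template for the EKS control plane:
// VPC, public/private subnets, NAT gateways, route tables, the cluster IAM role and the EKS cluster itself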
#CFNTemplate: eksControlPlane: {
AWSTemplateFormatVersion: "2010-09-09"
Description: "Amazon EKS Sample VPC - Private and Public subnets"
Parameters: {
VpcBlock: {
Type: "String"
Default: "192.168.0.0/16"
Description: "The CIDR range for the VPC. This should be a valid private (RFC 1918) CIDR range."
}
PublicSubnet01Block: {
Type: "String"
Default: "192.168.0.0/18"
Description: "CidrBlock for public subnet 01 within the VPC"
}
PublicSubnet02Block: {
Type: "String"
Default: "192.168.64.0/18"
Description: "CidrBlock for public subnet 02 within the VPC"
}
PrivateSubnet01Block: {
Type: "String"
Default: "192.168.128.0/18"
Description: "CidrBlock for private subnet 01 within the VPC"
}
PrivateSubnet02Block: {
Type: "String"
Default: "192.168.192.0/18"
Description: "CidrBlock for private subnet 02 within the VPC"
}
ClusterName: {
Type: "String"
Description: "The EKS cluster name"
}
}
Metadata: "AWS::CloudFormation::Interface": ParameterGroups: [
{
Label: default: "Worker Network Configuration"
Parameters: [
"VpcBlock",
"PublicSubnet01Block",
"PublicSubnet02Block",
"PrivateSubnet01Block",
"PrivateSubnet02Block",
]
},
]
Resources: {
VPC: {
Type: "AWS::EC2::VPC"
Properties: {
CidrBlock: Ref: "VpcBlock"
EnableDnsSupport: true
EnableDnsHostnames: true
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-VPC"
},
]
}
}
InternetGateway: Type: "AWS::EC2::InternetGateway"
VPCGatewayAttachment: {
Type: "AWS::EC2::VPCGatewayAttachment"
Properties: {
InternetGatewayId: Ref: "InternetGateway"
VpcId: Ref: "VPC"
}
}
PublicRouteTable: {
Type: "AWS::EC2::RouteTable"
Properties: {
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Public Subnets"
},
{
Key: "Network"
Value: "Public"
},
]
}
}
PrivateRouteTable01: {
Type: "AWS::EC2::RouteTable"
Properties: {
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Private Subnet AZ1"
},
{
Key: "Network"
Value: "Private01"
},
]
}
}
PrivateRouteTable02: {
Type: "AWS::EC2::RouteTable"
Properties: {
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Private Subnet AZ2"
},
{
Key: "Network"
Value: "Private02"
},
]
}
}
PublicRoute: {
DependsOn: "VPCGatewayAttachment"
Type: "AWS::EC2::Route"
Properties: {
RouteTableId: Ref: "PublicRouteTable"
DestinationCidrBlock: "0.0.0.0/0"
GatewayId: Ref: "InternetGateway"
}
}
PrivateRoute01: {
DependsOn: [
"VPCGatewayAttachment",
"NatGateway01",
]
Type: "AWS::EC2::Route"
Properties: {
RouteTableId: Ref: "PrivateRouteTable01"
DestinationCidrBlock: "0.0.0.0/0"
NatGatewayId: Ref: "NatGateway01"
}
}
PrivateRoute02: {
DependsOn: [
"VPCGatewayAttachment",
"NatGateway02",
]
Type: "AWS::EC2::Route"
Properties: {
RouteTableId: Ref: "PrivateRouteTable02"
DestinationCidrBlock: "0.0.0.0/0"
NatGatewayId: Ref: "NatGateway02"
}
}
NatGateway01: {
DependsOn: [
"NatGatewayEIP1",
"PublicSubnet01",
"VPCGatewayAttachment",
]
Type: "AWS::EC2::NatGateway"
Properties: {
AllocationId: "Fn::GetAtt": [
"NatGatewayEIP1",
"AllocationId",
]
SubnetId: Ref: "PublicSubnet01"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-NatGatewayAZ1"
},
]
}
}
NatGateway02: {
DependsOn: [
"NatGatewayEIP2",
"PublicSubnet02",
"VPCGatewayAttachment",
]
Type: "AWS::EC2::NatGateway"
Properties: {
AllocationId: "Fn::GetAtt": [
"NatGatewayEIP2",
"AllocationId",
]
SubnetId: Ref: "PublicSubnet02"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-NatGatewayAZ2"
},
]
}
}
NatGatewayEIP1: {
DependsOn: [
"VPCGatewayAttachment",
]
Type: "AWS::EC2::EIP"
Properties: Domain: "vpc"
}
NatGatewayEIP2: {
DependsOn: [
"VPCGatewayAttachment",
]
Type: "AWS::EC2::EIP"
Properties: Domain: "vpc"
}
PublicSubnet01: {
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Subnet 01"
Properties: {
MapPublicIpOnLaunch: true
AvailabilityZone: "Fn::Select": [
"0",
{
"Fn::GetAZs": Ref: "AWS::Region"
},
]
CidrBlock: Ref: "PublicSubnet01Block"
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet01"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
PublicSubnet02: {
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Subnet 02"
Properties: {
MapPublicIpOnLaunch: true
AvailabilityZone: "Fn::Select": [
"1",
{
"Fn::GetAZs": Ref: "AWS::Region"
},
]
CidrBlock: Ref: "PublicSubnet02Block"
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet02"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
PrivateSubnet01: {
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Subnet 03"
Properties: {
AvailabilityZone: "Fn::Select": [
"0",
{
"Fn::GetAZs": Ref: "AWS::Region"
},
]
CidrBlock: Ref: "PrivateSubnet01Block"
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet01"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
PrivateSubnet02: {
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Private Subnet 02"
Properties: {
AvailabilityZone: "Fn::Select": [
"1",
{
"Fn::GetAZs": Ref: "AWS::Region"
},
]
CidrBlock: Ref: "PrivateSubnet02Block"
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet02"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
PublicSubnet01RouteTableAssociation: {
Type: "AWS::EC2::SubnetRouteTableAssociation"
Properties: {
SubnetId: Ref: "PublicSubnet01"
RouteTableId: Ref: "PublicRouteTable"
}
}
PublicSubnet02RouteTableAssociation: {
Type: "AWS::EC2::SubnetRouteTableAssociation"
Properties: {
SubnetId: Ref: "PublicSubnet02"
RouteTableId: Ref: "PublicRouteTable"
}
}
PrivateSubnet01RouteTableAssociation: {
Type: "AWS::EC2::SubnetRouteTableAssociation"
Properties: {
SubnetId: Ref: "PrivateSubnet01"
RouteTableId: Ref: "PrivateRouteTable01"
}
}
PrivateSubnet02RouteTableAssociation: {
Type: "AWS::EC2::SubnetRouteTableAssociation"
Properties: {
SubnetId: Ref: "PrivateSubnet02"
RouteTableId: Ref: "PrivateRouteTable02"
}
}
ControlPlaneSecurityGroup: {
Type: "AWS::EC2::SecurityGroup"
Properties: {
GroupDescription: "Cluster communication with worker nodes"
VpcId: Ref: "VPC"
}
}
EKSIAMRole: {
Type: "AWS::IAM::Role"
Properties: {
AssumeRolePolicyDocument: Statement: [
{
Effect: "Allow"
Principal: Service: [
"eks.amazonaws.com",
]
Action: [
"sts:AssumeRole",
]
},
]
ManagedPolicyArns: [
"arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
"arn:aws:iam::aws:policy/AmazonEKSServicePolicy",
]
}
}
EKSCluster: {
Type: "AWS::EKS::Cluster"
Properties: {
Name: Ref: "ClusterName"
Version: "1.19"
RoleArn: "Fn::GetAtt": ["EKSIAMRole", "Arn"]
ResourcesVpcConfig: {
SecurityGroupIds: [{Ref: "ControlPlaneSecurityGroup"}]
SubnetIds: [
{Ref: "PublicSubnet01"},
{Ref: "PublicSubnet02"},
{Ref: "PrivateSubnet01"},
{Ref: "PrivateSubnet02"},
]
}
}
DependsOn: ["EKSIAMRole", "PublicSubnet01", "PublicSubnet02", "PrivateSubnet01", "PrivateSubnet02", "ControlPlaneSecurityGroup"]
}
}
Outputs: {
SubnetIds: {
Description: "Subnets IDs in the VPC"
Value: "Fn::Join": [
",",
[
{
Ref: "PublicSubnet01"
},
{
Ref: "PublicSubnet02"
},
{
Ref: "PrivateSubnet01"
},
{
Ref: "PrivateSubnet02"
},
],
]
}
PublicSubnets: {
Description: "List of the public subnets"
Value: "Fn::Join": [
",",
[
{
Ref: "PublicSubnet01"
},
{
Ref: "PublicSubnet02"
},
],
]
}
PrivateSubnets: {
Description: "List of the private subnets"
Value: "Fn::Join": [
",",
[
{
Ref: "PrivateSubnet01"
},
{
Ref: "PrivateSubnet02"
},
],
]
}
DefaultSecurityGroup: {
Description: "Security group for the cluster control plane communication with worker nodes"
Value: "Fn::Join": [
",",
[
{
Ref: "ControlPlaneSecurityGroup"
},
],
]
}
VPC: {
Description: "The VPC Id"
Value: Ref: "VPC"
}
}
}

@@ -1,89 +0,0 @@
package main
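// CloudFormation template for the EKS managed node group and its node instance role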
#CFNTemplate: eksNodeGroup: {
AWSTemplateFormatVersion: "2010-09-09"
Description: "Amazon EKS - Node Group"
Parameters: {
ClusterName: {
Type: "String"
Description: "The cluster name provided when the cluster was created. If it is incorrect, nodes will not be able to join the cluster."
}
NodeAutoScalingGroupDesiredCapacity: {
Type: "Number"
Default: 3
Description: "Desired capacity of Node Group ASG."
}
NodeAutoScalingGroupMaxSize: {
Type: "Number"
Default: 4
Description: "Maximum size of Node Group ASG. Set to at least 1 greater than NodeAutoScalingGroupDesiredCapacity."
}
NodeAutoScalingGroupMinSize: {
Type: "Number"
Default: 1
Description: "Minimum size of Node Group ASG."
}
NodeInstanceType: {
Type: "String"
Default: "t3.medium"
ConstraintDescription: "Must be a valid EC2 instance type"
Description: "EC2 instance type for the node instances"
}
Subnets: {
Type: "List<AWS::EC2::Subnet::Id>"
Description: "The subnets where workers can be created."
}
}
Resources: {
NodeInstanceRole: {
Type: "AWS::IAM::Role"
Properties: {
AssumeRolePolicyDocument: {
Version: "2012-10-17"
Statement: [
{
Effect: "Allow"
Principal: Service: [
"ec2.amazonaws.com",
]
Action: [
"sts:AssumeRole",
]
},
]
}
ManagedPolicyArns: [
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
]
Path: "/"
}
}
Nodegroup: {
Type: "AWS::EKS::Nodegroup"
Properties: {
ClusterName: Ref: "ClusterName"
NodeRole: "Fn::GetAtt": [
"NodeInstanceRole",
"Arn",
]
ScalingConfig: {
MaxSize: Ref: "NodeAutoScalingGroupMaxSize"
MinSize: Ref: "NodeAutoScalingGroupMinSize"
DesiredSize: Ref: "NodeAutoScalingGroupDesiredCapacity"
}
InstanceTypes: [{Ref: "NodeInstanceType"}]
AmiType: "AL2_x86_64"
Subnets: Ref: "Subnets"
}
}
}
Outputs: NodeInstanceRole: {
Description: "The node instance role"
Value: "Fn::GetAtt": [
"NodeInstanceRole",
"Arn",
]
}
}

@@ -1,41 +0,0 @@
package main
import (
"encoding/json"
"dagger.io/aws"
"dagger.io/aws/cloudformation"
)
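// Provision the EKS control plane and node group as two CloudFormation stacks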
#Infrastructure: {
awsConfig: aws.#Config
namePrefix: *"" | string
workerNodeCapacity: *3 | >=1
workerNodeInstanceType: *"t3.medium" | string
clusterName: "\(namePrefix)eks-cluster"
eksControlPlane: cloudformation.#Stack & {
config: awsConfig
source: json.Marshal(#CFNTemplate.eksControlPlane)
stackName: "\(namePrefix)eks-controlplane"
neverUpdate: true
timeout: 30
parameters: ClusterName: clusterName
}
eksNodeGroup: cloudformation.#Stack & {
config: awsConfig
source: json.Marshal(#CFNTemplate.eksNodeGroup)
stackName: "\(namePrefix)eks-nodegroup"
neverUpdate: true
timeout: 30
parameters: {
ClusterName: clusterName
NodeAutoScalingGroupDesiredCapacity: 1
NodeAutoScalingGroupMaxSize: NodeAutoScalingGroupDesiredCapacity + 1
NodeInstanceType: workerNodeInstanceType
Subnets: eksControlPlane.outputs.SubnetIds
}
}
}

@@ -1,29 +0,0 @@
package main
import (
"dagger.io/aws"
"dagger.io/aws/eks"
)
// AWS account: credentials and region
awsConfig: aws.#Config & {
region: *"us-east-2" | string
}
// Auto-provision an EKS cluster:
// - VPC, Nat Gateways, Subnets, Security Group
// - EKS Cluster
// - Instance Node Group: auto-scaling-group, ec2 instances, etc...
// The base config can be changed (number of EC2 instances, instance types, etc.)
infra: #Infrastructure & {
"awsConfig": awsConfig
namePrefix: "dagger-example-"
workerNodeCapacity: int | *1
workerNodeInstanceType: "t3.small"
}
// Client configuration for kubectl
kubeconfig: eks.#KubeConfig & {
config: awsConfig
clusterName: infra.clusterName
}

@@ -1,269 +0,0 @@
package main
import (
"strings"
"regexp"
"encoding/json"
"dagger.io/aws"
"dagger.io/aws/cloudformation"
)
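// Notification endpoint (e.g. email) subscribed to the alerting SNS topic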
#Notification: {
protocol: string
endpoint: string
}
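// A synthetic test: load a URL on a schedule and check the HTTP response code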
#Canary: {
name: =~"^[0-9a-z_-]{1,21}$"
slug: strings.Join(regexp.FindAll("[0-9a-zA-Z]*", name, -1), "")
url: string
expectedHTTPCode: *200 | int
timeoutInSeconds: *30 | int
intervalExpression: *"1 minute" | string
}
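// Deploy CloudWatch Synthetics canaries with SNS alerting, managed as a CloudFormation stack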
#HTTPMonitor: {
// For sending notifications
notifications: [...#Notification]
// Canaries (tests)
canaries: [...#Canary]
// Name of the Cloudformation stack
cfnStackName: string
// AWS Config
awsConfig: aws.#Config
cfnStack: cloudformation.#Stack & {
config: awsConfig
source: json.Marshal(#cfnTemplate)
stackName: cfnStackName
onFailure: "DO_NOTHING"
}
// Function handler
#lambdaHandler: {
url: string
expectedHTTPCode: int
script: #"""
var synthetics = require('Synthetics');
const log = require('SyntheticsLogger');
const pageLoadBlueprint = async function () {
// INSERT URL here
const URL = "\#(url)";
let page = await synthetics.getPage();
const response = await page.goto(URL, {waitUntil: 'domcontentloaded', timeout: 30000});
//Wait for page to render.
//Increase or decrease wait time based on endpoint being monitored.
await page.waitFor(15000);
// This will take a screenshot that will be included in test output artifacts
await synthetics.takeScreenshot('loaded', 'loaded');
let pageTitle = await page.title();
log.info('Page title: ' + pageTitle);
if (response.status() !== \#(expectedHTTPCode)) {
throw "Failed to load page!";
}
};
exports.handler = async () => {
return await pageLoadBlueprint();
};
"""#
}
#cfnTemplate: {
AWSTemplateFormatVersion: "2010-09-09"
Description: "CloudWatch Synthetics website monitoring"
Resources: {
Topic: {
Type: "AWS::SNS::Topic"
Properties: Subscription: [
for e in notifications {
Endpoint: e.endpoint
Protocol: e.protocol
},
]
}
TopicPolicy: {
Type: "AWS::SNS::TopicPolicy"
Properties: {
PolicyDocument: {
Id: "Id1"
Version: "2012-10-17"
Statement: [
{
Sid: "Sid1"
Effect: "Allow"
Principal: AWS: "*"
Action: "sns:Publish"
Resource: Ref: "Topic"
Condition: StringEquals: "AWS:SourceOwner": Ref: "AWS::AccountId"
},
]
}
Topics: [
{
Ref: "Topic"
},
]
}
}
CanaryBucket: {
Type: "AWS::S3::Bucket"
Properties: {}
}
CanaryRole: {
Type: "AWS::IAM::Role"
Properties: {
AssumeRolePolicyDocument: {
Version: "2012-10-17"
Statement: [
{
Effect: "Allow"
Principal: Service: "lambda.amazonaws.com"
Action: "sts:AssumeRole"
},
]
}
Policies: [
{
PolicyName: "execution"
PolicyDocument: {
Version: "2012-10-17"
Statement: [
{
Effect: "Allow"
Action: "s3:ListAllMyBuckets"
Resource: "*"
},
{
Effect: "Allow"
Action: "s3:PutObject"
Resource: "Fn::Sub": "${CanaryBucket.Arn}/*"
},
{
Effect: "Allow"
Action: "s3:GetBucketLocation"
Resource: "Fn::GetAtt": [
"CanaryBucket",
"Arn",
]
},
{
Effect: "Allow"
Action: "cloudwatch:PutMetricData"
Resource: "*"
Condition: StringEquals: "cloudwatch:namespace": "CloudWatchSynthetics"
},
]
}
},
]
}
}
CanaryLogGroup: {
Type: "AWS::Logs::LogGroup"
Properties: {
LogGroupName: "Fn::Sub": "/aws/lambda/cwsyn-\(cfnStackName)"
RetentionInDays: 14
}
}
CanaryPolicy: {
Type: "AWS::IAM::Policy"
Properties: {
PolicyDocument: Statement: [
{
Effect: "Allow"
Action: [
"logs:CreateLogStream",
"logs:PutLogEvents",
]
Resource: "Fn::GetAtt": [
"CanaryLogGroup",
"Arn",
]
},
]
PolicyName: "logs"
Roles: [
{
Ref: "CanaryRole"
},
]
}
}
for canary in canaries {
"Canary\(canary.slug)": {
Type: "AWS::Synthetics::Canary"
Properties: {
ArtifactS3Location: "Fn::Sub": "s3://${CanaryBucket}"
Code: {
#handler: #lambdaHandler & {
url: canary.url
expectedHTTPCode: canary.expectedHTTPCode
}
Handler: "index.handler"
Script: #handler.script
}
ExecutionRoleArn: "Fn::GetAtt": [
"CanaryRole",
"Arn",
]
FailureRetentionPeriod: 30
Name: canary.name
RunConfig: TimeoutInSeconds: canary.timeoutInSeconds
RuntimeVersion: "syn-1.0"
Schedule: {
DurationInSeconds: "0"
Expression: "rate(\(canary.intervalExpression))"
}
StartCanaryAfterCreation: true
SuccessRetentionPeriod: 30
}
}
"SuccessPercentAlarm\(canary.slug)": {
DependsOn: "TopicPolicy"
Type: "AWS::CloudWatch::Alarm"
Properties: {
AlarmActions: [
{
Ref: "Topic"
},
]
AlarmDescription: "Canary is failing."
ComparisonOperator: "LessThanThreshold"
Dimensions: [
{
Name: "CanaryName"
Value: Ref: "Canary\(canary.slug)"
},
]
EvaluationPeriods: 1
MetricName: "SuccessPercent"
Namespace: "CloudWatchSynthetics"
OKActions: [
{
Ref: "Topic"
},
]
Period: 300
Statistic: "Minimum"
Threshold: 90
TreatMissingData: "notBreaching"
}
}
}
}
Outputs: {
for canary in canaries {
"\(canary.slug)Canary": Value: Ref: "Canary\(canary.slug)"
"\(canary.slug)URL": Value: canary.url
}
NumberCanaries: Value: len(canaries)
}
}
}

@@ -1,34 +0,0 @@
package main
import (
"dagger.io/aws"
)
// AWS account: credentials and region
awsConfig: aws.#Config & {
region: *"us-east-1" | string @dagger(input)
}
// URL of the website to monitor
website: string | *"https://www.google.com" @dagger(input)
// Email address to notify of monitoring alerts
email: string @dagger(input)
// The monitoring service running on AWS Cloudwatch
monitor: #HTTPMonitor & {
notifications: [
#Notification & {
endpoint: email
protocol: "email"
},
]
canaries: [
#Canary & {
name: "default"
url: website
},
]
cfnStackName: "my-monitor"
"awsConfig": awsConfig
}

@@ -1,2 +0,0 @@
# dagger state
state/**

@@ -1,28 +0,0 @@
plan:
module: .
name: default
inputs:
www.account.name:
text: blocklayer
www.account.token:
secret: ENC[AES256_GCM,data:AGeCt/UJzWJ4UnzS/+t21GYz5wXPUoplYXTi1USXdi72wZemhzZncR2a+A==,iv:07DgGFL0oKgQsSZnp9s/Zz+6rdLShtHfStJZ9tHpsI4=,tag:jkY6TMrf7DaJMAc8/kJcAw==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBudkl4d2czaHZkSGt0SnVm
dm8xRTdaRE1WenpOczYxRFdMcDRkWDNmL1VzCjRHeWt3dnBITjlSNDZteWJhRmha
NWsrTThTZCt2eDJkRjgyOTFJeHdBMzgKLS0tIE9jOTFWMTRQei9iUkcvZDFrbmxn
ZnFaRWtEM241cDVCTStnK25BcDYyWlUKT2U8IFC21xMigjaTHHgkdUxIXKshxTmg
Q8254/qEWk+mJfsGxPf54d1RtqNqDX17kK/LeooSYAz7aqBjVLfG6w==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2021-06-15T16:43:58Z"
mac: ENC[AES256_GCM,data:ihY1s1/ngxPrL940WkhRiVdmA+zPqcx9bVqlsWyAPfM/E5thIylNn7qMBDyFG6tHJAQFdqpbwCbQuKn6MVs+d+IgsoBwIcN1y4xQn2LhC53ludL2tG4CYyZM5EKx43EE/whzTuyNrPl9ykfx/u+KeQD5CNbaB9PrDjrtc+rNrPQ=,iv:7T2NHDXWrPJAsOVLPVhqFYnYcZjoBtE5x8R4CDUD+yM=,tag:yaXlTdSH7uvQiimKVPrvFg==,type:str]
pgp: []
encrypted_suffix: secret
version: 3.7.1

@@ -1 +0,0 @@
../../../../stdlib

@@ -1,27 +0,0 @@
package main
import (
"dagger.io/netlify"
"dagger.io/js/yarn"
"dagger.io/git"
)
// Source code of the sample application
repo: git.#Repository & {
remote: "https://github.com/kabirbaidhya/react-todo-app.git"
ref: "624041b17bd62292143f99bce474a0e3c2d2dd61"
}
// Host the application with Netlify
www: netlify.#Site & {
// Site name can be overridden
name: string | *"dagger-examples-react" @dagger(input)
// Deploy the output of yarn build
// (Netlify build feature is not used, to avoid extra cost).
contents: app.build
}
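// Build the application with yarn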
app: yarn.#Package & {
source: repo
}

@@ -1,29 +0,0 @@
package main
import (
"dagger.io/aws"
"dagger.io/aws/s3"
"dagger.io/dagger"
)
// AWS Config for credentials and default region
awsConfig: aws.#Config & {
region: *"us-east-1" | string @dagger(input)
}
// Name of the S3 bucket to use
bucket: *"dagger-io-examples" | string @dagger(input)
// Source code to deploy
source: dagger.#Artifact @dagger(input)
// Deployed URL
url: "\(deploy.url)index.html" @dagger(output)
deploy: s3.#Object & {
always: true
config: awsConfig
"source": source
contentType: "text/html"
target: "s3://\(bucket)/"
}

@@ -1,9 +0,0 @@
<html>
<head>
<title>My Simple Website</title>
</head>
<body>
<h1>Shopping list</h1>
<ul>
<li>Salad</li>
<li>Eggs</li>
<li>Potatoes</li>
</ul>
</body>
</html>