Merge pull request #188 from dagger/example-aws-eks

Example for deploying containers to AWS EKS
Sam Alba 2021-03-18 16:37:29 -07:00 committed by GitHub
commit eb6fe17df3
9 changed files with 771 additions and 54 deletions


@@ -0,0 +1,12 @@
# Kubernetes on AWS (EKS)

This example creates a new EKS cluster and outputs its corresponding kubeconfig.

## How to run
```sh
dagger compute . \
--input-string awsConfig.accessKey="MY_AWS_ACCESS_KEY" \
--input-string awsConfig.secretKey="MY_AWS_SECRET_KEY" \
| jq -j '.kubeconfig.kubeconfig' > kubeconfig
```
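Once the stacks are up (initial provisioning typically takes on the order of 15–20 minutes), the resulting file can be used directly with a locally installed kubectl, for example:

```sh
# Sanity-check cluster access using the kubeconfig written above
KUBECONFIG=./kubeconfig kubectl get nodes
```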


@@ -0,0 +1,447 @@
package main
#CFNTemplate: eksControlPlane: {
AWSTemplateFormatVersion: "2010-09-09"
Description: "Amazon EKS Sample VPC - Private and Public subnets"
Parameters: {
VpcBlock: {
Type: "String"
Default: "192.168.0.0/16"
Description: "The CIDR range for the VPC. This should be a valid private (RFC 1918) CIDR range."
}
PublicSubnet01Block: {
Type: "String"
Default: "192.168.0.0/18"
Description: "CidrBlock for public subnet 01 within the VPC"
}
PublicSubnet02Block: {
Type: "String"
Default: "192.168.64.0/18"
Description: "CidrBlock for public subnet 02 within the VPC"
}
PrivateSubnet01Block: {
Type: "String"
Default: "192.168.128.0/18"
Description: "CidrBlock for private subnet 01 within the VPC"
}
PrivateSubnet02Block: {
Type: "String"
Default: "192.168.192.0/18"
Description: "CidrBlock for private subnet 02 within the VPC"
}
ClusterName: {
Type: "String"
Description: "The EKS cluster name"
}
}
Metadata: "AWS::CloudFormation::Interface": ParameterGroups: [
{
Label: default: "Worker Network Configuration"
Parameters: [
"VpcBlock",
"PublicSubnet01Block",
"PublicSubnet02Block",
"PrivateSubnet01Block",
"PrivateSubnet02Block",
]
},
]
Resources: {
VPC: {
Type: "AWS::EC2::VPC"
Properties: {
CidrBlock: Ref: "VpcBlock"
EnableDnsSupport: true
EnableDnsHostnames: true
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-VPC"
},
]
}
}
InternetGateway: Type: "AWS::EC2::InternetGateway"
VPCGatewayAttachment: {
Type: "AWS::EC2::VPCGatewayAttachment"
Properties: {
InternetGatewayId: Ref: "InternetGateway"
VpcId: Ref: "VPC"
}
}
PublicRouteTable: {
Type: "AWS::EC2::RouteTable"
Properties: {
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Public Subnets"
},
{
Key: "Network"
Value: "Public"
},
]
}
}
PrivateRouteTable01: {
Type: "AWS::EC2::RouteTable"
Properties: {
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Private Subnet AZ1"
},
{
Key: "Network"
Value: "Private01"
},
]
}
}
PrivateRouteTable02: {
Type: "AWS::EC2::RouteTable"
Properties: {
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Private Subnet AZ2"
},
{
Key: "Network"
Value: "Private02"
},
]
}
}
PublicRoute: {
DependsOn: "VPCGatewayAttachment"
Type: "AWS::EC2::Route"
Properties: {
RouteTableId: Ref: "PublicRouteTable"
DestinationCidrBlock: "0.0.0.0/0"
GatewayId: Ref: "InternetGateway"
}
}
PrivateRoute01: {
DependsOn: [
"VPCGatewayAttachment",
"NatGateway01",
]
Type: "AWS::EC2::Route"
Properties: {
RouteTableId: Ref: "PrivateRouteTable01"
DestinationCidrBlock: "0.0.0.0/0"
NatGatewayId: Ref: "NatGateway01"
}
}
PrivateRoute02: {
DependsOn: [
"VPCGatewayAttachment",
"NatGateway02",
]
Type: "AWS::EC2::Route"
Properties: {
RouteTableId: Ref: "PrivateRouteTable02"
DestinationCidrBlock: "0.0.0.0/0"
NatGatewayId: Ref: "NatGateway02"
}
}
NatGateway01: {
DependsOn: [
"NatGatewayEIP1",
"PublicSubnet01",
"VPCGatewayAttachment",
]
Type: "AWS::EC2::NatGateway"
Properties: {
AllocationId: "Fn::GetAtt": [
"NatGatewayEIP1",
"AllocationId",
]
SubnetId: Ref: "PublicSubnet01"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-NatGatewayAZ1"
},
]
}
}
NatGateway02: {
DependsOn: [
"NatGatewayEIP2",
"PublicSubnet02",
"VPCGatewayAttachment",
]
Type: "AWS::EC2::NatGateway"
Properties: {
AllocationId: "Fn::GetAtt": [
"NatGatewayEIP2",
"AllocationId",
]
SubnetId: Ref: "PublicSubnet02"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-NatGatewayAZ2"
},
]
}
}
NatGatewayEIP1: {
DependsOn: [
"VPCGatewayAttachment",
]
Type: "AWS::EC2::EIP"
Properties: Domain: "vpc"
}
NatGatewayEIP2: {
DependsOn: [
"VPCGatewayAttachment",
]
Type: "AWS::EC2::EIP"
Properties: Domain: "vpc"
}
PublicSubnet01: {
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Subnet 01"
Properties: {
MapPublicIpOnLaunch: true
AvailabilityZone: "Fn::Select": [
"0",
{
"Fn::GetAZs": Ref: "AWS::Region"
},
]
CidrBlock: Ref: "PublicSubnet01Block"
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet01"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
PublicSubnet02: {
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Subnet 02"
Properties: {
MapPublicIpOnLaunch: true
AvailabilityZone: "Fn::Select": [
"1",
{
"Fn::GetAZs": Ref: "AWS::Region"
},
]
CidrBlock: Ref: "PublicSubnet02Block"
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet02"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
PrivateSubnet01: {
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Private Subnet 01"
Properties: {
AvailabilityZone: "Fn::Select": [
"0",
{
"Fn::GetAZs": Ref: "AWS::Region"
},
]
CidrBlock: Ref: "PrivateSubnet01Block"
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet01"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
PrivateSubnet02: {
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Private Subnet 02"
Properties: {
AvailabilityZone: "Fn::Select": [
"1",
{
"Fn::GetAZs": Ref: "AWS::Region"
},
]
CidrBlock: Ref: "PrivateSubnet02Block"
VpcId: Ref: "VPC"
Tags: [
{
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet02"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
PublicSubnet01RouteTableAssociation: {
Type: "AWS::EC2::SubnetRouteTableAssociation"
Properties: {
SubnetId: Ref: "PublicSubnet01"
RouteTableId: Ref: "PublicRouteTable"
}
}
PublicSubnet02RouteTableAssociation: {
Type: "AWS::EC2::SubnetRouteTableAssociation"
Properties: {
SubnetId: Ref: "PublicSubnet02"
RouteTableId: Ref: "PublicRouteTable"
}
}
PrivateSubnet01RouteTableAssociation: {
Type: "AWS::EC2::SubnetRouteTableAssociation"
Properties: {
SubnetId: Ref: "PrivateSubnet01"
RouteTableId: Ref: "PrivateRouteTable01"
}
}
PrivateSubnet02RouteTableAssociation: {
Type: "AWS::EC2::SubnetRouteTableAssociation"
Properties: {
SubnetId: Ref: "PrivateSubnet02"
RouteTableId: Ref: "PrivateRouteTable02"
}
}
ControlPlaneSecurityGroup: {
Type: "AWS::EC2::SecurityGroup"
Properties: {
GroupDescription: "Cluster communication with worker nodes"
VpcId: Ref: "VPC"
}
}
EKSIAMRole: {
Type: "AWS::IAM::Role"
Properties: {
AssumeRolePolicyDocument: Statement: [
{
Effect: "Allow"
Principal: Service: [
"eks.amazonaws.com",
]
Action: [
"sts:AssumeRole",
]
},
]
ManagedPolicyArns: [
"arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
"arn:aws:iam::aws:policy/AmazonEKSServicePolicy",
]
}
}
EKSCluster: {
Type: "AWS::EKS::Cluster"
Properties: {
Name: Ref: "ClusterName"
Version: "1.19"
RoleArn: "Fn::GetAtt": ["EKSIAMRole", "Arn"]
ResourcesVpcConfig: {
SecurityGroupIds: [{Ref: "ControlPlaneSecurityGroup"}]
SubnetIds: [
{Ref: "PublicSubnet01"},
{Ref: "PublicSubnet02"},
{Ref: "PrivateSubnet01"},
{Ref: "PrivateSubnet02"},
]
}
}
DependsOn: ["EKSIAMRole", "PublicSubnet01", "PublicSubnet02", "PrivateSubnet01", "PrivateSubnet02", "ControlPlaneSecurityGroup"]
}
}
Outputs: {
SubnetIds: {
Description: "Subnets IDs in the VPC"
Value: "Fn::Join": [
",",
[
{
Ref: "PublicSubnet01"
},
{
Ref: "PublicSubnet02"
},
{
Ref: "PrivateSubnet01"
},
{
Ref: "PrivateSubnet02"
},
],
]
}
PublicSubnets: {
Description: "List of the public subnets"
Value: "Fn::Join": [
",",
[
{
Ref: "PublicSubnet01"
},
{
Ref: "PublicSubnet02"
},
],
]
}
PrivateSubnets: {
Description: "List of the private subnets"
Value: "Fn::Join": [
",",
[
{
Ref: "PrivateSubnet01"
},
{
Ref: "PrivateSubnet02"
},
],
]
}
DefaultSecurityGroup: {
Description: "Security group for the cluster control plane communication with worker nodes"
Value: "Fn::Join": [
",",
[
{
Ref: "ControlPlaneSecurityGroup"
},
],
]
}
VPC: {
Description: "The VPC Id"
Value: Ref: "VPC"
}
}
}
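The `Outputs` block above is what `cloudformation.#Stack` exposes as its `outputs` field; the `#Infrastructure` definition further down consumes `SubnetIds` from it. The same values can also be inspected out-of-band with the AWS CLI, assuming the stack name this example produces (`dagger-example-eks-controlplane`, from the prefix set in `main.cue`):

```sh
# Inspect the control-plane stack outputs directly
aws cloudformation describe-stacks \
  --stack-name dagger-example-eks-controlplane \
  --query 'Stacks[0].Outputs'
```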


@@ -0,0 +1,89 @@
package main
#CFNTemplate: eksNodeGroup: {
AWSTemplateFormatVersion: "2010-09-09"
Description: "Amazon EKS - Node Group"
Parameters: {
ClusterName: {
Type: "String"
Description: "The cluster name provided when the cluster was created. If it is incorrect, nodes will not be able to join the cluster."
}
NodeAutoScalingGroupDesiredCapacity: {
Type: "Number"
Default: 3
Description: "Desired capacity of Node Group ASG."
}
NodeAutoScalingGroupMaxSize: {
Type: "Number"
Default: 4
Description: "Maximum size of Node Group ASG. Set to at least 1 greater than NodeAutoScalingGroupDesiredCapacity."
}
NodeAutoScalingGroupMinSize: {
Type: "Number"
Default: 1
Description: "Minimum size of Node Group ASG."
}
NodeInstanceType: {
Type: "String"
Default: "t3.medium"
ConstraintDescription: "Must be a valid EC2 instance type"
Description: "EC2 instance type for the node instances"
}
Subnets: {
Type: "List<AWS::EC2::Subnet::Id>"
Description: "The subnets where workers can be created."
}
}
Resources: {
NodeInstanceRole: {
Type: "AWS::IAM::Role"
Properties: {
AssumeRolePolicyDocument: {
Version: "2012-10-17"
Statement: [
{
Effect: "Allow"
Principal: Service: [
"ec2.amazonaws.com",
]
Action: [
"sts:AssumeRole",
]
},
]
}
ManagedPolicyArns: [
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
]
Path: "/"
}
}
Nodegroup: {
Type: "AWS::EKS::Nodegroup"
Properties: {
ClusterName: Ref: "ClusterName"
NodeRole: "Fn::GetAtt": [
"NodeInstanceRole",
"Arn",
]
ScalingConfig: {
MaxSize: Ref: "NodeAutoScalingGroupMaxSize"
MinSize: Ref: "NodeAutoScalingGroupMinSize"
DesiredSize: Ref: "NodeAutoScalingGroupDesiredCapacity"
}
InstanceTypes: [{Ref: "NodeInstanceType"}]
AmiType: "AL2_x86_64"
Subnets: Ref: "Subnets"
}
}
}
Outputs: NodeInstanceRole: {
Description: "The node instance role"
Value: "Fn::GetAtt": [
"NodeInstanceRole",
"Arn",
]
}
}
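The managed node group created by this stack gets an auto-generated name. A quick way to confirm it registered against the cluster, assuming the default cluster name produced by this example:

```sh
# List managed node groups attached to the example cluster
aws eks list-nodegroups --cluster-name dagger-example-eks-cluster
```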


@@ -0,0 +1,41 @@
package main
import (
"encoding/json"
"dagger.io/aws"
"dagger.io/aws/cloudformation"
)
#Infrastructure: {
awsConfig: aws.#Config
namePrefix: *"" | string
workerNodeCapacity: *3 | >=1
workerNodeInstanceType: *"t3.medium" | string
clusterName: "\(namePrefix)eks-cluster"
eksControlPlane: cloudformation.#Stack & {
config: awsConfig
source: json.Marshal(#CFNTemplate.eksControlPlane)
stackName: "\(namePrefix)eks-controlplane"
neverUpdate: true
timeout: 30
parameters: ClusterName: clusterName
}
eksNodeGroup: cloudformation.#Stack & {
config: awsConfig
source: json.Marshal(#CFNTemplate.eksNodeGroup)
stackName: "\(namePrefix)eks-nodegroup"
neverUpdate: true
timeout: 30
parameters: {
ClusterName: clusterName
NodeAutoScalingGroupDesiredCapacity: workerNodeCapacity
NodeAutoScalingGroupMaxSize: NodeAutoScalingGroupDesiredCapacity + 1
NodeInstanceType: workerNodeInstanceType
Subnets: eksControlPlane.outputs.SubnetIds
}
}
}

examples/aws-eks/main.cue (new file, 31 lines)

@@ -0,0 +1,31 @@
package main
import (
"dagger.io/aws"
"dagger.io/aws/eks"
)
// Fill using:
// --input-string awsConfig.accessKey=XXX
// --input-string awsConfig.secretKey=XXX
awsConfig: aws.#Config & {
region: *"us-east-2" | string
}
// Auto-provision an EKS cluster:
// - VPC, NAT gateways, subnets, security group
// - EKS cluster
// - Managed node group: auto-scaling group, EC2 instances, etc.
// The base config can be changed (number of EC2 instances, instance types, etc.)
infra: #Infrastructure & {
"awsConfig": awsConfig
namePrefix: "dagger-example-"
// Cluster size is 1 for the purposes of this example
workerNodeCapacity: 1
workerNodeInstanceType: "t3.small"
}
kubeconfig: eks.#KubeConfig & {
config: awsConfig
clusterName: infra.clusterName
}
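`awsConfig.region` defaults to `us-east-2` but stays overridable as an input, in the same way as the credentials (the region value below is only an illustration):

```sh
dagger compute . \
  --input-string awsConfig.accessKey="MY_AWS_ACCESS_KEY" \
  --input-string awsConfig.secretKey="MY_AWS_SECRET_KEY" \
  --input-string awsConfig.region="us-west-2" \
  | jq -j '.kubeconfig.kubeconfig' > kubeconfig
```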


@@ -1,7 +1,12 @@
 package aws
-import "dagger.io/dagger"
+import (
+"dagger.io/dagger"
+"dagger.io/llb"
+"dagger.io/alpine"
+)
+// Base AWS Config
 #Config: {
 // AWS region
 region: string
@@ -10,3 +15,17 @@ import "dagger.io/dagger"
 // AWS secret key
 secretKey: dagger.#Secret
 }
+// Re-usable aws-cli component
+#CLI: {
+#compute: [
+llb.#Load & {
+from: alpine.#Image & {
+package: bash: "=5.1.0-r0"
+package: jq: "=1.6-r1"
+package: curl: "=7.74.0-r1"
+package: "aws-cli": "=1.18.177-r0"
+}
+},
+]
+}
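A minimal sketch of how another package could reuse `aws.#CLI` as the base of a `#compute` pipeline, following the same Load/Exec pattern the cloudformation and eks packages use below (`#ListBuckets` and the `example` package are hypothetical, not part of this change):

```cue
package example

import (
	"dagger.io/llb"
	"dagger.io/aws"
)

// Hypothetical component: list S3 buckets using the shared aws-cli image
#ListBuckets: {
	config: aws.#Config
	#compute: [
		llb.#Load & {from: aws.#CLI},
		llb.#Exec & {
			args: ["aws", "s3", "ls"]
			env: {
				AWS_ACCESS_KEY_ID:     config.accessKey
				AWS_SECRET_ACCESS_KEY: config.secretKey
				AWS_DEFAULT_REGION:    config.region
				AWS_DEFAULT_OUTPUT:    "json"
			}
		},
	]
}
```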


@@ -3,9 +3,8 @@ package cloudformation
 import (
 "encoding/json"
-"dagger.io/alpine"
-"dagger.io/aws"
 "dagger.io/llb"
+"dagger.io/aws"
 )
 // AWS CloudFormation Stack
@@ -45,16 +44,11 @@
 }
 }
-outputs: {
-[string]: string
-#compute: [
+outputs: [string]: string
+outputs: #compute: [
 llb.#Load & {
-from: alpine.#Image & {
-package: bash: "=5.1.0-r0"
-package: jq: "=1.6-r1"
-package: "aws-cli": "=1.18.177-r0"
-}
+from: aws.#CLI
 },
 llb.#Mkdir & {
 path: "/src"
@@ -97,5 +91,4 @@
 format: "json"
 },
 ]
-}
 }

stdlib/aws/eks/code.cue (new file, 26 lines)

@@ -0,0 +1,26 @@
package eks
#Code: #"""
[ -e /cache/bin/kubectl ] || {
curl -sfL https://dl.k8s.io/v1.19.9/bin/linux/amd64/kubectl -o /cache/bin/kubectl && chmod +x /cache/bin/kubectl
}
export KUBECONFIG=/kubeconfig
export PATH="$PATH:/cache/bin"
# Generate a kube configuration
aws eks update-kubeconfig --name "$EKS_CLUSTER"
# Figure out the kubernetes username
CONTEXT="$(kubectl config current-context)"
USER="$(kubectl config view -o json | \
jq -r ".contexts[] | select(.name==\"$CONTEXT\") | .context.user")"
# Grab a kubernetes access token
ACCESS_TOKEN="$(aws eks get-token --cluster-name "$EKS_CLUSTER" | \
jq -r .status.token)"
# Remove the user config and replace it with the token
kubectl config unset "users.${USER}"
kubectl config set-credentials "$USER" --token "$ACCESS_TOKEN"
"""#

stdlib/aws/eks/eks.cue (new file, 59 lines)

@@ -0,0 +1,59 @@
package eks
import (
"dagger.io/dagger"
"dagger.io/llb"
"dagger.io/aws"
)
// #KubeConfig outputs a valid kubeconfig for the kubectl client
#KubeConfig: {
// AWS Config
config: aws.#Config
// EKS cluster name
clusterName: string
// kubeconfig is the generated kube configuration file
kubeconfig: {
dagger.#Secret
#compute: [
llb.#Load & {
from: aws.#CLI
},
llb.#WriteFile & {
dest: "/entrypoint.sh"
content: #Code
},
llb.#Exec & {
always: true
args: [
"/bin/bash",
"--noprofile",
"--norc",
"-eo",
"pipefail",
"/entrypoint.sh",
]
env: {
AWS_CONFIG_FILE: "/cache/aws/config"
AWS_ACCESS_KEY_ID: config.accessKey
AWS_SECRET_ACCESS_KEY: config.secretKey
AWS_DEFAULT_REGION: config.region
AWS_REGION: config.region
AWS_DEFAULT_OUTPUT: "json"
AWS_PAGER: ""
EKS_CLUSTER: clusterName
}
mount: {
"/cache/aws": "cache"
"/cache/bin": "cache"
}
},
llb.#Export & {
source: "/kubeconfig"
format: "string"
},
]
}
}
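The exec above runs with `always: true` because the token returned by `aws eks get-token` is short-lived (roughly 15 minutes), so the kubeconfig is regenerated on every run rather than served from cache. Because the user entry is swapped for that bearer token, the exported file should work with a plain kubectl client; no aws CLI or aws-iam-authenticator is needed on the consuming machine:

```sh
# The exported kubeconfig is self-contained; only kubectl is needed client-side
kubectl --kubeconfig ./kubeconfig get pods --all-namespaces
```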