Simplified infra args to be more intuitive + moved EKS worker node group to managed nodes instead of plain EC2 instances
Signed-off-by: Sam Alba <sam.alba@gmail.com>
parent d282180ae6
commit f7457e2cba
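In short: the worker nodes move from a self-managed `AWS::AutoScaling::LaunchConfiguration` / `AWS::AutoScaling::AutoScalingGroup` pair to a managed `AWS::EKS::Nodegroup`, and the example's `#Infrastructure` inputs get plainer defaults. Below is a condensed sketch of both changes, assembled from the hunks that follow; it assumes the example's existing `#Infrastructure` and `awsConfig` definitions and is not the full template.

```cue
// Managed node group resource that replaces the LaunchConfiguration + ASG pair
// (field values taken from the hunks in this commit).
Nodegroup: {
	Type: "AWS::EKS::Nodegroup"
	Properties: {
		ClusterName: Ref: "ClusterName"
		NodeRole: "Fn::GetAtt": ["NodeInstanceRole", "Arn"]
		ScalingConfig: {
			MinSize:     Ref: "NodeAutoScalingGroupMinSize"
			MaxSize:     Ref: "NodeAutoScalingGroupMaxSize"
			DesiredSize: Ref: "NodeAutoScalingGroupDesiredCapacity"
		}
		InstanceTypes: [{Ref: "NodeInstanceType"}]
		AmiType: "AL2_x86_64"
		Subnets: Ref: "Subnets"
	}
}

// Simplified #Infrastructure inputs: sensible defaults in the definition,
// overridden here for a small example cluster.
infra: #Infrastructure & {
	"awsConfig": awsConfig
	namePrefix:  "dagger-example-"
	// Cluster size is 1 for the example purpose
	workerNodeCapacity:     1
	workerNodeInstanceType: "t3.small"
}
```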
@@ -3,5 +3,7 @@
## How to run

```sh
dagger compute . \
    --input-string awsConfig.accessKey="MY_AWS_ACCESS_KEY" \
    --input-string awsConfig.secretKey="MY_AWS_SECRET_KEY"
```

@@ -33,10 +33,6 @@ package main
Type: "String"
Description: "The EKS cluster name"
}
// EKSIAMRoleName: {
//  Type: "String"
//  Description: "The name of the IAM role for the EKS service to assume"
// }
}
Metadata: "AWS::CloudFormation::Interface": ParameterGroups: [
{
@@ -214,6 +210,7 @@ package main
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Subnet 01"
Properties: {
MapPublicIpOnLaunch: true
AvailabilityZone: "Fn::Select": [
"0",
{
@@ -227,6 +224,10 @@ package main
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet01"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
@@ -234,6 +235,7 @@ package main
Type: "AWS::EC2::Subnet"
Metadata: Comment: "Subnet 02"
Properties: {
MapPublicIpOnLaunch: true
AvailabilityZone: "Fn::Select": [
"1",
{
@@ -247,6 +249,10 @@ package main
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PublicSubnet02"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
@@ -267,6 +273,10 @@ package main
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet01"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
@@ -287,6 +297,10 @@ package main
Key: "Name"
Value: "Fn::Sub": "${AWS::StackName}-PrivateSubnet02"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "shared"
},
]
}
}
@@ -340,7 +354,6 @@ package main
},
]
// RoleName: Ref: "EKSIAMRoleName"
ManagedPolicyArns: [
"arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
"arn:aws:iam::aws:policy/AmazonEKSServicePolicy",
@@ -351,6 +364,7 @@ package main
Type: "AWS::EKS::Cluster"
Properties: {
Name: Ref: "ClusterName"
Version: "1.19"
RoleArn: "Fn::GetAtt": ["EKSIAMRole", "Arn"]
ResourcesVpcConfig: {
SecurityGroupIds: [{Ref: "ControlPlaneSecurityGroup"}]

@@ -3,55 +3,11 @@ package main
#CFNTemplate: eksNodeGroup: {
AWSTemplateFormatVersion: "2010-09-09"
Description: "Amazon EKS - Node Group"
Metadata: "AWS::CloudFormation::Interface": ParameterGroups: [
{
Label: default: "EKS Cluster"
Parameters: [
"ClusterName",
"ClusterControlPlaneSecurityGroup",
]
},
{
Label: default: "Worker Node Configuration"
Parameters: [
"NodeGroupName",
"NodeAutoScalingGroupMinSize",
"NodeAutoScalingGroupDesiredCapacity",
"NodeAutoScalingGroupMaxSize",
"NodeInstanceType",
"NodeImageIdSSMParam",
"NodeImageId",
"NodeVolumeSize",
// "KeyName",
"BootstrapArguments",
]
},
{
Label: default: "Worker Network Configuration"
Parameters: [
"VpcId",
"Subnets",
]
},
]
Parameters: {
BootstrapArguments: {
Type: "String"
Default: ""
Description: "Arguments to pass to the bootstrap script. See files/bootstrap.sh in https://github.com/awslabs/amazon-eks-ami"
}
ClusterControlPlaneSecurityGroup: {
Type: "AWS::EC2::SecurityGroup::Id"
Description: "The security group of the cluster control plane."
}
ClusterName: {
Type: "String"
Description: "The cluster name provided when the cluster was created. If it is incorrect, nodes will not be able to join the cluster."
}
// KeyName: {
//  Type: "AWS::EC2::KeyPair::KeyName"
//  Description: "The EC2 Key Pair to allow SSH access to the instances"
// }
NodeAutoScalingGroupDesiredCapacity: {
Type: "Number"
Default: 3
@@ -67,50 +23,17 @@ package main
Default: 1
Description: "Minimum size of Node Group ASG."
}
NodeGroupName: {
Type: "String"
Description: "Unique identifier for the Node Group."
}
NodeImageId: {
Type: "String"
Default: ""
Description: "(Optional) Specify your own custom image ID. This value overrides any AWS Systems Manager Parameter Store value specified above."
}
NodeImageIdSSMParam: {
Type: "AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>"
Default: "/aws/service/eks/optimized-ami/1.19/amazon-linux-2/recommended/image_id"
Description: "AWS Systems Manager Parameter Store parameter of the AMI ID for the worker node instances."
}
NodeInstanceType: {
Type: "String"
Default: "t3.medium"
ConstraintDescription: "Must be a valid EC2 instance type"
Description: "EC2 instance type for the node instances"
}
NodeVolumeSize: {
Type: "Number"
Default: 20
Description: "Node volume size"
}
Subnets: {
Type: "List<AWS::EC2::Subnet::Id>"
Description: "The subnets where workers can be created."
}
VpcId: {
Type: "AWS::EC2::VPC::Id"
Description: "The VPC of the worker instances"
}
}
Conditions: HasNodeImageId: "Fn::Not": [
{
"Fn::Equals": [
{
Ref: "NodeImageId"
},
"",
]
},
]
Resources: {
NodeInstanceRole: {
Type: "AWS::IAM::Role"
@@ -137,175 +60,30 @@ package main
Path: "/"
}
}
NodeInstanceProfile: {
Type: "AWS::IAM::InstanceProfile"
Nodegroup: {
Type: "AWS::EKS::Nodegroup"
Properties: {
Path: "/"
Roles: [
{
Ref: "NodeInstanceRole"
},
ClusterName: Ref: "ClusterName"
NodeRole: "Fn::GetAtt": [
"NodeInstanceRole",
"Arn",
]
}
}
NodeSecurityGroup: {
Type: "AWS::EC2::SecurityGroup"
Properties: {
GroupDescription: "Security group for all nodes in the cluster"
Tags: [
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
Value: "owned"
},
]
VpcId: Ref: "VpcId"
}
}
NodeSecurityGroupIngress: {
Type: "AWS::EC2::SecurityGroupIngress"
DependsOn: "NodeSecurityGroup"
Properties: {
Description: "Allow node to communicate with each other"
FromPort: 0
GroupId: Ref: "NodeSecurityGroup"
IpProtocol: "-1"
SourceSecurityGroupId: Ref: "NodeSecurityGroup"
ToPort: 65535
}
}
ClusterControlPlaneSecurityGroupIngress: {
Type: "AWS::EC2::SecurityGroupIngress"
DependsOn: "NodeSecurityGroup"
Properties: {
Description: "Allow pods to communicate with the cluster API Server"
FromPort: 443
GroupId: Ref: "ClusterControlPlaneSecurityGroup"
IpProtocol: "tcp"
SourceSecurityGroupId: Ref: "NodeSecurityGroup"
ToPort: 443
}
}
ControlPlaneEgressToNodeSecurityGroup: {
Type: "AWS::EC2::SecurityGroupEgress"
DependsOn: "NodeSecurityGroup"
Properties: {
Description: "Allow the cluster control plane to communicate with worker Kubelet and pods"
DestinationSecurityGroupId: Ref: "NodeSecurityGroup"
FromPort: 1025
GroupId: Ref: "ClusterControlPlaneSecurityGroup"
IpProtocol: "tcp"
ToPort: 65535
}
}
ControlPlaneEgressToNodeSecurityGroupOn443: {
Type: "AWS::EC2::SecurityGroupEgress"
DependsOn: "NodeSecurityGroup"
Properties: {
Description: "Allow the cluster control plane to communicate with pods running extension API servers on port 443"
DestinationSecurityGroupId: Ref: "NodeSecurityGroup"
FromPort: 443
GroupId: Ref: "ClusterControlPlaneSecurityGroup"
IpProtocol: "tcp"
ToPort: 443
}
}
NodeSecurityGroupFromControlPlaneIngress: {
Type: "AWS::EC2::SecurityGroupIngress"
DependsOn: "NodeSecurityGroup"
Properties: {
Description: "Allow worker Kubelets and pods to receive communication from the cluster control plane"
FromPort: 1025
GroupId: Ref: "NodeSecurityGroup"
IpProtocol: "tcp"
SourceSecurityGroupId: Ref: "ClusterControlPlaneSecurityGroup"
ToPort: 65535
}
}
NodeSecurityGroupFromControlPlaneOn443Ingress: {
Type: "AWS::EC2::SecurityGroupIngress"
DependsOn: "NodeSecurityGroup"
Properties: {
Description: "Allow pods running extension API servers on port 443 to receive communication from cluster control plane"
FromPort: 443
GroupId: Ref: "NodeSecurityGroup"
IpProtocol: "tcp"
SourceSecurityGroupId: Ref: "ClusterControlPlaneSecurityGroup"
ToPort: 443
}
}
NodeLaunchConfig: {
Type: "AWS::AutoScaling::LaunchConfiguration"
Properties: {
AssociatePublicIpAddress: "true"
BlockDeviceMappings: [
{
DeviceName: "/dev/xvda"
Ebs: {
DeleteOnTermination: true
VolumeSize: Ref: "NodeVolumeSize"
VolumeType: "gp2"
}
},
]
IamInstanceProfile: Ref: "NodeInstanceProfile"
ImageId: "Fn::If": [
"HasNodeImageId",
{
Ref: "NodeImageId"
},
{
Ref: "NodeImageIdSSMParam"
},
]
InstanceType: Ref: "NodeInstanceType"
// KeyName: Ref: "KeyName"
SecurityGroups: [
{
Ref: "NodeSecurityGroup"
},
]
UserData: "Fn::Base64": "Fn::Sub": "#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}\n/opt/aws/bin/cfn-signal --exit-code $? \\\n --stack ${AWS::StackName} \\\n --resource NodeGroup \\\n --region ${AWS::Region}\n"
}
}
NodeGroup: {
Type: "AWS::AutoScaling::AutoScalingGroup"
Properties: {
DesiredCapacity: Ref: "NodeAutoScalingGroupDesiredCapacity"
LaunchConfigurationName: Ref: "NodeLaunchConfig"
MaxSize: Ref: "NodeAutoScalingGroupMaxSize"
MinSize: Ref: "NodeAutoScalingGroupMinSize"
Tags: [
{
Key: "Name"
PropagateAtLaunch: "true"
Value: "Fn::Sub": "${ClusterName}-${NodeGroupName}-Node"
},
{
Key: "Fn::Sub": "kubernetes.io/cluster/${ClusterName}"
PropagateAtLaunch: "true"
Value: "owned"
},
]
VPCZoneIdentifier: Ref: "Subnets"
}
UpdatePolicy: AutoScalingRollingUpdate: {
MaxBatchSize: "1"
MinInstancesInService: Ref: "NodeAutoScalingGroupDesiredCapacity"
PauseTime: "PT5M"
ScalingConfig: {
MaxSize: Ref: "NodeAutoScalingGroupMaxSize"
MinSize: Ref: "NodeAutoScalingGroupMinSize"
DesiredSize: Ref: "NodeAutoScalingGroupDesiredCapacity"
}
InstanceTypes: [{Ref: "NodeInstanceType"}]
AmiType: "AL2_x86_64"
Subnets: Ref: "Subnets"
}
}
}
Outputs: {
NodeInstanceRole: {
Description: "The node instance role"
Value: "Fn::GetAtt": [
"NodeInstanceRole",
"Arn",
]
}
NodeSecurityGroup: {
Description: "The security group for the node group"
Value: Ref: "NodeSecurityGroup"
}
Outputs: NodeInstanceRole: {
Description: "The node instance role"
Value: "Fn::GetAtt": [
"NodeInstanceRole",
"Arn",
]
}
}

examples/aws-eks/cue.mod/module.cue (new file)
@@ -0,0 +1 @@
module: ""

@@ -8,11 +8,10 @@ import (
)

#Infrastructure: {
awsConfig: aws.#Config
namePrefix: *"dagger-example-" | string
// Cluster size is 1 for example (to limit resources)
workerNodeCapacity: *1 | >1
workerNodeInstanceType: *"t3.small" | string
awsConfig: aws.#Config
namePrefix: *"" | string
workerNodeCapacity: *3 | >=1
workerNodeInstanceType: *"t3.medium" | string

let clusterName = "\(namePrefix)eks-cluster"

@@ -21,6 +20,7 @@ import (
source: json.Marshal(#CFNTemplate.eksControlPlane)
stackName: "\(namePrefix)eks-controlplane"
neverUpdate: true
timeout: 30
parameters: ClusterName: clusterName
}

@@ -29,14 +29,12 @@ import (
source: json.Marshal(#CFNTemplate.eksNodeGroup)
stackName: "\(namePrefix)eks-nodegroup"
neverUpdate: true
timeout: 30
parameters: {
ClusterName: clusterName
ClusterControlPlaneSecurityGroup: eksControlPlane.outputs.DefaultSecurityGroup
NodeAutoScalingGroupDesiredCapacity: 1
NodeAutoScalingGroupMaxSize: NodeAutoScalingGroupDesiredCapacity + 1
NodeGroupName: "\(namePrefix)eks-nodegroup"
NodeInstanceType: workerNodeInstanceType
VpcId: eksControlPlane.outputs.VPC
Subnets: eksControlPlane.outputs.SubnetIds
}
}

@@ -18,4 +18,8 @@ awsConfig: aws.#Config & {
// base config can be changed (number of EC2 instances, types, etc...)
infra: #Infrastructure & {
"awsConfig": awsConfig
namePrefix: "dagger-example-"
// Cluster size is 1 for the example purpose
workerNodeCapacity: 1
workerNodeInstanceType: "t3.small"
}