From d5b444df1a3499070e710265fed181823017b7d8 Mon Sep 17 00:00:00 2001
From: Guillaume de Rouville
Date: Mon, 14 Jun 2021 16:35:23 +0200
Subject: [PATCH] AWS Provisioning doc

Signed-off-by: Guillaume de Rouville
---
 docs/programming/guides/cloudformation.md  |   8 -
 docs/programming/guides/cloudformation.mdx | 900 +++++++++++++++++++++
 2 files changed, 900 insertions(+), 8 deletions(-)
 delete mode 100644 docs/programming/guides/cloudformation.md
 create mode 100644 docs/programming/guides/cloudformation.mdx

diff --git a/docs/programming/guides/cloudformation.md b/docs/programming/guides/cloudformation.md
deleted file mode 100644
index 6956fa5c..00000000
--- a/docs/programming/guides/cloudformation.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-sidebar_position: 3
-slug: /programming/cloudformation
----
-
-# Infra provisioning
-
-## Integrating with AWS Cloudformation

diff --git a/docs/programming/guides/cloudformation.mdx b/docs/programming/guides/cloudformation.mdx
new file mode 100644
index 00000000..84057ef9
--- /dev/null
+++ b/docs/programming/guides/cloudformation.mdx
@@ -0,0 +1,900 @@
---
sidebar_position: 3
slug: /programming/cloudformation
---

# Infra provisioning

## Integrating with AWS Cloudformation

After deploying the app on a bucket provisioned from the web interface, let's see how Dagger can be leveraged to extend our deployment pipeline using the Cloudformation relay.

## Prerequisites

### Reminder

#### Guidelines

The provisioning strategy detailed below follows S3 best practices. In order to remain agnostic of your current AWS knowledge, it relies heavily on the official S3 and Cloudformation documentation.

#### Relays

When developing a plan based on relays, the first thing to do is to read their universe reference: it summarizes the expected inputs and their corresponding formats. [Here](https://dagger.io/aws/cloudformation) is the Cloudformation one.

### Setup

1. Initialize a new folder and a new workspace

```bash
mkdir infra-provisioning
cd ./infra-provisioning
dagger init
```

2. Create a new environment

```bash
dagger new s3-provisioning
cd ./.dagger/env/s3-provisioning/plan/ # Personal preference: work directly inside the plan
```

3. Create a `main.cue` file with its corresponding `main` package

```bash
touch main.cue

-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main
```

## Cloudformation

Now that a plan has been set up, let's implement the Cloudformation template and convert it to a Cue definition for further flexibility.

### Template creation

The idea here is to follow best practices for [S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/HostingWebsiteOnS3Setup.html) provisioning. Thankfully, the AWS documentation contains a working [Cloudformation template](https://docs.aws.amazon.com/fr_fr/AWSCloudFormation/latest/UserGuide/quickref-s3.html#scenario-s3-bucket-website) that fits 95% of our needs.

1. Tweaking the template: removing some of the outputs

The [template](https://docs.aws.amazon.com/fr_fr/AWSCloudFormation/latest/UserGuide/quickref-s3.html#scenario-s3-bucket-website) has far more outputs than necessary, as we just want to retrieve the bucket name:

**Original outputs:**

```json
"Outputs": {
    "WebsiteURL": {
        "Value": {
            "Fn::GetAtt": [
                "S3Bucket",
                "WebsiteURL"
            ]
        },
        "Description": "URL for website hosted on S3"
    },
    "S3BucketSecureURL": {
        "Value": {
            "Fn::Join": [
                "",
                [
                    "https://",
                    {
                        "Fn::GetAtt": [
                            "S3Bucket",
                            "DomainName"
                        ]
                    }
                ]
            ]
        },
        "Description": "Name of S3 bucket to hold website content"
    }
}
```

**Tweaked outputs:**

```json
"Outputs": {
    "Name": {
        "Value": {
            "Fn::GetAtt": [
                "S3Bucket",
                "Arn"
            ]
        },
        "Description": "Name S3 Bucket"
    }
}
```

**Full base template:**

```json
{
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "S3Bucket": {
            "Type": "AWS::S3::Bucket",
            "Properties": {
                "AccessControl": "PublicRead",
                "WebsiteConfiguration": {
                    "IndexDocument": "index.html",
                    "ErrorDocument": "error.html"
                }
            },
            "DeletionPolicy": "Retain"
        },
        "BucketPolicy": {
            "Type": "AWS::S3::BucketPolicy",
            "Properties": {
                "PolicyDocument": {
                    "Id": "MyPolicy",
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "PublicReadForGetBucketObjects",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": "s3:GetObject",
                            "Resource": {
                                "Fn::Join": [
                                    "",
                                    [
                                        "arn:aws:s3:::",
                                        {
                                            "Ref": "S3Bucket"
                                        },
                                        "/*"
                                    ]
                                ]
                            }
                        }
                    ]
                },
                "Bucket": {
                    "Ref": "S3Bucket"
                }
            }
        }
    },
    "Outputs": {
        "Name": {
            "Value": {
                "Fn::GetAtt": [
                    "S3Bucket",
                    "Arn"
                ]
            },
            "Description": "Name S3 Bucket"
        }
    }
}
```

2. Some *pro tips*

You can double-check the template by uploading it manually in the Cloudformation web console, or by executing the command below locally:

```bash
aws cloudformation validate-template --template-body file://template.json
```

> PS: the *Full base template* block above is the template used for the remaining parts of the guide.

### JSON / YAML to Cue conversion

Once you get used to Cue, you may end up writing your Cloudformation templates directly in it. However, as most of the current examples are written either in JSON or in YAML, let's see how to lazily convert them to Cue (optional but recommended).

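As a side note, the standalone `cue` CLI can do this conversion for you. The sketch below rests on two assumptions not covered by this guide: that the `cue` binary is installed locally and that the final JSON template has been saved as `template.json`:

```bash
# Optional shortcut, assuming the `cue` CLI is installed and the final JSON
# template from above is saved as template.json in the current directory.
cue import -p main template.json   # writes template.cue, in the `main` package
cat template.cue                   # review the generated Cue before reusing it
```

The manual route below only requires Dagger itself, so that is what the rest of the guide follows.
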
###### 1. Modify main.cue

We will temporarily modify `main.cue` to perform the conversion.

**JSON template (skeleton):**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import "encoding/json"

point: json.Unmarshal(data)
data: #"""
// Paste the final JSON template from above here
"""#
```

**JSON template (full):**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import (
    "encoding/json"
)

point: json.Unmarshal(data)
data: #"""
{
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "S3Bucket": {
            "Type": "AWS::S3::Bucket",
            "Properties": {
                "AccessControl": "PublicRead",
                "WebsiteConfiguration": {
                    "IndexDocument": "index.html",
                    "ErrorDocument": "error.html"
                }
            },
            "DeletionPolicy": "Retain"
        },
        "BucketPolicy": {
            "Type": "AWS::S3::BucketPolicy",
            "Properties": {
                "PolicyDocument": {
                    "Id": "MyPolicy",
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "PublicReadForGetBucketObjects",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": "s3:GetObject",
                            "Resource": {
                                "Fn::Join": [
                                    "",
                                    [
                                        "arn:aws:s3:::",
                                        {
                                            "Ref": "S3Bucket"
                                        },
                                        "/*"
                                    ]
                                ]
                            }
                        }
                    ]
                },
                "Bucket": {
                    "Ref": "S3Bucket"
                }
            }
        }
    },
    "Outputs": {
        "Name": {
            "Value": {
                "Fn::GetAtt": [
                    "S3Bucket",
                    "Arn"
                ]
            },
            "Description": "Name S3 Bucket"
        }
    }
}
"""#
```

**YAML template:**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import "encoding/yaml"

point: yaml.Unmarshal(data)
data: """
// Paste the YAML template here
"""
```

###### 2. Retrieve the unmarshalled JSON

Then, still in the same folder, query the `point` value to retrieve the unmarshalled result of `data`:

**Full output:**

```bash
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ --
dagger query point
# Output:
# {
#   "AWSTemplateFormatVersion": "2010-09-09",
#   "Outputs": {
#     "Name": {
#       "Description": "Name S3 Bucket",
#       "Value": {
#         "Fn::GetAtt": [
#           "S3Bucket",
#           "Arn"
#         ]
#       }
#     }
#   },
#   "Resources": {
#     "BucketPolicy": {
#       "Properties": {
#         "Bucket": {
#           "Ref": "S3Bucket"
#         },
#         "PolicyDocument": {
#           "Id": "MyPolicy",
#           "Statement": [
#             {
#               "Action": "s3:GetObject",
#               "Effect": "Allow",
#               "Principal": "*",
#               "Resource": {
#                 "Fn::Join": [
#                   "",
#                   [
#                     "arn:aws:s3:::",
#                     {
#                       "Ref": "S3Bucket"
#                     },
#                     "/*"
#                   ]
#                 ]
#               },
#               "Sid": "PublicReadForGetBucketObjects"
#             }
#           ],
#           "Version": "2012-10-17"
#         }
#       },
#       "Type": "AWS::S3::BucketPolicy"
#     },
#     "S3Bucket": {
#       "DeletionPolicy": "Retain",
#       "Properties": {
#         "AccessControl": "PublicRead",
#         "WebsiteConfiguration": {
#           "ErrorDocument": "error.html",
#           "IndexDocument": "index.html"
#         }
#       },
#       "Type": "AWS::S3::Bucket"
#     }
#   }
# }
```

**Short version:**

```bash
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ --
dagger query point
# Output:
# {
#   ... prints the value stored in the `point` key ...
# }
```

###### 3. Store the output

This Cue version of the JSON template is going to be integrated inside our provisioning plan. Save the output for the next steps of the guide.

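If you would rather not copy it from the terminal, you can also redirect the query result to a file. This is a minimal sketch (the file name is arbitrary); since JSON is valid Cue, the saved object can later be pasted as-is into the plan:

```bash
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ --
# Keep the unmarshalled template around for the "Personal plan" section below.
dagger query point > cfn-template.json
```
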
## Personal plan

With the Cloudformation template now finished, tested and converted to Cue, we can enter the last part of our guide: piping everything together inside our personal plan.

Before continuing, don't forget to reset your `main.cue` plan to its *Setup* form:

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main
```

### Cloudformation relay

As our plan relies on [Cloudformation's relay](https://dagger.io/aws/cloudformation), let's dissect the expected inputs by gradually incorporating them into our plan.

| Name               | Type                                      | Description                                                           |
| ------------------ | :---------------------------------------: | :-------------------------------------------------------------------: |
| *config.region*    | `string`                                  | AWS region                                                            |
| *config.accessKey* | `dagger.#Secret`                          | AWS access key                                                        |
| *config.secretKey* | `dagger.#Secret`                          | AWS secret key                                                        |
| *source*           | `string`                                  | Source is the Cloudformation template (JSON/YAML string)              |
| *stackName*        | `string`                                  | Stack name of the Cloudformation stack                                |
| *onFailure*        | `*"DO_NOTHING" \| "ROLLBACK" \| "DELETE"` | Behavior on failure to create/update the stack                        |
| *timeout*          | `*10 \| >=0 & int`                        | Timeout for waiting for the stack to be created/updated (in minutes)  |
| *neverUpdate*      | `*false \| bool`                          | Never update the stack if it already exists                           |

1. General insights

As seen before in the documentation, values starting with `*` are default values. However, as a plan developer, we may need to add default values to relay inputs that don't have one: Cue gives us this flexibility (cf. the `config` value detailed below).

> WARNING: all inputs without a default value have to be filled for the relay to execute properly. In our case:
>
> * *config.region*
> * *config.accessKey*
> * *config.secretKey*
> * *source*
> * *stackName*

2. The config value

The config values are all part of the `aws` relay. As shown above, none of its 3 required inputs has a default value.

For the sake of the exercise, let's say that our company's policy is to deploy mainly in the `us-east-2` region. Having this value set as a default could be a smart and efficient decision for our dev teams. Let's see how to implement it:

**Before (Setup form):**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main
```

**After:**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import (
    "dagger.io/aws"
)

// AWS account: credentials and region
awsConfig: aws.#Config & {
    region: *"us-east-2" | string @dagger(input)
}
```

**After, with detailed comments:**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import (
    "dagger.io/aws" // <-- Import the AWS relay to instantiate aws.#Config
)

// AWS account: credentials and region
awsConfig: aws.#Config & { // Assign an aws.#Config definition to a field named `awsConfig`
    // awsConfig will be a directly requestable key: `dagger query awsConfig`
    // awsConfig sets the region to either an input, or a default string: "us-east-2"
    region: *"us-east-2" | string @dagger(input)
    // As we declare an aws.#Config, Dagger/Cue will automatically know that some other values inside this definition
    // are inputs, especially secrets (accessKey, secretKey). Due to the confidential nature of secrets, we don't declare default values for them
}
```

*Pro tip: to check whether it worked, these commands might help:*

```bash
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ --
dagger input list # List the required inputs of our personal plan
# Output:
# Input                 Value                   Set by user   Description
# awsConfig.region      *"us-east-2" | string   false         AWS region
# awsConfig.accessKey   dagger.#Secret          false         AWS access key
# awsConfig.secretKey   dagger.#Secret          false         AWS secret key
```

```bash
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ --
dagger query # Query values / inspect default values (very useful in case of conflict)
# Output:
# {
#   "awsConfig": {
#     "region": "us-east-2"
#   }
# }
```

```bash
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ --
dagger up # Try to run the plan. As expected, we encounter a failure
# Output:
# 9:07PM ERR system | required input is missing    input=awsConfig.accessKey
# 9:07PM ERR system | required input is missing    input=awsConfig.secretKey
# 9:07PM FTL system | some required inputs are not set, please re-run with `--force` if you think it's a mistake    missing=0s
```

In the `dagger input list` output, the `awsConfig.region` key now has a default value set, which wasn't the case when we had only imported the base relay. Furthermore, the `dagger up` execution fails because of the unspecified secret inputs.

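As a side note, a developer who needs a different region can override the default instead of editing the plan. The sketch below assumes your Dagger version exposes the `dagger input text` subcommand for plain-string inputs (check `dagger input --help`):

```bash
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ --
# Hypothetical override, only needed when the company default does not fit.
dagger input text awsConfig.region eu-west-1 # assumes the `text` input subcommand is available
dagger input list                            # awsConfig.region should now show "Set by user: true"
```
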
3. Integrating the Cloudformation relay

Now that the `config` definition is properly configured, we can import the Cloudformation relay and fill it in:

**Current plan:**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import (
    "dagger.io/aws"
)

// AWS account: credentials and region
awsConfig: aws.#Config & {
    region: *"us-east-2" | string @dagger(input)
}
```

**Structure to add:**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import (
    "encoding/json"

    "dagger.io/aws"
    "dagger.io/random"
    "dagger.io/aws/cloudformation"
)

// AWS account: credentials and region
awsConfig: aws.#Config & {
    region: *"us-east-2" | string @dagger(input)
}

// Create a random suffix
suffix: random.#String & {
    seed: ""
}

// Request the Cloudformation stack name as an input, or generate a default one with a random suffix to keep it unique
cfnStackName: *"stack-\(suffix.out)" | string @dagger(input) // Has to be unique

// AWS Cloudformation stdlib
cfnStack: cloudformation.#Stack & {
    config: awsConfig
    stackName: cfnStackName
    onFailure: "DO_NOTHING"
    source: json.Marshal(#cfnTemplate)
}

#cfnTemplate: {
    // Paste the Cue Cloudformation template here
}
```

**Commented version:**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import (
    "encoding/json" // <-- Needed to re-marshal the template with json.Marshal below

    "dagger.io/aws" // <-- Import the AWS relay to instantiate aws.#Config
    "dagger.io/random" // <-- Import the Random relay to instantiate random.#String
    "dagger.io/aws/cloudformation" // <-- Import the Cloudformation relay to instantiate cloudformation.#Stack
)

// AWS account: credentials and region
awsConfig: aws.#Config & { // Assign an aws.#Config definition to a field named `awsConfig`
    // awsConfig will be a directly requestable key: `dagger query awsConfig`
    // awsConfig sets the region to either an input, or a default string: "us-east-2"
    region: *"us-east-2" | string @dagger(input)
    // As we declare an aws.#Config, Dagger/Cue will automatically know that some other values inside this definition
    // are inputs, especially secrets (accessKey, secretKey). Due to the confidential nature of secrets, we don't declare default values for them
}

// AWS Cloudformation stdlib
cfnStack: cloudformation.#Stack & { // Assign a cloudformation.#Stack definition to a field named `cfnStack`
    // This definition is the stdlib package to use in order to deploy AWS infrastructure programmatically

    config: awsConfig // As seen in the relay doc, 3 config fields have to be provided: `config.region`, `config.accessKey` and `config.secretKey`
    // As their names contain a `.`, the `config` value expects 3 fields `region`, `accessKey` and `secretKey`, included in an `aws.#Config` parent definition

    stackName: cfnStackName // We assign the `cfnStackName` declared below to `stackName`.
    // `stackName` expects a string type. However, as plan developers, we wanted to give the user a choice: either a default random value, or an input
    // The default random value *"stack-\(suffix.out)" uses the random.#String relay to generate a random value. We append its result inside `"\(append_happening_here)"`

    onFailure: "DO_NOTHING" // As documented in the Cloudformation relay, the `onFailure` key defines the Cloudformation stack's behavior on failure

    source: json.Marshal(#cfnTemplate) // source expects a JSON string. Here we re-marshal the template declared in Cue below
}

// Create a random suffix (cf. random relay)
suffix: random.#String & { // Assign a random.#String definition to a field named `suffix`
    seed: "" // Set seed to an empty string, to generate a new random string every time
} // Output -> suffix.out is a random string

// Request the Cloudformation stack name as an input, or generate a default one with a random suffix to keep it unique
cfnStackName: *"stack-\(suffix.out)" | string @dagger(input) // Has to be unique

#cfnTemplate: {
    // Paste the Cue Cloudformation template here
}
```

**Final `main.cue`:**

```cue
-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/main.cue --
package main

import (
    "encoding/json"

    "dagger.io/aws"
    "dagger.io/random"
    "dagger.io/aws/cloudformation"
)

// AWS account: credentials and region
awsConfig: aws.#Config & {
    region: *"us-east-2" | string @dagger(input)
}

// Create a random suffix
suffix: random.#String & {
    seed: ""
}

// Query the Cloudformation stack name, or create one with a random suffix to keep it unique
cfnStackName: *"stack-\(suffix.out)" | string @dagger(input)

// AWS Cloudformation stdlib
cfnStack: cloudformation.#Stack & {
    config: awsConfig
    stackName: cfnStackName
    onFailure: "DO_NOTHING"
    source: json.Marshal(#cfnTemplate)
}

#cfnTemplate: {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Outputs": {
        "Name": {
            "Description": "Name S3 Bucket",
            "Value": {
                "Fn::GetAtt": [
                    "S3Bucket",
                    "Arn"
                ]
            }
        }
    },
    "Resources": {
        "BucketPolicy": {
            "Properties": {
                "Bucket": {
                    "Ref": "S3Bucket"
                },
                "PolicyDocument": {
                    "Id": "MyPolicy",
                    "Statement": [
                        {
                            "Action": "s3:GetObject",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Resource": {
                                "Fn::Join": [
                                    "",
                                    [
                                        "arn:aws:s3:::",
                                        {
                                            "Ref": "S3Bucket"
                                        },
                                        "/*"
                                    ]
                                ]
                            },
                            "Sid": "PublicReadForGetBucketObjects"
                        }
                    ],
                    "Version": "2012-10-17"
                }
            },
            "Type": "AWS::S3::BucketPolicy"
        },
        "S3Bucket": {
            "DeletionPolicy": "Retain",
            "Properties": {
                "AccessControl": "PublicRead",
                "WebsiteConfiguration": {
                    "ErrorDocument": "error.html",
                    "IndexDocument": "index.html"
                }
            },
            "Type": "AWS::S3::Bucket"
        }
    }
}
```

"IndexDocument": "index.html" + } + }, + "Type": "AWS::S3::Bucket" + } + } +} +``` + + + + +### Deploy + +Finally ! We now have a working template ready to be used to provision S3 infrastructures. Let's add the missing inputs (aws credentials) and let's deploy it : + + + + +```bash +-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ -- +dagger input secret awsConfig.accessKey yourAccessKey + +-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ -- +dagger input secret awsConfig.secretKey yourSecretKey + +-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ -- +dagger input list +# Input Value Set by user Description +# awsConfig.region *"us-east-2" | string false AWS region +# awsConfig.accessKey dagger.#Secret true AWS access key <-- Specified +# awsConfig.secretKey dagger.#Secret true AWS secret key <-- Specified +# suffix.length *12 | number false length of the string +# cfnStack.timeout *10 | >=0 & int false Timeout for waiting for the stack to be created/updated (in minutes) +# cfnStack.neverUpdate *false | bool false Never update the stack if already exists + +# All the other inputs have default values, we're good to go ! + +-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ -- +dagger up +# Output: +#2:22PM INF suffix.out | computing +#2:22PM INF suffix.out | completed duration=200ms +#2:22PM INF cfnStack.outputs | computing +#2:22PM INF cfnStack.outputs | #15 1.304 { +#2:22PM INF cfnStack.outputs | #15 1.304 "Parameters": [] +#2:22PM INF cfnStack.outputs | #15 1.304 } +#2:22PM INF cfnStack.outputs | #15 2.948 { +#2:22PM INF cfnStack.outputs | #15 2.948 "StackId": "arn:aws:cloudformation:us-east-2:817126022176:stack/stack-emktqcfwksng/207d29a0-cd0b-11eb-aafd-0a6bae5481b4" +#2:22PM INF cfnStack.outputs | #15 2.948 } +#2:22PM INF cfnStack.outputs | completed duration=35s + +-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ -- +dagger output list +# Output Value Description +# suffix.out "emktqcfwksng" generated random string +# cfnStack.outputs.Name "arn:aws:s3:::stack-emktqcfwksng-s3bucket-9eiowjs1jab4" - +``` + + + + +```bash +-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ -- +dagger input secret awsConfig.accessKey yourAccessKey + +-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ -- +dagger input secret awsConfig.secretKey yourSecretKey + +-- ~/infra-provisioning/.dagger/env/s3-provisioning/plan/ -- +dagger input list +# Input Value Set by user Description +# awsConfig.region *"us-east-2" | string false AWS region +# awsConfig.accessKey dagger.#Secret true AWS access key <-- Specified +# awsConfig.secretKey dagger.#Secret true AWS secret key <-- Specified +# suffix.length *12 | number false length of the string +# cfnStack.timeout *10 | >=0 & int false Timeout for waiting for the stack to be created/updated (in minutes) +# cfnStack.neverUpdate *false | bool false Never update the stack if already exists + +# All the other inputs have default values, we're good to go ! + +dagger up -l debug +#Output: +# 3:50PM DBG system | detected buildkit version version=v0.8.3 +# 3:50PM DBG system | spawning buildkit job localdirs={ +# "/tmp/infra-provisioning/.dagger/env/infra/plan": "/tmp/infra-provisioning/.dagger/env/infra/plan" +# } attrs=null +# 3:50PM DBG system | loading configuration +# ... Lots of logs ... 
> With this provisioning infrastructure, your dev team will easily be able to instantiate AWS infrastructure: all they need to know is `dagger input list` and `dagger up`. Isn't that awesome? :-D

PS: This plan could be further extended with the AWS S3 example: it could not only provision an infrastructure but also easily deploy on it.

PS1: As it could make a nice first exercise for you, this won't be detailed here. However, we're interested in your imagination: let us know about your implementations :-)
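
One last tip, not part of the pipeline above: this guide creates a real, publicly readable bucket, so you may want to tear everything down once you are done experimenting. A minimal clean-up sketch with the AWS CLI, assuming the stack and bucket names from your own run:

```bash
# Tear-down sketch; replace the stack and bucket names with the ones from your run.
aws cloudformation delete-stack --stack-name stack-emktqcfwksng --region us-east-2

# The template sets "DeletionPolicy": "Retain" on the bucket, so it survives the stack
# deletion and must be removed explicitly (--force also deletes the objects it contains).
aws s3 rb s3://stack-emktqcfwksng-s3bucket-9eiowjs1jab4 --force
```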