Remove old stdlib tests
Signed-off-by: Tom Chauveau <tom.chauveau@epitech.eu>
parent d5c575b154
commit b999a3dd11
@@ -6,59 +6,7 @@ setup() {

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: kubernetes" {
    skip_unless_local_kube

    "$DAGGER" init
    dagger_new_with_plan kubernetes "$TESTDIR"/stdlib/kubernetes/

    run "$DAGGER" input -e "kubernetes" text kubeconfig -f ~/.kube/config
    assert_success

    run "$DAGGER" up -e "kubernetes"
    assert_success
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: kustomize" {
    "$DAGGER" compute "$TESTDIR"/stdlib/kubernetes/kustomize --input-dir TestKustomize.kustom.source="$TESTDIR"/stdlib/kubernetes/kustomize/testdata
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: helm" {
    skip "helm is broken"
    skip_unless_local_kube

    "$DAGGER" init
    dagger_new_with_plan helm "$TESTDIR"/stdlib/kubernetes/helm

    run "$DAGGER" input -e "helm" text kubeconfig -f ~/.kube/config
    assert_success

    cp -R "$TESTDIR"/stdlib/kubernetes/helm/testdata/mychart "$DAGGER_WORKSPACE"/testdata
    run "$DAGGER" input -e "helm" dir TestHelmSimpleChart.deploy.chartSource "$DAGGER_WORKSPACE"/testdata
    assert_success

    run "$DAGGER" up -e "helm"
    assert_success
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: docker: build" {
    "$DAGGER" compute "$TESTDIR"/stdlib/docker/build/ --input-dir source="$TESTDIR"/stdlib/docker/build
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: docker: dockerfile" {
    "$DAGGER" compute "$TESTDIR"/stdlib/docker/dockerfile/ --input-dir source="$TESTDIR"/stdlib/docker/dockerfile/testdata
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
# Changes in https://github.com/dagger/dagger/pull/628
@test "stdlib: docker: push-and-pull" {
    skip_unless_secrets_available "$TESTDIR"/stdlib/docker/push-pull/inputs.yaml

@@ -66,38 +14,3 @@ setup() {
    run "$DAGGER" compute --input-yaml "$TESTDIR"/stdlib/docker/push-pull/inputs.yaml --input-dir source="$TESTDIR"/stdlib/docker/push-pull/testdata "$TESTDIR"/stdlib/docker/push-pull/
    assert_success
}

# FIXME: move to universe/universe.bats
# Assigned to: <ADD YOUR NAME HERE>
@test "stdlib: terraform" {
    skip_unless_secrets_available "$TESTDIR"/stdlib/terraform/s3/inputs.yaml

    "$DAGGER" init
    dagger_new_with_plan terraform "$TESTDIR"/stdlib/terraform/s3

    cp -R "$TESTDIR"/stdlib/terraform/s3/testdata "$DAGGER_WORKSPACE"/testdata
    "$DAGGER" -e terraform input dir TestData "$DAGGER_WORKSPACE"/testdata
    sops -d "$TESTDIR"/stdlib/terraform/s3/inputs.yaml | "$DAGGER" -e "terraform" input yaml "" -f -

    # it must fail because of a missing var
    run "$DAGGER" up -e terraform
    assert_failure

    # add the var and try again
    "$DAGGER" -e terraform input text TestTerraform.apply.tfvars.input "42"
    run "$DAGGER" up -e terraform
    assert_success

    # ensure the tfvar was passed correctly
    run "$DAGGER" query -e terraform \
        TestTerraform.apply.output.input.value -f text
    assert_success
    assert_output "42"

    # ensure the random value is always the same
    # this proves we're effectively using the s3 backend
    run "$DAGGER" query -e terraform \
        TestTerraform.apply.output.random.value -f json
    assert_success
    assert_output "36"
}

@@ -1,2 +0,0 @@
# dagger state
state/**

@@ -1,53 +0,0 @@
package eks

import (
    "dagger.io/aws"
    "dagger.io/aws/eks"
    "dagger.io/kubernetes"
    "dagger.io/dagger/op"
)

TestConfig: awsConfig: aws.#Config & {
    region: "us-east-2"
}

TestCluster: eks.#KubeConfig & {
    config:      TestConfig.awsConfig
    clusterName: *"dagger-example-eks-cluster" | string
}

TestEks: {
    #GetPods:
        """
        kubectl get pods -A
        """

    #up: [
        op.#Load & {
            from: kubernetes.#Kubectl
        },

        op.#WriteFile & {
            dest:    "/kubeconfig"
            content: TestCluster.kubeconfig
        },

        op.#WriteFile & {
            dest:    "/getPods.sh"
            content: #GetPods
        },

        op.#Exec & {
            always: true
            args: [
                "/bin/bash",
                "--noprofile",
                "--norc",
                "-eo",
                "pipefail",
                "/getPods.sh",
            ]
            env: KUBECONFIG: "/kubeconfig"
        },
    ]
}

@@ -1,26 +0,0 @@
name: default
inputs:
  TestConfig.awsConfig.accessKey:
    secret: ENC[AES256_GCM,data:dzhlip9kKU8mMEycFjq6MobD5BA=,iv:LKeYUbXpnWIZneGs7DCLVKxv1W2aa/3EVGO4jnDlOgc=,tag:+TcxQahxFTweyoPaROTJSQ==,type:str]
  TestConfig.awsConfig.secretKey:
    secret: ENC[AES256_GCM,data:bu3AI5jODWv4ePvRKw2l/1UOuH07Z0/oB2hiY4QqrhTcfjdSbr6kBg==,iv:BqddzzXqvAv0cAj2SVhoFx/kUOnRsoevqMRujCINVv0=,tag:u0KjVnbN8h54CLFARJmJ0g==,type:str]
sops:
  kms: []
  gcp_kms: []
  azure_kv: []
  hc_vault: []
  age:
    - recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
      enc: |
        -----BEGIN AGE ENCRYPTED FILE-----
        YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzV0ZXNW5qaGNJMjF5bnBO
        d1Z1RXFhSnNRM1Vwa3lyWFJ6VVFDZTQ3cUhZClh0N1lxZ3dwSFhHTjRyS092OVVj
        Tkw4ZlU4S3g0T1VGS1RYYnB1dGlzbVkKLS0tIEc4T1Z3SEU2NUNhd2FkSXlIUERM
        UE5Cd2VwYkd1MHlTOXNJVEU3RVpqU2sK86kXU6ZaaVHTg9BuCEcOxnDrrW00+bwu
        AHttbzqYVuC3YxXjOTzAZL8aYTStk14wGdI6TirZ9pX0fyaKAfzBUQ==
        -----END AGE ENCRYPTED FILE-----
  lastmodified: "2021-05-27T16:01:59Z"
  mac: ENC[AES256_GCM,data:T+0rcT9Xi/kJ8+EzCd7ewenDmc1cH/t2MxCpf+QXkILUC/uE8OgROizDMAiUYI2HpeBfZrmUgLMVzlTZirIbC51eWLAf6itbSIGKkVuz0uSNwhRpKGAROg6U1h39Scg6RpAvpzSTZvYOx5SwP78Uc6NQdp5yTDEb+0e9Wqzu+jU=,iv:INAN+EPwBv5dWWHQnaMr4QOBQWx3WCcohORvIPrBZN8=,tag:N4vtDowFKTDSHmMob5HgCw==,type:str]
  pgp: []
  encrypted_suffix: secret
  version: 3.7.1

@@ -1,2 +0,0 @@
FROM alpine
RUN echo test >> /test.txt

@@ -1,31 +0,0 @@
package docker

import (
    "dagger.io/dagger"
    "dagger.io/dagger/op"
    "dagger.io/docker"
)

// Build a Docker image from source, using included Dockerfile
source: dagger.#Artifact

TestBuild: {
    image: docker.#Build & {
        "source": source
    }

    verify: #up: [
        op.#Load & {
            from: image
        },

        op.#Exec & {
            always: true
            args: [
                "sh", "-c", """
                    grep -q "test" /test.txt
                    """,
            ]
        },
    ]
}

@@ -1,34 +0,0 @@
package docker

import (
    "dagger.io/dagger"
    "dagger.io/dagger/op"
    "dagger.io/docker"
)

source: dagger.#Artifact

TestImageFromDockerfile: {
    image: docker.#ImageFromDockerfile & {
        dockerfile: """
            FROM alpine
            COPY test.txt /test.txt
            """
        context: source
    }

    verify: #up: [
        op.#Load & {
            from: image
        },

        op.#Exec & {
            always: true
            args: [
                "sh", "-c", """
                    grep -q "test" /test.txt
                    """,
            ]
        },
    ]
}

@@ -1 +0,0 @@
test

@@ -1,54 +0,0 @@
package main

import (
    "dagger.io/dagger"
    "dagger.io/kubernetes/helm"
    "dagger.io/random"
)

// We assume that a kinD cluster is running locally
// To deploy a local KinD cluster, follow this link : https://kind.sigs.k8s.io/docs/user/quick-start/
kubeconfig: string @dagger(input)

// Deploy user local chart
TestHelmSimpleChart: {
    suffix: random.#String & {
        seed: "simple"
    }

    // Deploy chart
    deploy: helm.#Chart & {
        name:         "dagger-test-helm-simple-chart-\(suffix.out)"
        namespace:    "dagger-test"
        "kubeconfig": kubeconfig
        chartSource:  dagger.#Artifact
    }

    // Verify deployment
    verify: #VerifyHelm & {
        chartName: deploy.name
        namespace: deploy.namespace
    }
}

// Deploy remote chart
TestHelmRepoChart: {
    suffix: random.#String & {
        seed: "repo"
    }

    // Deploy chart
    deploy: helm.#Chart & {
        name:         "dagger-test-helm-repository-\(suffix.out)"
        namespace:    "dagger-test"
        "kubeconfig": kubeconfig
        repository:   "https://charts.bitnami.com/bitnami"
        chart:        "redis"
    }

    // Verify deployment
    verify: #VerifyHelm & {
        chartName: deploy.name
        namespace: deploy.namespace
    }
}

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,21 +0,0 @@
apiVersion: v2
name: mychart
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.16.0

@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mychart.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "mychart.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mychart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "mychart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
{{- end }}

@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "mychart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "mychart.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "mychart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "mychart.labels" -}}
helm.sh/chart: {{ include "mychart.chart" . }}
{{ include "mychart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "mychart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "mychart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "mychart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "mychart.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

@@ -1,55 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "mychart.fullname" . }}
  labels:
    {{- include "mychart.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "mychart.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "mychart.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "mychart.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "mychart.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "mychart.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
          {{- end }}
    {{- end }}
{{- end }}

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "mychart.fullname" . }}
  labels:
    {{- include "mychart.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "mychart.selectorLabels" . | nindent 4 }}

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "mychart.serviceAccountName" . }}
  labels:
    {{- include "mychart.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "mychart.fullname" . }}-test-connection"
  labels:
    {{- include "mychart.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "mychart.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never

@@ -1,68 +0,0 @@
# Default values for mychart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: nginx
  pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 80

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths: []
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

@@ -1,51 +0,0 @@
package main

import (
    "dagger.io/dagger/op"
    "dagger.io/kubernetes"
)

#VerifyHelm: {
    chartName: string

    namespace: string

    // Verify that pod exist
    #getHelmPods:
        """
        kubectl get pods --namespace "$KUBE_NAMESPACE" | grep "\(chartName)"
        """

    #up: [
        op.#Load & {
            from: kubernetes.#Kubectl
        },

        op.#WriteFile & {
            dest:    "/getHelmPods.sh"
            content: #getHelmPods
        },

        op.#WriteFile & {
            dest:    "/kubeconfig"
            content: kubeconfig
            mode:    0o600
        },

        op.#Exec & {
            always: true
            args: [
                "/bin/bash",
                "--noprofile",
                "--norc",
                "-eo",
                "pipefail",
                "/getHelmPods.sh",
            ]
            env: {
                KUBECONFIG:     "/kubeconfig"
                KUBE_NAMESPACE: namespace
            }
        },
    ]
}

@@ -1,44 +0,0 @@
package main

import (
    "encoding/yaml"
    "dagger.io/kubernetes"
    "dagger.io/random"
)

// We assume that a kinD cluster is running locally
// To deploy a local KinD cluster, follow this link : https://kind.sigs.k8s.io/docs/user/quick-start/
kubeconfig: string @dagger(input)

TestKubeApply: {
    suffix: random.#String & {
        seed: ""
    }

    // Pod spec
    kubeSrc: {
        apiVersion: "v1"
        kind:       "Pod"
        metadata: name: "kube-test-\(suffix.out)"
        spec: {
            restartPolicy: "Never"
            containers: [{
                name:  "test"
                image: "hello-world"
            }]
        }
    }

    // Apply deployment
    apply: kubernetes.#Resources & {
        "kubeconfig": kubeconfig
        namespace:    "dagger-test"
        manifest:     yaml.Marshal(kubeSrc)
    }

    // Verify deployment
    verify: #VerifyApply & {
        podname:   kubeSrc.metadata.name
        namespace: apply.namespace
    }
}

@@ -1,32 +0,0 @@
package main

import (
    "encoding/yaml"
    "dagger.io/dagger"
    "dagger.io/kubernetes/kustomize"
)

TestKustomize: {
    testdata: dagger.#Artifact

    // Run Kustomize
    kustom: kustomize.#Kustomize & {
        source: testdata
        kustomization: yaml.Marshal({
            resources: ["deployment.yaml", "pod.yaml"]
            images: [{
                name:   "nginx"
                newTag: "v1"
            }]
            replicas: [{
                name:  "nginx-deployment"
                count: 2
            }]
        })
    }

    // Verify kustomization generation
    verify: #VerifyKustomize & {
        source: kustom
    }
}

@@ -1,22 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx-deployment
spec:
  replicas: 1
  template:
    metadata:
      name: nginx-deployment
      labels:
        app: nginx-deployment
    spec:
      containers:
        - name: nginx-deployment
          image: nginx
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
  selector:
    matchLabels:
      app: nginx-deployment

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
  labels:
    app: test-pod
spec:
  containers:
    - name: test-pod
      image: nginx
      imagePullPolicy: IfNotPresent
  restartPolicy: Always

@@ -1,72 +0,0 @@
package main

import (
    "dagger.io/dagger/op"
    "dagger.io/dagger"
    "dagger.io/alpine"
)

#VerifyKustomize: {
    source: dagger.#Artifact

    #up: [
        op.#Load & {
            from: alpine.#Image & {
                package: bash: "=~5.1"
            }
        },

        // Check files
        op.#Exec & {
            always: true
            args: [
                "sh", "-c", "test $(ls /source | wc -l) = 1",
            ]
            mount: "/source": from: source
        },

        // Check image tag kustomization
        op.#Exec & {
            always: true
            args: [
                "sh", "-c", #"""
                    grep -q "\- image: nginx:v1" /source/result.yaml
                    """#,
            ]
            mount: "/source": from: source
        },

        // Check replicas kustomization
        op.#Exec & {
            always: true
            args: [
                "sh", "-c", #"""
                    grep -q "replicas: 2" /source/result.yaml
                    """#,
            ]
            mount: "/source": from: source
        },

        // Check pod merge by kustomization
        op.#Exec & {
            always: true
            args: [
                "sh", "-c", #"""
                    grep -q "kind: Pod" /source/result.yaml
                    """#,
            ]
            mount: "/source": from: source
        },

        // Check pod name
        op.#Exec & {
            always: true
            args: [
                "sh", "-c", #"""
                    grep -q "name: test-pod" /source/result.yaml
                    """#,
            ]
            mount: "/source": from: source
        },
    ]
}

@@ -1,79 +0,0 @@
package main

import (
    "dagger.io/dagger/op"
    "dagger.io/kubernetes"
)

#VerifyApply: {
    podname: string

    namespace: string

    // Verify that pod exist
    #GetPods:
        """
        kubectl get pods --namespace "$KUBE_NAMESPACE" \( podname )
        """

    // Clear that pod for future test
    #DeletePods:
        """
        kubectl delete pods --namespace "$KUBE_NAMESPACE" \( podname )
        """

    #up: [
        op.#Load & {
            from: kubernetes.#Kubectl
        },

        op.#WriteFile & {
            dest:    "/kubeconfig"
            content: kubeconfig
            mode:    0o600
        },

        op.#WriteFile & {
            dest:    "/getPods.sh"
            content: #GetPods
        },

        // Check pods
        op.#Exec & {
            always: true
            args: [
                "/bin/bash",
                "--noprofile",
                "--norc",
                "-eo",
                "pipefail",
                "/getPods.sh",
            ]
            env: {
                KUBECONFIG:     "/kubeconfig"
                KUBE_NAMESPACE: namespace
            }
        },

        op.#WriteFile & {
            dest:    "/deletePods.sh"
            content: #DeletePods
        },

        op.#Exec & {
            always: true
            args: [
                "/bin/bash",
                "--noprofile",
                "--norc",
                "-eo",
                "pipefail",
                "/deletePods.sh",
            ]
            env: {
                KUBECONFIG:     "/kubeconfig"
                KUBE_NAMESPACE: namespace
            }
        },
    ]
}

@@ -1,24 +0,0 @@
TestConfig:
  awsConfig:
    accessKey: ENC[AES256_GCM,data:cZLf9D1ymnU4A44oGiQ4fFKdEB0=,iv:rNv9rnXSvIpKeUYRqseS9aKjEG4Wim7OW0EKEbBgp+M=,tag:cOzI4KsDgCgi/w7ByFKJJw==,type:str]
    secretKey: ENC[AES256_GCM,data:ZFIHfnQYYu7ZhoXogVIHbd2wakBTw9D0TiHeadSKaYAQemCun/egNg==,iv:zISyY5zGZHfe5HZJHdfIUpX6siFIgLMrwAbZRyLH9FU=,tag:uD+1eLHY/AKR9vnpyBh+GQ==,type:str]
sops:
  kms: []
  gcp_kms: []
  azure_kv: []
  hc_vault: []
  age:
    - recipient: age1gxwmtwahzwdmrskhf90ppwlnze30lgpm056kuesrxzeuyclrwvpsupwtpk
      enc: |
        -----BEGIN AGE ENCRYPTED FILE-----
        YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPdU5ENVpScm0yd2RXWjRJ
        aSsxWTNvSHBBeU94Z2ZKNjhXdzJHZGNybXkwCk9FVW5EM21LSTRHMkE5VG1SRFpL
        ZGUyOHl3MEU3M3ZXTzBqSlExTU1uVTgKLS0tIDZRVDJOaEVZVnVSalRKMUVTTytV
        ZWRONHhmOEJVd1lqM1NkMFdSNHU2THMKSjtxHeq/ZSgpXrevLH4AVYyRh4jO6qjT
        J301rFx0Cu5qeSIhRiG54Pse83GD+fObDhfH0nPf5HZttDZxrISUdg==
        -----END AGE ENCRYPTED FILE-----
  lastmodified: "2021-05-28T20:45:06Z"
  mac: ENC[AES256_GCM,data:7b6X10McAD1qvsS3ZFWeteP7zLC6IAo6NdFjvaX1iyrjoZ+fT8hNkIPVKyfPFTqZzNIZ7qEYJO2PKrTjbhf6a1LEsL9gtfoX4JwINDk66TgIsJsvdp4TRIlEKoRSKK08zc+A5YFAtD1Pj+a3+NnF32ZUsoH+jqSixH2hK51RI0U=,iv:JKeSA0bp+QBE8H/kS/eIL47k1Bsg4L0q/YU4OlJmIKU=,tag:f3gzp/Nv4p4DajNfoAicAg==,type:str]
  pgp: []
  unencrypted_suffix: _unencrypted
  version: 3.7.1

@@ -1,25 +0,0 @@
package testing

import (
    "dagger.io/dagger"

    "dagger.io/terraform"
)

TestData: dagger.#Artifact

TestConfig: awsConfig: {
    accessKey: string
    secretkey: string
    region:    "us-east-2"
}

TestTerraform: apply: terraform.#Configuration & {
    source: TestData
    env: {
        AWS_ACCESS_KEY_ID:     TestConfig.awsConfig.accessKey
        AWS_SECRET_ACCESS_KEY: TestConfig.awsConfig.secretKey
        AWS_DEFAULT_REGION:    TestConfig.awsConfig.region
        AWS_REGION:            TestConfig.awsConfig.region
    }
}

tests/stdlib/terraform/s3/testdata/test.tf (vendored, 34 lines removed)
@@ -1,34 +0,0 @@
terraform {
  required_providers {
    random = {
      source  = "hashicorp/random"
      version = "3.1.0"
    }
  }

  backend "s3" {
    bucket = "dagger-ci"
    key    = "terraform/tfstate"
    region = "us-east-2"
  }
}

provider "random" {
}

variable "input" {
  type = string
}

resource "random_integer" "test" {
  min = 1
  max = 50
}

output "random" {
  value = random_integer.test.result
}

output "input" {
  value = var.input
}