# terraform-hcloud-kube-hetzner — init.tf
# Bootstraps the first k3s control-plane node and applies the post-install kustomization.
# Initializes the very first control-plane node: uploads the k3s server
# config, installs k3s, then waits until the API server answers /readyz.
resource "null_resource" "first_control_plane" {
  connection {
    user           = "root"
    private_key    = local.ssh_private_key
    agent_identity = local.ssh_identity
    host           = module.control_planes[keys(module.control_planes)[0]].ipv4_address
  }

  # Generating k3s master config file
  provisioner "file" {
    content = yamlencode(merge({
      node-name                   = module.control_planes[keys(module.control_planes)[0]].name
      token                       = random_password.k3s_token.result
      cluster-init                = true
      disable-cloud-controller    = true
      disable                     = local.disable_extras
      flannel-iface               = "eth1"
      kubelet-arg                 = ["cloud-provider=external", "volume-plugin-dir=/var/lib/kubelet/volumeplugins"]
      kube-controller-manager-arg = "flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins"
      node-ip                     = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
      advertise-address           = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
      node-taint                  = local.control_plane_nodes[keys(module.control_planes)[0]].taints
      node-label                  = local.control_plane_nodes[keys(module.control_planes)[0]].labels
      # Calico ships its own network policy engine, so k3s' default one is disabled for it.
      disable-network-policy = var.cni_plugin == "calico" ? true : var.disable_network_policy
      },
      # Calico replaces flannel entirely; otherwise keep the flannel defaults.
      var.cni_plugin == "calico" ? {
        flannel-backend = "none"
    } : {}))

    destination = "/tmp/config.yaml"
  }

  # Install k3s server
  provisioner "remote-exec" {
    inline = local.install_k3s_server
  }

  # Upon reboot start k3s and wait for it to be ready to receive commands
  provisioner "remote-exec" {
    inline = [
      "systemctl start k3s",
      # prepare the post_install directory
      "mkdir -p /var/post_install",
      # wait for k3s to become ready
      <<-EOT
      timeout 120 bash <<EOF
        until systemctl status k3s > /dev/null; do
          systemctl start k3s
          echo "Waiting for the k3s server to start..."
          sleep 2
        done
        until [ -e /etc/rancher/k3s/k3s.yaml ]; do
          echo "Waiting for kubectl config..."
          sleep 2
        done
        until [[ "\$(kubectl get --raw='/readyz' 2> /dev/null)" == "ok" ]]; do
          echo "Waiting for the cluster to become ready..."
          sleep 2
        done
      EOF
      EOT
    ]
  }

  depends_on = [
    hcloud_network_subnet.control_plane
  ]
}
# Uploads the post-install kustomization (CCM, CSI, kured, upgrade controller,
# optional traefik/calico) to the first control plane and applies it.
resource "null_resource" "kustomization" {
  connection {
    user           = "root"
    private_key    = local.ssh_private_key
    agent_identity = local.ssh_identity
    host           = module.control_planes[keys(module.control_planes)[0]].ipv4_address
  }

  # Upload kustomization.yaml, containing Hetzner CCM & CSI, as well as kured.
  provisioner "file" {
    content = yamlencode({
      apiVersion = "kustomize.config.k8s.io/v1beta1"
      kind       = "Kustomization"
      resources = concat([
        "https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/${local.ccm_version}/ccm-networks.yaml",
        "https://raw.githubusercontent.com/hetznercloud/csi-driver/${local.csi_version}/deploy/kubernetes/hcloud-csi.yml",
        "https://github.com/weaveworks/kured/releases/download/${local.kured_version}/kured-${local.kured_version}-dockerhub.yaml",
        "https://raw.githubusercontent.com/rancher/system-upgrade-controller/master/manifests/system-upgrade-controller.yaml",
        ],
        # Traefik only makes sense with a load balancer, i.e. on multi-node clusters.
        local.is_single_node_cluster ? [] : var.traefik_enabled ? ["traefik_config.yaml"] : [],
        var.cni_plugin == "calico" ? ["https://projectcalico.docs.tigera.io/manifests/calico.yaml"] : []),
      patchesStrategicMerge = concat([
        file("${path.module}/kustomize/kured.yaml"),
        file("${path.module}/kustomize/system-upgrade-controller.yaml"),
        "ccm.yaml"
      ], var.cni_plugin == "calico" ? ["calico.yaml"] : [])
    })
    destination = "/var/post_install/kustomization.yaml"
  }

  # Upload traefik config (empty file when traefik is disabled or single-node).
  provisioner "file" {
    content = local.is_single_node_cluster || var.traefik_enabled == false ? "" : templatefile(
      "${path.module}/templates/traefik_config.yaml.tpl",
      {
        name                       = "${var.cluster_name}-traefik"
        load_balancer_disable_ipv6 = var.load_balancer_disable_ipv6
        load_balancer_type         = var.load_balancer_type
        location                   = var.load_balancer_location
        traefik_acme_tls           = var.traefik_acme_tls
        traefik_acme_email         = var.traefik_acme_email
        traefik_additional_options = var.traefik_additional_options
    })
    destination = "/var/post_install/traefik_config.yaml"
  }

  # Upload the CCM patch config
  provisioner "file" {
    content = templatefile(
      "${path.module}/templates/ccm.yaml.tpl",
      {
        cluster_cidr_ipv4 = local.cluster_cidr_ipv4
    })
    destination = "/var/post_install/ccm.yaml"
  }

  # Upload the calico patch config
  provisioner "file" {
    content = templatefile(
      "${path.module}/templates/calico.yaml.tpl",
      {
        cluster_cidr_ipv4 = local.cluster_cidr_ipv4
    })
    destination = "/var/post_install/calico.yaml"
  }

  # Upload the system upgrade controller plans config
  provisioner "file" {
    content = templatefile(
      "${path.module}/templates/plans.yaml.tpl",
      {
        channel = var.initial_k3s_channel
    })
    destination = "/var/post_install/plans.yaml"
  }

  # Deploy secrets, logging is automatically disabled due to sensitive variables
  provisioner "remote-exec" {
    inline = [
      "set -ex",
      "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name} --dry-run=client -o yaml | kubectl apply -f -",
      "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token} --dry-run=client -o yaml | kubectl apply -f -",
    ]
  }

  # Deploy our post-installation kustomization
  provisioner "remote-exec" {
    inline = concat([
      "set -ex",
      # This ugly hack is here, because terraform serializes the
      # embedded yaml files with "- |2", when there is more than
      # one yaml document in the embedded file. Kustomize does not understand
      # that syntax and tries to parse the blocks content as a file, resulting
      # in weird errors. so gnu sed with funny escaping is used to
      # replace lines like "- |3" by "- |" (yaml block syntax).
      # due to indentation this should not change the embedded
      # manifests themselves
      "sed -i 's/^- |[0-9]\\+$/- |/g' /var/post_install/kustomization.yaml",
      "kubectl apply -k /var/post_install",
      "echo 'Waiting for the system-upgrade-controller deployment to become available...'",
      "kubectl -n system-upgrade wait --for=condition=available --timeout=120s deployment/system-upgrade-controller",
      "kubectl -n system-upgrade apply -f /var/post_install/plans.yaml"
      ],
      # On multi-node clusters with traefik, also wait for the Hetzner LB to get an IP.
      local.is_single_node_cluster || var.traefik_enabled == false ? [] : [<<-EOT
      timeout 120 bash <<EOF
      until [ -n "\$(kubectl get -n kube-system service/traefik --output=jsonpath='{.status.loadBalancer.ingress[0].ip}' 2> /dev/null)" ]; do
        echo "Waiting for load-balancer to get an IP..."
        sleep 2
      done
      EOF
      EOT
    ])
  }

  depends_on = [
    null_resource.first_control_plane
  ]
}