# First control-plane node: provisions the initial Hetzner Cloud server that
# will bootstrap the k3s cluster (cluster-init runs against this host).
module "first_control_plane" {
  source = "./modules/host"

  name                   = "k3s-control-plane-0"
  ssh_keys               = [hcloud_ssh_key.k3s.id]
  public_key             = var.public_key
  private_key            = var.private_key
  additional_public_keys = var.additional_public_keys
  firewall_ids           = [hcloud_firewall.k3s.id]
  placement_group_id     = hcloud_placement_group.k3s.id
  location               = var.location
  network_id             = hcloud_network.k3s.id
  ip                     = local.first_control_plane_network_ip
  server_type            = var.control_plane_server_type

  # Labels applied to the server for identification/filtering in Hetzner Cloud.
  labels = {
    "provisioner" = "terraform",
    "engine"      = "k3s"
  }

  hcloud_token = var.hcloud_token
}
# Bootstraps k3s on the first control-plane host and applies the
# post-install manifests (CCM, CSI, kured, system-upgrade-controller,
# traefik). Re-runs whenever the underlying server is replaced.
resource "null_resource" "first_control_plane" {

  # Re-trigger provisioning if the first control-plane server is recreated.
  triggers = {
    first_control_plane_id = module.first_control_plane.id
  }

  # All provisioners below run over SSH against the new server.
  connection {
    user           = "root"
    private_key    = local.ssh_private_key
    agent_identity = local.ssh_identity
    host           = module.first_control_plane.ipv4_address
  }

  # Generating k3s master config file
  provisioner "file" {
    content = yamlencode({
      node-name                = module.first_control_plane.name
      token                    = random_password.k3s_token.result
      cluster-init             = true
      disable-cloud-controller = true
      disable                  = ["servicelb", "local-storage"]
      flannel-iface            = "eth1"
      kubelet-arg              = "cloud-provider=external"
      node-ip                  = local.first_control_plane_network_ip
      advertise-address        = local.first_control_plane_network_ip
      tls-san                  = local.first_control_plane_network_ip
      node-taint               = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
      node-label               = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
    })
    destination = "/tmp/config.yaml"
  }

  # Install k3s server
  provisioner "remote-exec" {
    inline = local.install_k3s_server
  }

  # Upon reboot verify that the k3s server starts, and wait for k3s to be ready to receive commands
  provisioner "remote-exec" {
    inline = [
      "systemctl start k3s",
      # prepare the post_install directory
      "mkdir -p /tmp/post_install",
      # wait for k3s to become ready
      <<-EOT
      timeout 120 bash <<EOF
        until systemctl status k3s > /dev/null; do
          systemctl start k3s
          echo "Waiting for the k3s server to start..."
          sleep 2
        done
        until [ -e /etc/rancher/k3s/k3s.yaml ]; do
          echo "Waiting for kubectl config..."
          sleep 2
        done
        until [[ "\$(kubectl get --raw='/readyz' 2> /dev/null)" == "ok" ]]; do
          echo "Waiting for the cluster to become ready..."
          sleep 2
        done
      EOF
      EOT
    ]
  }

  # Upload kustomization.yaml, containing Hetzner CSI & CSM, as well as kured.
  provisioner "file" {
    content = yamlencode({
      apiVersion = "kustomize.config.k8s.io/v1beta1"
      kind       = "Kustomization"
      resources = [
        "https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/${local.ccm_version}/ccm-networks.yaml",
        "https://raw.githubusercontent.com/hetznercloud/csi-driver/${local.csi_version}/deploy/kubernetes/hcloud-csi.yml",
        "https://github.com/weaveworks/kured/releases/download/${local.kured_version}/kured-${local.kured_version}-dockerhub.yaml",
        "https://raw.githubusercontent.com/rancher/system-upgrade-controller/master/manifests/system-upgrade-controller.yaml",
        "traefik.yaml",
      ]
      patchesStrategicMerge = [
        file("${path.module}/kustomize/kured.yaml"),
        file("${path.module}/kustomize/ccm.yaml"),
        file("${path.module}/kustomize/system-upgrade-controller.yaml")
      ]
    })
    destination = "/tmp/post_install/kustomization.yaml"
  }

  # Upload traefik config
  provisioner "file" {
    content = templatefile(
      "${path.module}/templates/traefik_config.yaml.tpl",
      {
        lb_disable_ipv6    = var.lb_disable_ipv6
        lb_server_type     = var.lb_server_type
        location           = var.location
        traefik_acme_tls   = var.traefik_acme_tls
        traefik_acme_email = var.traefik_acme_email
      })
    destination = "/tmp/post_install/traefik.yaml"
  }

  # Upload the system upgrade controller plans config
  provisioner "file" {
    content = templatefile(
      "${path.module}/templates/plans.yaml.tpl",
      {
        channel = var.k3s_upgrade_channel
      })
    destination = "/tmp/post_install/plans.yaml"
  }

  # Deploy secrets, logging is automatically disabled due to sensitive variables
  provisioner "remote-exec" {
    inline = [
      "set -ex",
      "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name}",
      "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token}",
    ]
  }

  # Deploy our post-installation kustomization
  provisioner "remote-exec" {
    inline = [
      "set -ex",
      # This ugly hack is here, because terraform serializes the
      # embedded yaml files with "- |2", when there is more than
      # one yamldocument in the embedded file. Kustomize does not understand
      # that syntax and tries to parse the blocks content as a file, resulting
      # in weird errors. so gnu sed with funny escaping is used to
      # replace lines like "- |3" by "- |" (yaml block syntax).
      # due to indendation this should not changes the embedded
      # manifests themselves
      "sed -i 's/^- |[0-9]\\+$/- |/g' /tmp/post_install/kustomization.yaml",
      "kubectl apply -k /tmp/post_install",
      "echo 'Waiting for the system-upgrade-controller deployment to become available...'",
      "kubectl -n system-upgrade wait --for=condition=available --timeout=120s deployment/system-upgrade-controller",
      "kubectl -n system-upgrade apply -f /tmp/post_install/plans.yaml"
    ]
  }

  # The subnet and firewall must exist before the server can be provisioned.
  depends_on = [
    hcloud_network_subnet.k3s,
    hcloud_firewall.k3s
  ]
}