resource "hcloud_server" "first_control_plane" {
  name = "k3s-control-plane-0"

  image              = data.hcloud_image.linux.name
  rescue             = "linux64"
  server_type        = var.control_plane_server_type
  location           = var.location
  ssh_keys           = [hcloud_ssh_key.k3s.id]
  firewall_ids       = [hcloud_firewall.k3s.id]
  placement_group_id = hcloud_placement_group.k3s.id

  labels = {
    "provisioner" = "terraform",
    "engine"      = "k3s"
  }

  connection {
    user           = "root"
    private_key    = local.ssh_private_key
    agent_identity = local.ssh_identity
    host           = self.ipv4_address
  }
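  # Note: every provisioner below runs over the SSH connection declared above.
  # The local-exec blocks additionally rely on local.ssh_args, which is assumed
  # to be defined in the module's locals (typically options along the lines of
  # "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"); that value
  # is illustrative here, not a definition.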
  provisioner "file" {
    content = templatefile("${path.module}/templates/config.ign.tpl", {
      name           = self.name
      ssh_public_key = local.ssh_public_key
    })
    destination = "/root/config.ign"
  }
  # Install MicroOS
  provisioner "remote-exec" {
    inline = local.microOS_install_commands
  }
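  # local.microOS_install_commands is defined elsewhere in the module. Since
  # the server boots into Hetzner's rescue system (rescue = "linux64" above),
  # these commands are presumably what write the MicroOS image to disk and
  # hand it the /root/config.ign uploaded above for its first boot; this is an
  # assumption based on the surrounding steps, not the exact command list.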
  # Issue a reboot command and wait for the node to reboot
  provisioner "local-exec" {
    command = "ssh ${local.ssh_args} root@${self.ipv4_address} '(sleep 2; reboot)&'; sleep 3"
  }
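  # The detached "(sleep 2; reboot)&" lets the ssh command exit cleanly before
  # the connection drops; the loop below then polls until sshd on the freshly
  # installed MicroOS accepts connections again.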
  provisioner "local-exec" {
    command = <<-EOT
      until ssh ${local.ssh_args} -o ConnectTimeout=2 root@${self.ipv4_address} true 2> /dev/null
      do
        echo "Waiting for MicroOS to reboot and become available..."
        sleep 2
      done
    EOT
  }
  # Generate the k3s server config file
  provisioner "file" {
    content = yamlencode({
      node-name                = self.name
      cluster-init             = true
      disable-cloud-controller = true
      disable                  = ["servicelb", "local-storage"]
      flannel-iface            = "eth1"
      kubelet-arg              = "cloud-provider=external"
      node-ip                  = local.first_control_plane_network_ip
      advertise-address        = local.first_control_plane_network_ip
      token                    = random_password.k3s_token.result
      node-taint               = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
      node-label               = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
    })
    destination = "/tmp/config.yaml"
  }
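  # For reference, the rendered /tmp/config.yaml looks roughly like this
  # (the IP and token values are illustrative):
  #
  #   node-name: k3s-control-plane-0
  #   cluster-init: true
  #   disable-cloud-controller: true
  #   disable: ["servicelb", "local-storage"]
  #   flannel-iface: eth1
  #   kubelet-arg: cloud-provider=external
  #   node-ip: 10.0.1.1
  #   advertise-address: 10.0.1.1
  #   token: <random>
  #   node-taint: ["node-role.kubernetes.io/master:NoSchedule"]
  #   node-label: ["k3s_upgrade=true"]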
  # Install k3s server
  provisioner "remote-exec" {
    inline = local.install_k3s_server
  }
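  # local.install_k3s_server is defined in the module's locals; given the
  # /tmp/config.yaml uploaded above, it presumably moves that config into
  # /etc/rancher/k3s/ and installs and enables the k3s server service (an
  # assumption based on the surrounding steps, not the exact command list).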
  # Issue a reboot command and wait for the node to reboot
  provisioner "local-exec" {
    command = "ssh ${local.ssh_args} root@${self.ipv4_address} '(sleep 2; reboot)&'; sleep 3"
  }
  provisioner "local-exec" {
    command = <<-EOT
      until ssh ${local.ssh_args} -o ConnectTimeout=2 root@${self.ipv4_address} true 2> /dev/null
      do
        echo "Waiting for MicroOS to reboot and become available..."
        sleep 2
      done
    EOT
  }
  # Upon reboot, verify that the k3s server starts, and wait for k3s to be ready to receive commands
  provisioner "remote-exec" {
    inline = [
      # Prepare the post_install directory
      "mkdir -p /tmp/post_install",
      # Wait for k3s to become ready
      <<-EOT
      timeout 120 bash <<EOF
        until systemctl status k3s > /dev/null; do
          echo "Waiting for the k3s server to start..."
          sleep 1
        done
        until [ -e /etc/rancher/k3s/k3s.yaml ]; do
          echo "Waiting for kubectl config..."
          sleep 1
        done
        until [[ "\$(kubectl get --raw='/readyz' 2> /dev/null)" == "ok" ]]; do
          echo "Waiting for the cluster to become ready..."
          sleep 1
        done
      EOF
      EOT
    ]
  }
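  # Note: the "\$(...)" above defers command substitution to the inner bash
  # reading the unquoted EOF heredoc, so the kubectl readiness check is
  # re-evaluated on every loop iteration instead of being expanded once by the
  # outer shell.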
  # Upload kustomization.yaml, containing the Hetzner CCM & CSI driver, as well as Kured, the system upgrade controller, and the Traefik config.
  provisioner "file" {
    content = yamlencode({
      apiVersion = "kustomize.config.k8s.io/v1beta1"
      kind       = "Kustomization"
      resources = [
        "https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/${local.ccm_version}/ccm-networks.yaml",
        "https://raw.githubusercontent.com/hetznercloud/csi-driver/${local.csi_version}/deploy/kubernetes/hcloud-csi.yml",
        "https://github.com/weaveworks/kured/releases/download/${local.kured_version}/kured-${local.kured_version}-dockerhub.yaml",
        "https://raw.githubusercontent.com/rancher/system-upgrade-controller/master/manifests/system-upgrade-controller.yaml",
        "./traefik.yaml",
      ]
      patchesStrategicMerge = [
        file("${path.module}/patches/kured.yaml"),
        file("${path.module}/patches/ccm.yaml")
      ]
    })
    destination = "/tmp/post_install/kustomization.yaml"
  }
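  # The relative "./traefik.yaml" entry resolves against the kustomization's
  # own directory, /tmp/post_install, i.e. the file uploaded by the next
  # provisioner below.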
  # Upload traefik config
  provisioner "file" {
    content = templatefile(
      "${path.module}/templates/traefik_config.yaml.tpl",
      {
        lb_disable_ipv6    = var.lb_disable_ipv6
        lb_server_type     = var.lb_server_type
        location           = var.location
        traefik_acme_tls   = var.traefik_acme_tls
        traefik_acme_email = var.traefik_acme_email
    })
    destination = "/tmp/post_install/traefik.yaml"
  }
  # Upload the system upgrade controller plans config
  provisioner "file" {
    content = templatefile(
      "${path.module}/templates/plans.yaml.tpl",
      {
        channel = var.k3s_upgrade_channel
    })
    destination = "/tmp/post_install/plans.yaml"
  }
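  # plans.yaml is expected to define system-upgrade-controller Plan resources
  # that follow var.k3s_upgrade_channel and presumably select the nodes
  # labeled k3s_upgrade=true above; the exact plan contents live in the
  # template, not here.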
  # Deploy the secrets; logging is automatically disabled because the variables are sensitive
  provisioner "remote-exec" {
    inline = [
      "set -ex",
      "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name}",
      "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token}",
    ]
  }
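  # The "hcloud" secret (token + network) is read by the Hetzner cloud
  # controller manager, and the "hcloud-csi" secret by the CSI driver, both of
  # which are applied via the kustomization in the next step.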
  # Deploy our post-installation kustomization
  provisioner "remote-exec" {
    inline = [
      "set -ex",
      # This ugly hack is here because Terraform serializes the embedded YAML
      # files with "- |2" when there is more than one YAML document in the
      # embedded file. Kustomize does not understand that syntax and tries to
      # parse the block's content as a file, resulting in weird errors. So GNU
      # sed with funny escaping is used to replace lines like "- |3" with
      # "- |" (YAML block syntax). Due to the indentation, this should not
      # change the embedded manifests themselves.
      "sed -i 's/^- |[0-9]\\+$/- |/g' /tmp/post_install/kustomization.yaml",
      "kubectl apply -k /tmp/post_install",
      "echo 'Waiting for the system-upgrade-controller deployment to become available...' && kubectl -n system-upgrade wait --for=condition=available --timeout=300s deployment/system-upgrade-controller",
      "kubectl apply -f /tmp/post_install/plans.yaml"
    ]
  }
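  # For illustration, the sed above turns a serialized entry of the assumed
  # shape
  #   patchesStrategicMerge:
  #   - |2
  #     apiVersion: apps/v1
  #     ...
  # into "- |", a plain YAML block scalar that kustomize parses correctly.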

  network {
    network_id = hcloud_network.k3s.id
    ip         = local.first_control_plane_network_ip
  }

  depends_on = [
    hcloud_network_subnet.k3s,
    hcloud_firewall.k3s
  ]
}
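# The explicit depends_on ensures the private subnet and firewall exist before
# the server is created, since the network block above attaches the node to
# the subnet at creation time.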