resource "hcloud_server" "first_control_plane" {
  name = "k3s-control-plane-0"

  image              = data.hcloud_image.linux.name
  rescue             = "linux64"
  server_type        = var.control_plane_server_type
  location           = var.location
  ssh_keys           = [hcloud_ssh_key.k3s.id]
  firewall_ids       = [hcloud_firewall.k3s.id]
  placement_group_id = hcloud_placement_group.k3s.id

  labels = {
    "provisioner" = "terraform",
    "engine"      = "k3s"
  }

  connection {
    user           = "root"
    private_key    = local.ssh_private_key
    agent_identity = local.ssh_identity
    host           = self.ipv4_address
  }

  # Generate the MicroOS installation (ignition) config
  provisioner "file" {
    content = templatefile("${path.module}/templates/config.ign.tpl", {
      name           = self.name
      ssh_public_key = local.ssh_public_key
    })
    destination = "/root/config.ign"
  }

  # Install MicroOS
  provisioner "remote-exec" {
    inline = local.MicroOS_install_commands
  }
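
  # local.MicroOS_install_commands is defined elsewhere in the module. As a hedged
  # illustration of the general idea only (the commands run from Hetzner's rescue
  # system booted above via rescue = "linux64"), its shape is roughly:
  #
  #   MicroOS_install_commands = [
  #     "<download the openSUSE MicroOS image>",
  #     "<write the image to the server's disk>",
  #     "<place /root/config.ign where ignition will pick it up on first boot>"
  #   ]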

  # Issue a reboot command
  provisioner "local-exec" {
    command = "ssh ${local.ssh_args} root@${self.ipv4_address} '(sleep 2; reboot)&'; sleep 3"
  }
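
  # Note: local.ssh_args is defined elsewhere in the module. As an assumption for
  # illustration only, it is expected to hold the usual non-interactive ssh flags,
  # e.g. something like:
  #
  #   ssh_args = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"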

  # Wait for MicroOS to reboot and be ready
  provisioner "local-exec" {
    command = <<-EOT
      until ssh ${local.ssh_args} -o ConnectTimeout=2 root@${self.ipv4_address} true 2> /dev/null
      do
        echo "Waiting for MicroOS to reboot and become available..."
        sleep 2
      done
    EOT
  }

  # Generating k3s master config file
  provisioner "file" {
    content = yamlencode({
      node-name                = self.name
      cluster-init             = true
      disable-cloud-controller = true
      disable                  = ["servicelb", "local-storage"]
      flannel-iface            = "eth1"
      kubelet-arg              = "cloud-provider=external"
      node-ip                  = local.first_control_plane_network_ip
      advertise-address        = local.first_control_plane_network_ip
      token                    = random_password.k3s_token.result
      node-taint               = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
    })
    destination = "/etc/rancher/k3s/config.yaml"
  }
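
  # For reference, the rendered /etc/rancher/k3s/config.yaml ends up looking roughly
  # like this (the values below are illustrative, not the module's actual values):
  #
  #   node-name: "k3s-control-plane-0"
  #   cluster-init: true
  #   disable-cloud-controller: true
  #   disable: ["servicelb", "local-storage"]
  #   flannel-iface: "eth1"
  #   kubelet-arg: "cloud-provider=external"
  #   node-ip: "10.0.0.2"
  #   advertise-address: "10.0.0.2"
  #   token: "<generated k3s token>"
  #   node-taint: ["node-role.kubernetes.io/master:NoSchedule"]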

  # Run the first control plane
  provisioner "remote-exec" {
    inline = [
      # set the hostname in a persistent fashion
      "hostnamectl set-hostname ${self.name}",
      # first we disable automatic reboot (after transactional updates), and configure the reboot method as kured
      "rebootmgrctl set-strategy off && echo 'REBOOT_METHOD=kured' > /etc/transactional-update.conf",
      # then we enable the k3s server service
      "systemctl enable k3s-server",
      # prepare a directory for our post-installation kustomizations
      "mkdir -p /tmp/post_install",
      # start k3s and wait for the cluster to be ready
      <<-EOT
      until systemctl status k3s-server > /dev/null
      do
        systemctl start k3s-server
        echo "Initiating the cluster..."
        sleep 2
      done
      timeout 120 bash -c 'while [[ "$(curl -s -o /dev/null -w ''%%{http_code}'' -k https://localhost:6443/readyz)" != "200" ]]; do echo "Waiting for the cluster to become ready..."; sleep 1; done'
      EOT
    ]
  }

  # Upload kustomization.yaml, containing the Hetzner CCM & CSI, as well as kured.
  provisioner "file" {
    content     = local.post_install_kustomization
    destination = "/tmp/post_install/kustomization.yaml"
  }
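
  # As an illustration only (the real content lives in the module's locals),
  # local.post_install_kustomization is expected to render a kustomization.yaml
  # along these lines:
  #
  #   apiVersion: kustomize.config.k8s.io/v1beta1
  #   kind: Kustomization
  #   resources:
  #     - <hcloud cloud-controller-manager manifest>
  #     - <hcloud csi driver manifest>
  #     - <kured manifest>
  #   (plus the patches merged inline into the same file)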

  # Upload traefik config
  provisioner "file" {
    content     = local.traefik_config
    destination = "/tmp/post_install/traefik.yaml"
  }

  # Deploy the secrets and apply the post-install kustomization from the control
  # plane itself: the kubeconfig is not available locally until after this server
  # has been provisioned, so the CCM, CSI and traefik manifests cannot be applied
  # from the local host at this point.
  provisioner "remote-exec" {
    inline = [
      "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name}",
      "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token}",
      "kubectl apply -k /tmp/post_install",
      "rm -rf /tmp/post_install"
    ]
  }

  network {
    network_id = hcloud_network.k3s.id
    ip         = local.first_control_plane_network_ip
  }

  depends_on = [
    hcloud_network_subnet.k3s,
    hcloud_firewall.k3s
  ]
}
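
# The resource above leans on objects defined elsewhere in the module
# (data.hcloud_image.linux, hcloud_ssh_key.k3s, hcloud_network.k3s,
# hcloud_network_subnet.k3s, hcloud_firewall.k3s, hcloud_placement_group.k3s,
# random_password.k3s_token, and the various locals). As a rough, hypothetical
# sketch of two of them, names and values assumed for illustration only:
#
#   data "hcloud_image" "linux" {
#     # Any base image works here; the server is booted into rescue mode and
#     # MicroOS is installed over it.
#     name = "ubuntu-20.04"
#   }
#
#   resource "random_password" "k3s_token" {
#     length  = 48
#     special = false
#   }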