9dc4952665
* To do so, we need to ensure that the generated kubeconfig is part of Terraform's dependency graph. This has the additional benefit of no longer depending on local files, which should enable multi-user setups. (A rough sketch of one way to wire this up is at the end of this page.)
* This also means that we can't deploy the CCM, CSI & Traefik from our local host, because we don't have kubeconfig.yaml locally while provisioning the control plane, only afterwards.
* So we just run kubectl apply on the control plane itself, after k3s is ready.
* To do so, we need to deploy all manifests there. I've merged the patches into a single kustomization.yaml file, because that makes copying those files to the control-plane server easier.
* We could also put the Traefik config into the same kustomization file, which would save us one of the file provisioner blocks. I didn't want this PR to get any bigger, so I'll consider merging that config later on. kustomization.yaml is small enough that we could yamlencode() it and store the patches in separate files again, rather than as inline strings, which is kind of ugly. (A sketch of that variant follows right below this list.)
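For illustration only (not part of this commit), the merged kustomization could roughly look like the following if we went the yamlencode() route with the patches kept as separate files on the module side. The manifest URLs, the <version> placeholders and the patches/*.yaml paths below are assumptions, not the values actually used here:

locals {
  post_install_kustomization = yamlencode({
    apiVersion = "kustomize.config.k8s.io/v1beta1"
    kind       = "Kustomization"
    resources = [
      # upstream manifests for the Hetzner CCM, the Hetzner CSI driver and kured (illustrative URLs)
      "https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/<version>/ccm-networks.yaml",
      "https://raw.githubusercontent.com/hetznercloud/csi-driver/<version>/deploy/kubernetes/hcloud-csi.yml",
      "https://github.com/weaveworks/kured/releases/download/<version>/kured-<version>-dockerhub.yaml",
    ]
    # patches would live as separate files in the module and get inlined at plan time,
    # so only the rendered kustomization.yaml has to be copied to the server
    patchesStrategicMerge = [
      file("${path.module}/patches/ccm.yaml"),
      file("${path.module}/patches/kured.yaml"),
    ]
  })
}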
155 lines
4.8 KiB
HCL
resource "hcloud_server" "first_control_plane" {
|
|
name = "k3s-control-plane-0"
|
|
|
|
image = data.hcloud_image.linux.name
|
|
rescue = "linux64"
|
|
server_type = var.control_plane_server_type
|
|
location = var.location
|
|
ssh_keys = [hcloud_ssh_key.k3s.id]
|
|
firewall_ids = [hcloud_firewall.k3s.id]
|
|
placement_group_id = hcloud_placement_group.k3s.id
|
|
|
|
labels = {
|
|
"provisioner" = "terraform",
|
|
"engine" = "k3s"
|
|
}
|
|
|
|
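  # Upload the ignition config (config.ign) that the MicroOS installation below will use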
provisioner "file" {
|
|
content = templatefile("${path.module}/templates/config.ign.tpl", {
|
|
name = self.name
|
|
ssh_public_key = local.ssh_public_key
|
|
})
|
|
destination = "/root/config.ign"
|
|
|
|
connection {
|
|
user = "root"
|
|
private_key = local.ssh_private_key
|
|
agent_identity = local.ssh_identity
|
|
host = self.ipv4_address
|
|
}
|
|
}
  # Install MicroOS
  provisioner "remote-exec" {
    inline = local.MicroOS_install_commands

    connection {
      user           = "root"
      private_key    = local.ssh_private_key
      agent_identity = local.ssh_identity
      host           = self.ipv4_address
    }
  }

  # Issue a reboot command
  provisioner "local-exec" {
    command = "ssh ${local.ssh_args} root@${self.ipv4_address} '(sleep 2; reboot)&'; sleep 3"
  }

  # Wait for MicroOS to reboot and be ready
  provisioner "local-exec" {
    command = <<-EOT
      until ssh ${local.ssh_args} -o ConnectTimeout=2 root@${self.ipv4_address} true 2> /dev/null
      do
        echo "Waiting for MicroOS to reboot and become available..."
        sleep 2
      done
    EOT
  }

  # Generate the k3s master config file
  provisioner "file" {
    content = yamlencode({
      node-name                = self.name
      cluster-init             = true
      disable-cloud-controller = true
      disable                  = ["servicelb", "local-storage"]
      flannel-iface            = "eth1"
      kubelet-arg              = "cloud-provider=external"
      node-ip                  = local.first_control_plane_network_ip
      advertise-address        = local.first_control_plane_network_ip
      token                    = random_password.k3s_token.result
      node-taint               = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
    })
    destination = "/etc/rancher/k3s/config.yaml"

    connection {
      user           = "root"
      private_key    = local.ssh_private_key
      agent_identity = local.ssh_identity
      host           = self.ipv4_address
    }
  }
  # Upload kustomization.yaml, containing the Hetzner CCM & CSI, as well as kured.
  provisioner "file" {
    content     = local.post_install_kustomization
    destination = "/tmp/kustomization.yaml"

    connection {
      user           = "root"
      private_key    = local.ssh_private_key
      agent_identity = local.ssh_identity
      host           = self.ipv4_address
    }
  }

  # Upload the Traefik config
  provisioner "file" {
    content     = local.traefik_config
    destination = "/tmp/traefik.yaml"

    connection {
      user           = "root"
      private_key    = local.ssh_private_key
      agent_identity = local.ssh_identity
      host           = self.ipv4_address
    }
  }
  # Run the first control plane
  provisioner "remote-exec" {
    inline = [
      # set the hostname in a persistent fashion
      "hostnamectl set-hostname ${self.name}",
      # first we disable automatic reboot (after transactional updates), and configure the reboot method as kured
      "rebootmgrctl set-strategy off && echo 'REBOOT_METHOD=kured' > /etc/transactional-update.conf",
      # then we initiate the cluster
      "systemctl enable k3s-server",
      <<-EOT
      until systemctl status k3s-server > /dev/null
      do
        systemctl start k3s-server
        echo "Initiating the cluster..."
        sleep 2
      done
      EOT
      # wait until the API server answers on /readyz before creating any resources
      , <<-EOT
      timeout 120 bash -c 'while [[ "$(curl -s -o /dev/null -w ''%%{http_code}'' -k https://localhost:6443/readyz)" != "200" ]]; do sleep 1; done'
      EOT
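      # create the secrets that the Hetzner CCM and CSI driver read their API token from, then apply the uploaded manifests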
, "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name}",
|
|
"kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token}",
|
|
"kubectl apply -k /tmp/",
|
|
"kubectl apply -f /tmp/traefik.yaml",
|
|
"rm /tmp/traefik.yaml /tmp/kustomization.yaml"
|
|
]
|
|
|
|
connection {
|
|
user = "root"
|
|
private_key = local.ssh_private_key
|
|
agent_identity = local.ssh_identity
|
|
host = self.ipv4_address
|
|
}
|
|
}
|
|
|
|
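  # Attach the server to the cluster's private network with a fixed IP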
  network {
    network_id = hcloud_network.k3s.id
    ip         = local.first_control_plane_network_ip
  }

  depends_on = [
    hcloud_network_subnet.k3s,
    hcloud_firewall.k3s
  ]
}
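As a follow-up to the first point in the commit message: one possible way (a rough sketch only, not what this commit implements, and not part of the file above) to pull the generated kubeconfig into Terraform's dependency graph would be an external data source that reads it off the control plane over SSH. The data source and output names, the jq invocation and the 127.0.0.1 rewrite are all assumptions:

data "external" "kubeconfig" {
  # requires ssh and jq on the machine running terraform;
  # the reference to the server resource makes this run only after the control plane exists
  program = [
    "bash", "-c",
    "ssh ${local.ssh_args} root@${hcloud_server.first_control_plane.ipv4_address} 'cat /etc/rancher/k3s/k3s.yaml' | jq -Rs '{content: .}'"
  ]
}

output "kubeconfig" {
  # point the kubeconfig at the server's public address instead of 127.0.0.1
  value     = replace(data.external.kubeconfig.result.content, "127.0.0.1", hcloud_server.first_control_plane.ipv4_address)
  sensitive = true
}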