First control plane node is not special anymore

The first control plane node is now identical to any other server
node. Cluster initialization happens once, in two steps: first make
sure the k3s cluster is initialized, then apply our configurations
while the other nodes join. This change makes initialization more
resilient and even faster than before.
Marco Nenciarini 2022-02-22 08:50:54 +01:00
parent 116f13f6f2
commit 0c3aa36c03
7 changed files with 40 additions and 52 deletions
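
The resulting shape of the first file is easier to see stripped down (a sketch only; the resource names and the depends_on edge are taken from the diff below, bodies elided):

resource "null_resource" "first_control_plane" {
  # step 1: upload the k3s server config and initialize the cluster
}

resource "null_resource" "kustomization" {
  # step 2: apply our kustomization while the other nodes join
  depends_on = [null_resource.first_control_plane]
}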

View File

@@ -1,54 +1,24 @@
module "first_control_plane" {
  source = "./modules/host"
  name = "k3s-control-plane-0"
  ssh_keys = [hcloud_ssh_key.k3s.id]
  public_key = var.public_key
  private_key = var.private_key
  additional_public_keys = var.additional_public_keys
  firewall_ids = [hcloud_firewall.k3s.id]
  placement_group_id = hcloud_placement_group.k3s.id
  location = var.location
  network_id = hcloud_network.k3s.id
  ip = local.first_control_plane_network_ip
  server_type = var.control_plane_server_type
  labels = {
    "provisioner" = "terraform",
    "engine" = "k3s"
  }
  hcloud_token = var.hcloud_token
}
resource "null_resource" "first_control_plane" {
  triggers = {
    first_control_plane_id = module.first_control_plane.id
  }
  connection {
    user = "root"
    private_key = local.ssh_private_key
    agent_identity = local.ssh_identity
    host = module.first_control_plane.ipv4_address
    host = module.control_planes[0].ipv4_address
  }
  # Generating k3s master config file
  provisioner "file" {
    content = yamlencode({
      node-name = module.first_control_plane.name
      node-name = module.control_planes[0].name
      token = random_password.k3s_token.result
      cluster-init = true
      disable-cloud-controller = true
      disable = ["servicelb", "local-storage"]
      flannel-iface = "eth1"
      kubelet-arg = "cloud-provider=external"
      node-ip = local.first_control_plane_network_ip
      advertise-address = local.first_control_plane_network_ip
      tls-san = local.first_control_plane_network_ip
      node-ip = module.control_planes[0].private_ipv4_address
      advertise-address = module.control_planes[0].private_ipv4_address
      tls-san = module.control_planes[0].private_ipv4_address
      node-taint = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
      node-label = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
    })
@@ -87,6 +57,20 @@ resource "null_resource" "first_control_plane" {
    ]
  }
  depends_on = [
    hcloud_network_subnet.k3s,
    hcloud_firewall.k3s
  ]
}
resource "null_resource" "kustomization" {
  connection {
    user = "root"
    private_key = local.ssh_private_key
    agent_identity = local.ssh_identity
    host = module.control_planes[0].ipv4_address
  }
  # Upload kustomization.yaml, containing Hetzner CSI & CCM, as well as kured.
  provisioner "file" {
    content = yamlencode({
@@ -170,7 +154,6 @@ resource "null_resource" "first_control_plane" {
  }
  depends_on = [
    hcloud_network_subnet.k3s,
    hcloud_firewall.k3s
    null_resource.first_control_plane
  ]
}
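
For reference, the map passed to yamlencode in the first resource is what lands on the node as /etc/rancher/k3s/config.yaml. Rendered schematically (the 10.0.1.1 private IP is assumed for illustration, with scheduling on control planes disallowed and upgrades enabled; yamlencode itself additionally quotes keys and values and sorts keys alphabetically):

advertise-address: 10.0.1.1
cluster-init: true
disable:
  - servicelb
  - local-storage
disable-cloud-controller: true
flannel-iface: eth1
kubelet-arg: cloud-provider=external
node-ip: 10.0.1.1
node-label:
  - k3s_upgrade=true
node-name: k3s-control-plane-0
node-taint:
  - node-role.kubernetes.io/master:NoSchedule
tls-san: 10.0.1.1
token: <random k3s token>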

View File

@@ -1,7 +1,7 @@
data "remote_file" "kubeconfig" {
  conn {
    host = module.first_control_plane.ipv4_address
    host = module.control_planes[0].ipv4_address
    port = 22
    user = "root"
    private_key = local.ssh_private_key
@@ -9,11 +9,11 @@ data "remote_file" "kubeconfig" {
  }
  path = "/etc/rancher/k3s/k3s.yaml"
  depends_on = [null_resource.first_control_plane]
  depends_on = [null_resource.control_planes[0]]
}
locals {
  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.first_control_plane.ipv4_address)
  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.control_planes[0].ipv4_address)
  kubeconfig_parsed = yamldecode(local.kubeconfig_external)
  kubeconfig_data = {
    host = local.kubeconfig_parsed["clusters"][0]["cluster"]["server"]
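
The replace() above exists because k3s writes its kubeconfig for local use only. Illustratively, with 1.2.3.4 standing in for the first control plane's public IP (assumed value):

# inside /etc/rancher/k3s/k3s.yaml, as written by k3s on the node:
#     server: https://127.0.0.1:6443
# after replace(..., "127.0.0.1", module.control_planes[0].ipv4_address):
#     server: https://1.2.3.4:6443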

View File

@@ -1,5 +1,5 @@
locals {
  first_control_plane_network_ip = cidrhost(hcloud_network_subnet.k3s.ip_range, 257)
  first_control_plane_network_ip = module.control_planes[0].private_ipv4_address
  ssh_public_key = trimspace(file(var.public_key))
  # ssh_private_key is either the contents of var.private_key or null to use an ssh agent.
@@ -22,7 +22,9 @@ locals {
    # prepare the k3s config directory
    "mkdir -p /etc/rancher/k3s",
    # move the config file into place
    "mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
    "mv /tmp/config.yaml /etc/rancher/k3s/config.yaml",
    # if the server has already been initialized just stop here
    "[ -e /etc/rancher/k3s/k3s.yaml ] && exit 0",
  ]
  install_k3s_server = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SKIP_START=true INSTALL_K3S_EXEC=server sh -"])
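
Assembled from the lists above, each server node now runs roughly this shell sequence (a sketch; k3s only writes /etc/rancher/k3s/k3s.yaml once the server has started, so the file's presence marks an already-initialized node and makes re-provisioning a no-op):

mkdir -p /etc/rancher/k3s
mv /tmp/config.yaml /etc/rancher/k3s/config.yaml
# already initialized on a previous run: stop before reinstalling
[ -e /etc/rancher/k3s/k3s.yaml ] && exit 0
curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SKIP_START=true INSTALL_K3S_EXEC=server sh -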

View File

@@ -156,5 +156,5 @@ resource "hcloud_placement_group" "k3s" {
data "hcloud_load_balancer" "traefik" {
  name = "traefik"
  depends_on = [null_resource.first_control_plane]
  depends_on = [null_resource.kustomization]
}
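
Pointing depends_on at null_resource.kustomization is what makes this lookup safe: the "traefik" load balancer only exists once the kustomization step has deployed Traefik. A hypothetical consumer of the lookup (the ipv4 attribute comes from the hcloud provider's load balancer data source):

output "traefik_ipv4" {
  value = data.hcloud_load_balancer.traefik.ipv4
}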

View File

@@ -2,6 +2,9 @@ output "ipv4_address" {
  value = hcloud_server.server.ipv4_address
}
output "private_ipv4_address" {
  value = var.ip
}
output "name" {
  value = hcloud_server.server.name

View File

@@ -1,5 +1,5 @@
output "controlplanes_public_ip" {
  value = concat([module.first_control_plane.ipv4_address], module.control_planes.*.ipv4_address)
  value = module.control_planes.*.ipv4_address
  description = "The public IP addresses of the controlplane servers."
}

View File

@@ -1,8 +1,8 @@
module "control_planes" {
  source = "./modules/host"
  count = var.servers_num - 1
  name = "k3s-control-plane-${count.index + 1}"
  count = var.servers_num
  name = "k3s-control-plane-${count.index}"
  ssh_keys = [hcloud_ssh_key.k3s.id]
  public_key = var.public_key
@@ -12,7 +12,7 @@ module "control_planes" {
  placement_group_id = hcloud_placement_group.k3s.id
  location = var.location
  network_id = hcloud_network.k3s.id
  ip = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
  ip = cidrhost(hcloud_network_subnet.k3s.ip_range, 257 + count.index)
  server_type = var.control_plane_server_type
  labels = {
@@ -24,7 +24,7 @@
}
resource "null_resource" "control_planes" {
  count = var.servers_num - 1
  count = var.servers_num
  triggers = {
    control_plane_id = module.control_planes[count.index].id
@@ -41,16 +41,16 @@ resource "null_resource" "control_planes" {
  provisioner "file" {
    content = yamlencode({
      node-name = module.control_planes[count.index].name
      server = "https://${local.first_control_plane_network_ip}:6443"
      server = "https://${element(module.control_planes.*.private_ipv4_address, count.index > 0 ? 0 : 1)}:6443"
      token = random_password.k3s_token.result
      cluster-init = true
      disable-cloud-controller = true
      disable = ["servicelb", "local-storage"]
      flannel-iface = "eth1"
      kubelet-arg = "cloud-provider=external"
      node-ip = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
      advertise-address = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
      tls-san = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
      node-ip = module.control_planes[count.index].private_ipv4_address
      advertise-address = module.control_planes[count.index].private_ipv4_address
      tls-san = module.control_planes[count.index].private_ipv4_address
      node-taint = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
      node-label = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
    })
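
The new server URL is the subtle part of this hunk: element() with the conditional index makes every node join through control plane 0, except node 0 itself, whose config points at node 1 so it never references its own address. Worked through with assumed private IPs:

# module.control_planes.*.private_ipv4_address = ["10.0.1.1", "10.0.1.2", "10.0.1.3"]  (assumed)
# count.index = 0  ->  element(..., 1)  ->  https://10.0.1.2:6443   (node 0 would join via node 1)
# count.index = 1  ->  element(..., 0)  ->  https://10.0.1.1:6443   (all other nodes join via node 0)
# count.index = 2  ->  element(..., 0)  ->  https://10.0.1.1:6443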