diff --git a/master.tf b/init.tf
similarity index 81%
rename from master.tf
rename to init.tf
index 0b02168..6125242 100644
--- a/master.tf
+++ b/init.tf
@@ -1,54 +1,24 @@
-module "first_control_plane" {
-  source = "./modules/host"
-
-  name = "k3s-control-plane-0"
-
-  ssh_keys               = [hcloud_ssh_key.k3s.id]
-  public_key             = var.public_key
-  private_key            = var.private_key
-  additional_public_keys = var.additional_public_keys
-  firewall_ids           = [hcloud_firewall.k3s.id]
-  placement_group_id     = hcloud_placement_group.k3s.id
-  location               = var.location
-  network_id             = hcloud_network.k3s.id
-  ip                     = local.first_control_plane_network_ip
-  server_type            = var.control_plane_server_type
-
-  labels = {
-    "provisioner" = "terraform",
-    "engine"      = "k3s"
-  }
-
-  hcloud_token = var.hcloud_token
-}
-
 resource "null_resource" "first_control_plane" {
-
-  triggers = {
-    first_control_plane_id = module.first_control_plane.id
-  }
-
-
   connection {
     user           = "root"
     private_key    = local.ssh_private_key
     agent_identity = local.ssh_identity
-    host           = module.first_control_plane.ipv4_address
+    host           = module.control_planes[0].ipv4_address
   }

   # Generating k3s master config file
   provisioner "file" {
     content = yamlencode({
-      node-name                = module.first_control_plane.name
+      node-name                = module.control_planes[0].name
       token                    = random_password.k3s_token.result
       cluster-init             = true
       disable-cloud-controller = true
       disable                  = ["servicelb", "local-storage"]
       flannel-iface            = "eth1"
       kubelet-arg              = "cloud-provider=external"
-      node-ip                  = local.first_control_plane_network_ip
-      advertise-address        = local.first_control_plane_network_ip
-      tls-san                  = local.first_control_plane_network_ip
+      node-ip                  = module.control_planes[0].private_ipv4_address
+      advertise-address        = module.control_planes[0].private_ipv4_address
+      tls-san                  = module.control_planes[0].private_ipv4_address
       node-taint               = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
       node-label               = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
     })
@@ -87,6 +57,20 @@ resource "null_resource" "first_control_plane" {
     ]
   }

+  depends_on = [
+    hcloud_network_subnet.k3s,
+    hcloud_firewall.k3s
+  ]
+}
+
+resource "null_resource" "kustomization" {
+  connection {
+    user           = "root"
+    private_key    = local.ssh_private_key
+    agent_identity = local.ssh_identity
+    host           = module.control_planes[0].ipv4_address
+  }
+
   # Upload kustomization.yaml, containing Hetzner CSI & CSM, as well as kured.
   provisioner "file" {
     content = yamlencode({
@@ -136,8 +120,8 @@ resource "null_resource" "first_control_plane" {
   provisioner "remote-exec" {
     inline = [
       "set -ex",
-      "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name}",
-      "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token}",
+      "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name} --dry-run=client -o yaml | kubectl apply -f -",
+      "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token} --dry-run=client -o yaml | kubectl apply -f -",
     ]
   }

@@ -170,7 +154,6 @@ resource "null_resource" "first_control_plane" {
   }

   depends_on = [
-    hcloud_network_subnet.k3s,
-    hcloud_firewall.k3s
+    null_resource.first_control_plane
   ]
 }
diff --git a/kubeconfig.tf b/kubeconfig.tf
index ce92d94..73c1faa 100644
--- a/kubeconfig.tf
+++ b/kubeconfig.tf
@@ -1,7 +1,7 @@
 data "remote_file" "kubeconfig" {
   conn {
-    host        = module.first_control_plane.ipv4_address
+    host        = module.control_planes[0].ipv4_address
     port        = 22
     user        = "root"
     private_key = local.ssh_private_key
@@ -9,11 +9,11 @@ data "remote_file" "kubeconfig" {
   }
   path = "/etc/rancher/k3s/k3s.yaml"

-  depends_on = [null_resource.first_control_plane]
+  depends_on = [null_resource.control_planes[0]]
 }

 locals {
-  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.first_control_plane.ipv4_address)
+  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.control_planes[0].ipv4_address)
   kubeconfig_parsed   = yamldecode(local.kubeconfig_external)
   kubeconfig_data = {
     host = local.kubeconfig_parsed["clusters"][0]["cluster"]["server"]
diff --git a/locals.tf b/locals.tf
index 0aa10e8..72c2e61 100644
--- a/locals.tf
+++ b/locals.tf
@@ -1,5 +1,5 @@
 locals {
-  first_control_plane_network_ip = cidrhost(hcloud_network_subnet.k3s.ip_range, 257)
+  first_control_plane_network_ip = module.control_planes[0].private_ipv4_address

   ssh_public_key = trimspace(file(var.public_key))
   # ssh_private_key is either the contents of var.private_key or null to use a ssh agent.
@@ -22,7 +22,9 @@ locals {
     # prepare the k3s config directory
     "mkdir -p /etc/rancher/k3s",
     # move the config file into place
-    "mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
+    "mv /tmp/config.yaml /etc/rancher/k3s/config.yaml",
+    # if the server has already been initialized just stop here
+    "[ -e /etc/rancher/k3s/k3s.yaml ] && exit 0",
   ]

   install_k3s_server = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SKIP_START=true INSTALL_K3S_EXEC=server sh -"])
diff --git a/main.tf b/main.tf
index bc4ff34..3e71a22 100644
--- a/main.tf
+++ b/main.tf
@@ -156,5 +156,5 @@ resource "hcloud_placement_group" "k3s" {
 data "hcloud_load_balancer" "traefik" {
   name = "traefik"

-  depends_on = [null_resource.first_control_plane]
+  depends_on = [null_resource.kustomization]
 }
diff --git a/modules/host/out.tf b/modules/host/out.tf
index 905ddff..d2997ba 100644
--- a/modules/host/out.tf
+++ b/modules/host/out.tf
@@ -2,6 +2,9 @@ output "ipv4_address" {
   value = hcloud_server.server.ipv4_address
 }

+output "private_ipv4_address" {
+  value = var.ip
+}
 output "name" {
   value = hcloud_server.server.name
diff --git a/output.tf b/output.tf
index 3db1478..62e6c6f 100644
--- a/output.tf
+++ b/output.tf
@@ -1,5 +1,5 @@
 output "controlplanes_public_ip" {
-  value       = concat([module.first_control_plane.ipv4_address], module.control_planes.*.ipv4_address)
+  value       = module.control_planes.*.ipv4_address
   description = "The public IP addresses of the controlplane server."
 }
diff --git a/servers.tf b/servers.tf
index 6ca5942..a10c7d7 100644
--- a/servers.tf
+++ b/servers.tf
@@ -1,8 +1,8 @@
 module "control_planes" {
   source = "./modules/host"

-  count = var.servers_num - 1
-  name  = "k3s-control-plane-${count.index + 1}"
+  count = var.servers_num
+  name  = "k3s-control-plane-${count.index}"

   ssh_keys               = [hcloud_ssh_key.k3s.id]
   public_key             = var.public_key
@@ -12,7 +12,7 @@ module "control_planes" {
   placement_group_id     = hcloud_placement_group.k3s.id
   location               = var.location
   network_id             = hcloud_network.k3s.id
-  ip                     = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
+  ip                     = cidrhost(hcloud_network_subnet.k3s.ip_range, 257 + count.index)
   server_type            = var.control_plane_server_type

   labels = {
@@ -24,7 +24,7 @@ module "control_planes" {
 }

 resource "null_resource" "control_planes" {
-  count = var.servers_num - 1
+  count = var.servers_num

   triggers = {
     control_plane_id = module.control_planes[count.index].id
@@ -41,15 +41,15 @@ resource "null_resource" "control_planes" {
   provisioner "file" {
     content = yamlencode({
       node-name                = module.control_planes[count.index].name
-      server                   = "https://${local.first_control_plane_network_ip}:6443"
+      server                   = "https://${element(module.control_planes.*.private_ipv4_address, count.index > 0 ? 0 : 1)}:6443"
       token                    = random_password.k3s_token.result
       disable-cloud-controller = true
       disable                  = ["servicelb", "local-storage"]
       flannel-iface            = "eth1"
       kubelet-arg              = "cloud-provider=external"
-      node-ip                  = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
-      advertise-address        = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
-      tls-san                  = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
+      node-ip                  = module.control_planes[count.index].private_ipv4_address
+      advertise-address        = module.control_planes[count.index].private_ipv4_address
+      tls-san                  = module.control_planes[count.index].private_ipv4_address
       node-taint               = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
       node-label               = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
     })
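
Note on the new join-address expression in servers.tf: every control plane except the first joins the cluster through node 0, while node 0 itself points at node 1, so re-provisioning the first node after init still has a live server to join. The following is a minimal standalone sketch of how that `element(...)` ternary resolves; the IP list here is an assumed example, the real values come from module.control_planes.*.private_ipv4_address.

locals {
  # hypothetical private IPs standing in for module.control_planes.*.private_ipv4_address
  control_plane_ips = ["10.0.0.2", "10.0.0.3", "10.0.0.4"]

  # same expression shape as servers.tf: node 0 targets node 1, everyone else targets node 0
  k3s_join_addresses = [
    for i, ip in local.control_plane_ips :
    "https://${element(local.control_plane_ips, i > 0 ? 0 : 1)}:6443"
  ]
}

output "k3s_join_addresses" {
  # yields ["https://10.0.0.3:6443", "https://10.0.0.2:6443", "https://10.0.0.2:6443"]
  value = local.k3s_join_addresses
}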