From 41177769942921336eaac9a2fae2711af347ac45 Mon Sep 17 00:00:00 2001
From: Karim Naufal
Date: Sat, 9 Apr 2022 09:51:11 +0200
Subject: [PATCH] ip problem solved

---
 agents.tf         |  6 +++---
 control_planes.tf |  6 +++---
 init.tf           | 10 +++++-----
 kubeconfig.tf     |  4 ++--
 locals.tf         |  2 --
 5 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/agents.tf b/agents.tf
index 82aa17c..538f1e5 100644
--- a/agents.tf
+++ b/agents.tf
@@ -12,11 +12,11 @@ module "agents" {
   placement_group_id = hcloud_placement_group.k3s.id
   location           = each.value.location
   server_type        = each.value.server_type
-  ipv4_subnet_id     = hcloud_network_subnet.subnet[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + 2].id
+  ipv4_subnet_id     = hcloud_network_subnet.subnet[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + length(var.control_plane_nodepools) + 1].id
 
   # We leave some room so 100 eventual Hetzner LBs that can be created perfectly safely
   # It leaves the subnet with 254 x 254 - 100 = 64416 IPs to use, so probably enough.
-  private_ipv4 = cidrhost(local.network_ipv4_subnets[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + 2], each.value.index + 101)
+  private_ipv4 = cidrhost(local.network_ipv4_subnets[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + length(var.control_plane_nodepools) + 1], each.value.index + 101)
 
   labels = {
     "provisioner" = "terraform",
@@ -46,7 +46,7 @@ resource "null_resource" "agents" {
   provisioner "file" {
     content = yamlencode({
       node-name     = module.agents[each.key].name
-      server        = "https://${local.first_control_plane.private_ipv4_address}:6443"
+      server        = "https://${module.control_planes[keys(module.control_planes)[0]].private_ipv4_address}:6443"
       token         = random_password.k3s_token.result
       kubelet-arg   = "cloud-provider=external"
       flannel-iface = "eth1"
diff --git a/control_planes.tf b/control_planes.tf
index c24590a..e2e9349 100644
--- a/control_planes.tf
+++ b/control_planes.tf
@@ -12,11 +12,11 @@ module "control_planes" {
   placement_group_id = hcloud_placement_group.k3s.id
   location           = each.value.location
   server_type        = each.value.server_type
-  ipv4_subnet_id     = hcloud_network_subnet.subnet[1].id
+  ipv4_subnet_id     = hcloud_network_subnet.subnet[[for i, v in var.control_plane_nodepools : i if v.name == each.value.nodepool_name][0] + 1].id
 
   # We leave some room so 100 eventual Hetzner LBs that can be created perfectly safely
   # It leaves the subnet with 254 x 254 - 100 = 64416 IPs to use, so probably enough.
-  private_ipv4 = cidrhost(local.network_ipv4_subnets[1], length([for i, v in var.control_plane_nodepools : i if v.name == each.value.nodepool_name]) + each.value.index + 101)
+  private_ipv4 = cidrhost(local.network_ipv4_subnets[[for i, v in var.control_plane_nodepools : i if v.name == each.value.nodepool_name][0] + 1], each.value.index + 101)
 
   labels = {
     "provisioner" = "terraform",
@@ -46,7 +46,7 @@ resource "null_resource" "control_planes" {
   provisioner "file" {
     content = yamlencode({
       node-name                = module.control_planes[each.key].name
-      server                   = "https://${local.first_control_plane.private_ipv4_address}:6443"
+      server                   = "https://${module.control_planes[each.key].private_ipv4_address == "10.1.0.101" ? "10.1.0.102" : "10.1.0.101"}:6443"
       token                    = random_password.k3s_token.result
       disable-cloud-controller = true
       disable                  = local.disable_extras
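Note on the subnet arithmetic above: each nodepool gets its own subnet, with subnet 0 reserved, control-plane pools at the next indices, and agent pools after them — which is what the new "[for ...][0] + length(var.control_plane_nodepools) + 1" expression computes. Below is a minimal standalone sketch of that math under assumed sample data; the nodepool names, the example CIDR, and the example_* locals are hypothetical, not taken from this module:

    # Sketch of the index math under assumed sample data (hypothetical values).
    locals {
      control_plane_nodepools = [{ name = "control-plane-fsn1" }, { name = "control-plane-nbg1" }]
      agent_nodepools         = [{ name = "agent-small" }, { name = "agent-large" }]

      # Position of "agent-large" among the agent pools, shifted past the
      # control-plane subnets plus the one reserved leading subnet:
      example_subnet_index = [for i, v in local.agent_nodepools : i if v.name == "agent-large"][0] + length(local.control_plane_nodepools) + 1
      # => 1 + 2 + 1 = 4

      # Node IPs start at host .101, leaving ~100 low addresses for Hetzner LBs.
      # Assuming that subnet were "10.4.0.0/16", node index 0 would get:
      example_private_ipv4 = cidrhost("10.4.0.0/16", 0 + 101) # => "10.4.0.101"
    }

The hardcoded ternary in the control_planes.tf hunk serves a different purpose: a joining k3s server apparently must not be pointed at its own address, so each control plane registers against a peer — here pinned to the first two expected control-plane IPs, 10.1.0.101 and 10.1.0.102.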
"10.1.0.102" : "10.1.0.101"}:6443" token = random_password.k3s_token.result disable-cloud-controller = true disable = local.disable_extras diff --git a/init.tf b/init.tf index 884fbe6..279f837 100644 --- a/init.tf +++ b/init.tf @@ -3,21 +3,21 @@ resource "null_resource" "first_control_plane" { user = "root" private_key = local.ssh_private_key agent_identity = local.ssh_identity - host = local.first_control_plane.ipv4_address + host = module.control_planes[keys(module.control_planes)[0]].ipv4_address } # Generating k3s master config file provisioner "file" { content = yamlencode({ - node-name = local.first_control_plane.name + node-name = module.control_planes[keys(module.control_planes)[0]].name token = random_password.k3s_token.result cluster-init = true disable-cloud-controller = true disable = local.disable_extras flannel-iface = "eth1" kubelet-arg = "cloud-provider=external" - node-ip = local.first_control_plane.private_ipv4_address - advertise-address = local.first_control_plane.private_ipv4_address + node-ip = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address + advertise-address = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address node-taint = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"] node-label = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : [] }) @@ -66,7 +66,7 @@ resource "null_resource" "kustomization" { user = "root" private_key = local.ssh_private_key agent_identity = local.ssh_identity - host = local.first_control_plane.ipv4_address + host = module.control_planes[keys(module.control_planes)[0]].ipv4_address } # Upload kustomization.yaml, containing Hetzner CSI & CSM, as well as kured. diff --git a/kubeconfig.tf b/kubeconfig.tf index b4969b5..5089da6 100644 --- a/kubeconfig.tf +++ b/kubeconfig.tf @@ -1,7 +1,7 @@ data "remote_file" "kubeconfig" { conn { - host = local.first_control_plane.ipv4_address + host = module.control_planes[keys(module.control_planes)[0]].ipv4_address port = 22 user = "root" private_key = local.ssh_private_key @@ -13,7 +13,7 @@ data "remote_file" "kubeconfig" { } locals { - kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", local.first_control_plane.ipv4_address) + kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.control_planes[keys(module.control_planes)[0]].ipv4_address) kubeconfig_parsed = yamldecode(local.kubeconfig_external) kubeconfig_data = { host = local.kubeconfig_parsed["clusters"][0]["cluster"]["server"] diff --git a/locals.tf b/locals.tf index 84cc38b..42ef5ee 100644 --- a/locals.tf +++ b/locals.tf @@ -210,6 +210,4 @@ locals { # Default k3s node labels default_agent_labels = concat([], var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []) default_control_plane_labels = concat([], var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]) - - first_control_plane = module.control_planes[keys(module.control_planes)[0]] }