ip problem solved

parent 65297f7ded
commit 4117776994
@@ -12,11 +12,11 @@ module "agents" {
   placement_group_id = hcloud_placement_group.k3s.id
   location = each.value.location
   server_type = each.value.server_type
-  ipv4_subnet_id = hcloud_network_subnet.subnet[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + 2].id
+  ipv4_subnet_id = hcloud_network_subnet.subnet[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + length(var.control_plane_nodepools) + 1].id

   # We leave some room so 100 eventual Hetzner LBs that can be created perfectly safely
   # It leaves the subnet with 254 x 254 - 100 = 64416 IPs to use, so probably enough.
-  private_ipv4 = cidrhost(local.network_ipv4_subnets[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + 2], each.value.index + 101)
+  private_ipv4 = cidrhost(local.network_ipv4_subnets[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + length(var.control_plane_nodepools) + 1], each.value.index + 101)

   labels = {
     "provisioner" = "terraform",
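A note on the new index arithmetic (a sketch, not part of the commit): subnet index 0 stays reserved (the control planes start at index 1), each control-plane nodepool now gets its own subnet, and agent pools are shifted past all of them instead of past a single hard-coded one. The following can be checked in terraform console, with assumed nodepool values:

locals {
  control_plane_nodepools = [{ name = "cp-a" }, { name = "cp-b" }] # assumed
  agent_nodepools         = [{ name = "small" }, { name = "big" }] # assumed

  # Positional index of the agent pool named "big" -> 1
  agent_pool_index = [for i, v in local.agent_nodepools : i if v.name == "big"][0]

  # One reserved subnet, then one subnet per control-plane pool, then the agents:
  agent_subnet_index = local.agent_pool_index + length(local.control_plane_nodepools) + 1 # => 4
}

With a single control-plane nodepool this reduces to the old + 2, which is presumably why the bug only surfaced with more than one pool.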
@@ -46,7 +46,7 @@ resource "null_resource" "agents" {
   provisioner "file" {
     content = yamlencode({
       node-name = module.agents[each.key].name
-      server = "https://${local.first_control_plane.private_ipv4_address}:6443"
+      server = "https://${module.control_planes[keys(module.control_planes)[0]].private_ipv4_address}:6443"
       token = random_password.k3s_token.result
       kubelet-arg = "cloud-provider=external"
       flannel-iface = "eth1"
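The local.first_control_plane shorthand is gone (it is dropped from locals at the bottom of this diff), so call sites now index the module map directly. keys() returns map keys in lexical order, which makes [0] deterministic across runs. An illustration with an assumed instance map (the key names are hypothetical):

locals {
  control_planes_example = {
    "k3s-control-plane-cp-0" = { private_ipv4_address = "10.1.0.101" }
    "k3s-control-plane-cp-1" = { private_ipv4_address = "10.2.0.101" }
  }
  # keys() sorts lexically, so index 0 is stable across runs.
  first_cp_ip = local.control_planes_example[keys(local.control_planes_example)[0]].private_ipv4_address # "10.1.0.101"
}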
@@ -12,11 +12,11 @@ module "control_planes" {
   placement_group_id = hcloud_placement_group.k3s.id
   location = each.value.location
   server_type = each.value.server_type
-  ipv4_subnet_id = hcloud_network_subnet.subnet[1].id
+  ipv4_subnet_id = hcloud_network_subnet.subnet[[for i, v in var.control_plane_nodepools : i if v.name == each.value.nodepool_name][0] + 1].id

   # We leave some room so 100 eventual Hetzner LBs that can be created perfectly safely
   # It leaves the subnet with 254 x 254 - 100 = 64416 IPs to use, so probably enough.
-  private_ipv4 = cidrhost(local.network_ipv4_subnets[1], length([for i, v in var.control_plane_nodepools : i if v.name == each.value.nodepool_name]) + each.value.index + 101)
+  private_ipv4 = cidrhost(local.network_ipv4_subnets[[for i, v in var.control_plane_nodepools : i if v.name == each.value.nodepool_name][0] + 1], each.value.index + 101)

   labels = {
     "provisioner" = "terraform",
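This hunk is most likely the "ip problem" of the commit title: in the old expression every control-plane pool read from network_ipv4_subnets[1], and length([for ... if v.name == each.value.nodepool_name]) counts the pools matching the name, which is normally 1, so node 0 of every pool resolved to the same host. A reproduction sketch with assumed subnet values:

locals {
  old_subnet = "10.1.0.0/24" # assumed

  # Old formula: 1 (pools matching the name) + node index 0 + 101, for *every* pool.
  old_pool_a_node0 = cidrhost(local.old_subnet, 1 + 0 + 101) # 10.1.0.102
  old_pool_b_node0 = cidrhost(local.old_subnet, 1 + 0 + 101) # 10.1.0.102 — duplicate IP

  # New formula: the pool's positional index selects a distinct subnet.
  new_pool_a_node0 = cidrhost("10.1.0.0/24", 0 + 101) # 10.1.0.101
  new_pool_b_node0 = cidrhost("10.2.0.0/24", 0 + 101) # 10.2.0.101
}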
@@ -46,7 +46,7 @@ resource "null_resource" "control_planes" {
   provisioner "file" {
     content = yamlencode({
       node-name = module.control_planes[each.key].name
-      server = "https://${local.first_control_plane.private_ipv4_address}:6443"
+      server = "https://${module.control_planes[each.key].private_ipv4_address == "10.1.0.101" ? "10.1.0.102" : "10.1.0.101"}:6443"
      token = random_password.k3s_token.result
      disable-cloud-controller = true
      disable = local.disable_extras
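The ternary keeps a control plane from pointing k3s at itself: the node that owns 10.1.0.101 (presumably node 0 of the first pool under the new addressing) joins via 10.1.0.102, and every other node joins via 10.1.0.101. The same logic with the addresses as assumed literals:

locals {
  node_ip     = "10.1.0.101" # assumed: this node's private address
  join_server = "https://${local.node_ip == "10.1.0.101" ? "10.1.0.102" : "10.1.0.101"}:6443"
}

Hard-coding both addresses is fragile if the subnet layout changes again; deriving them from network_ipv4_subnets would be more robust, but that is beyond the scope of this commit.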
init.tf
@@ -3,21 +3,21 @@ resource "null_resource" "first_control_plane" {
     user = "root"
     private_key = local.ssh_private_key
     agent_identity = local.ssh_identity
-    host = local.first_control_plane.ipv4_address
+    host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
   }

   # Generating k3s master config file
   provisioner "file" {
     content = yamlencode({
-      node-name = local.first_control_plane.name
+      node-name = module.control_planes[keys(module.control_planes)[0]].name
       token = random_password.k3s_token.result
       cluster-init = true
       disable-cloud-controller = true
       disable = local.disable_extras
       flannel-iface = "eth1"
       kubelet-arg = "cloud-provider=external"
-      node-ip = local.first_control_plane.private_ipv4_address
-      advertise-address = local.first_control_plane.private_ipv4_address
+      node-ip = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
+      advertise-address = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
       node-taint = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
       node-label = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
     })
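For reference, this is roughly what the yamlencode() call renders for the first control plane — illustrative only, with an assumed node name and address:

locals {
  k3s_server_config = yamlencode({
    node-name         = "k3s-control-plane-0" # assumed
    cluster-init      = true
    flannel-iface     = "eth1"
    kubelet-arg       = "cloud-provider=external"
    node-ip           = "10.1.0.101" # assumed
    advertise-address = "10.1.0.101"
  })
}

node-ip and advertise-address both use the private address so the API server is advertised on the private network that flannel (eth1) runs over.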
@@ -66,7 +66,7 @@ resource "null_resource" "kustomization" {
     user = "root"
     private_key = local.ssh_private_key
     agent_identity = local.ssh_identity
-    host = local.first_control_plane.ipv4_address
+    host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
   }

   # Upload kustomization.yaml, containing Hetzner CSI & CSM, as well as kured.
@@ -1,7 +1,7 @@

 data "remote_file" "kubeconfig" {
   conn {
-    host = local.first_control_plane.ipv4_address
+    host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
     port = 22
     user = "root"
     private_key = local.ssh_private_key
@@ -13,7 +13,7 @@ data "remote_file" "kubeconfig" {
 }

 locals {
-  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", local.first_control_plane.ipv4_address)
+  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.control_planes[keys(module.control_planes)[0]].ipv4_address)
   kubeconfig_parsed = yamldecode(local.kubeconfig_external)
   kubeconfig_data = {
     host = local.kubeconfig_parsed["clusters"][0]["cluster"]["server"]
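The kubeconfig fetched from the node points at 127.0.0.1; replace() swaps in the first control plane's public address so the cluster is reachable from outside. A minimal illustration with an assumed address and a one-line stand-in for the file content:

locals {
  kubeconfig_raw      = "server: https://127.0.0.1:6443" # assumed stand-in
  kubeconfig_external = replace(local.kubeconfig_raw, "127.0.0.1", "1.2.3.4")
  # => "server: https://1.2.3.4:6443"
}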
@@ -210,6 +210,4 @@ locals {
   # Default k3s node labels
   default_agent_labels = concat([], var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : [])
   default_control_plane_labels = concat([], var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"])
-
-  first_control_plane = module.control_planes[keys(module.control_planes)[0]]
 }
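With every call site inlined in the hunks above, the first_control_plane local would have been dead code, hence its removal here. One consequence worth noting: each consumer now repeats module.control_planes[keys(module.control_planes)[0]], so any future change to how the first node is selected has to be applied in several files.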