Merge pull request #92 from kube-hetzner/control-plane-refactor
First control plane node is not special anymore
commit 43223e1ffc
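For context, the gist of the refactor: the dedicated module "first_control_plane" block is removed, and every control plane, including the former first one, is now created by the single counted module "control_planes"; "the first" node is simply index 0 of that list. A minimal sketch of the resulting pattern (argument list abbreviated, not the full module interface):

# Sketch only: all control planes come from one counted module.
module "control_planes" {
  source = "./modules/host"

  count = var.servers_num
  name  = "k3s-control-plane-${count.index}"
  # ... remaining host arguments unchanged ...
}

# Anything that used to reference module.first_control_plane now uses index 0,
# e.g. module.control_planes[0].ipv4_address or
# module.control_planes[0].private_ipv4_address.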
@@ -1,54 +1,24 @@
-module "first_control_plane" {
-  source = "./modules/host"
-
-  name = "k3s-control-plane-0"
-
-  ssh_keys               = [hcloud_ssh_key.k3s.id]
-  public_key             = var.public_key
-  private_key            = var.private_key
-  additional_public_keys = var.additional_public_keys
-  firewall_ids           = [hcloud_firewall.k3s.id]
-  placement_group_id     = hcloud_placement_group.k3s.id
-  location               = var.location
-  network_id             = hcloud_network.k3s.id
-  ip                     = local.first_control_plane_network_ip
-  server_type            = var.control_plane_server_type
-
-  labels = {
-    "provisioner" = "terraform",
-    "engine"      = "k3s"
-  }
-
-  hcloud_token = var.hcloud_token
-}
-
 resource "null_resource" "first_control_plane" {
-  triggers = {
-    first_control_plane_id = module.first_control_plane.id
-  }
 
   connection {
     user           = "root"
     private_key    = local.ssh_private_key
     agent_identity = local.ssh_identity
-    host           = module.first_control_plane.ipv4_address
+    host           = module.control_planes[0].ipv4_address
   }
 
   # Generating k3s master config file
   provisioner "file" {
     content = yamlencode({
-      node-name                = module.first_control_plane.name
+      node-name                = module.control_planes[0].name
       token                    = random_password.k3s_token.result
       cluster-init             = true
       disable-cloud-controller = true
       disable                  = ["servicelb", "local-storage"]
       flannel-iface            = "eth1"
       kubelet-arg              = "cloud-provider=external"
-      node-ip                  = local.first_control_plane_network_ip
-      advertise-address        = local.first_control_plane_network_ip
-      tls-san                  = local.first_control_plane_network_ip
+      node-ip                  = module.control_planes[0].private_ipv4_address
+      advertise-address        = module.control_planes[0].private_ipv4_address
+      tls-san                  = module.control_planes[0].private_ipv4_address
       node-taint               = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
       node-label               = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
     })
@@ -87,6 +57,20 @@ resource "null_resource" "first_control_plane" {
     ]
   }
 
+  depends_on = [
+    hcloud_network_subnet.k3s,
+    hcloud_firewall.k3s
+  ]
+}
+
+resource "null_resource" "kustomization" {
+  connection {
+    user           = "root"
+    private_key    = local.ssh_private_key
+    agent_identity = local.ssh_identity
+    host           = module.control_planes[0].ipv4_address
+  }
+
   # Upload kustomization.yaml, containing Hetzner CSI & CSM, as well as kured.
   provisioner "file" {
     content = yamlencode({
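Note: the kustomization bootstrap (the Hetzner secrets, kustomization.yaml and kured) is split out of null_resource.first_control_plane into its own null_resource "kustomization", which opens the same SSH connection to module.control_planes[0]; further down in this diff, data.hcloud_load_balancer.traefik is re-pointed from null_resource.first_control_plane to null_resource.kustomization.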
@@ -136,8 +120,8 @@ resource "null_resource" "first_control_plane" {
   provisioner "remote-exec" {
     inline = [
      "set -ex",
-      "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name}",
-      "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token}",
+      "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name} --dry-run=client -o yaml | kubectl apply -f -",
+      "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token} --dry-run=client -o yaml | kubectl apply -f -",
     ]
   }
 
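Side note on the two secret commands above: piping kubectl create secret ... --dry-run=client -o yaml into kubectl apply -f - turns the step into a create-or-update, so re-running the provisioner against an existing cluster no longer fails because the secrets already exist.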
@@ -170,7 +154,6 @@ resource "null_resource" "first_control_plane" {
   }
 
   depends_on = [
-    hcloud_network_subnet.k3s,
-    hcloud_firewall.k3s
+    null_resource.first_control_plane
   ]
 }
@@ -1,7 +1,7 @@
 data "remote_file" "kubeconfig" {
   conn {
-    host        = module.first_control_plane.ipv4_address
+    host        = module.control_planes[0].ipv4_address
     port        = 22
     user        = "root"
     private_key = local.ssh_private_key
@@ -9,11 +9,11 @@ data "remote_file" "kubeconfig" {
   }
   path = "/etc/rancher/k3s/k3s.yaml"
 
-  depends_on = [null_resource.first_control_plane]
+  depends_on = [null_resource.control_planes[0]]
 }
 
 locals {
-  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.first_control_plane.ipv4_address)
+  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.control_planes[0].ipv4_address)
   kubeconfig_parsed   = yamldecode(local.kubeconfig_external)
   kubeconfig_data = {
     host = local.kubeconfig_parsed["clusters"][0]["cluster"]["server"]
@@ -1,5 +1,5 @@
 locals {
-  first_control_plane_network_ip = cidrhost(hcloud_network_subnet.k3s.ip_range, 257)
+  first_control_plane_network_ip = module.control_planes[0].private_ipv4_address
 
   ssh_public_key = trimspace(file(var.public_key))
   # ssh_private_key is either the contents of var.private_key or null to use a ssh agent.
@@ -22,7 +22,9 @@ locals {
     # prepare the k3s config directory
     "mkdir -p /etc/rancher/k3s",
     # move the config file into place
-    "mv /tmp/config.yaml /etc/rancher/k3s/config.yaml"
+    "mv /tmp/config.yaml /etc/rancher/k3s/config.yaml",
+    # if the server has already been initialized just stop here
+    "[ -e /etc/rancher/k3s/k3s.yaml ] && exit 0",
   ]
 
   install_k3s_server = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SKIP_START=true INSTALL_K3S_EXEC=server sh -"])
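The two added lines make the shared install commands idempotent: if /etc/rancher/k3s/k3s.yaml is already present the node is treated as initialized and the script exits before re-running the k3s installer.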
main.tf (2 changes)
@@ -156,5 +156,5 @@ resource "hcloud_placement_group" "k3s" {
 data "hcloud_load_balancer" "traefik" {
   name = "traefik"
 
-  depends_on = [null_resource.first_control_plane]
+  depends_on = [null_resource.kustomization]
 }
@@ -2,6 +2,9 @@ output "ipv4_address" {
   value = hcloud_server.server.ipv4_address
 }
 
+output "private_ipv4_address" {
+  value = var.ip
+}
+
 output "name" {
   value = hcloud_server.server.name
@@ -1,5 +1,5 @@
 output "controlplanes_public_ip" {
-  value       = concat([module.first_control_plane.ipv4_address], module.control_planes.*.ipv4_address)
+  value       = module.control_planes.*.ipv4_address
   description = "The public IP addresses of the controlplane server."
 }
 
servers.tf (16 changes)
@@ -1,8 +1,8 @@
 module "control_planes" {
   source = "./modules/host"
 
-  count = var.servers_num - 1
-  name  = "k3s-control-plane-${count.index + 1}"
+  count = var.servers_num
+  name  = "k3s-control-plane-${count.index}"
 
   ssh_keys   = [hcloud_ssh_key.k3s.id]
   public_key = var.public_key
@@ -12,7 +12,7 @@ module "control_planes" {
   placement_group_id = hcloud_placement_group.k3s.id
   location           = var.location
   network_id         = hcloud_network.k3s.id
-  ip                 = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
+  ip                 = cidrhost(hcloud_network_subnet.k3s.ip_range, 257 + count.index)
   server_type        = var.control_plane_server_type
 
   labels = {
@@ -24,7 +24,7 @@ module "control_planes" {
 }
 
 resource "null_resource" "control_planes" {
-  count = var.servers_num - 1
+  count = var.servers_num
 
   triggers = {
     control_plane_id = module.control_planes[count.index].id
@@ -41,15 +41,15 @@ resource "null_resource" "control_planes" {
   provisioner "file" {
     content = yamlencode({
       node-name                = module.control_planes[count.index].name
-      server                   = "https://${local.first_control_plane_network_ip}:6443"
+      server                   = "https://${element(module.control_planes.*.private_ipv4_address, count.index > 0 ? 0 : 1)}:6443"
       token                    = random_password.k3s_token.result
       disable-cloud-controller = true
       disable                  = ["servicelb", "local-storage"]
       flannel-iface            = "eth1"
       kubelet-arg              = "cloud-provider=external"
-      node-ip                  = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
-      advertise-address        = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
-      tls-san                  = cidrhost(hcloud_network_subnet.k3s.ip_range, 258 + count.index)
+      node-ip                  = module.control_planes[count.index].private_ipv4_address
+      advertise-address        = module.control_planes[count.index].private_ipv4_address
+      tls-san                  = module.control_planes[count.index].private_ipv4_address
       node-taint               = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
       node-label               = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
     })
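The join-address expression above is the key to "no node is special": every control plane joins the cluster through another control plane's private IP, with node 0 pointing at node 1 and every other node pointing at node 0. A small sketch of how the element(...) expression evaluates, using made-up example IPs:

# Sketch only, hypothetical values for three control planes (indices 0..2).
locals {
  cp_private_ips = ["10.0.1.1", "10.0.1.2", "10.0.1.3"]

  # Same selection rule as the diff: index 0 joins via node 1, everyone else via node 0.
  join_ips = [for i in range(3) : element(local.cp_private_ips, i > 0 ? 0 : 1)]
  # join_ips == ["10.0.1.2", "10.0.1.1", "10.0.1.1"]
}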