wip
parent 4e3f34585e
commit 5e4d82fd95
@@ -1,7 +1,8 @@
 module "control_planes" {
   source = "./modules/host"
 
-  count = var.control_plane_count
+  for_each = local.control_plane_nodepools
+
   name = "${var.use_cluster_name_in_node_name ? "${var.cluster_name}-" : ""}control-plane"
   ssh_keys = [hcloud_ssh_key.k3s.id]
   public_key = var.public_key
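
Note: switching from count to for_each keys each node by a stable string instead of a list position, so removing one nodepool no longer shifts the indexes of, and thereby recreates, the remaining servers. A minimal sketch of the map shape this for_each consumes, with hypothetical names and values (the real map is built in locals.tf further down):

    locals {
      # Keys follow format("%s-%s-%s", pool_index, node_index, nodepool_name).
      control_plane_nodepools_example = {
        "0-0-cp-fsn1" = {
          nodepool_name = "cp-fsn1"
          server_type   = "cpx21"
          location      = "fsn1"
          labels        = []
          taints        = []
          index         = 0
        }
      }
    }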
@@ -9,13 +10,13 @@ module "control_planes" {
   additional_public_keys = var.additional_public_keys
   firewall_ids = [hcloud_firewall.k3s.id]
   placement_group_id = hcloud_placement_group.k3s.id
-  location = var.location
-  server_type = var.control_plane_server_type
+  location = each.value.location
+  server_type = each.value.server_type
   ipv4_subnet_id = hcloud_network_subnet.subnet[1].id
 
   # We leave some room so 100 eventual Hetzner LBs that can be created perfectly safely
   # It leaves the subnet with 254 x 254 - 100 = 64416 IPs to use, so probably enough.
-  private_ipv4 = cidrhost(local.network_ipv4_subnets[1], count.index + 101)
+  private_ipv4 = cidrhost(local.network_ipv4_subnets[1], length([for i, v in var.control_plane_nodepools : i if v.name == each.value.nodepool_name]) + each.value.index + 101)
 
   labels = {
     "provisioner" = "terraform",
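
Note: cidrhost(prefix, hostnum) returns the hostnum-th host address inside a prefix, so the + 101 offset skips the first 100 addresses reserved for the Hetzner load balancers mentioned in the comment. A small sketch, with a hypothetical subnet value:

    locals {
      # cidrhost("10.1.0.0/16", 101) evaluates to "10.1.0.101".
      example_private_ip = cidrhost("10.1.0.0/16", 101)
    }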
@@ -28,33 +29,33 @@ module "control_planes" {
 }
 
 resource "null_resource" "control_planes" {
-  count = var.control_plane_count
+  for_each = local.control_plane_nodepools
 
   triggers = {
-    control_plane_id = module.control_planes[count.index].id
+    control_plane_id = module.control_planes[each.key].id
   }
 
   connection {
     user = "root"
     private_key = local.ssh_private_key
     agent_identity = local.ssh_identity
-    host = module.control_planes[count.index].ipv4_address
+    host = module.control_planes[each.key].ipv4_address
   }
 
   # Generating k3s server config file
   provisioner "file" {
     content = yamlencode({
-      node-name = module.control_planes[count.index].name
-      server = "https://${element(module.control_planes.*.private_ipv4_address, count.index > 0 ? 0 : 1)}:6443"
+      node-name = module.control_planes[each.key].name
+      server = "https://${local.first_control_plane.private_ipv4_address}:6443"
       token = random_password.k3s_token.result
       disable-cloud-controller = true
       disable = local.disable_extras
       flannel-iface = "eth1"
       kubelet-arg = "cloud-provider=external"
-      node-ip = module.control_planes[count.index].private_ipv4_address
-      advertise-address = module.control_planes[count.index].private_ipv4_address
-      node-taint = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
-      node-label = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
+      node-ip = module.control_planes[each.key].private_ipv4_address
+      advertise-address = module.control_planes[each.key].private_ipv4_address
+      node-label = each.value.labels
+      node-taint = each.value.taints
     })
     destination = "/tmp/config.yaml"
   }
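
Note: the old server URL pointed node 0 at node 1 and every other node at node 0, via element() over a list splat. Every joining server now registers against one stable endpoint instead. A sketch of the supporting local, as defined later in this commit in locals.tf:

    first_control_plane = module.control_planes[keys(module.control_planes)[0]]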

init.tf (12 changed lines)
@@ -3,21 +3,21 @@ resource "null_resource" "first_control_plane" {
     user = "root"
     private_key = local.ssh_private_key
     agent_identity = local.ssh_identity
-    host = module.control_planes[0].ipv4_address
+    host = local.first_control_plane.ipv4_address
   }
 
   # Generating k3s master config file
   provisioner "file" {
     content = yamlencode({
-      node-name = module.control_planes[0].name
+      node-name = local.first_control_plane.name
       token = random_password.k3s_token.result
       cluster-init = true
       disable-cloud-controller = true
       disable = local.disable_extras
       flannel-iface = "eth1"
       kubelet-arg = "cloud-provider=external"
-      node-ip = module.control_planes[0].private_ipv4_address
-      advertise-address = module.control_planes[0].private_ipv4_address
+      node-ip = local.first_control_plane.private_ipv4_address
+      advertise-address = local.first_control_plane.private_ipv4_address
       node-taint = var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
       node-label = var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : []
     })
@@ -66,7 +66,7 @@ resource "null_resource" "kustomization" {
     user = "root"
     private_key = local.ssh_private_key
     agent_identity = local.ssh_identity
-    host = module.control_planes[0].ipv4_address
+    host = local.first_control_plane.ipv4_address
   }
 
   # Upload kustomization.yaml, containing Hetzner CSI & CSM, as well as kured.
@@ -97,7 +97,7 @@ resource "null_resource" "kustomization" {
     name = "${var.cluster_name}-traefik"
     load_balancer_disable_ipv6 = var.load_balancer_disable_ipv6
     load_balancer_type = var.load_balancer_type
-    location = var.location
+    location = var.load_balancer_location
     traefik_acme_tls = var.traefik_acme_tls
     traefik_acme_email = var.traefik_acme_email
     traefik_additional_options = var.traefik_additional_options

@@ -1,7 +1,7 @@
 
 data "remote_file" "kubeconfig" {
   conn {
-    host = module.control_planes[0].ipv4_address
+    host = local.first_control_plane.ipv4_address
     port = 22
     user = "root"
     private_key = local.ssh_private_key
@@ -13,7 +13,7 @@ data "remote_file" "kubeconfig" {
 }
 
 locals {
-  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", module.control_planes[0].ipv4_address)
+  kubeconfig_external = replace(data.remote_file.kubeconfig.content, "127.0.0.1", local.first_control_plane.ipv4_address)
   kubeconfig_parsed = yamldecode(local.kubeconfig_external)
   kubeconfig_data = {
     host = local.kubeconfig_parsed["clusters"][0]["cluster"]["server"]

locals.tf (34 changed lines)
@@ -1,6 +1,6 @@
 locals {
   # if we are in a single cluster config, we use the default klipper lb instead of Hetzner LB
-  is_single_node_cluster = var.control_plane_count + sum(concat([for v in var.agent_nodepools : v.count], [0])) == 1
+  is_single_node_cluster = sum(concat([for v in var.control_plane_nodepools : v.count], [0])) + sum(concat([for v in var.agent_nodepools : v.count], [0])) == 1
   ssh_public_key = trimspace(file(var.public_key))
   # ssh_private_key is either the contents of var.private_key or null to use a ssh agent.
   ssh_private_key = var.private_key == null ? null : trimspace(file(var.private_key))
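
Note: sum() fails on an empty list, which is why each count list is padded with concat(..., [0]). As a hypothetical check, one control plane pool of count 1 and no agent pools reduces to:

    locals {
      # sum(concat([1], [0])) + sum(concat([], [0])) == 1 + 0 == 1, so true:
      example_is_single_node = sum(concat([1], [0])) + sum(concat([], [0])) == 1
    }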
@@ -169,16 +169,30 @@ locals {
   install_k3s_server = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC=server sh -"], local.apply_k3s_selinux)
   install_k3s_agent = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC=agent sh -"], local.apply_k3s_selinux)
 
-  agent_nodepools = merge([
-    for nodepool_obj in var.agent_nodepools : {
-      for index in range(nodepool_obj.count) :
-      format("%s-%s", nodepool_obj.name, index) => {
+  control_plane_nodepools = merge([
+    for pool_index, nodepool_obj in var.control_plane_nodepools : {
+      for node_index in range(nodepool_obj.count) :
+      format("%s-%s-%s", pool_index, node_index, nodepool_obj.name) => {
         nodepool_name : nodepool_obj.name,
         server_type : nodepool_obj.server_type,
         location : nodepool_obj.location,
-        labels : concat(local.default_labels, nodepool_obj.labels),
+        labels : concat(local.default_control_plane_labels, nodepool_obj.labels),
         taints : nodepool_obj.taints,
-        index : index
+        index : node_index
+      }
+    }
+  ]...)
+
+  agent_nodepools = merge([
+    for pool_index, nodepool_obj in var.agent_nodepools : {
+      for node_index in range(nodepool_obj.count) :
+      format("%s-%s-%s", pool_index, node_index, nodepool_obj.name) => {
+        nodepool_name : nodepool_obj.name,
+        server_type : nodepool_obj.server_type,
+        location : nodepool_obj.location,
+        labels : concat(local.default_agent_labels, nodepool_obj.labels),
+        taints : nodepool_obj.taints,
+        index : node_index
       }
     }
   ]...)
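
Note: the merge([ for ... ]...) idiom builds one map per nodepool and then spreads that list into merge() with the ... operator, flattening everything into a single map keyed per node. A self-contained sketch with hypothetical pools:

    locals {
      example_pools = [
        { name = "cp-fsn1", count = 2 },
        { name = "cp-nbg1", count = 1 },
      ]
      example_flattened = merge([
        for pool_index, pool in local.example_pools : {
          for node_index in range(pool.count) :
          format("%s-%s-%s", pool_index, node_index, pool.name) => {
            nodepool_name = pool.name
            index         = node_index
          }
        }
      ]...)
      # => keys "0-0-cp-fsn1", "0-1-cp-fsn1", "1-0-cp-nbg1"
    }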
@@ -193,7 +207,9 @@ locals {
   # disable k3s extras
   disable_extras = concat(["local-storage"], local.is_single_node_cluster ? [] : ["servicelb"], var.traefik_enabled ? [] : ["traefik"], var.metrics_server_enabled ? [] : ["metrics-server"])
 
 
   # Default k3s node labels
-  default_labels = concat([], var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : [])
+  default_agent_labels = concat([], var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : [])
+  default_control_plane_labels = concat([], var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"])
+  first_control_plane = module.control_planes[keys(module.control_planes)[0]]
 }
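
Note: keys() returns map keys in lexical order, and every key starts with its pool index, so keys(module.control_planes)[0] deterministically picks the first node of the first control plane nodepool:

    locals {
      # Lexically, "0-0-cp-fsn1" < "0-1-cp-fsn1" < "1-0-cp-nbg1" (names hypothetical).
      example_first_key = keys({ "0-0-cp-fsn1" = 1, "0-1-cp-fsn1" = 2, "1-0-cp-nbg1" = 3 })[0]
    }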

@@ -4,7 +4,9 @@ output "cluster_name" {
 }
 
 output "control_planes_public_ipv4" {
-  value = module.control_planes.*.ipv4_address
+  value = [
+    for obj in module.control_planes : obj.ipv4_address
+  ]
   description = "The public IPv4 addresses of the controlplane server."
 }
 
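
Note: with for_each, module.control_planes is a map of objects rather than a list, so the old list splat module.control_planes.*.ipv4_address no longer type-checks; the for expression above iterates the map values instead. An equivalent sketch using values() (output name hypothetical):

    output "control_planes_public_ipv4_alt" {
      # values() turns the map into a list, which a splat can then traverse.
      value = values(module.control_planes)[*].ipv4_address
    }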

variables.tf (20 changed lines)
@@ -20,26 +20,16 @@ variable "additional_public_keys" {
   default = []
 }
 
-variable "location" {
-  description = "Default server location"
-  type = string
-}
-
 variable "network_region" {
   description = "Default region for network"
   type = string
 }
 
-variable "control_plane_server_type" {
-  description = "Default control plane server type"
+variable "load_balancer_location" {
+  description = "Default load balancer location"
   type = string
 }
 
-variable "control_plane_count" {
-  description = "Number of control plane nodes."
-  type = number
-}
-
 variable "load_balancer_type" {
   description = "Default load balancer server type"
   type = string
@@ -51,6 +41,12 @@ variable "load_balancer_disable_ipv6" {
   default = false
 }
 
+variable "control_plane_nodepools" {
+  description = "Number of control plane nodes."
+  type = list(any)
+  default = []
+}
+
 variable "agent_nodepools" {
   description = "Number of agent nodes."
   type = list(any)
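
Note: a hypothetical terraform.tfvars entry for the new variable, using the fields that locals.tf consumes (name, server_type, location, labels, taints, count); all values illustrative:

    control_plane_nodepools = [
      {
        name        = "control-plane-fsn1"
        server_type = "cpx21"
        location    = "fsn1"
        labels      = []
        taints      = []
        count       = 3
      }
    ]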