terraform-hcloud-kube-hetzner/main.tf

resource "random_password" "k3s_token" {
2021-07-30 10:12:37 +02:00
length = 48
special = false
}
2022-02-05 00:02:25 +01:00
resource "hcloud_ssh_key" "k3s" {
2022-03-09 05:19:06 +01:00
name = var.cluster_name
2021-11-30 23:09:34 +01:00
public_key = local.ssh_public_key
2021-07-30 10:12:37 +02:00
}
resource "hcloud_network" "k3s" {
2022-03-09 05:19:06 +01:00
name = var.cluster_name
2022-03-09 09:47:57 +01:00
ip_range = local.network_ipv4_cidr
2021-07-30 10:12:37 +02:00
}

# Control plane subnets are allocated from the end of the subnet CIDR array,
# since there are typically fewer control plane nodepools than agent ones.
resource "hcloud_network_subnet" "control_plane" {
  count = length(var.control_plane_nodepools)

  network_id   = hcloud_network.k3s.id
  type         = "cloud"
  network_zone = var.network_region
  ip_range     = local.network_ipv4_subnets[255 - count.index]
}

# Agent subnets are allocated from the beginning of the subnet CIDR array.
resource "hcloud_network_subnet" "agent" {
  count = length(var.agent_nodepools)

  network_id   = hcloud_network.k3s.id
  type         = "cloud"
  network_zone = var.network_region
  ip_range     = local.network_ipv4_subnets[count.index]
}
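
# A minimal sketch of how local.network_ipv4_subnets could be defined (the
# actual definition lives elsewhere in this module, e.g. in its locals); it is
# assumed here to be 256 equally sized subnets carved out of the network CIDR:
#
#   network_ipv4_subnets = [for index in range(256) : cidrsubnet(local.network_ipv4_cidr, 8, index)]
#
# With such a layout, agent subnets fill indexes 0, 1, 2, ... while control
# plane subnets fill indexes 255, 254, 253, ... so the two never collide.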
resource "hcloud_firewall" "k3s" {
2022-03-09 05:19:06 +01:00
name = var.cluster_name
dynamic "rule" {
for_each = concat(local.base_firewall_rules, var.extra_firewall_rules)
content {
direction = rule.value.direction
protocol = rule.value.protocol
port = lookup(rule.value, "port", null)
2022-02-28 00:07:31 +01:00
destination_ips = lookup(rule.value, "destination_ips", [])
source_ips = lookup(rule.value, "source_ips", [])
}
}
2021-07-30 10:12:37 +02:00
}
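
# A hypothetical entry for var.extra_firewall_rules, to illustrate the shape
# the dynamic block above expects (direction and protocol are required, the
# rest is optional thanks to lookup()):
#
#   extra_firewall_rules = [
#     {
#       direction  = "in"
#       protocol   = "tcp"
#       port       = "8080"
#       source_ips = ["0.0.0.0/0", "::/0"]
#     }
#   ]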

resource "hcloud_placement_group" "control_plane" {
  count = ceil(local.control_plane_count / 10)
  name  = "${var.cluster_name}-control-plane-${count.index + 1}"
  type  = "spread"
}

resource "hcloud_placement_group" "agent" {
  count = ceil(local.agent_count / 10)
  name  = "${var.cluster_name}-agent-${count.index + 1}"
  type  = "spread"
}
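
# The divisions by 10 above reflect the Hetzner Cloud limit of 10 servers per
# spread placement group: one group is created for every 10 nodes of each
# role, rounded up.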

data "hcloud_load_balancer" "traefik" {
  count = local.is_single_node_cluster ? 0 : var.traefik_enabled == false ? 0 : 1

  name       = "${var.cluster_name}-traefik"
  depends_on = [null_resource.kustomization]
}
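
# The data source above looks up a load balancer that is created outside of
# Terraform (presumably by the Hetzner Cloud Controller Manager for the
# Traefik Service of type LoadBalancer, once the kustomization has been
# applied), which is why it depends on null_resource.kustomization and is
# skipped for single-node clusters or when Traefik is disabled.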

resource "null_resource" "destroy_traefik_loadbalancer" {
  # This only gets triggered right before total destruction of the cluster,
  # while the elements needed to run the command below are still available.
  triggers = {
    kustomization_id = null_resource.kustomization.id
  }

  # Important when issuing terraform destroy: otherwise the load balancer
  # would prevent the network from being deleted.
  provisioner "local-exec" {
    when       = destroy
    command    = <<-EOT
      kubectl -n kube-system delete service traefik --kubeconfig ${path.module}/kubeconfig.yaml
    EOT
    on_failure = continue
  }

  depends_on = [
    local_sensitive_file.kubeconfig,
    null_resource.control_planes[0],
    hcloud_network_subnet.control_plane,
    hcloud_network_subnet.agent,
    hcloud_placement_group.control_plane,
    hcloud_placement_group.agent,
    hcloud_network.k3s,
    hcloud_firewall.k3s,
    hcloud_ssh_key.k3s
  ]
}