# terraform-hcloud-kube-hetzner — agents.tf
# Agent nodepool provisioning and k3s agent bootstrap.
# Provisions one Hetzner Cloud server per agent nodepool via the shared host module.
module "agents" {
  source = "./modules/host"

  for_each = local.agent_nodepools

  # Optionally prefix the node name with the cluster name (controlled by
  # var.use_cluster_name_in_node_name).
  name = "${var.use_cluster_name_in_node_name ? "${var.cluster_name}-" : ""}${each.value.nodepool_name}"

  ssh_keys               = [hcloud_ssh_key.k3s.id]
  public_key             = var.public_key
  private_key            = var.private_key
  additional_public_keys = var.additional_public_keys
  firewall_ids           = [hcloud_firewall.k3s.id]
  placement_group_id     = hcloud_placement_group.k3s.id
  location               = each.value.location
  server_type            = each.value.server_type

  # Subnet index = position of this nodepool within var.agent_nodepools,
  # offset past the control-plane nodepool subnets, +1 to skip the first subnet.
  ipv4_subnet_id = hcloud_network_subnet.subnet[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + length(var.control_plane_nodepools) + 1].id

  # We leave some room so 100 eventual Hetzner LBs that can be created perfectly safely
  # It leaves the subnet with 254 x 254 - 100 = 64416 IPs to use, so probably enough.
  private_ipv4 = cidrhost(local.network_ipv4_subnets[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0] + length(var.control_plane_nodepools) + 1], each.value.index + 101)

  labels = {
    "provisioner" = "terraform",
    "engine"      = "k3s"
  }

  depends_on = [
    hcloud_network_subnet.subnet
  ]
}
2022-02-06 08:40:51 +01:00
2022-02-19 13:38:24 +01:00
# Configures and starts the k3s agent on every provisioned agent node over SSH.
# Re-runs whenever the underlying server (module.agents) is replaced.
resource "null_resource" "agents" {
  for_each = local.agent_nodepools

  triggers = {
    agent_id = module.agents[each.key].id
  }

  connection {
    user           = "root"
    private_key    = local.ssh_private_key
    agent_identity = local.ssh_identity
    host           = module.agents[each.key].ipv4_address
  }

  # Generating k3s agent config file
  provisioner "file" {
    content = yamlencode({
      node-name = module.agents[each.key].name
      # The agent joins via the first control plane's private IP.
      server        = "https://${module.control_planes[keys(module.control_planes)[0]].private_ipv4_address}:6443"
      token         = random_password.k3s_token.result
      kubelet-arg   = "cloud-provider=external"
      flannel-iface = "eth1"
      node-ip       = module.agents[each.key].private_ipv4_address
      node-label    = each.value.labels
      node-taint    = each.value.taints
    })
    destination = "/tmp/config.yaml"
  }

  # Install k3s agent
  provisioner "remote-exec" {
    inline = local.install_k3s_agent
  }

  # Start the k3s agent and wait for it to have started
  provisioner "remote-exec" {
    inline = [
      "systemctl start k3s-agent 2> /dev/null",
      <<-EOT
      timeout 120 bash <<EOF
      until systemctl status k3s-agent > /dev/null; do
        systemctl start k3s-agent 2> /dev/null
        echo "Waiting for the k3s agent to start..."
        sleep 2
      done
      EOF
      EOT
    ]
  }

  depends_on = [
    null_resource.first_control_plane,
    hcloud_network_subnet.subnet
  ]
}