terraform-hcloud-kube-hetzner/main.tf
phaer 4ee2156a95 (2022-02-21 16:59:19 +01:00): wait for load-balancer in null-resource, because we can only do that whenever the cluster is operational, which it isn't with just one node.


resource "random_password" "k3s_token" {
length = 48
special = false
}
resource "hcloud_ssh_key" "k3s" {
name = "k3s"
public_key = local.ssh_public_key
}
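
# local.ssh_public_key is defined elsewhere in this module (e.g. a locals.tf not shown here).
# A minimal, hypothetical sketch of such a definition, assuming a var.public_key variable that
# holds the path to the public key file:
/*
locals {
  ssh_public_key = trimspace(file(var.public_key))
}
*/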
resource "hcloud_network" "k3s" {
name = "k3s"
ip_range = "10.0.0.0/8"
}
resource "hcloud_network_subnet" "k3s" {
network_id = hcloud_network.k3s.id
type = "cloud"
network_zone = var.network_region
ip_range = "10.0.0.0/16"
}
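
# var.network_region is declared outside this file. A hypothetical declaration could look like
# the following; "eu-central" is one of the network zones Hetzner Cloud offers:
/*
variable "network_region" {
  description = "Hetzner Cloud network zone in which the subnet is created"
  type        = string
  default     = "eu-central"
}
*/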
resource "hcloud_firewall" "k3s" {
name = "k3s"
# Allowing internal cluster traffic and Hetzner metadata service and cloud API IPs
rule {
direction = "in"
protocol = "tcp"
port = "any"
source_ips = [
"127.0.0.1/32",
"10.0.0.0/8",
"169.254.169.254/32",
"213.239.246.1/32"
]
}
rule {
direction = "in"
protocol = "udp"
port = "any"
source_ips = [
"127.0.0.1/32",
"10.0.0.0/8",
"169.254.169.254/32",
"213.239.246.1/32"
]
}
rule {
direction = "in"
protocol = "icmp"
source_ips = [
"127.0.0.1/32",
"10.0.0.0/8",
"169.254.169.254/32",
"213.239.246.1/32"
]
}
# Allow all traffic to the kube api server
rule {
direction = "in"
protocol = "tcp"
port = "6443"
source_ips = [
"0.0.0.0/0"
]
}
# Allow all traffic to the ssh port
rule {
direction = "in"
protocol = "tcp"
port = "22"
source_ips = [
"0.0.0.0/0"
]
}
# Allow ping on ipv4
rule {
direction = "in"
protocol = "icmp"
source_ips = [
"0.0.0.0/0"
]
}
# Allow basic out traffic
# ICMP to ping outside services
rule {
direction = "out"
protocol = "icmp"
destination_ips = [
"0.0.0.0/0"
]
}
# DNS
rule {
direction = "out"
protocol = "tcp"
port = "53"
destination_ips = [
"0.0.0.0/0"
]
}
rule {
direction = "out"
protocol = "udp"
port = "53"
destination_ips = [
"0.0.0.0/0"
]
}
# HTTP(s)
rule {
direction = "out"
protocol = "tcp"
port = "80"
destination_ips = [
"0.0.0.0/0"
]
}
rule {
direction = "out"
protocol = "tcp"
port = "443"
destination_ips = [
"0.0.0.0/0"
]
}
#NTP
rule {
direction = "out"
protocol = "udp"
port = "123"
destination_ips = [
"0.0.0.0/0"
]
}
}
resource "hcloud_placement_group" "k3s" {
name = "k3s"
type = "spread"
labels = {
"provisioner" = "terraform",
"engine" = "k3s"
}
}
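
# Neither the firewall nor the placement group above is attached to a server in this file; the
# node modules are expected to reference them by ID. A minimal, hypothetical sketch of how a
# server could attach them (names, image and server type are illustrative, not taken from this
# repository):
/*
resource "hcloud_server" "example_node" {
  name               = "example-node"
  image              = "ubuntu-20.04"
  server_type        = "cx21"
  ssh_keys           = [hcloud_ssh_key.k3s.id]
  firewall_ids       = [hcloud_firewall.k3s.id]
  placement_group_id = hcloud_placement_group.k3s.id
}
*/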
data "hcloud_load_balancer" "traefik" {
name = "traefik"
depends_on = [null_resource.cluster_provisioning]
}
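
# Once the data source can be read, the load balancer's address can be exposed to callers.
# A minimal sketch of a possible output (hypothetical; a real output may live in outputs.tf):
/*
output "load_balancer_ipv4" {
  description = "IPv4 address of the Traefik load balancer"
  value       = data.hcloud_load_balancer.traefik.ipv4
}
*/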
resource "null_resource" "cluster_provisioning" {
triggers = {
agent_ids = "${join(",", module.agents.*.id)}"
control_plane_ids = "${join(",", concat([module.first_control_plane.id], module.control_planes.*.id))}"
}
depends_on = [ null_resource.first_control_plane, null_resource.control_planes, null_resource.agents ]
provisioner "remote-exec" {
connection {
user = "root"
private_key = local.ssh_private_key
agent_identity = local.ssh_identity
host = module.first_control_plane.ipv4_address
}
inline = [
<<-EOT
timeout 120 bash <<EOF
until [ -n "\$(kubectl get -n kube-system service/traefik --output=jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do
echo "Waiting for load-balancer to get an IP..."
done
EOF
EOT
]
}
provisioner "local-exec" {
when = destroy
command = "hcloud load-balancer delete traefik"
}
}
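
# Note: the destroy-time provisioner above shells out to the hcloud CLI, so destroying the
# cluster requires that CLI to be installed and authenticated (for example via the
# HCLOUD_TOKEN environment variable or an active "hcloud context").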