From a02ccdfb9fbffee18985a786979237e76993e3f9 Mon Sep 17 00:00:00 2001 From: phaer Date: Thu, 3 Mar 2022 13:29:21 +0100 Subject: [PATCH] improve support of single-node clusters --- README.md | 15 +++++++++++++++ init.tf | 18 +++++++++--------- locals.tf | 24 ++++++++++++++++++++++-- main.tf | 3 ++- output.tf | 2 +- variables.tf | 1 + 6 files changed, 50 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index abc9f21..fdadc06 100644 --- a/README.md +++ b/README.md @@ -166,6 +166,21 @@ spec: +
+ +single-node cluster + +Running a development cluster on a single node, without any high availability, is possible as well. +In this case, we don't deploy an external load-balancer, but use [k3s service load balancer](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) on the host itself and open up ports 80 & 443 in the firewall. + +``` terraform +control_plane_count = 1 +allow_scheduling_on_control_plane = true +agent_nodepools = {} +``` + +
+ ## Debugging First and foremost, it depends, but it's always good to have a quick look into Hetzner quickly without having to login to the UI. That is where the `hcloud` cli comes in. diff --git a/init.tf b/init.tf index ee9134b..787550d 100644 --- a/init.tf +++ b/init.tf @@ -13,7 +13,7 @@ resource "null_resource" "first_control_plane" { token = random_password.k3s_token.result cluster-init = true disable-cloud-controller = true - disable = ["servicelb", "local-storage"] + disable = concat(["local-storage"], local.is_single_node_cluster ? [] : ["servicelb"]) flannel-iface = "eth1" kubelet-arg = "cloud-provider=external" node-ip = module.control_planes[0].private_ipv4_address @@ -75,13 +75,12 @@ resource "null_resource" "kustomization" { content = yamlencode({ apiVersion = "kustomize.config.k8s.io/v1beta1" kind = "Kustomization" - resources = [ + resources = concat([ "https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/${local.ccm_version}/ccm-networks.yaml", "https://raw.githubusercontent.com/hetznercloud/csi-driver/${local.csi_version}/deploy/kubernetes/hcloud-csi.yml", "https://github.com/weaveworks/kured/releases/download/${local.kured_version}/kured-${local.kured_version}-dockerhub.yaml", "https://raw.githubusercontent.com/rancher/system-upgrade-controller/master/manifests/system-upgrade-controller.yaml", - "traefik.yaml", - ] + ], local.is_single_node_cluster ? [] : ["traefik.yaml"]), patchesStrategicMerge = [ file("${path.module}/kustomize/kured.yaml"), file("${path.module}/kustomize/ccm.yaml"), @@ -93,7 +92,7 @@ resource "null_resource" "kustomization" { # Upload traefik config provisioner "file" { - content = templatefile( + content = local.is_single_node_cluster ? 
"" : templatefile( "${path.module}/templates/traefik_config.yaml.tpl", { load_balancer_disable_ipv6 = var.load_balancer_disable_ipv6 @@ -126,7 +125,7 @@ resource "null_resource" "kustomization" { # Deploy our post-installation kustomization provisioner "remote-exec" { - inline = [ + inline = concat([ "set -ex", # This ugly hack is here, because terraform serializes the # embedded yaml files with "- |2", when there is more than @@ -140,8 +139,9 @@ resource "null_resource" "kustomization" { "kubectl apply -k /tmp/post_install", "echo 'Waiting for the system-upgrade-controller deployment to become available...'", "kubectl -n system-upgrade wait --for=condition=available --timeout=120s deployment/system-upgrade-controller", - "kubectl -n system-upgrade apply -f /tmp/post_install/plans.yaml", - <<-EOT + "kubectl -n system-upgrade apply -f /tmp/post_install/plans.yaml" + ], + local.is_single_node_cluster ? [] : [<<-EOT timeout 120 bash < /dev/null)" ]; do echo "Waiting for load-balancer to get an IP..." @@ -149,7 +149,7 @@ resource "null_resource" "kustomization" { done EOF EOT - ] + ]) } depends_on = [ diff --git a/locals.tf b/locals.tf index 6ebbe16..1c9fe5d 100644 --- a/locals.tf +++ b/locals.tf @@ -1,5 +1,6 @@ locals { first_control_plane_network_ipv4 = module.control_planes[0].private_ipv4_address + is_single_node_cluster = var.control_plane_count + length(keys(var.agent_nodepools)) == 1 ssh_public_key = trimspace(file(var.public_key)) # ssh_private_key is either the contents of var.private_key or null to use a ssh agent. @@ -29,7 +30,7 @@ locals { "127.0.0.1/32", ] - base_firewall_rules = [ + base_firewall_rules = concat([ # Allowing internal cluster traffic and Hetzner metadata service and cloud API IPs { direction = "in" @@ -133,7 +134,26 @@ locals { "0.0.0.0/0" ] } - ] + ], !local.is_single_node_cluster ? [] : [ + # Allow incoming web traffic for single node clusters, because we are using k3s servicelb there, + # not an external load-balancer. 
+ { + direction = "in" + protocol = "tcp" + port = "80" + source_ips = [ + "0.0.0.0/0" + ] + }, + { + direction = "in" + protocol = "tcp" + port = "443" + source_ips = [ + "0.0.0.0/0" + ] + } + ]) common_commands_install_k3s = [ "set -ex", diff --git a/main.tf b/main.tf index c54204f..ce18aa8 100644 --- a/main.tf +++ b/main.tf @@ -46,7 +46,8 @@ resource "hcloud_placement_group" "k3s" { } data "hcloud_load_balancer" "traefik" { - name = "traefik" + count = local.is_single_node_cluster ? 0 : 1 + name = "traefik" depends_on = [null_resource.kustomization] } diff --git a/output.tf b/output.tf index 762290d..4d2033e 100644 --- a/output.tf +++ b/output.tf @@ -12,7 +12,7 @@ output "agents_public_ipv4" { output "load_balancer_public_ipv4" { description = "The public IPv4 address of the Hetzner load balancer" - value = data.hcloud_load_balancer.traefik.ipv4 + value = local.is_single_node_cluster ? module.control_planes[0].ipv4_address : data.hcloud_load_balancer.traefik[0].ipv4 } output "kubeconfig_file" { diff --git a/variables.tf b/variables.tf index 15ecb17..d3439ae 100644 --- a/variables.tf +++ b/variables.tf @@ -65,6 +65,7 @@ variable "load_balancer_disable_ipv6" { variable "agent_nodepools" { description = "Number of agent nodes." type = map(any) + default = {} } variable "hetzner_ccm_version" {