From f1a98c72902ccc7885b859dd2025989bf867ed3d Mon Sep 17 00:00:00 2001
From: Karim Naufal
Date: Tue, 12 Apr 2022 17:35:09 +0200
Subject: [PATCH] fixed single node cluster

---
 README.md                | 9 ++-------
 control_planes.tf        | 3 +--
 init.tf                  | 1 -
 locals.tf                | 4 +++-
 output.tf                | 4 +++-
 terraform.tfvars.example | 9 +++++----
 6 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/README.md b/README.md
index 8cd1060..a40325f 100644
--- a/README.md
+++ b/README.md
@@ -170,14 +170,9 @@ spec:
 
 Single-node cluster
 
-Running a development cluster on a single node, without any high-availability is possible as well.
-In this case, we don't deploy an external load-balancer, but use [k3s service load balancer](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) on the host itself and open up port 80 & 443 in the firewall.
+Running a development cluster on a single node, without any high availability, is possible as well. You need one control plane nodepool with a count of 1, and one agent nodepool with a count of 0.
 
-``` terraform
-control_plane_count = 1
-allow_scheduling_on_control_plane = true
-agent_nodepools = []
-```
+In this case, we don't deploy an external load-balancer, but use the default [k3s service load balancer](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) on the host itself and open up ports 80 & 443 in the firewall (done automatically).
diff --git a/control_planes.tf b/control_planes.tf
index 607f7aa..37655a7 100644
--- a/control_planes.tf
+++ b/control_planes.tf
@@ -46,7 +46,7 @@ resource "null_resource" "control_planes" {
   provisioner "file" {
     content = yamlencode({
       node-name = module.control_planes[each.key].name
-      server = "https://${module.control_planes[each.key].private_ipv4_address == module.control_planes[keys(module.control_planes)[0]].private_ipv4_address && length(module.control_planes) > 1 ? module.control_planes[keys(module.control_planes)[1]].private_ipv4_address : module.control_planes[keys(module.control_planes)[0]].private_ipv4_address}:6443"
+      server = length(module.control_planes) == 1 ? null : "https://${module.control_planes[each.key].private_ipv4_address == module.control_planes[keys(module.control_planes)[0]].private_ipv4_address ? module.control_planes[keys(module.control_planes)[1]].private_ipv4_address : module.control_planes[keys(module.control_planes)[0]].private_ipv4_address}:6443"
       token = random_password.k3s_token.result
       disable-cloud-controller = true
       disable = local.disable_extras
@@ -54,7 +54,6 @@
       kubelet-arg = "cloud-provider=external"
       node-ip = module.control_planes[each.key].private_ipv4_address
       advertise-address = module.control_planes[each.key].private_ipv4_address
-      tls-san = module.control_planes[each.key].ipv4_address
       node-label = each.value.labels
       node-taint = each.value.taints
     })
diff --git a/init.tf b/init.tf
index 3e83242..fd6ff68 100644
--- a/init.tf
+++ b/init.tf
@@ -18,7 +18,6 @@ resource "null_resource" "first_control_plane" {
       kubelet-arg = "cloud-provider=external"
       node-ip = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
       advertise-address = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
-      tls-san = module.control_planes[keys(module.control_planes)[0]].ipv4_address
       node-taint = local.control_plane_nodepools[keys(module.control_planes)[0]].taints
       node-label = local.control_plane_nodepools[keys(module.control_planes)[0]].labels
     })
diff --git a/locals.tf b/locals.tf
index e3feeb0..bda11db 100644
--- a/locals.tf
+++ b/locals.tf
@@ -211,6 +211,8 @@ locals {
   default_agent_labels = concat([], var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : [])
   default_control_plane_labels = concat([], var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : [])
 
+  allow_scheduling_on_control_plane = local.is_single_node_cluster ? true : var.allow_scheduling_on_control_plane
+
   # Default k3s node taints
-  default_control_plane_taints = concat([], var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"])
+  default_control_plane_taints = concat([], local.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"])
 }
diff --git a/output.tf b/output.tf
index 798f81f..1ea34fd 100644
--- a/output.tf
+++ b/output.tf
@@ -19,7 +19,9 @@ output "agents_public_ipv4" {
 
 output "load_balancer_public_ipv4" {
   description = "The public IPv4 address of the Hetzner load balancer"
-  value = local.is_single_node_cluster ? module.control_planes[0].ipv4_address : var.traefik_enabled == false ? null : data.hcloud_load_balancer.traefik[0].ipv4
+  value = local.is_single_node_cluster ? [
+    for obj in module.control_planes : obj.ipv4_address
+  ][0] : var.traefik_enabled == false ? null : data.hcloud_load_balancer.traefik[0].ipv4
 }
 
 output "kubeconfig_file" {
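A note on the output.tf hunk above: `module.control_planes` is expanded with `for_each` (hence the `keys(module.control_planes)` lookups elsewhere in the patch), so its instances form a map keyed by nodepool name rather than a list, and the old numeric index `module.control_planes[0]` no longer resolves. The `for` expression first collects the attribute values into a list, in key order, which can then be indexed. A minimal standalone sketch of that pattern; the output name is illustrative, not part of the module:

```terraform
# Sketch of the map-safe lookup used in the patch: a module expanded with
# for_each yields a map of instances, so module.control_planes[0] is an
# error. The for expression produces a list of values in key order, and
# [0] then picks the first instance deterministically.
output "first_control_plane_ipv4" {
  value = [for obj in module.control_planes : obj.ipv4_address][0]
}
```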
diff --git a/terraform.tfvars.example b/terraform.tfvars.example
index 9cc16a2..e62200b 100644
--- a/terraform.tfvars.example
+++ b/terraform.tfvars.example
@@ -28,8 +28,9 @@ network_region = "eu-central" # change to `us-east` if location is ash
 # You can freely add others nodepools the end of the list if you want, and increase the count of any.
 # Also, before decreasing the count of any nodepools to 0, it's important to drain and cordon it the nodes in question, otherwise it will leave your cluster in a bad state.
 
-# Before initializing the cluster, you can change all parameters and add or remove any nodepools.
-# If you want to have a single node cluster, just have 1 control plane nodepools with a count of 1.
+# Before initializing the cluster, you can change all parameters and add or remove any nodepools.
+
+# If you want to have a single node cluster, just have one control plane nodepool with a count of 1, and one agent nodepool with a count of 0.
 
 # Example below:
 
@@ -59,7 +60,7 @@ agent_nodepools = [
     location = "fsn1",
     labels = [],
     taints = [],
-    count = 2
+    count = 1
   },
   {
     name = "agent-large",
@@ -106,7 +107,7 @@ load_balancer_location = "fsn1"
 # metrics_server_enabled = false
 
 # If you want to allow non-control-plane workloads to run on the control-plane nodes set "true" below. The default is "false".
-# Also good for single node clusters.
+# True by default for single node clusters.
 # allow_scheduling_on_control_plane = true
 
 # If you want to disable automatic upgrade of k3s, you can set this to false, default is "true".
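Putting the pieces together, the single-node layout described by the updated README and terraform.tfvars.example comments would look roughly like the fragment below. This is a sketch only: the `control_plane_nodepools` variable name and the `server_type`/`location` values are assumed from the example file's conventions and may differ between module versions.

```terraform
# Hypothetical terraform.tfvars fragment for a single-node cluster: one
# control plane nodepool with count = 1 and one agent nodepool with
# count = 0. With this layout the module treats the cluster as single
# node (local.is_single_node_cluster): scheduling on the control plane
# is forced on, no external load balancer is deployed, and the k3s
# service load balancer serves ports 80 & 443 from the host itself.
control_plane_nodepools = [
  {
    name        = "control-plane-fsn1",
    server_type = "cpx11",
    location    = "fsn1",
    labels      = [],
    taints      = [],
    count       = 1
  }
]

agent_nodepools = [
  {
    name        = "agent-small",
    server_type = "cpx11",
    location    = "fsn1",
    labels      = [],
    taints      = [],
    count       = 0
  }
]
```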