improve support of single-node clusters

This commit is contained in:
phaer 2022-03-03 13:29:21 +01:00
parent 52bb5f663f
commit a02ccdfb9f
6 changed files with 50 additions and 13 deletions

View File

@ -166,6 +166,21 @@ spec:
</details>
<details>
<summary>single-node cluster</summary>
Running a development cluster on a single node, without any high-availability is possible as well.
In this case, we don't deploy an external load-balancer, but use the [k3s service load balancer](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) on the host itself and open ports 80 and 443 in the firewall.
``` terraform
control_plane_count = 1
allow_scheduling_on_control_plane = true
agent_nodepools = {}
```
</details>
## Debugging
First and foremost, it depends, but it's always good to be able to take a quick look at your Hetzner resources without having to log in to the UI. That is where the `hcloud` CLI comes in.

18
init.tf
View File

@ -13,7 +13,7 @@ resource "null_resource" "first_control_plane" {
token = random_password.k3s_token.result
cluster-init = true
disable-cloud-controller = true
disable = ["servicelb", "local-storage"]
disable = concat(["local-storage"], local.is_single_node_cluster ? [] : ["servicelb"])
flannel-iface = "eth1"
kubelet-arg = "cloud-provider=external"
node-ip = module.control_planes[0].private_ipv4_address
@ -75,13 +75,12 @@ resource "null_resource" "kustomization" {
content = yamlencode({
apiVersion = "kustomize.config.k8s.io/v1beta1"
kind = "Kustomization"
resources = [
resources = concat([
"https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/${local.ccm_version}/ccm-networks.yaml",
"https://raw.githubusercontent.com/hetznercloud/csi-driver/${local.csi_version}/deploy/kubernetes/hcloud-csi.yml",
"https://github.com/weaveworks/kured/releases/download/${local.kured_version}/kured-${local.kured_version}-dockerhub.yaml",
"https://raw.githubusercontent.com/rancher/system-upgrade-controller/master/manifests/system-upgrade-controller.yaml",
"traefik.yaml",
]
], local.is_single_node_cluster ? [] : ["traefik.yaml"]),
patchesStrategicMerge = [
file("${path.module}/kustomize/kured.yaml"),
file("${path.module}/kustomize/ccm.yaml"),
@ -93,7 +92,7 @@ resource "null_resource" "kustomization" {
# Upload traefik config
provisioner "file" {
content = templatefile(
content = local.is_single_node_cluster ? "" : templatefile(
"${path.module}/templates/traefik_config.yaml.tpl",
{
load_balancer_disable_ipv6 = var.load_balancer_disable_ipv6
@ -126,7 +125,7 @@ resource "null_resource" "kustomization" {
# Deploy our post-installation kustomization
provisioner "remote-exec" {
inline = [
inline = concat([
"set -ex",
# This ugly hack is here, because terraform serializes the
# embedded yaml files with "- |2", when there is more than
@ -140,8 +139,9 @@ resource "null_resource" "kustomization" {
"kubectl apply -k /tmp/post_install",
"echo 'Waiting for the system-upgrade-controller deployment to become available...'",
"kubectl -n system-upgrade wait --for=condition=available --timeout=120s deployment/system-upgrade-controller",
"kubectl -n system-upgrade apply -f /tmp/post_install/plans.yaml",
<<-EOT
"kubectl -n system-upgrade apply -f /tmp/post_install/plans.yaml"
],
local.is_single_node_cluster ? [] : [<<-EOT
timeout 120 bash <<EOF
until [ -n "\$(kubectl get -n kube-system service/traefik --output=jsonpath='{.status.loadBalancer.ingress[0].ip}' 2> /dev/null)" ]; do
echo "Waiting for load-balancer to get an IP..."
@ -149,7 +149,7 @@ resource "null_resource" "kustomization" {
done
EOF
EOT
]
])
}
depends_on = [

View File

@ -1,5 +1,6 @@
locals {
first_control_plane_network_ipv4 = module.control_planes[0].private_ipv4_address
is_single_node_cluster = var.control_plane_count + length(keys(var.agent_nodepools)) == 1
ssh_public_key = trimspace(file(var.public_key))
# ssh_private_key is either the contents of var.private_key or null to use a ssh agent.
@ -29,7 +30,7 @@ locals {
"127.0.0.1/32",
]
base_firewall_rules = [
base_firewall_rules = concat([
# Allowing internal cluster traffic and Hetzner metadata service and cloud API IPs
{
direction = "in"
@ -133,7 +134,26 @@ locals {
"0.0.0.0/0"
]
}
]
], !local.is_single_node_cluster ? [] : [
# Allow incoming web traffic for single node clusters, because we are using k3s servicelb there,
# not an external load-balancer.
{
direction = "in"
protocol = "tcp"
port = "80"
source_ips = [
"0.0.0.0/0"
]
},
{
direction = "in"
protocol = "tcp"
port = "443"
source_ips = [
"0.0.0.0/0"
]
}
])
common_commands_install_k3s = [
"set -ex",

View File

@ -46,7 +46,8 @@ resource "hcloud_placement_group" "k3s" {
}
# Look up the Hetzner load balancer created for traefik by the CCM.
# Skipped entirely (count = 0) for single-node clusters, which use the
# k3s service load balancer on the host instead of an external LB.
data "hcloud_load_balancer" "traefik" {
  count = local.is_single_node_cluster ? 0 : 1
  name  = "traefik"
  # The LB only exists after the post-install kustomization has deployed traefik.
  depends_on = [null_resource.kustomization]
}

View File

@ -12,7 +12,7 @@ output "agents_public_ipv4" {
output "load_balancer_public_ipv4" {
  # For single-node clusters there is no external LB, so the first (and only)
  # control-plane node's public IPv4 serves web traffic via the k3s servicelb.
  description = "The public IPv4 address of the Hetzner load balancer"
  value       = local.is_single_node_cluster ? module.control_planes[0].ipv4_address : data.hcloud_load_balancer.traefik[0].ipv4
}
output "kubeconfig_file" {

View File

@ -65,6 +65,7 @@ variable "load_balancer_disable_ipv6" {
variable "agent_nodepools" {
  # A map, not a count: keys are nodepool names, values their configuration.
  # The empty-map default deploys no agents, enabling single-node clusters.
  description = "Map of agent nodepools; set to {} for a single-node cluster with no agent nodes."
  type        = map(any)
  default     = {}
}
variable "hetzner_ccm_version" {