Merge pull request #108 from kube-hetzner/single-host

Improve support for single-node clusters
Karim Naufal 2022-03-04 16:13:00 +01:00 committed by GitHub
commit 749bc1b201
6 changed files with 52 additions and 14 deletions


@@ -166,6 +166,21 @@ spec:
 </details>
+
+<details>
+<summary>single-node cluster</summary>
+
+Running a development cluster on a single node, without any high availability, is possible as well.
+In this case, we do not deploy an external load balancer, but use the [k3s service load balancer](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) on the host itself and open up ports 80 and 443 in the firewall.
+
+``` terraform
+control_plane_count = 1
+allow_scheduling_on_control_plane = true
+agent_nodepools = {}
+```
+
+</details>
+
 ## Debugging
 
 First and foremost, it depends, but it's always good to be able to take a quick look at your Hetzner resources without having to log in to the UI. That is where the `hcloud` CLI comes in.
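For instance, a few `hcloud` commands cover the most common checks for this kind of single-node setup. This is only a sketch: it assumes the `hcloud` CLI is installed and you have a Hetzner Cloud API token, and the context name is a placeholder.

``` bash
# Store the API token under a named context (the CLI prompts for the token).
hcloud context create my-k3s-cluster
# List all nodes with their IPs and status.
hcloud server list
# List load balancers; none is created for a single-node cluster, since klipper/servicelb is used instead.
hcloud load-balancer list
# Inspect firewalls, e.g. to verify that ports 80/443 are opened for single-node clusters.
hcloud firewall list
```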

init.tf (18 changed lines)

@@ -13,7 +13,7 @@ resource "null_resource" "first_control_plane" {
 token = random_password.k3s_token.result
 cluster-init = true
 disable-cloud-controller = true
-disable = ["servicelb", "local-storage"]
+disable = concat(["local-storage"], local.is_single_node_cluster ? [] : ["servicelb"])
 flannel-iface = "eth1"
 kubelet-arg = "cloud-provider=external"
 node-ip = module.control_planes[0].private_ipv4_address
@@ -74,13 +74,12 @@ resource "null_resource" "kustomization" {
 content = yamlencode({
 apiVersion = "kustomize.config.k8s.io/v1beta1"
 kind = "Kustomization"
-resources = [
+resources = concat([
 "https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/download/${local.ccm_version}/ccm-networks.yaml",
 "https://raw.githubusercontent.com/hetznercloud/csi-driver/${local.csi_version}/deploy/kubernetes/hcloud-csi.yml",
 "https://github.com/weaveworks/kured/releases/download/${local.kured_version}/kured-${local.kured_version}-dockerhub.yaml",
 "https://raw.githubusercontent.com/rancher/system-upgrade-controller/master/manifests/system-upgrade-controller.yaml",
-"traefik.yaml",
-]
+], local.is_single_node_cluster ? [] : ["traefik.yaml"]),
 patchesStrategicMerge = [
 file("${path.module}/kustomize/kured.yaml"),
 file("${path.module}/kustomize/ccm.yaml"),
@@ -92,7 +91,7 @@ resource "null_resource" "kustomization" {
 # Upload traefik config
 provisioner "file" {
-content = templatefile(
+content = local.is_single_node_cluster ? "" : templatefile(
 "${path.module}/templates/traefik_config.yaml.tpl",
 {
 load_balancer_disable_ipv6 = var.load_balancer_disable_ipv6
@@ -125,7 +124,7 @@ resource "null_resource" "kustomization" {
 # Deploy our post-installation kustomization
 provisioner "remote-exec" {
-inline = [
+inline = concat([
 "set -ex",
 # This ugly hack is here, because terraform serializes the
 # embedded yaml files with "- |2", when there is more than
@@ -139,8 +138,9 @@ resource "null_resource" "kustomization" {
 "kubectl apply -k /tmp/post_install",
 "echo 'Waiting for the system-upgrade-controller deployment to become available...'",
 "kubectl -n system-upgrade wait --for=condition=available --timeout=120s deployment/system-upgrade-controller",
-"kubectl -n system-upgrade apply -f /tmp/post_install/plans.yaml",
-<<-EOT
+"kubectl -n system-upgrade apply -f /tmp/post_install/plans.yaml"
+],
+local.is_single_node_cluster ? [] : [<<-EOT
 timeout 120 bash <<EOF
 until [ -n "\$(kubectl get -n kube-system service/traefik --output=jsonpath='{.status.loadBalancer.ingress[0].ip}' 2> /dev/null)" ]; do
 echo "Waiting for load-balancer to get an IP..."
@@ -148,7 +148,7 @@ resource "null_resource" "kustomization" {
 done
 EOF
 EOT
-]
+])
 }
 depends_on = [


@@ -1,5 +1,7 @@
 locals {
+# if we are in a single cluster config, we use the default klipper lb instead of Hetzner LB
+is_single_node_cluster = var.control_plane_count + length(keys(var.agent_nodepools)) == 1
 ssh_public_key = trimspace(file(var.public_key))
 # ssh_private_key is either the contents of var.private_key or null to use a ssh agent.
 ssh_private_key = var.private_key == null ? null : trimspace(file(var.private_key))
 # ssh_identity is not set if the private key is passed directly, but if ssh agent is used, the public key tells ssh agent which private key to use.
@@ -27,7 +29,7 @@ locals {
 "127.0.0.1/32",
 ]
-base_firewall_rules = [
+base_firewall_rules = concat([
 # Allowing internal cluster traffic and Hetzner metadata service and cloud API IPs
 {
 direction = "in"
@@ -131,7 +133,26 @@ locals {
 "0.0.0.0/0"
 ]
 }
-]
+], !local.is_single_node_cluster ? [] : [
+# Allow incoming web traffic for single node clusters, because we are using k3s servicelb there,
+# not an external load-balancer.
+{
+direction = "in"
+protocol = "tcp"
+port = "80"
+source_ips = [
+"0.0.0.0/0"
+]
+},
+{
+direction = "in"
+protocol = "tcp"
+port = "443"
+source_ips = [
+"0.0.0.0/0"
+]
+}
+])
 common_commands_install_k3s = [
 "set -ex",


@@ -46,7 +46,8 @@ resource "hcloud_placement_group" "k3s" {
 }
 data "hcloud_load_balancer" "traefik" {
+count = local.is_single_node_cluster ? 0 : 1
 name = "traefik"
 depends_on = [null_resource.kustomization]
 }


@@ -12,7 +12,7 @@ output "agents_public_ipv4" {
 output "load_balancer_public_ipv4" {
 description = "The public IPv4 address of the Hetzner load balancer"
-value = data.hcloud_load_balancer.traefik.ipv4
+value = local.is_single_node_cluster ? module.control_planes[0].ipv4_address : data.hcloud_load_balancer.traefik[0].ipv4
 }
 output "kubeconfig_file" {


@@ -65,6 +65,7 @@ variable "load_balancer_disable_ipv6" {
 variable "agent_nodepools" {
 description = "Number of agent nodes."
 type = map(any)
+default = {}
 }
 variable "hetzner_ccm_version" {