added traefik

Karim Naufal 2022-01-05 15:04:22 +01:00
parent 5d315c4427
commit 76a1e00805
13 changed files with 217 additions and 187 deletions


@@ -32,7 +32,7 @@ _Please note that we are not affiliated with Hetzner; this is just an open source
- Lightweight and resource-efficient Kubernetes powered by [k3s](https://github.com/k3s-io/k3s) on [k3os](https://github.com/rancher/k3os) nodes.
- Automatic HA with the default setting of two control-plane nodes and two agent nodes.
- Add or remove as many nodes as you want while the cluster stays running (just change the number of instances and run terraform apply again).
- (Optional) [Nginx ingress controller](https://kubernetes.github.io/ingress-nginx/) that will automatically use Hetzner's private network to allocate a Hetzner load balancer.
- Automatic Traefik ingress controller with a Hetzner load balancer and the PROXY protocol turned on (see the usage sketch below).
_It uses Terraform for deployment, as it's easy to use, and Hetzner provides a great [Hetzner Terraform Provider](https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs)._
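_A minimal sketch of what using the Traefik ingress looks like; the `my-app` Service, host, and port here are hypothetical, not part of this repo:_

```sh
# Route traffic to an existing "my-app" Service (port 80) through Traefik;
# Traefik's Hetzner load balancer then exposes it publicly.
kubectl apply --kubeconfig kubeconfig.yaml -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-app
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-app
                port:
                  number: 80
EOF
```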
@@ -72,6 +72,7 @@ agents_num = 2
location = "fsn1"
agent_server_type = "cpx21"
control_plane_server_type = "cpx11"
lb_server_type = "lb11"
```
### Installation
@@ -100,18 +101,6 @@ servers_num = 2
agents_num = 3
```
### Ingress Controller (Optional)
When using Kubernetes, it is ideal to have an ingress controller to expose services to the outside world. The Hetzner Cloud Controller lets us automatically deploy a Hetzner Load Balancer that the ingress controller can use. You can install the Nginx ingress controller with the following command:
```sh
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm install --values=manifests/helm/nginx/values.yaml ingress-nginx ingress-nginx/ingress-nginx -n kube-system --kubeconfig kubeconfig.yaml
```
_Please note that the load balancer's geographic location and instance type are editable in [values.yaml](manifests/helm/nginx/values.yaml)._
<!-- USAGE EXAMPLES -->
## Usage
@@ -147,37 +136,16 @@ By default, k3os and its embedded k3s instance get upgraded automatically on eac
kubectl label node <nodename> 'k3os.io/upgrade'- --kubeconfig kubeconfig.yaml
```
### Individual components upgrade
To upgrade individual components, you can use the following commands:
- Hetzner CCM and CSI
```sh
kubectl apply -f https://raw.githubusercontent.com/mysticaltech/kube-hetzner/master/manifests/hcloud-ccm-net.yaml --kubeconfig kubeconfig.yaml
kubectl apply -f https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml --kubeconfig kubeconfig.yaml
```
- (Optional, if installed) Nginx ingress controller
```sh
helm repo update
helm upgrade --values=manifests/helm/nginx/values.yaml ingress-nginx ingress-nginx/ingress-nginx -n kube-system --kubeconfig kubeconfig.yaml
```
As for the Hetzner CCM and CSI, their container images are set to latest with an imagePullPolicy of "Always". This means that when the nodes upgrade, they will be automatically upgraded too.
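_With the manifests now tracked as kustomizations (see `hetzner/ccm` and `hetzner/csi` below), a reasonable way to re-apply and verify them is sketched here; the `-k` targets mirror the takedown commands below, and the jsonpath query is illustrative:_

```sh
# Re-apply the kustomized manifests (same targets the takedown section deletes)
kubectl apply -k hetzner/ccm --kubeconfig kubeconfig.yaml
kubectl apply -k hetzner/csi --kubeconfig kubeconfig.yaml
# Confirm the CCM runs :latest with imagePullPolicy Always
kubectl -n kube-system get deployment hcloud-cloud-controller-manager \
  --kubeconfig kubeconfig.yaml \
  -o jsonpath='{.spec.template.spec.containers[0].image}{" "}{.spec.template.spec.containers[0].imagePullPolicy}{"\n"}'
```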
## Takedown
If you choose to install the Nginx ingress controller, you need to delete it first to release the load balancer, as follows:
```sh
helm delete ingress-nginx -n kube-system --kubeconfig kubeconfig.yaml
```
Then you can proceed to take down the rest of the cluster with:
```sh
kubectl delete -f https://raw.githubusercontent.com/mysticaltech/kube-hetzner/master/manifests/hcloud-ccm-net.yaml --kubeconfig kubeconfig.yaml
kubectl delete -f https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml --kubeconfig kubeconfig.yaml
kubectl delete -k hetzner/csi --kubeconfig kubeconfig.yaml
kubectl delete -k hetzner/ccm --kubeconfig kubeconfig.yaml
hcloud load-balancer delete traefik
terraform destroy -auto-approve
```


@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml
patchesStrategicMerge:
- patch.yaml
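This kustomization pulls the upstream CCM manifest and overlays the patch below. A quick way to sanity-check it before applying is sketched here (run from the repo root; the apply is what the `first_control_plane` provisioner runs):

```sh
# Render the patched manifests locally without touching the cluster
kubectl kustomize hetzner/ccm
# Apply them to the cluster
kubectl apply -k hetzner/ccm --kubeconfig kubeconfig.yaml
```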

hetzner/ccm/patch.yaml

@@ -0,0 +1,19 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: hcloud-cloud-controller-manager
namespace: kube-system
spec:
template:
spec:
containers:
- image: hetznercloud/hcloud-cloud-controller-manager:latest
imagePullPolicy: Always
name: hcloud-cloud-controller-manager
command:
- "/bin/hcloud-cloud-controller-manager"
- "--cloud-provider=hcloud"
- "--leader-elect=false"
- "--allow-untagged-cloud"
- "--allocate-node-cidrs=true"
- "--cluster-cidr=10.42.0.0/16"


@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml
patchesStrategicMerge:
- patch.yaml

hetzner/csi/patch.yaml

@@ -0,0 +1,54 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: hcloud-csi-controller
namespace: kube-system
spec:
template:
metadata:
labels:
app: hcloud-csi-controller
spec:
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:canary
imagePullPolicy: Always
- name: csi-resizer
image: quay.io/k8scsi/csi-resizer:canary
imagePullPolicy: Always
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:canary
imagePullPolicy: Always
- name: hcloud-csi-driver
image: hetznercloud/hcloud-csi-driver:latest
imagePullPolicy: Always
- name: liveness-probe
image: quay.io/k8scsi/livenessprobe:canary
imagePullPolicy: Always
volumes:
- name: socket-dir
emptyDir: {}
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: hcloud-csi-node
namespace: kube-system
labels:
app: hcloud-csi
spec:
selector:
matchLabels:
app: hcloud-csi
template:
spec:
containers:
- name: csi-node-driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:canary
imagePullPolicy: Always
- name: hcloud-csi-driver
image: hetznercloud/hcloud-csi-driver:latest
imagePullPolicy: Always
- name: liveness-probe
image: quay.io/k8scsi/livenessprobe:canary
imagePullPolicy: Always

main.tf

@@ -85,6 +85,63 @@ resource "hcloud_firewall" "k3s" {
"0.0.0.0/0"
]
}
# Allow basic out traffic
# ICMP to ping outside services
rule {
direction = "out"
protocol = "icmp"
destination_ips = [
"0.0.0.0/0"
]
}
# DNS
rule {
direction = "out"
protocol = "tcp"
port = "53"
destination_ips = [
"0.0.0.0/0"
]
}
rule {
direction = "out"
protocol = "udp"
port = "53"
destination_ips = [
"0.0.0.0/0"
]
}
# HTTP(s)
rule {
direction = "out"
protocol = "tcp"
port = "80"
destination_ips = [
"0.0.0.0/0"
]
}
rule {
direction = "out"
protocol = "tcp"
port = "443"
destination_ips = [
"0.0.0.0/0"
]
}
# NTP
rule {
direction = "out"
protocol = "udp"
port = "123"
destination_ips = [
"0.0.0.0/0"
]
}
}
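The outbound rules above intentionally allow only ICMP, DNS, HTTP(S), and NTP. A hedged spot-check, run over SSH on any cluster node (commands are illustrative; the k3os base image may not ship all of them):

```sh
ping -c 1 1.1.1.1                         # ICMP out
nslookup get.k3s.io                       # DNS over port 53
wget -q -O /dev/null https://get.k3s.io   # HTTPS over port 443
```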
@@ -108,3 +165,11 @@ locals {
data "hcloud_image" "linux" {
name = local.hcloud_image_name
}
resource "local_file" "traefik_config" {
content = templatefile("${path.module}/templates/traefik_config.yaml.tpl", {
lb_server_type = var.lb_server_type
location = var.location
})
filename = "${path.module}/templates/rendered/traefik_config.yaml"
}
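Since the Traefik config is rendered from a template, it can be previewed without a full apply; a sketch using terraform console (values are illustrative, and required variables such as the hcloud token may need to be set, e.g. via TF_VAR_hcloud_token):

```sh
# Evaluate the template with sample values and print the rendered YAML
echo 'templatefile("templates/traefik_config.yaml.tpl", { lb_server_type = "lb11", location = "fsn1" })' \
  | terraform console
```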


@@ -1,87 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: cloud-controller-manager
namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: hcloud-cloud-controller-manager
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: hcloud-cloud-controller-manager
template:
metadata:
labels:
app: hcloud-cloud-controller-manager
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: cloud-controller-manager
dnsPolicy: Default
tolerations:
# this taint is set by all kubelets running `--cloud-provider=external`
# so we should tolerate it to schedule the cloud controller manager
- key: "node.cloudprovider.kubernetes.io/uninitialized"
value: "true"
effect: "NoSchedule"
- key: "CriticalAddonsOnly"
operator: "Exists"
# the cloud controller manager should be able to run on masters
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
- key: "node.kubernetes.io/not-ready"
effect: "NoSchedule"
hostNetwork: true
containers:
- image: hetznercloud/hcloud-cloud-controller-manager:v1.12.0
name: hcloud-cloud-controller-manager
command:
- "/bin/hcloud-cloud-controller-manager"
- "--cloud-provider=hcloud"
- "--leader-elect=false"
- "--allow-untagged-cloud"
- "--allocate-node-cidrs=true"
- "--cluster-cidr=10.42.0.0/16"
resources:
requests:
cpu: 100m
memory: 50Mi
limits:
cpu: 500m
memory: 500Mi
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: HCLOUD_TOKEN
valueFrom:
secretKeyRef:
name: hcloud
key: token
- name: HCLOUD_NETWORK
valueFrom:
secretKeyRef:
name: hcloud
key: network


@@ -1,8 +0,0 @@
controller:
service:
type: LoadBalancer
annotations:
load-balancer.hetzner.cloud/name: nginx-ingress
load-balancer.hetzner.cloud/use-private-ip: true
load-balancer.hetzner.cloud/location: fsn1
load-balancer.hetzner.cloud/type: lb11


@@ -1,50 +0,0 @@
# Doc: https://rancher.com/docs/k3s/latest/en/upgrades/automated/
# agent plan
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
name: k3s-agent
namespace: system-upgrade
labels:
k3s_upgrade: agent
spec:
concurrency: 1
channel: https://update.k3s.io/v1-release/channels/stable
nodeSelector:
matchExpressions:
- {key: k3s_upgrade, operator: Exists}
- {key: k3s_upgrade, operator: NotIn, values: ["disabled", "false"]}
- {key: node-role.kubernetes.io/master, operator: NotIn, values: ["true"]}
serviceAccountName: system-upgrade
prepare:
image: rancher/k3s-upgrade
args: ["prepare", "k3s-server"]
drain:
force: true
skipWaitForDeleteTimeout: 60
upgrade:
image: rancher/k3s-upgrade
---
# server plan
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
name: k3s-server
namespace: system-upgrade
labels:
k3s_upgrade: server
spec:
concurrency: 1
channel: https://update.k3s.io/v1-release/channels/stable
nodeSelector:
matchExpressions:
- {key: k3s_upgrade, operator: Exists}
- {key: k3s_upgrade, operator: NotIn, values: ["disabled", "false"]}
- {key: node-role.kubernetes.io/master, operator: In, values: ["true"]}
tolerations:
- {key: node-role.kubernetes.io/master, effect: NoSchedule, operator: Exists}
- {key: CriticalAddonsOnly, effect: NoExecute, operator: Exists}
serviceAccountName: system-upgrade
cordon: true
upgrade:
image: rancher/k3s-upgrade


@@ -29,7 +29,7 @@ resource "hcloud_server" "first_control_plane" {
}
}
# Install k3os
provisioner "remote-exec" {
inline = local.k3os_install_commands
@@ -40,6 +40,7 @@ resource "hcloud_server" "first_control_plane" {
}
}
# Wait for k3os to be ready and fetch kubeconfig.yaml
provisioner "local-exec" {
command = <<-EOT
sleep 60 && ping ${self.ipv4_address} | grep --line-buffered "bytes from" | head -1 && sleep 60 && scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${var.private_key} rancher@${self.ipv4_address}:/etc/rancher/k3s/k3s.yaml ${path.module}/kubeconfig.yaml
@@ -47,16 +48,21 @@
EOT
}
# Install the Hetzner Cloud cloud controller and cloud storage interface
# Install Hetzner CCM and CSI
provisioner "local-exec" {
command = <<-EOT
kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name} --kubeconfig ${path.module}/kubeconfig.yaml
kubectl apply -f ${path.module}/manifests/hcloud-ccm-net.yaml --kubeconfig ${path.module}/kubeconfig.yaml
kubectl apply -k ${path.module}/hetzner/ccm --kubeconfig ${path.module}/kubeconfig.yaml
kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token} --kubeconfig ${path.module}/kubeconfig.yaml
kubectl apply -f https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml --kubeconfig ${path.module}/kubeconfig.yaml
kubectl apply -k ${path.module}/hetzner/csi --kubeconfig ${path.module}/kubeconfig.yaml
EOT
}
# Configure the Traefik ingress controller
provisioner "local-exec" {
command = "kubectl apply -f ${local_file.traefik_config.filename} --kubeconfig ${path.module}/kubeconfig.yaml"
}
network {
network_id = hcloud_network.k3s.id
ip = local.first_control_plane_network_ip


@@ -0,0 +1,21 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
service:
enabled: true
type: LoadBalancer
annotations:
"load-balancer.hetzner.cloud/name": "traefik"
"load-balancer.hetzner.cloud/use-private-ip": "true"
"load-balancer.hetzner.cloud/location": "fsn1"
"load-balancer.hetzner.cloud/type": "lb11"
"load-balancer.hetzner.cloud/uses-proxyprotocol": "true"
additionalArguments:
- "--entryPoints.web.proxyProtocol.trustedIPs=127.0.0.1/32,10.0.0.0/8"
- "--entryPoints.websecure.proxyProtocol.trustedIPs=127.0.0.1/32,10.0.0.0/8"
- "--entryPoints.web.forwardedHeaders.trustedIPs=127.0.0.1/32,10.0.0.0/8"
- "--entryPoints.websecure.forwardedHeaders.trustedIPs=127.0.0.1/32,10.0.0.0/8"


@@ -0,0 +1,21 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
service:
enabled: true
type: LoadBalancer
annotations:
"load-balancer.hetzner.cloud/name": "traefik"
"load-balancer.hetzner.cloud/use-private-ip": "true"
"load-balancer.hetzner.cloud/location": "${location}"
"load-balancer.hetzner.cloud/type": "${lb_server_type}"
"load-balancer.hetzner.cloud/uses-proxyprotocol": "true"
additionalArguments:
- "--entryPoints.web.proxyProtocol.trustedIPs=127.0.0.1/32,10.0.0.0/8"
- "--entryPoints.websecure.proxyProtocol.trustedIPs=127.0.0.1/32,10.0.0.0/8"
- "--entryPoints.web.forwardedHeaders.trustedIPs=127.0.0.1/32,10.0.0.0/8"
- "--entryPoints.websecure.forwardedHeaders.trustedIPs=127.0.0.1/32,10.0.0.0/8"


@@ -43,3 +43,8 @@ variable "agent_server_type" {
description = "Default agent server type"
default = "cx21"
}
variable "lb_server_type" {
description = "Default load balancer server type"
default = "lb11"
}
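As with the other variables, this default can be overridden per deployment; an illustrative example (lb21 and nbg1 are valid Hetzner values, but any supported type and location work):

```sh
terraform apply -var lb_server_type=lb21 -var location=nbg1
```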