before move to k3os

commit f308220bfe (parent d9923a2ab3)
@@ -52,8 +52,8 @@ Then you'll need to have both the [terraform](https://learn.hashicorp.com/tutor
 ```sh
 gofish install terraform
-gofish install helm
 gofish install kubectl
+gofish install helm
 ```

 ### Creating terraform.tfvars
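Not part of the commit, but a quick sanity check after the gofish installs above: the sketch below just confirms the three CLIs are on the PATH (exact version output will vary).

```sh
# Verify the tools installed via gofish are available
terraform version
kubectl version --client
helm version --short
```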
init.cfg (+1 line)

@@ -32,6 +32,7 @@
 emit_via = stdio
 [base]
 debuglevel = 1

 runcmd:
   - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication no/' /etc/ssh/sshd_config
   - sed -i -e '/^X11Forwarding/s/^.*$/X11Forwarding no/' /etc/ssh/sshd_config
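As a side note (not in the diff): once cloud-init has run the sed commands above, the effective sshd settings can be checked directly on the node, for example:

```sh
# Print the effective sshd configuration (run as root) and confirm the
# two hardened options; sshd -T prints keys in lowercase.
sshd -T | grep -E '^(passwordauthentication|x11forwarding)'
# Expected, assuming the sed lines matched:
#   passwordauthentication no
#   x11forwarding no
```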
main.tf (11 lines changed)

@@ -101,14 +101,19 @@ data "template_cloudinit_config" "init_cfg" {
   }
 }

-data "template_file" "ccm_manifest" {
+data "template_file" "ccm" {
   template = file("${path.module}/manifests/hcloud-ccm-net.yaml")
 }

-data "template_file" "upgrade_plan" {
-  template = file("${path.module}/manifests/upgrade/plan.yaml")
+data "template_file" "plans" {
+  template = file("${path.module}/manifests/upgrade/plans.yaml")
 }

+data "template_file" "kured" {
+  template = file("${path.module}/manifests/upgrade/kured.yaml")
+}
+
 locals {
   first_control_plane_network_ip = cidrhost(hcloud_network.k3s.ip_range, 2)
 }
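These data sources only read the manifest files so they can be piped into kubectl later on. A small sketch, outside the commit, of how the renamed references and the manifests themselves can be checked before applying (assuming terraform and kubectl are installed and the manifest paths from the diff):

```sh
# Check that the renamed data sources (ccm, plans, kured) are referenced consistently
terraform init -backend=false
terraform validate

# Client-side validation of the manifests that will be piped into kubectl
kubectl apply --dry-run=client \
  -f manifests/hcloud-ccm-net.yaml \
  -f manifests/upgrade/plans.yaml \
  -f manifests/upgrade/kured.yaml
```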
manifests/upgrade/kured.yaml (new file, 139 lines)

@@ -0,0 +1,139 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kured
rules:
  # Allow kured to read spec.unschedulable
  # Allow kubectl to drain/uncordon
  #
  # NB: These permissions are tightly coupled to the bundled version of kubectl; the ones below
  # match https://github.com/kubernetes/kubernetes/blob/v1.19.4/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
  #
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "patch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list","delete","get"]
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["pods/eviction"]
    verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kured
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kured
subjects:
  - kind: ServiceAccount
    name: kured
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: kube-system
  name: kured
rules:
  # Allow kured to lock/unlock itself
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    resourceNames: ["kured"]
    verbs: ["update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: kube-system
  name: kured
subjects:
  - kind: ServiceAccount
    namespace: kube-system
    name: kured
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kured
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kured
  namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kured            # Must match `--ds-name`
  namespace: kube-system # Must match `--ds-namespace`
spec:
  selector:
    matchLabels:
      name: kured
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        name: kured
    spec:
      serviceAccountName: kured
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      hostPID: true # Facilitate entering the host mount namespace via init
      restartPolicy: Always
      containers:
        - name: kured
          image: docker.io/weaveworks/kured:1.8.0
          # If you find yourself here wondering why there is no
          # :latest tag on Docker Hub, see the FAQ in the README
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true # Give permission to nsenter /proc/1/ns/mnt
          env:
            # Pass in the name of the node on which this pod is scheduled
            # for use with drain/uncordon operations and lock acquisition
            - name: KURED_NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          command:
            - /usr/bin/kured
            - --reboot-sentinel-command="/usr/bin/needs-restarting -r"
            # - --force-reboot=false
            # - --drain-grace-period=-1
            # - --skip-wait-for-delete-timeout=0
            # - --drain-timeout=0
            # - --period=1h
            # - --ds-namespace=kube-system
            # - --ds-name=kured
            # - --lock-annotation=weave.works/kured-node-lock
            # - --lock-ttl=0
            # - --prometheus-url=http://prometheus.monitoring.svc.cluster.local
            # - --alert-filter-regexp=^RebootRequired$
            # - --alert-firing-only=false
            # - --reboot-sentinel=/var/run/reboot-required
            # - --prefer-no-schedule-taint=""
            # - --slack-hook-url=https://hooks.slack.com/...
            # - --slack-username=prod
            # - --slack-channel=alerting
            # - --notify-url="" # See also shoutrrr url format
            # - --message-template-drain=Draining node %s
            # - --message-template-drain=Rebooting node %s
            # - --blocking-pod-selector=runtime=long,cost=expensive
            # - --blocking-pod-selector=name=temperamental
            # - --blocking-pod-selector=...
            # - --reboot-days=sun,mon,tue,wed,thu,fri,sat
            # - --start-time=0:00
            # - --end-time=23:59:59
            # - --time-zone=UTC
            # - --annotate-nodes=false
            # - --lock-release-delay=30m
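Not part of the manifest itself, but a rough way to confirm kured is running once applied (namespace and label taken from the manifest above; exact output varies by version):

```sh
# The DaemonSet should report one ready pod per node
kubectl -n kube-system get daemonset kured

# Recent kured logs; look for messages about the reboot sentinel and lock acquisition
kubectl -n kube-system logs -l name=kured --tail=20

# On a Fedora/RHEL-style node: exit status 1 from needs-restarting -r
# (provided by dnf-utils) is what signals kured to reboot
needs-restarting -r || echo "node wants a reboot"
```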
@@ -32,13 +32,13 @@ resource "hcloud_server" "first_control_plane" {
       "until systemctl is-active --quiet k3s.service; do sleep 1; done",
       "until kubectl get node ${self.name}; do sleep 1; done",
       "kubectl -n kube-system create secret generic hcloud --from-literal=token=${var.hcloud_token} --from-literal=network=${hcloud_network.k3s.name}",
-      "kubectl apply -f -<<EOF\n${data.template_file.ccm_manifest.rendered}\nEOF",
+      "kubectl apply -f -<<EOF\n${data.template_file.ccm.rendered}\nEOF",
       "kubectl -n kube-system create secret generic hcloud-csi --from-literal=token=${var.hcloud_token}",
       "kubectl apply -f https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml",
       "kubectl apply -f https://raw.githubusercontent.com/rancher/system-upgrade-controller/master/manifests/system-upgrade-controller.yaml",
-      "kubectl apply -f -<<EOF\n${data.template_file.upgrade_plan.rendered}\nEOF",
-      "latest=$(curl -s https://api.github.com/repos/weaveworks/kured/releases | jq -r .[0].tag_name)",
-      "kubectl apply -f https://github.com/weaveworks/kured/releases/download/$latest/kured-$latest-dockerhub.yaml"
+      "sleep 33",
+      "kubectl apply -f -<<EOF\n${data.template_file.plans.rendered}\nEOF",
+      "kubectl apply -f -<<EOF\n${data.template_file.kured.rendered}\nEOF",
     ]

     connection {
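A follow-up sketch (not in the diff) for checking that everything this remote-exec block applies actually comes up, assuming kubectl is pointed at the new cluster:

```sh
# Hetzner CCM and CSI driver pods
kubectl -n kube-system get pods

# system-upgrade-controller and the plans rendered from manifests/upgrade/plans.yaml
kubectl -n system-upgrade get pods
kubectl -n system-upgrade get plans

# kured, applied from the last heredoc above
kubectl -n kube-system get daemonset kured
```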
@@ -58,7 +58,7 @@ variable "initial_commands" {
   description = "Initial commands to run on each machines."
   default = [
     "dnf upgrade -y",
-    "dnf install -y container-selinux selinux-policy-base fail2ban k3s-selinux dnf-automatic jq",
+    "dnf install -y container-selinux selinux-policy-base fail2ban k3s-selinux dnf-automatic jq dnf-utils",
     "systemctl enable --now fail2ban",
     "systemctl enable --now dnf-automatic.timer",
     "systemctl disable firewalld",
@@ -66,4 +66,3 @@ variable "initial_commands" {
     "sleep 11; shutdown -r +0"
   ]
 }
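The only change here is appending dnf-utils to the package list. A short note in shell form (a hypothetical session, not part of the commit) on why it matters: dnf-utils ships /usr/bin/needs-restarting, which the kured manifest above uses as its reboot sentinel.

```sh
# dnf-utils provides needs-restarting, which kured's
# --reboot-sentinel-command relies on
dnf install -y dnf-utils
needs-restarting -r
echo "exit=$?   # 1 means a reboot is required, 0 means it is not"

# The services enabled by initial_commands can be checked with:
systemctl is-enabled fail2ban dnf-automatic.timer
```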