ready to merge calico addition
This commit is contained in:
parent 31f4effd84
commit c821e36348
@@ -37,7 +37,7 @@ _Please note that we are not affiliated to Hetzner, this is just an open source
 - Proper use of the Hetzner private network to minimize latency and remove the need for encryption.
 - Automatic HA with the default setting of three control-plane nodes and two agent nodes.
 - Super-HA: Nodepools for both control-plane and agent nodes can be in different locations.
-- Possibility to have a single node cluster with a proper ingress controller (Traefik).
+- Possibility to have a single node cluster with a proper ingress controller.
 - Ability to add nodes and nodepools when the cluster is running.
 - Traefik ingress controller attached to a Hetzner load balancer with proxy protocol turned on.
 - Tons of flexible configuration options to suit all needs.
agents.tf
@@ -1,7 +1,7 @@
 module "agents" {
   source = "./modules/host"

-  for_each = local.agent_nodepools
+  for_each = local.agent_nodes

   name = "${var.use_cluster_name_in_node_name ? "${var.cluster_name}-" : ""}${each.value.nodepool_name}"
   ssh_keys = [hcloud_ssh_key.k3s.id]
@@ -27,7 +27,7 @@ module "agents" {
 }

 resource "null_resource" "agents" {
-  for_each = local.agent_nodepools
+  for_each = local.agent_nodes

   triggers = {
     agent_id = module.agents[each.key].id
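The rename from local.agent_nodepools to local.agent_nodes matches what the map now holds: one entry per node rather than per pool, so for_each creates and destroys servers individually. A purely illustrative sketch of the shape the module now iterates over (keys follow the format("%s-%s-%s", pool_index, node_index, name) expression visible in locals.tf below; the attribute set is trimmed):

locals {
  # Hypothetical example, not the module's actual output.
  agent_nodes_example = {
    "0-0-agent-small" = { nodepool_name = "agent-small" } # pool 0, node 0
    "0-1-agent-small" = { nodepool_name = "agent-small" } # pool 0, node 1
    "1-0-agent-big"   = { nodepool_name = "agent-big" }   # pool 1, node 0
  }
}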
control_planes.tf
@@ -1,7 +1,7 @@
 module "control_planes" {
   source = "./modules/host"

-  for_each = local.control_plane_nodepools
+  for_each = local.control_plane_nodes

   name = "${var.use_cluster_name_in_node_name ? "${var.cluster_name}-" : ""}${each.value.nodepool_name}"
   ssh_keys = [hcloud_ssh_key.k3s.id]
@@ -29,7 +29,7 @@ module "control_planes" {
 }

 resource "null_resource" "control_planes" {
-  for_each = local.control_plane_nodepools
+  for_each = local.control_plane_nodes

   triggers = {
     control_plane_id = module.control_planes[each.key].id
init.tf
@@ -19,8 +19,8 @@ resource "null_resource" "first_control_plane" {
       kube-controller-manager-arg = "flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins"
       node-ip = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
       advertise-address = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
-      node-taint = local.control_plane_nodepools[keys(module.control_planes)[0]].taints
-      node-label = local.control_plane_nodepools[keys(module.control_planes)[0]].labels
+      node-taint = local.control_plane_nodes[keys(module.control_planes)[0]].taints
+      node-label = local.control_plane_nodes[keys(module.control_planes)[0]].labels
       disable-network-policy = var.cni_plugin == "calico" ? true : var.disable_network_policy
     },
     var.cni_plugin == "calico" ? {
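The changed lines above only swap local.control_plane_nodepools for the new per-node map, but the surrounding context shows how the calico option threads through the k3s server config: disable-network-policy is forced on (Calico ships its own network policy controller), and an extra settings map is merged in only when calico is selected. A minimal sketch of that conditional-merge pattern with simplified names; the key inside the calico branch is an assumption, not this hunk's hidden content ("flannel-backend = none" is the documented k3s switch for running a custom CNI):

locals {
  k3s_server_config = merge(
    {
      # k3s's default network policy controller is always off under calico.
      disable-network-policy = var.cni_plugin == "calico" ? true : var.disable_network_policy
    },
    # Settings applied only when calico is the CNI.
    var.cni_plugin == "calico" ? { flannel-backend = "none" } : {}
  )
}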
locals.tf
@@ -172,7 +172,7 @@ locals {
   install_k3s_server = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC=server sh -"], local.apply_k3s_selinux)
   install_k3s_agent = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC=agent sh -"], local.apply_k3s_selinux)

-  control_plane_nodepools = merge([
+  control_plane_nodes = merge([
     for pool_index, nodepool_obj in var.control_plane_nodepools : {
       for node_index in range(nodepool_obj.count) :
       format("%s-%s-%s", pool_index, node_index, nodepool_obj.name) => {
@@ -186,7 +186,7 @@ locals {
     }
   ]...)

-  agent_nodepools = merge([
+  agent_nodes = merge([
     for pool_index, nodepool_obj in var.agent_nodepools : {
       for node_index in range(nodepool_obj.count) :
       format("%s-%s-%s", pool_index, node_index, nodepool_obj.name) => {
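The renamed locals use Terraform's merge([for ...]...) idiom: the inner for builds one map per nodepool, the outer for collects those maps into a list, and the ... expansion passes them to merge() as separate arguments, yielding a single flat per-node map. A self-contained example with made-up pools that can be pasted into terraform console:

locals {
  pools = [
    { name = "small", count = 2 },
    { name = "big", count = 1 },
  ]

  # Result: { "0-0-small" = ..., "0-1-small" = ..., "1-0-big" = ... }
  nodes = merge([
    for pool_index, pool in local.pools : {
      for node_index in range(pool.count) :
      format("%s-%s-%s", pool_index, node_index, pool.name) => {
        nodepool_name = pool.name
      }
    }
  ]...)
}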
main.tf
@@ -16,7 +16,7 @@ resource "hcloud_network" "k3s" {
 # We start from the end of the subnets cidr array,
 # as we would have fewer control plane nodepools than agent ones.
 resource "hcloud_network_subnet" "control_plane" {
-  count = length(local.control_plane_nodepools)
+  count = length(var.control_plane_nodepools)
   network_id = hcloud_network.k3s.id
   type = "cloud"
   network_zone = var.network_region
@@ -25,7 +25,7 @@ resource "hcloud_network_subnet" "control_plane" {

 # Here we start at the beginning of the subnets cidr array
 resource "hcloud_network_subnet" "agent" {
-  count = length(local.agent_nodepools)
+  count = length(var.agent_nodepools)
   network_id = hcloud_network.k3s.id
   type = "cloud"
   network_zone = var.network_region
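The two comments in this hunk describe the scheme the tfvars note below calls FILO: agent subnets are carved from the start of a shared list of candidate CIDRs and control-plane subnets from the end, so appending nodepools to either list never renumbers existing subnets. A sketch of that indexing under assumed names (the module's actual ip_range expressions sit outside this hunk):

locals {
  # Assumption: 256 candidate /24s carved out of one /16.
  subnet_cidrs = [for i in range(256) : cidrsubnet("10.0.0.0/16", 8, i)]
}

# Agents would take candidates from the start of the array:
#   ip_range = local.subnet_cidrs[count.index]
# and control planes from the end:
#   ip_range = local.subnet_cidrs[length(local.subnet_cidrs) - 1 - count.index]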
terraform.tfvars.example
@@ -27,7 +27,7 @@ network_region = "eu-central" # change to `us-east` if location is ash
 # Once the cluster is created, you can change nodepool count, and even set it to 0 (in the case of the first control-plane nodepool, the minimum is 1),
 # you can also rename it (if the count is set to 0), but do not remove a nodepool from the list after the cluster is created.

-# The only nodepools that are safe to remove from the list when you edit it are the ones at the end of the lists. This is due to how IPs are allocated.
+# The only nodepools that are safe to remove from the list when you edit it are the ones at the end of the lists. This is due to how subnets and IPs are allocated (FILO).
 # You can however freely add other nodepools at the end of each list if you want! The maximum number of nodepools you can create, combined for both lists, is 255.
 # Also, before decreasing the count of any nodepool to 0, it's important to drain and cordon the nodes in question, otherwise it will leave your cluster in a bad state.

@@ -161,7 +161,7 @@ load_balancer_location = "fsn1"

 # If you want to configure a different CNI for k3s, use this flag
 # possible values: flannel (Default), calico
-# cni_plugin = "flannel"
+# cni_plugin = "calico"

 # If you want to disable the k3s default network policy controller, use this flag
 # Calico overrides this value to true automatically
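In practice, opting in to the new CNI is a one-line change in one's own terraform.tfvars:

cni_plugin = "calico"

# disable_network_policy can be left alone: as the comment above notes,
# it is overridden to true whenever calico is selected.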