ready to merge calico addition

This commit is contained in:
Karim Naufal 2022-04-13 15:59:03 +02:00
parent 31f4effd84
commit c821e36348
No known key found for this signature in database
GPG Key ID: 9CB4A7C28C139CA5
7 changed files with 13 additions and 13 deletions

View File

@ -37,7 +37,7 @@ _Please note that we are not affiliated to Hetzner, this is just an open source
- Proper use of the Hetzner private network to minimize latency and remove the need for encryption.
- Automatic HA with the default setting of three control-plane nodes and two agent nodes.
- Super-HA: Nodepools for both control-plane and agent nodes can be in different locations.
- Possibility to have a single node cluster with a proper ingress controller (Traefik).
- Possibility to have a single node cluster with a proper ingress controller.
- Ability to add nodes and nodepools when the cluster is running.
- Traefik ingress controller attached to a Hetzner load balancer with proxy protocol turned on.
- Tons of flexible configuration options to suit all needs.

View File

@ -1,7 +1,7 @@
module "agents" {
source = "./modules/host"
for_each = local.agent_nodepools
for_each = local.agent_nodes
name = "${var.use_cluster_name_in_node_name ? "${var.cluster_name}-" : ""}${each.value.nodepool_name}"
ssh_keys = [hcloud_ssh_key.k3s.id]
@ -27,7 +27,7 @@ module "agents" {
}
resource "null_resource" "agents" {
for_each = local.agent_nodepools
for_each = local.agent_nodes
triggers = {
agent_id = module.agents[each.key].id

View File

@ -1,7 +1,7 @@
module "control_planes" {
source = "./modules/host"
for_each = local.control_plane_nodepools
for_each = local.control_plane_nodes
name = "${var.use_cluster_name_in_node_name ? "${var.cluster_name}-" : ""}${each.value.nodepool_name}"
ssh_keys = [hcloud_ssh_key.k3s.id]
@ -29,7 +29,7 @@ module "control_planes" {
}
resource "null_resource" "control_planes" {
for_each = local.control_plane_nodepools
for_each = local.control_plane_nodes
triggers = {
control_plane_id = module.control_planes[each.key].id

View File

@ -19,8 +19,8 @@ resource "null_resource" "first_control_plane" {
kube-controller-manager-arg = "flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins"
node-ip = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
advertise-address = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
node-taint = local.control_plane_nodepools[keys(module.control_planes)[0]].taints
node-label = local.control_plane_nodepools[keys(module.control_planes)[0]].labels
node-taint = local.control_plane_nodes[keys(module.control_planes)[0]].taints
node-label = local.control_plane_nodes[keys(module.control_planes)[0]].labels
disable-network-policy = var.cni_plugin == "calico" ? true : var.disable_network_policy
},
var.cni_plugin == "calico" ? {

View File

@ -172,7 +172,7 @@ locals {
install_k3s_server = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC=server sh -"], local.apply_k3s_selinux)
install_k3s_agent = concat(local.common_commands_install_k3s, ["curl -sfL https://get.k3s.io | INSTALL_K3S_SKIP_START=true INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_CHANNEL=${var.initial_k3s_channel} INSTALL_K3S_EXEC=agent sh -"], local.apply_k3s_selinux)
control_plane_nodepools = merge([
control_plane_nodes = merge([
for pool_index, nodepool_obj in var.control_plane_nodepools : {
for node_index in range(nodepool_obj.count) :
format("%s-%s-%s", pool_index, node_index, nodepool_obj.name) => {
@ -186,7 +186,7 @@ locals {
}
]...)
agent_nodepools = merge([
agent_nodes = merge([
for pool_index, nodepool_obj in var.agent_nodepools : {
for node_index in range(nodepool_obj.count) :
format("%s-%s-%s", pool_index, node_index, nodepool_obj.name) => {

View File

@ -16,7 +16,7 @@ resource "hcloud_network" "k3s" {
# We start from the end of the subnets cidr array,
# as we would have fewer control plane nodepools than agent ones.
resource "hcloud_network_subnet" "control_plane" {
count = length(local.control_plane_nodepools)
count = length(var.control_plane_nodepools)
network_id = hcloud_network.k3s.id
type = "cloud"
network_zone = var.network_region
@ -25,7 +25,7 @@ resource "hcloud_network_subnet" "control_plane" {
# Here we start at the beginning of the subnets cidr array
resource "hcloud_network_subnet" "agent" {
count = length(local.agent_nodepools)
count = length(var.agent_nodepools)
network_id = hcloud_network.k3s.id
type = "cloud"
network_zone = var.network_region

View File

@ -27,7 +27,7 @@ network_region = "eu-central" # change to `us-east` if location is ash
# Once the cluster is created, you can change nodepool count, and even set it to 0 (in the case of the first control-plane nodepool, the minimum is 1),
# you can also rename it (if the count is taken to 0), but do not remove a nodepool from the list after the cluster is created.
# The only nodepools that are safe to remove from the list when you edit it, are the ones at the end of the lists. This is due to how IPs are allocated.
# The only nodepools that are safe to remove from the list when you edit it, are the ones at the end of the lists. This is due to how subnets and IPs are allocated (FILO).
# You can however freely add other nodepools at the end of each list if you want! The maximum number of nodepools you can create, combined for both lists is 255.
# Also, before decreasing the count of any nodepool to 0, it's important to drain and cordon the nodes in question, otherwise it will leave your cluster in a bad state.
@ -161,7 +161,7 @@ load_balancer_location = "fsn1"
# If you want to configure a different CNI for k3s, use this flag
# possible values: flannel (Default), calico
# cni_plugin = "flannel"
# cni_plugin = "calico"
# If you want to disable the k3s default network policy controller, use this flag
# Calico overrides this value to true automatically