fixed single node cluster
commit f1a98c7290 (parent d3bb8b2605)
@@ -170,14 +170,9 @@ spec:
<summary>Single-node cluster</summary>
Running a development cluster on a single node, without any high-availability is possible as well.
In this case, we don't deploy an external load-balancer, but use [k3s service load balancer](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) on the host itself and open up port 80 & 443 in the firewall.
Running a development cluster on a single node, without any high availability, is possible as well. You need one control plane nodepool with a count of 1, and one agent nodepool with a count of 0.
``` terraform
control_plane_count = 1
allow_scheduling_on_control_plane = true
agent_nodepools = []
```
In this case, we don't deploy an external load-balancer, but use the default [k3s service load balancer](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) on the host itself and open up ports 80 & 443 in the firewall (done automatically).
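For illustration only, the sketch below shows roughly what "opening ports 80 & 443" amounts to, expressed with the hcloud provider's `hcloud_firewall` resource; the name and rules are assumptions for this example and are not taken from this module, which handles the equivalent automatically.

``` terraform
# Hypothetical firewall sketch: allow HTTP/HTTPS in, so the k3s service
# load balancer running on the single node can receive traffic.
resource "hcloud_firewall" "ingress_http_https" {
  name = "allow-http-https" # illustrative name

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "80"
    source_ips = ["0.0.0.0/0", "::/0"]
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "443"
    source_ips = ["0.0.0.0/0", "::/0"]
  }
}
```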
</details>
@@ -46,7 +46,7 @@ resource "null_resource" "control_planes" {
provisioner "file" {
content = yamlencode({
node-name = module.control_planes[each.key].name
server = "https://${module.control_planes[each.key].private_ipv4_address == module.control_planes[keys(module.control_planes)[0]].private_ipv4_address && length(module.control_planes) > 1 ? module.control_planes[keys(module.control_planes)[1]].private_ipv4_address : module.control_planes[keys(module.control_planes)[0]].private_ipv4_address}:6443"
server = length(module.control_planes) == 1 ? null : "https://${module.control_planes[each.key].private_ipv4_address == module.control_planes[keys(module.control_planes)[0]].private_ipv4_address ? module.control_planes[keys(module.control_planes)[1]].private_ipv4_address : module.control_planes[keys(module.control_planes)[0]].private_ipv4_address}:6443"
token = random_password.k3s_token.result
disable-cloud-controller = true
disable = local.disable_extras
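The new `server` expression above is the core of the fix: with a single control plane there is no peer to join, so k3s must not be given a server URL at all. Below is a minimal standalone sketch of that selection logic, using made-up local values in place of `module.control_planes`; it is an illustration of the ternary, not the module's actual code.

``` terraform
locals {
  # Assumed example data standing in for the control planes' private IPs.
  cp_private_ips = ["10.0.0.2", "10.0.0.3", "10.0.0.4"]
  this_node_ip   = "10.0.0.2"

  # Single node: no server to join (null). Otherwise point at a peer that
  # is not this node, falling back to the first control plane in the list.
  k3s_server = length(local.cp_private_ips) == 1 ? null : format(
    "https://%s:6443",
    local.this_node_ip == local.cp_private_ips[0] ? local.cp_private_ips[1] : local.cp_private_ips[0]
  )
}
```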
@@ -54,7 +54,6 @@ resource "null_resource" "control_planes" {
kubelet-arg = "cloud-provider=external"
node-ip = module.control_planes[each.key].private_ipv4_address
advertise-address = module.control_planes[each.key].private_ipv4_address
tls-san = module.control_planes[each.key].ipv4_address
node-label = each.value.labels
node-taint = each.value.taints
})
init.tf
@@ -18,7 +18,6 @@ resource "null_resource" "first_control_plane" {
kubelet-arg = "cloud-provider=external"
node-ip = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
advertise-address = module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
tls-san = module.control_planes[keys(module.control_planes)[0]].ipv4_address
node-taint = local.control_plane_nodepools[keys(module.control_planes)[0]].taints
node-label = local.control_plane_nodepools[keys(module.control_planes)[0]].labels
})
@@ -211,6 +211,8 @@ locals {
default_agent_labels = concat([], var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : [])
default_control_plane_labels = concat([], var.automatically_upgrade_k3s ? ["k3s_upgrade=true"] : [])
allow_scheduling_on_control_plane = local.is_single_node_cluster ? true : var.allow_scheduling_on_control_plane
# Default k3s node taints
default_control_plane_taints = concat([], var.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"])
default_control_plane_taints = concat([], local.allow_scheduling_on_control_plane ? [] : ["node-role.kubernetes.io/master:NoSchedule"])
}
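Taken together, the two changed locals mean that a single-node cluster always allows scheduling on its control plane, which in turn drops the `NoSchedule` taint. The sketch below shows how the values resolve, with assumed example inputs in place of `local.is_single_node_cluster` and `var.allow_scheduling_on_control_plane`.

``` terraform
locals {
  # Example: one control plane nodepool with count 1 and no agents.
  example_is_single_node       = true
  example_var_allow_scheduling = false # whatever the user set

  # Forced to true on a single node; otherwise the user's choice wins.
  example_allow_scheduling = local.example_is_single_node ? true : local.example_var_allow_scheduling

  # With scheduling allowed, the default control-plane taint list is empty.
  example_control_plane_taints = concat(
    [],
    local.example_allow_scheduling ? [] : ["node-role.kubernetes.io/master:NoSchedule"]
  )
}
```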
@@ -19,7 +19,9 @@ output "agents_public_ipv4" {
output "load_balancer_public_ipv4" {
description = "The public IPv4 address of the Hetzner load balancer"
value = local.is_single_node_cluster ? module.control_planes[0].ipv4_address : var.traefik_enabled == false ? null : data.hcloud_load_balancer.traefik[0].ipv4
value = local.is_single_node_cluster ? [
for obj in module.control_planes : obj.ipv4_address
][0] : var.traefik_enabled == false ? null : data.hcloud_load_balancer.traefik[0].ipv4
}
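The reworked output avoids indexing `module.control_planes[0]`: a module instantiated with `for_each` is a map keyed by strings (here, node names), so `[0]` is not a valid lookup. Converting the values to a list with a for expression makes `[0]` legal. A small standalone sketch of the same pattern, using assumed data instead of the real module:

``` terraform
locals {
  # Stand-in for module.control_planes (a map keyed by node name).
  example_control_planes = {
    "control-plane-fsn1-1" = { ipv4_address = "192.0.2.10" }
  }

  # A for expression over the map's values yields a list, so [0] works and
  # returns the only node's public IP on a single-node cluster.
  example_first_cp_ip = [for obj in local.example_control_planes : obj.ipv4_address][0]
}
```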
output "kubeconfig_file" {
@@ -28,8 +28,9 @@ network_region = "eu-central" # change to `us-east` if location is ash
# You can freely add other nodepools at the end of the list if you want, and increase the count of any.
# Also, before decreasing the count of any nodepool to 0, it's important to drain and cordon the nodes in question, otherwise it will leave your cluster in a bad state.
# Before initializing the cluster, you can change all parameters and add or remove any nodepools.
# If you want to have a single node cluster, just have 1 control plane nodepools with a count of 1.
# Before initializing the cluster, you can change all parameters and add or remove any nodepools.
# If you want to have a single node cluster, just have 1 control plane nodepool with a count of 1, and one agent nodepool with a count of 0.
# Example below:
@@ -59,7 +60,7 @@ agent_nodepools = [
location = "fsn1",
labels = [],
taints = [],
count = 2
count = 1
},
{
name = "agent-large",
@@ -106,7 +107,7 @@ load_balancer_location = "fsn1"
# metrics_server_enabled = false
# If you want to allow non-control-plane workloads to run on the control-plane nodes, set "true" below. The default is "false".
# Also good for single node clusters.
# True by default for single node clusters.
# allow_scheduling_on_control_plane = true
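As a hedged illustration of the single-node case described in the comments above, the agent nodepool is simply kept at a count of 0 and scheduling on the control plane then defaults to true. The attribute names are taken from the example nodepools earlier in this file; a real nodepool entry has further attributes (such as the server type) that are omitted here.

``` terraform
# Hypothetical single-node values, for illustration only.
agent_nodepools = [
  {
    name     = "agent-small",
    location = "fsn1",
    labels   = [],
    taints   = [],
    count    = 0
  }
]

# No need to set allow_scheduling_on_control_plane; it is true by default
# for single node clusters.
```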
# If you want to disable automatic upgrade of k3s, you can set this to false. The default is "true".