Merge pull request #113 from kube-hetzner/name-suffixes
add random pet names for cluster & nodes
commit c2f8c747c5
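
For context before the diff: random_pet, from the hashicorp/random provider, generates a short human-readable name. With length = 1 and a prefix it produces ids like "k3s-walrus" (illustrative), and the id stays stable across applies until the resource is replaced. A minimal sketch of that behaviour, with illustrative resource and output names:

    terraform {
      required_providers {
        random = {
          source = "hashicorp/random"
        }
      }
    }

    # Yields something like "k3s-walrus"; stable across applies until the
    # resource is tainted, replaced, or its keepers change.
    resource "random_pet" "example" {
      length = 1
      prefix = "k3s"
    }

    output "example_id" {
      value = random_pet.example.id
    }
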
2  .gitignore  (vendored)

@@ -6,4 +6,4 @@ kubeconfig.yaml-e
 terraform.tfvars
 plans-custom.yaml
 traefik-custom.yaml
 kured-custom.yaml

@@ -3,7 +3,7 @@ module "agents" {

   for_each = local.agent_nodepools

-  name = each.key
+  name = "${var.use_cluster_name_in_node_name ? "${random_pet.cluster.id}-" : ""}${each.value.nodepool_name}"
   ssh_keys    = [hcloud_ssh_key.k3s.id]
   public_key  = var.public_key
   private_key = var.private_key

@@ -23,8 +23,6 @@ module "agents" {
     "engine" = "k3s"
   }

-  hcloud_token = var.hcloud_token
-
   depends_on = [
     hcloud_network_subnet.subnet
   ]

@@ -1,9 +1,8 @@
 module "control_planes" {
   source = "./modules/host"

   count = var.control_plane_count
-  name  = "control-plane-${count.index}"
-
+  name  = "${var.use_cluster_name_in_node_name ? "${random_pet.cluster.id}-" : ""}control-plane"
   ssh_keys    = [hcloud_ssh_key.k3s.id]
   public_key  = var.public_key
   private_key = var.private_key

@@ -23,8 +22,6 @@ module "control_planes" {
     "engine" = "k3s"
   }

-  hcloud_token = var.hcloud_token
-
   depends_on = [
     hcloud_network_subnet.subnet
   ]

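The new name expression in the agents and control_planes hunks above is a plain conditional: when use_cluster_name_in_node_name is true, the cluster's random_pet id is prepended to the base name. A minimal self-contained sketch of how it resolves (the cluster id and nodepool name below are made-up stand-ins):

    variable "use_cluster_name_in_node_name" {
      type    = bool
      default = true
    }

    # Hypothetical stand-ins for random_pet.cluster.id and each.value.nodepool_name.
    locals {
      cluster_id    = "k3s-walrus"
      nodepool_name = "big"

      # true  -> "k3s-walrus-big"; false -> "big"
      base_name = "${var.use_cluster_name_in_node_name ? "${local.cluster_id}-" : ""}${local.nodepool_name}"
    }

    output "base_name" {
      value = local.base_name
    }

The per-server random_pet suffix introduced in modules/host later in this diff is then appended to this base name to form the final hostname.
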
1  init.tf

@@ -94,6 +94,7 @@ resource "null_resource" "kustomization" {
     content = local.is_single_node_cluster ? "" : templatefile(
       "${path.module}/templates/traefik_config.yaml.tpl",
       {
+        name                       = "${random_pet.cluster.id}-traefik"
         load_balancer_disable_ipv6 = var.load_balancer_disable_ipv6
         load_balancer_type         = var.load_balancer_type
         location                   = var.location

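The value passed here feeds the ${name} placeholder that this PR adds to the Traefik template further down, and the same string is what the hcloud_load_balancer data source in main.tf looks up, so all three stay in sync. A tiny illustrative sketch (the cluster id is a made-up stand-in):

    # Hypothetical stand-in for random_pet.cluster.id.
    locals {
      cluster_id = "k3s-walrus"

      # Passed to the Traefik template as "name" and used as the load balancer
      # name that data "hcloud_load_balancer" "traefik" searches for.
      traefik_lb_name = "${local.cluster_id}-traefik" # -> "k3s-walrus-traefik"
    }

    output "traefik_lb_name" {
      value = local.traefik_lb_name
    }
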
@@ -173,6 +173,7 @@ locals {
     for nodepool_name, nodepool_obj in var.agent_nodepools : {
       for index in range(nodepool_obj.count) :
       format("%s-%s", nodepool_name, index) => {
+        nodepool_name : nodepool_name,
         server_type : nodepool_obj.server_type,
         subnet : nodepool_obj.subnet,
         index : index

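Carrying nodepool_name in each map entry is what lets agents.tf build the new node names from each.value.nodepool_name instead of the map key. A self-contained sketch of the same construction with made-up nodepools (the surrounding merge(...) is an assumption about how the per-pool maps are combined):

    locals {
      # Made-up nodepools, for illustration only.
      agent_nodepools = {
        big   = { server_type = "cpx31", count = 2, subnet = "10.1.0.0/16" }
        small = { server_type = "cpx11", count = 1, subnet = "10.2.0.0/16" }
      }

      agents = merge([
        for nodepool_name, nodepool_obj in local.agent_nodepools : {
          for index in range(nodepool_obj.count) :
          format("%s-%s", nodepool_name, index) => {
            nodepool_name : nodepool_name,
            server_type : nodepool_obj.server_type,
            subnet : nodepool_obj.subnet,
            index : index
          }
        }
      ]...)
    }

    # Keys are "big-0", "big-1", "small-0"; each value now also carries nodepool_name.
    output "agents" {
      value = local.agents
    }
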
15  main.tf

@@ -1,15 +1,20 @@
+resource "random_pet" "cluster" {
+  length = 1
+  prefix = var.cluster_prefix
+}
+
 resource "random_password" "k3s_token" {
   length  = 48
   special = false
 }

 resource "hcloud_ssh_key" "k3s" {
-  name       = "k3s"
+  name       = random_pet.cluster.id
   public_key = local.ssh_public_key
 }

 resource "hcloud_network" "k3s" {
-  name     = "k3s"
+  name     = random_pet.cluster.id
   ip_range = var.network_ipv4_range
 }

@@ -32,7 +37,7 @@ resource "hcloud_network_subnet" "subnet" {
 }

 resource "hcloud_firewall" "k3s" {
-  name = "k3s"
+  name = random_pet.cluster.id

   dynamic "rule" {
     for_each = concat(local.base_firewall_rules, var.extra_firewall_rules)

@@ -47,7 +52,7 @@ resource "hcloud_firewall" "k3s" {
 }

 resource "hcloud_placement_group" "k3s" {
-  name = "k3s"
+  name = random_pet.cluster.id
   type = "spread"
   labels = {
     "provisioner" = "terraform",

@@ -57,7 +62,7 @@ resource "hcloud_placement_group" "k3s" {

 data "hcloud_load_balancer" "traefik" {
   count = local.is_single_node_cluster ? 0 : 1
-  name  = "traefik"
+  name  = "${random_pet.cluster.id}-traefik"

   depends_on = [null_resource.kustomization]
 }

@@ -10,4 +10,7 @@ locals {
   ssh_identity_file = var.private_key == null ? var.public_key : var.private_key
   # shared flags for ssh to ignore host keys, to use our ssh identity file for all connections during provisioning.
   ssh_args = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${local.ssh_identity_file}"
+
+  # the hosts name with its unique suffix attached
+  name = "${var.name}-${random_pet.server.id}"
 }

@@ -1,5 +1,25 @@
+resource "random_pet" "server" {
+  length = 1
+  keepers = {
+    # We re-create the id (and server) whenever one of those attributes
+    # changes. This should include all input variables to this module,
+    # but NO SENSITIVE values as they might be logged here.
+    name                   = var.name
+    public_key             = var.public_key
+    additional_public_keys = join(",", var.additional_public_keys)
+    ssh_keys               = join(",", var.ssh_keys)
+    firewall_ids           = join(",", var.firewall_ids)
+    placement_group_id     = var.placement_group_id
+    labels                 = join(",", [for k, v in var.labels : "${k}=${v}"])
+    location               = var.location
+    ipv4_subnet_id         = var.ipv4_subnet_id
+    private_ipv4           = var.private_ipv4
+    server_type            = var.server_type
+  }
+}
+
 resource "hcloud_server" "server" {
-  name = var.name
+  name = local.name

   image  = "ubuntu-20.04"
   rescue = "linux64"

@@ -90,7 +110,7 @@ data "template_cloudinit_config" "config" {
     content = templatefile(
       "${path.module}/templates/userdata.yaml.tpl",
       {
-        hostname          = var.name
+        hostname          = local.name
         sshAuthorizedKeys = concat([local.ssh_public_key], var.additional_public_keys)
       }
     )

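The keepers block is the random provider's standard way of tying a generated value's lifetime to other inputs: if any keeper value changes, the random_pet is replaced, the derived local.name changes, and the server that embeds that name is re-created. A minimal self-contained sketch of the same idea (the variable and names are illustrative, not the module's full keeper set):

    variable "server_type" {
      type    = string
      default = "cx21"
    }

    resource "random_pet" "server" {
      length = 1
      keepers = {
        # Changing var.server_type forces a new pet id on the next apply.
        server_type = var.server_type
      }
    }

    locals {
      # Anything built from the id (here, a hostname) changes along with it.
      name = "agent-${random_pet.server.id}"
    }

    output "server_name" {
      value = local.name
    }
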
@@ -1,9 +1,3 @@
-variable "hcloud_token" {
-  description = "Hetzner Cloud API Token"
-  type        = string
-  sensitive   = true
-}
-
 variable "name" {
   description = "Host name"
   type        = string

@@ -1,3 +1,8 @@
+output "cluster_name" {
+  value       = random_pet.cluster.id
+  description = "Shared suffix for all resources belonging to this cluster."
+}
+
 output "control_planes_public_ipv4" {
   value       = module.control_planes.*.ipv4_address
   description = "The public IPv4 addresses of the controlplane server."

@@ -9,7 +9,7 @@ spec:
     enabled: true
     type: LoadBalancer
     annotations:
-      "load-balancer.hetzner.cloud/name": "traefik"
+      "load-balancer.hetzner.cloud/name": ${name}
      # make hetzners load-balancer connect to our nodes via our private k3s
       "load-balancer.hetzner.cloud/use-private-ip": "true"
      # keep hetzner-ccm from exposing our private ingress ip, which in general isn't routeable from the public internet

@@ -31,4 +31,4 @@ spec:
     - "--certificatesresolvers.le.acme.tlschallenge=true"
     - "--certificatesresolvers.le.acme.email=${traefik_acme_email}"
     - "--certificatesresolvers.le.acme.storage=/data/acme.json"
     %{ endif ~}

@@ -77,6 +77,12 @@ load_balancer_type = "lb11"
 # Allows you to specify either stable, latest, or testing (defaults to stable), see https://rancher.com/docs/k3s/latest/en/upgrades/basic/
 # initial_k3s_channel = "latest"

+# Whether to use the cluster name in the node name, i.e. add the prefix k3s-(cluster_name)- to the nodes? The default is "true".
+# use_cluster_name_in_node_name = false
+
+# Prefix for the cluster name, by default "k3s"
+# cluster_prefix = ""
+
 # Adding extra firewall rules, like opening a port
 # In this example with allow port TCP 5432 for a Postgres service we will open via a nodeport
 # More info on the format here https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs/resources/firewall

13  variables.tf

@@ -121,7 +121,20 @@ variable "extra_firewall_rules" {
   description = "Additional firewall rules to apply to the cluster"
 }

+variable "use_cluster_name_in_node_name" {
+  type        = bool
+  default     = true
+  description = "Whether to use the cluster name in the node name"
+}
+
+variable "cluster_prefix" {
+  type        = string
+  default     = "k3s"
+  description = "Prefix for the cluster name"
+}
+
 variable "traefik_additional_options" {
   type    = list(string)
   default = []
 }

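A hedged example of how the two new variables might be set in a user's terraform.tfvars (values are illustrative; both have defaults, so neither is required):

    # Keep the shared cluster name in node names, but with a custom prefix,
    # producing nodes like "prod-walrus-control-plane".
    cluster_prefix = "prod"

    # Or drop the cluster name from node names entirely.
    # use_cluster_name_in_node_name = false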