Files
workshop/aula-08/main.tf
ArgoCD Setup e8c793058c refactor: remover todas as dependências do GitLab no workshop
- Aula 08: nginx-ingress TCP passthrough gitlab→gitea, comments
- Aula 09: add-client.sh API GitLab→Gitea
- Aula 11: node-bugado deployment image registry.kube.quest→gitea.kube.quest
- Aula 12: setup.sh/cleanup.sh API GitLab→Gitea, ArgoCD repoURL
- Aula 13: k8s manifests, benchmarks: registry.kube.quest→gitea.kube.quest,
           gitlab-registry→gitea-registry, GITLAB_TOKEN→GITEA_TOKEN
- Aula 14: comments GitLab→Gitea
- README raiz: arquitetura, tabela, DNS
2026-03-14 02:02:01 -03:00

552 lines
16 KiB
HCL

############################################################
# Hetzner Talos Kubernetes Cluster - Base Infrastructure
# Using custom Talos image created from ISO
############################################################
############################################################
# PROVIDERS CONFIGURATION
############################################################
# Hetzner Cloud provider, authenticated with the token supplied via variables.
provider "hcloud" {
token = var.hcloud_token
}
############################################################
# DATA SOURCES
############################################################
# Use the custom Talos image created in aula-07
# Look up the pre-built custom Talos image (created in aula-07) by its ID.
data "hcloud_image" "talos" {
id = var.talos_image_id
}
############################################################
# RANDOM RESOURCES
############################################################
# 6-character lowercase alphanumeric suffix so every cluster created from
# this config gets a unique, collision-resistant name.
resource "random_string" "cluster_id" {
length = 6
special = false
lower = true
upper = false
}
locals {
# Human-readable unique cluster name, e.g. "talos-a1b2c3".
cluster_name = "talos-${random_string.cluster_id.result}"
# 3 control planes when HA is enabled, otherwise a single node.
control_plane_count = var.enable_ha ? 3 : 1
# Endpoint: LoadBalancer IP if enabled, otherwise Floating IP
# (exactly one of the two resources exists - their count guards on
# var.enable_loadbalancer are complementary, see resources below).
cluster_endpoint_ip = var.enable_loadbalancer ? hcloud_load_balancer.cluster[0].ipv4 : hcloud_floating_ip.control_plane[0].ip_address
# Labels stamped onto every Hetzner resource for filtering and cleanup.
common_labels = {
cluster = local.cluster_name
environment = var.environment
managed_by = "terraform"
}
}
############################################################
# SSH KEY (for emergency access only)
############################################################
# All SSH keys already registered in the Hetzner project, so we can reuse an
# existing upload of var.ssh_public_key instead of creating a duplicate.
data "hcloud_ssh_keys" "all" {}

locals {
  # Fields of the public key: "<type> <base64-key> [comment]". The comment is
  # free-form and frequently differs between the local file and the copy
  # stored in Hetzner, so it must be ignored when comparing.
  ssh_key_fields = split(" ", trimspace(var.ssh_public_key))

  # Normalized "<type> <base64-key>" form with the comment dropped.
  # Previously this was done only for "ssh-rsa" keys; normalizing any key
  # with at least two fields makes matching work for ssh-ed25519,
  # ecdsa-sha2-*, etc. as well.
  ssh_key_normalized = length(local.ssh_key_fields) >= 2 ? join(" ", slice(local.ssh_key_fields, 0, 2)) : trimspace(var.ssh_public_key)

  # IDs of existing Hetzner keys that match ours, either normalized or
  # verbatim (verbatim kept for backward compatibility with exact uploads).
  ssh_key_matches = [
    for key in data.hcloud_ssh_keys.all.ssh_keys : key.id
    if key.public_key == local.ssh_key_normalized || key.public_key == var.ssh_public_key
  ]

  # Reuse the first existing match; otherwise fall back to the key resource
  # created below (hcloud_ssh_key.admin has count 1 exactly in that case).
  ssh_key_id = length(local.ssh_key_matches) > 0 ? local.ssh_key_matches[0] : hcloud_ssh_key.admin[0].id
}
# Uploaded only when no already-registered Hetzner key matched
# var.ssh_public_key (see local.ssh_key_matches above).
resource "hcloud_ssh_key" "admin" {
count = length(local.ssh_key_matches) == 0 ? 1 : 0
name = "${local.cluster_name}-admin"
public_key = var.ssh_public_key
labels = local.common_labels
}
############################################################
# NETWORK CONFIGURATION
############################################################
# Private network for node-to-node traffic (10.0.0.0/16).
resource "hcloud_network" "cluster" {
name = "${local.cluster_name}-network"
ip_range = "10.0.0.0/16"
labels = local.common_labels
}
# Subnet that hosts all node private IPs and the LB private IP
# (10.0.1.0/24 inside the eu-central network zone).
resource "hcloud_network_subnet" "cluster" {
type = "cloud"
network_id = hcloud_network.cluster.id
network_zone = "eu-central"
ip_range = "10.0.1.0/24"
}
############################################################
# FIREWALL CONFIGURATION
############################################################
# Cluster firewall applied to every node's public interface.
#
# Inbound: management APIs (Talos 50000, Kubernetes 6443), ingress
# (80/443), and the NodePort range are open to the world; everything else
# is only allowed from the cluster's own private network.
#
# Fix over previous revision: the intra-cluster rules allowed the whole
# 10.0.0.0/8 block even though the cluster network is only 10.0.0.0/16.
# They now reference hcloud_network.cluster.ip_range, which both tightens
# the rules (least privilege) and keeps them in sync with the network
# definition automatically.
resource "hcloud_firewall" "cluster" {
  name   = "${local.cluster_name}-firewall"
  labels = local.common_labels

  # Talos API (apid) - needed by talosctl from operator machines.
  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "50000"
    source_ips = ["0.0.0.0/0", "::/0"]
  }

  # Kubernetes API server.
  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "6443"
    source_ips = ["0.0.0.0/0", "::/0"]
  }

  # HTTP/HTTPS for the Ingress controller.
  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "80"
    source_ips = ["0.0.0.0/0", "::/0"]
  }
  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "443"
    source_ips = ["0.0.0.0/0", "::/0"]
  }

  # Kubernetes NodePort service range.
  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "30000-32767"
    source_ips = ["0.0.0.0/0", "::/0"]
  }

  # VXLAN for the Flannel CNI - cluster network only.
  rule {
    direction  = "in"
    protocol   = "udp"
    port       = "4789"
    source_ips = [hcloud_network.cluster.ip_range]
  }

  # All TCP between cluster nodes (etcd peering, kubelet, etc.).
  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "any"
    source_ips = [hcloud_network.cluster.ip_range]
  }

  # All UDP between cluster nodes.
  rule {
    direction  = "in"
    protocol   = "udp"
    port       = "any"
    source_ips = [hcloud_network.cluster.ip_range]
  }

  # Unrestricted outbound traffic (TCP, UDP, ICMP).
  rule {
    direction       = "out"
    protocol        = "tcp"
    port            = "any"
    destination_ips = ["0.0.0.0/0", "::/0"]
  }
  rule {
    direction       = "out"
    protocol        = "udp"
    port            = "any"
    destination_ips = ["0.0.0.0/0", "::/0"]
  }
  rule {
    direction       = "out"
    protocol        = "icmp"
    destination_ips = ["0.0.0.0/0", "::/0"]
  }
}
############################################################
# PLACEMENT GROUP (spread nodes across hosts for availability)
############################################################
resource "hcloud_placement_group" "cluster" {
name = "${local.cluster_name}-pg"
# "spread" places each member on a different physical host, so a single
# host failure cannot take down multiple nodes (it does NOT co-locate
# servers for latency).
type = "spread"
labels = local.common_labels
}
############################################################
# CONTROL PLANE NODES (3 CAX11 nodes when var.enable_ha, otherwise 1)
############################################################
# ARM64 control-plane servers booted from the custom Talos image.
resource "hcloud_server" "control_plane" {
count = local.control_plane_count
name = "${local.cluster_name}-cp-${count.index}"
server_type = "cax11"
image = data.hcloud_image.talos.id
location = "nbg1" # CAX11 only available in Nuremberg
ssh_keys = [local.ssh_key_id]
firewall_ids = [hcloud_firewall.cluster.id]
placement_group_id = hcloud_placement_group.cluster.id
labels = merge(local.common_labels, {
role = "control-plane"
node = "cp-${count.index}"
arch = "arm64"
})
public_net {
ipv4_enabled = true
ipv6_enabled = true
}
# Ignore key drift so out-of-band key changes never force a server
# replacement (NOTE(review): presumably harmless since Talos does not run
# sshd - confirm).
lifecycle {
ignore_changes = [ssh_keys]
}
}
# Attach each control plane to the private subnet with a deterministic IP:
# cp-0 = 10.0.1.10, cp-1 = 10.0.1.11, cp-2 = 10.0.1.12.
resource "hcloud_server_network" "control_plane" {
count = local.control_plane_count
server_id = hcloud_server.control_plane[count.index].id
network_id = hcloud_network.cluster.id
ip = "10.0.1.${10 + count.index}"
}
# Floating IP for stable control plane access (only if LoadBalancer is disabled)
# Stable public IPv4 for the cluster endpoint; created only when the
# LoadBalancer is disabled (the two options are mutually exclusive).
resource "hcloud_floating_ip" "control_plane" {
count = var.enable_loadbalancer ? 0 : 1
type = "ipv4"
name = "${local.cluster_name}-cp-ip"
home_location = "nbg1"
labels = local.common_labels
}
# Bind the floating IP to the first control plane (only in non-LB mode).
resource "hcloud_floating_ip_assignment" "control_plane" {
count = var.enable_loadbalancer ? 0 : 1
floating_ip_id = hcloud_floating_ip.control_plane[0].id
server_id = hcloud_server.control_plane[0].id
}
############################################################
# LOAD BALANCER (for HA access to control plane and ingress)
############################################################
# Single LB fronting both the control plane APIs (6443/50000) and the
# ingress/SSH traffic to workers; created only when enabled.
resource "hcloud_load_balancer" "cluster" {
count = var.enable_loadbalancer ? 1 : 0
name = "${local.cluster_name}-lb"
load_balancer_type = "lb11"
location = "nbg1"
labels = local.common_labels
}
# Attach the LB to the private network at the fixed IP 10.0.1.2 so it can
# reach targets via their private IPs; the subnet must exist first.
resource "hcloud_load_balancer_network" "cluster" {
count = var.enable_loadbalancer ? 1 : 0
load_balancer_id = hcloud_load_balancer.cluster[0].id
network_id = hcloud_network.cluster.id
ip = "10.0.1.2"
depends_on = [hcloud_network_subnet.cluster]
}
# Kubernetes API (6443) -> Control Planes
# TCP passthrough 6443 -> 6443 with a plain TCP health check
# (10s interval, 5s timeout, 3 retries before marking a target down).
resource "hcloud_load_balancer_service" "kubernetes_api" {
count = var.enable_loadbalancer ? 1 : 0
load_balancer_id = hcloud_load_balancer.cluster[0].id
protocol = "tcp"
listen_port = 6443
destination_port = 6443
health_check {
protocol = "tcp"
port = 6443
interval = 10
timeout = 5
retries = 3
}
}
# Talos API (50000) -> Control Planes
# TCP passthrough 50000 -> 50000 so talosctl can reach the control planes
# through the LB endpoint; same TCP health-check parameters as the others.
resource "hcloud_load_balancer_service" "talos_api" {
count = var.enable_loadbalancer ? 1 : 0
load_balancer_id = hcloud_load_balancer.cluster[0].id
protocol = "tcp"
listen_port = 50000
destination_port = 50000
health_check {
protocol = "tcp"
port = 50000
interval = 10
timeout = 5
retries = 3
}
}
# HTTP (80) -> Workers (NGINX Ingress)
# TCP passthrough 80 -> 80 toward the workers running the ingress
# controller (TLS is terminated in-cluster, not at the LB).
resource "hcloud_load_balancer_service" "http" {
count = var.enable_loadbalancer ? 1 : 0
load_balancer_id = hcloud_load_balancer.cluster[0].id
protocol = "tcp"
listen_port = 80
destination_port = 80
health_check {
protocol = "tcp"
port = 80
interval = 10
timeout = 5
retries = 3
}
}
# HTTPS (443) -> Workers (NGINX Ingress)
# TCP passthrough 443 -> 443 toward the workers (NGINX Ingress).
resource "hcloud_load_balancer_service" "https" {
count = var.enable_loadbalancer ? 1 : 0
load_balancer_id = hcloud_load_balancer.cluster[0].id
protocol = "tcp"
listen_port = 443
destination_port = 443
health_check {
protocol = "tcp"
port = 443
interval = 10
timeout = 5
retries = 3
}
}
# SSH (22) -> Workers (Gitea SSH)
# TCP passthrough 22 -> 22 so Gitea's SSH (git clone/push over SSH) is
# reachable through the LB's public IP.
resource "hcloud_load_balancer_service" "ssh" {
count = var.enable_loadbalancer ? 1 : 0
load_balancer_id = hcloud_load_balancer.cluster[0].id
protocol = "tcp"
listen_port = 22
destination_port = 22
health_check {
protocol = "tcp"
port = 22
interval = 10
timeout = 5
retries = 3
}
}
# LB Targets: Control Planes (for 6443 and 50000)
# Register every control plane as an LB target via its private IP; the LB
# must already be attached to the network for private-IP targeting to work.
resource "hcloud_load_balancer_target" "control_plane" {
count = var.enable_loadbalancer ? local.control_plane_count : 0
type = "server"
load_balancer_id = hcloud_load_balancer.cluster[0].id
server_id = hcloud_server.control_plane[count.index].id
use_private_ip = true
depends_on = [hcloud_load_balancer_network.cluster]
}
# LB Targets: Workers (for 80, 443, and 22)
# Register every worker as an LB target via its private IP (serves the
# 80/443/22 services). The LB must already be attached to the network.
resource "hcloud_load_balancer_target" "worker" {
  # Scale with the actual worker pool. The previous hard-coded
  # `? 1 : 0` happened to match the single-worker setup but would
  # silently skip any additional workers while still indexing
  # hcloud_server.worker[count.index].
  count            = var.enable_loadbalancer ? length(hcloud_server.worker) : 0
  type             = "server"
  load_balancer_id = hcloud_load_balancer.cluster[0].id
  server_id        = hcloud_server.worker[count.index].id
  use_private_ip   = true
  depends_on       = [hcloud_load_balancer_network.cluster]
}
############################################################
# WORKER NODE (Single CAX11)
############################################################
# ARM64 worker node booted from the same custom Talos image as the
# control planes (currently a single node).
resource "hcloud_server" "worker" {
count = 1
name = "${local.cluster_name}-worker-${count.index}"
server_type = "cax11"
image = data.hcloud_image.talos.id
location = "nbg1"
ssh_keys = [local.ssh_key_id]
firewall_ids = [hcloud_firewall.cluster.id]
placement_group_id = hcloud_placement_group.cluster.id
labels = merge(local.common_labels, {
role = "worker"
node = "worker-${count.index}"
arch = "arm64"
})
public_net {
ipv4_enabled = true
ipv6_enabled = true
}
# Ignore key drift so out-of-band key changes never force a replacement.
lifecycle {
ignore_changes = [ssh_keys]
}
}
# Attach workers to the private subnet with deterministic IPs starting at
# 10.0.1.20 (distinct from the control-plane range at 10.0.1.10+).
resource "hcloud_server_network" "worker" {
count = 1
server_id = hcloud_server.worker[count.index].id
network_id = hcloud_network.cluster.id
ip = "10.0.1.${20 + count.index}"
}
############################################################
# TALOS CONFIGURATION
############################################################
# Generate Talos machine secrets
# Cluster-wide Talos secrets bundle (certs/keys) pinned to a Talos version;
# every machine config and client config below derives from it.
resource "talos_machine_secrets" "this" {
talos_version = var.talos_version
}
# Generate Talos client configuration
# talosconfig for talosctl, pointing at the public cluster endpoint
# (LB IP or floating IP, depending on configuration).
data "talos_client_configuration" "this" {
cluster_name = local.cluster_name
client_configuration = talos_machine_secrets.this.client_configuration
endpoints = [local.cluster_endpoint_ip]
}
# Control plane configuration
# Rendered Talos machine config per control plane. The per-node patch
# template receives HA flags, the static etcd peer IPs (10.0.1.10+, must
# match hcloud_server_network.control_plane), and the external endpoint IP.
data "talos_machine_configuration" "control_plane" {
count = local.control_plane_count
cluster_name = local.cluster_name
machine_type = "controlplane"
cluster_endpoint = "https://${local.cluster_endpoint_ip}:6443"
machine_secrets = talos_machine_secrets.this.machine_secrets
talos_version = var.talos_version
config_patches = [
templatefile("${path.module}/talos-patches/control-plane.yaml", {
cluster_name = local.cluster_name
node_name = hcloud_server.control_plane[count.index].name
is_ha = var.enable_ha
# is_first_cp marks the node that will be bootstrapped.
is_first_cp = count.index == 0
etcd_peers = [for i in range(local.control_plane_count) : "10.0.1.${10 + i}"]
# NOTE(review): variable is named floating_ip but carries the LB IP when
# the LoadBalancer is enabled - the template presumably treats it as the
# generic external endpoint; confirm.
floating_ip = local.cluster_endpoint_ip
})
]
# Endpoint resources must exist before the config can reference their IPs.
depends_on = [
hcloud_server.control_plane,
hcloud_load_balancer.cluster,
hcloud_floating_ip_assignment.control_plane
]
}
# Worker configuration
# Rendered Talos machine config per worker; the worker patch template only
# needs the cluster and node names.
data "talos_machine_configuration" "worker" {
count = 1
cluster_name = local.cluster_name
machine_type = "worker"
cluster_endpoint = "https://${local.cluster_endpoint_ip}:6443"
machine_secrets = talos_machine_secrets.this.machine_secrets
talos_version = var.talos_version
config_patches = [
templatefile("${path.module}/talos-patches/worker.yaml", {
cluster_name = local.cluster_name
node_name = hcloud_server.worker[count.index].name
})
]
# Endpoint resources must exist before the config can reference their IPs.
depends_on = [
hcloud_server.worker,
hcloud_load_balancer.cluster,
hcloud_floating_ip_assignment.control_plane
]
}
############################################################
# APPLY TALOS CONFIGURATION
############################################################
# Push the rendered config to each control plane over its PUBLIC IP
# (the Terraform runner is outside the private network); requires the
# private-network attachment so the configured static IPs are valid.
resource "talos_machine_configuration_apply" "control_plane" {
count = local.control_plane_count
client_configuration = talos_machine_secrets.this.client_configuration
machine_configuration_input = data.talos_machine_configuration.control_plane[count.index].machine_configuration
endpoint = hcloud_server.control_plane[count.index].ipv4_address
node = hcloud_server.control_plane[count.index].ipv4_address
depends_on = [
hcloud_server_network.control_plane,
data.talos_machine_configuration.control_plane
]
}
# Push the worker config after the control planes have been configured,
# so workers join an endpoint that is already set up.
resource "talos_machine_configuration_apply" "worker" {
count = 1
client_configuration = talos_machine_secrets.this.client_configuration
machine_configuration_input = data.talos_machine_configuration.worker[count.index].machine_configuration
endpoint = hcloud_server.worker[count.index].ipv4_address
node = hcloud_server.worker[count.index].ipv4_address
depends_on = [
hcloud_server_network.worker,
data.talos_machine_configuration.worker,
talos_machine_configuration_apply.control_plane
]
}
############################################################
# BOOTSTRAP KUBERNETES
############################################################
# One-time etcd/cluster bootstrap, issued against the FIRST control plane
# only, after every node has received its machine configuration.
resource "talos_machine_bootstrap" "this" {
client_configuration = talos_machine_secrets.this.client_configuration
node = hcloud_server.control_plane[0].ipv4_address
depends_on = [
talos_machine_configuration_apply.control_plane,
talos_machine_configuration_apply.worker
]
}
############################################################
# GET KUBECONFIG
############################################################
# Retrieve the admin kubeconfig from the first control plane once the
# cluster has been bootstrapped.
resource "talos_cluster_kubeconfig" "this" {
client_configuration = talos_machine_secrets.this.client_configuration
node = hcloud_server.control_plane[0].ipv4_address
depends_on = [talos_machine_bootstrap.this]
}
############################################################
# SAVE CONFIGURATIONS
############################################################
# Write the kubeconfig next to the root module (file is created with
# restrictive permissions by local_sensitive_file).
resource "local_sensitive_file" "kubeconfig" {
# Replace the internal hostname with the LB/Floating IP for external access
content = replace(
talos_cluster_kubeconfig.this.kubeconfig_raw,
"https://${local.cluster_name}.local:6443",
"https://${local.cluster_endpoint_ip}:6443"
)
filename = "${path.root}/kubeconfig"
}
# Write the talosctl client configuration next to the root module.
resource "local_sensitive_file" "talosconfig" {
content = data.talos_client_configuration.this.talos_config
filename = "${path.root}/talosconfig"
}