refactor: migrar GitLab → Gitea (aulas 10, 11, 13)

- Aula 10: Gitea + Registry + Actions + Runner (substituiu GitLab)
  - gitea-values.yaml: PostgreSQL standalone, Valkey standalone, ~800Mi RAM
  - setup.sh/cleanup.sh: namespace gitea, Helm gitea-charts/gitea + actions
  - README.md: documentação completa com de→para (GitLab/Harbor/Tekton vs Gitea)

- Aula 11: ArgoCD (GitOps) — removido GitLab Runner (runner vive na aula-10)
  - setup.sh: só ArgoCD, integração SSH com Gitea
  - node-bugado/.gitea/workflows/ci.yml: pipeline convertida

- Aula 13: Container Factory — atualizado para Gitea
  - setup.sh/cleanup.sh: referências GitLab → Gitea
  - pipelines/postgresql/ci.yml: Gitea Actions workflow
  - README.md: conexão com act_runner explicada

- CLAUDE.md: tabela de aulas atualizada
This commit is contained in:
ArgoCD Setup
2026-03-14 01:44:30 -03:00
parent ff7af56c30
commit d380cd8585
35 changed files with 3374 additions and 1202 deletions

View File

@@ -0,0 +1,77 @@
# =============================================================================
# CI Pipeline: DevOps Toolbox (eStargz + GZIP)
# =============================================================================
# Builds the image in both compression formats for the lazy-pulling benchmark.
# =============================================================================
stages:
  - build
  - push

variables:
  REGISTRY: registry.kube.quest
  IMAGE_NAME: factory/devops-toolbox
  # Talk to the dind service over TLS; certificates are shared via /certs.
  DOCKER_HOST: tcp://docker:2376
  DOCKER_TLS_CERTDIR: "/certs"
  DOCKER_TLS_VERIFY: "1"
  DOCKER_CERT_PATH: "$DOCKER_TLS_CERTDIR/client"

# -----------------------------------------------------------------------------
# Build eStargz (lazy pulling)
# -----------------------------------------------------------------------------
build-estargz:
  stage: build
  # The job container only needs the Docker CLI — the daemon runs in the
  # docker:27-dind *service* (reached via DOCKER_HOST). Using the dind image
  # here as well would start a second, unused daemon in the job container.
  image: docker:27-cli
  services:
    - docker:27-dind
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $REGISTRY
    - docker buildx create --use --name multiarch --driver docker-container
  script:
    - |
      docker buildx build \
        --platform linux/arm64,linux/amd64 \
        --output type=image,name=${REGISTRY}/${IMAGE_NAME}:latest,push=true,compression=estargz,force-compression=true,oci-mediatypes=true \
        --cache-from type=registry,ref=${REGISTRY}/${IMAGE_NAME}:cache \
        --cache-to type=registry,ref=${REGISTRY}/${IMAGE_NAME}:cache,mode=max \
        .
  rules:
    - if: $CI_COMMIT_BRANCH == "main"

# -----------------------------------------------------------------------------
# Build GZIP (traditional format, benchmark baseline)
# -----------------------------------------------------------------------------
build-gzip:
  stage: build
  # CLI-only job image; the daemon runs in the dind service (see note above
  # in build-estargz).
  image: docker:27-cli
  services:
    - docker:27-dind
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $REGISTRY
    - docker buildx create --use --name multiarch --driver docker-container
  script:
    - |
      docker buildx build \
        --platform linux/arm64,linux/amd64 \
        --output type=image,name=${REGISTRY}/${IMAGE_NAME}:gzip,push=true,compression=gzip,oci-mediatypes=true \
        --cache-from type=registry,ref=${REGISTRY}/${IMAGE_NAME}:cache \
        .
  rules:
    - if: $CI_COMMIT_BRANCH == "main"

# -----------------------------------------------------------------------------
# Tag as a version
# -----------------------------------------------------------------------------
push-tags:
  stage: push
  image: docker:27-cli
  services:
    - docker:27-dind
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $REGISTRY
  script:
    # imagetools re-tags the already-pushed manifest registry-side (no pull)
    - docker buildx imagetools create -t ${REGISTRY}/${IMAGE_NAME}:v1 ${REGISTRY}/${IMAGE_NAME}:latest
  rules:
    - if: $CI_COMMIT_BRANCH == "main"
  needs:
    - build-estargz

View File

@@ -0,0 +1,90 @@
# =============================================================================
# DevOps Toolbox - eStargz demonstration image
# =============================================================================
# Large image (~650MB) with several tools, each installed in its own layer.
# Ideal for demonstrating lazy pulling: typically only ONE tool is used per
# run, so only that tool's layer needs to be fetched.
# =============================================================================
FROM alpine:3.21
LABEL maintainer="workshop"
LABEL description="DevOps toolbox for eStargz lazy pulling demonstration"
# -----------------------------------------------------------------------------
# Layer 1: basic tooling (~50MB)
# -----------------------------------------------------------------------------
RUN apk add --no-cache \
bash \
curl \
wget \
jq \
git \
openssh-client \
ca-certificates \
unzip
# -----------------------------------------------------------------------------
# Layer 2: Terraform (~100MB)
# -----------------------------------------------------------------------------
# TARGETARCH is supplied automatically by buildx (amd64/arm64); once declared
# it stays in scope for all subsequent instructions of this stage.
ARG TERRAFORM_VERSION=1.9.8
ARG TARGETARCH
RUN wget -q "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_${TARGETARCH}.zip" -O /tmp/terraform.zip && \
unzip -q /tmp/terraform.zip -d /usr/local/bin/ && \
rm /tmp/terraform.zip && \
chmod +x /usr/local/bin/terraform
# -----------------------------------------------------------------------------
# Layer 3: OpenTofu (~100MB)
# -----------------------------------------------------------------------------
ARG TOFU_VERSION=1.8.8
RUN wget -q "https://github.com/opentofu/opentofu/releases/download/v${TOFU_VERSION}/tofu_${TOFU_VERSION}_linux_${TARGETARCH}.zip" -O /tmp/tofu.zip && \
unzip -q /tmp/tofu.zip -d /usr/local/bin/ && \
rm /tmp/tofu.zip && \
chmod +x /usr/local/bin/tofu
# -----------------------------------------------------------------------------
# Layer 4: kubectl (~50MB)
# -----------------------------------------------------------------------------
ARG KUBECTL_VERSION=1.31.4
RUN curl -sLO "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" && \
chmod +x kubectl && \
mv kubectl /usr/local/bin/
# -----------------------------------------------------------------------------
# Layer 5: Helm (~50MB)
# -----------------------------------------------------------------------------
ARG HELM_VERSION=3.16.4
RUN wget -q "https://get.helm.sh/helm-v${HELM_VERSION}-linux-${TARGETARCH}.tar.gz" -O /tmp/helm.tar.gz && \
tar -xzf /tmp/helm.tar.gz -C /tmp && \
mv /tmp/linux-${TARGETARCH}/helm /usr/local/bin/ && \
rm -rf /tmp/helm.tar.gz /tmp/linux-${TARGETARCH}
# -----------------------------------------------------------------------------
# Layer 6: AWS CLI (~200MB)
# -----------------------------------------------------------------------------
RUN apk add --no-cache aws-cli
# -----------------------------------------------------------------------------
# Layer 7: Python + Ansible (~150MB)
# -----------------------------------------------------------------------------
# --break-system-packages is required to pip-install into Alpine's
# system-managed Python (PEP 668).
RUN apk add --no-cache python3 py3-pip && \
pip3 install --no-cache-dir ansible --break-system-packages --quiet
# -----------------------------------------------------------------------------
# Layer 8: k9s (~50MB)
# -----------------------------------------------------------------------------
ARG K9S_VERSION=0.32.7
RUN wget -q "https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_Linux_${TARGETARCH}.tar.gz" -O /tmp/k9s.tar.gz && \
tar -xzf /tmp/k9s.tar.gz -C /usr/local/bin/ k9s && \
rm /tmp/k9s.tar.gz
# -----------------------------------------------------------------------------
# Entrypoint
# -----------------------------------------------------------------------------
# entrypoint.sh dispatches to the selected tool; with no args it shows help.
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
WORKDIR /workspace
ENTRYPOINT ["/entrypoint.sh"]
CMD ["--help"]

View File

@@ -0,0 +1,64 @@
#!/bin/bash
# =============================================================================
# DevOps Toolbox Entrypoint
# =============================================================================
# Dispatches to the requested tool, or prints help / version information.
# =============================================================================
set -e

# Single source of truth for the tools shipped in the image. The dispatch at
# the bottom iterates over this list, so adding a tool here is enough to make
# it callable (previously the list was duplicated in a case pattern below).
TOOLS="terraform tofu kubectl helm aws ansible k9s"

# Print usage help (user-facing text intentionally in Portuguese).
show_help() {
  echo "DevOps Toolbox - Demonstração de eStargz Lazy Pulling"
  echo ""
  echo "Uso: docker run toolbox <ferramenta> [argumentos]"
  echo ""
  echo "Ferramentas disponíveis:"
  echo " terraform - Infrastructure as Code"
  echo " tofu - OpenTofu (Terraform fork)"
  echo " kubectl - Kubernetes CLI"
  echo " helm - Kubernetes package manager"
  echo " aws - AWS CLI"
  echo " ansible - Configuration management"
  echo " k9s - Kubernetes TUI"
  echo ""
  echo "Exemplos:"
  echo " docker run toolbox terraform version"
  echo " docker run toolbox kubectl version --client"
  echo " docker run toolbox helm version"
  echo ""
  echo "Com eStargz, apenas a camada da ferramenta usada é baixada!"
}

# Print the installed version of each tool. Every probe tolerates a missing
# binary via "|| echo", which also keeps `set -e` from aborting the script.
show_versions() {
  echo "Versões instaladas:"
  echo ""
  terraform version 2>/dev/null | head -1 || echo "terraform: não disponível"
  tofu version 2>/dev/null | head -1 || echo "tofu: não disponível"
  kubectl version --client 2>/dev/null | head -1 || echo "kubectl: não disponível"
  helm version --short 2>/dev/null || echo "helm: não disponível"
  aws --version 2>/dev/null || echo "aws: não disponível"
  ansible --version 2>/dev/null | head -1 || echo "ansible: não disponível"
  k9s version --short 2>/dev/null || echo "k9s: não disponível"
}

# Argument dispatch
case "$1" in
  --help|-h|"")
    show_help
    ;;
  --versions|-v)
    show_versions
    ;;
  *)
    # Exec the tool if (and only if) it appears in the allow-list above;
    # exec replaces this shell, forwarding all arguments untouched.
    for tool in $TOOLS; do
      if [ "$tool" = "$1" ]; then
        exec "$@"
      fi
    done
    echo "Erro: Ferramenta '$1' não reconhecida"
    echo ""
    show_help
    exit 1
    ;;
esac

View File

@@ -0,0 +1,38 @@
# =============================================================================
# CI Pipeline: large-test image (eStargz vs GZIP lazy-pulling benchmark)
# =============================================================================
stages:
  - build

variables:
  REGISTRY: registry.kube.quest
  IMAGE_NAME: factory/large-test

build:
  stage: build
  # The job container only needs the Docker CLI; the daemon runs in the
  # docker:27-dind service, reached via DOCKER_HOST below.
  image: docker:27-cli
  services:
    - docker:27-dind
  variables:
    # Plain TCP to the dind service (TLS disabled).
    DOCKER_TLS_CERTDIR: ""
    DOCKER_HOST: tcp://docker:2375
  before_script:
    # Log in to the same registry the images are pushed to ($REGISTRY);
    # it was previously $CI_REGISTRY, which did not match the push target.
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $REGISTRY
    - docker buildx create --use --name builder --driver docker-container
  script:
    # Build eStargz (lazy pulling)
    - echo "Building eStargz version..."
    - |
      docker buildx build \
        --output type=image,name=${REGISTRY}/${IMAGE_NAME}:latest,push=true,compression=estargz,force-compression=true,oci-mediatypes=true \
        .
    # Traditional GZIP build
    - echo "Building GZIP version..."
    - |
      docker buildx build \
        --output type=image,name=${REGISTRY}/${IMAGE_NAME}:gzip,push=true,compression=gzip,oci-mediatypes=true \
        .
    - echo "Images pushed:"
    - echo " - ${REGISTRY}/${IMAGE_NAME}:latest (eStargz ~1.5GB)"
    - echo " - ${REGISTRY}/${IMAGE_NAME}:gzip (GZIP ~1.5GB)"
  tags:
    - kubernetes

View File

@@ -0,0 +1,34 @@
# Large test image (~1.5GB) used to benchmark lazy pulling.
FROM alpine:3.21
# Layer 1: base tools (~50MB)
RUN apk add --no-cache \
bash \
curl \
wget \
jq \
python3 \
py3-pip
# Layers 2-6: five ~300MB blobs of random data, one per layer. /dev/urandom
# output is incompressible, so each compressed layer stays ~300MB too.
# NOTE(review): random content also means every build produces different
# layers (no cache reuse) — presumably intentional for the benchmark.
RUN dd if=/dev/urandom of=/data1.bin bs=1M count=300
# Layer 3: dummy data 2 (~300MB)
RUN dd if=/dev/urandom of=/data2.bin bs=1M count=300
# Layer 4: dummy data 3 (~300MB)
RUN dd if=/dev/urandom of=/data3.bin bs=1M count=300
# Layer 5: dummy data 4 (~300MB)
RUN dd if=/dev/urandom of=/data4.bin bs=1M count=300
# Layer 6: dummy data 5 (~300MB)
RUN dd if=/dev/urandom of=/data5.bin bs=1M count=300
# Test script that only touches small files
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Simple entrypoint that does NOT access the large files. This is what makes
# the lazy-pulling test work: the container starts without needing the data.
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Minimal entrypoint that deliberately never reads the large data layers,
# so with lazy pulling the container can start before they are downloaded.
echo "Container iniciado em $(date)"
echo "Hostname: $(hostname)"
echo "Este container tem ~1.5GB de dados que NÃO são acessados no startup"
# Keep the container alive indefinitely with an idle loop.
while :; do
  sleep 3600
done

View File

@@ -0,0 +1,51 @@
# =============================================================================
# Dockerfile - PostgreSQL Production (Alpine)
# =============================================================================
#
# Custom PostgreSQL image replacing the Bitnami one.
# Tuned for production and for the eStargz format (lazy pulling).
#
# Build (eStargz format):
#   docker buildx build \
#     --output type=image,name=registry.kube.quest/factory/postgresql:17,push=true,compression=estargz,force-compression=true,oci-mediatypes=true \
#     .
#
# =============================================================================
FROM postgres:17-alpine
LABEL maintainer="workshop"
LABEL description="PostgreSQL 17 Alpine - Production Ready"
LABEL org.opencontainers.image.title="postgresql"
LABEL org.opencontainers.image.version="17"
# Default environment; PGDATA is placed in a subdirectory so the volume
# mount point itself does not have to be empty.
ENV POSTGRES_USER=postgres \
POSTGRES_DB=postgres \
PGDATA=/var/lib/postgresql/data/pgdata \
LANG=en_US.UTF-8
# Extra utilities useful for debugging/operations
RUN apk add --no-cache \
tzdata \
curl \
jq
# Directory for custom configuration files
RUN mkdir -p /etc/postgresql/postgresql.conf.d
# Production configuration (loaded via config_file in CMD below)
COPY postgresql.conf /etc/postgresql/postgresql.conf.d/00-production.conf
# Native healthcheck; shell form so ${POSTGRES_USER}/${POSTGRES_DB} expand
# at container runtime.
HEALTHCHECK --interval=30s --timeout=5s --start-period=30s --retries=3 \
CMD pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB} || exit 1
# Default PostgreSQL port
EXPOSE 5432
# Non-root user (already created by the official image)
USER postgres
# Start with the custom production configuration
CMD ["postgres", "-c", "config_file=/etc/postgresql/postgresql.conf.d/00-production.conf"]

View File

@@ -0,0 +1,77 @@
# =============================================================================
# PostgreSQL Production Configuration
# =============================================================================
# Tuned for Kubernetes containers with roughly 1GB of memory.
# Reference: https://www.postgresql.org/docs/17/runtime-config.html
# =============================================================================
# -----------------------------------------------------------------------------
# Connections
# -----------------------------------------------------------------------------
listen_addresses = '*'
max_connections = 100
superuser_reserved_connections = 3
# -----------------------------------------------------------------------------
# Memory (for a container with 1GB)
# -----------------------------------------------------------------------------
# shared_buffers: ~25% of available RAM
shared_buffers = 256MB
# effective_cache_size: ~50% of RAM (planner estimate only — not allocated)
effective_cache_size = 512MB
# work_mem: memory per sort/hash operation (scales with max_connections —
# keep modest)
work_mem = 8MB
# maintenance_work_mem: for VACUUM, CREATE INDEX, etc.
maintenance_work_mem = 64MB
# -----------------------------------------------------------------------------
# WAL (Write-Ahead Logging)
# -----------------------------------------------------------------------------
wal_level = replica
max_wal_size = 1GB
min_wal_size = 80MB
checkpoint_completion_target = 0.9
# -----------------------------------------------------------------------------
# Query Planner
# -----------------------------------------------------------------------------
# random_page_cost: low for SSD (1.1) vs HDD (4.0)
random_page_cost = 1.1
# effective_io_concurrency: high value suited to SSDs
effective_io_concurrency = 200
# default_statistics_target: more statistics = better plans
default_statistics_target = 100
# -----------------------------------------------------------------------------
# Logging
# -----------------------------------------------------------------------------
# Log to stderr (works with `kubectl logs`)
log_destination = 'stderr'
logging_collector = off
# What to log: DDL statements, plus any statement slower than 1000ms
log_statement = 'ddl'
log_min_duration_statement = 1000
# Log line format
log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '
# -----------------------------------------------------------------------------
# Locale/Encoding
# -----------------------------------------------------------------------------
timezone = 'America/Sao_Paulo'
# NOTE(review): Alpine-based images ship limited locale data — confirm this
# locale is actually available in the runtime image.
lc_messages = 'en_US.UTF-8'
# -----------------------------------------------------------------------------
# Performance
# -----------------------------------------------------------------------------
# JIT: disabled for small containers (compilation overhead outweighs gains)
jit = off
# Huge pages: require host-level configuration
huge_pages = off