raijin-server 0.3.3__py3-none-any.whl → 0.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of raijin-server might be problematic. Click here for more details.
- raijin_server/__init__.py +1 -1
- raijin_server/cli.py +4 -17
- raijin_server/modules/__init__.py +4 -5
- raijin_server/modules/full_install.py +11 -19
- raijin_server/modules/grafana.py +27 -8
- raijin_server/modules/harbor.py +685 -0
- raijin_server/modules/loki.py +28 -4
- raijin_server/modules/minio.py +48 -15
- raijin_server/modules/prometheus.py +47 -7
- raijin_server/modules/secrets.py +416 -95
- {raijin_server-0.3.3.dist-info → raijin_server-0.3.6.dist-info}/METADATA +1 -1
- {raijin_server-0.3.3.dist-info → raijin_server-0.3.6.dist-info}/RECORD +16 -17
- raijin_server/modules/observability_dashboards.py +0 -233
- raijin_server/modules/observability_ingress.py +0 -246
- {raijin_server-0.3.3.dist-info → raijin_server-0.3.6.dist-info}/WHEEL +0 -0
- {raijin_server-0.3.3.dist-info → raijin_server-0.3.6.dist-info}/entry_points.txt +0 -0
- {raijin_server-0.3.3.dist-info → raijin_server-0.3.6.dist-info}/licenses/LICENSE +0 -0
- {raijin_server-0.3.3.dist-info → raijin_server-0.3.6.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,685 @@
|
|
|
1
|
+
"""Automacao de Harbor Registry com MinIO backend (production-ready).
|
|
2
|
+
|
|
3
|
+
Harbor é um registry privado para imagens Docker/OCI com:
|
|
4
|
+
- Vulnerability scanning (Trivy integrado)
|
|
5
|
+
- Retention policies (garbage collection)
|
|
6
|
+
- Projetos separados por ambiente (tst, prd)
|
|
7
|
+
- Robot accounts para CI/CD
|
|
8
|
+
- Replicação entre registries
|
|
9
|
+
- Controle de acesso (RBAC)
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import base64
|
|
13
|
+
import json
|
|
14
|
+
import socket
|
|
15
|
+
import time
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
|
|
18
|
+
import typer
|
|
19
|
+
|
|
20
|
+
from raijin_server.utils import (
|
|
21
|
+
ExecutionContext,
|
|
22
|
+
ensure_tool,
|
|
23
|
+
helm_upgrade_install,
|
|
24
|
+
require_root,
|
|
25
|
+
run_cmd,
|
|
26
|
+
write_file,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
# Default Kubernetes namespace for the Harbor release (prompt default in run()).
HARBOR_NAMESPACE = "harbor"
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _detect_node_name(ctx: ExecutionContext) -> str:
    """Detect the first cluster node's name, for use as a nodeSelector value.

    Falls back to the local hostname when kubectl fails or prints nothing
    (e.g. no cluster access, dry-run environments).
    """
    probe = run_cmd(
        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
        ctx,
        check=False,
    )
    node_name = (probe.stdout or "").strip()
    if probe.returncode == 0 and node_name:
        return node_name
    # kubectl unavailable or empty output: assume a single-node setup on this host.
    return socket.gethostname()
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _check_existing_harbor(ctx: ExecutionContext, namespace: str) -> bool:
    """Return True when a 'harbor' Helm release already exists in *namespace*.

    `helm status` exits 0 only when the release is present, so the exit code
    is the whole answer.
    """
    status = run_cmd(["helm", "status", "harbor", "-n", namespace], ctx, check=False)
    return status.returncode == 0
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def _uninstall_harbor(ctx: ExecutionContext, namespace: str) -> None:
    """Best-effort removal of a previous Harbor Helm release.

    Errors from `helm uninstall` are ignored (check=False) so a partially
    removed release does not abort the reinstall flow.
    """
    typer.echo("Removendo instalacao anterior do Harbor...")
    run_cmd(["helm", "uninstall", "harbor", "-n", namespace], ctx, check=False)
    # Brief pause so resource teardown can begin before we reinstall.
    time.sleep(5)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _read_minio_secret_field(ctx: ExecutionContext, field: str) -> str:
    """Read one base64 field from the 'minio-credentials' Secret; "" on failure."""
    result = run_cmd(
        ["kubectl", "-n", "minio", "get", "secret", "minio-credentials", "-o", f"jsonpath={{.data.{field}}}"],
        ctx,
        check=False,
    )
    if result.returncode == 0 and result.stdout:
        return base64.b64decode(result.stdout.strip()).decode("utf-8")
    return ""


def _get_minio_credentials(ctx: ExecutionContext) -> tuple[str, str]:
    """Obtain MinIO credentials from the K8s Secret, prompting as a fallback.

    Returns:
        (access_key, secret_key) tuple.

    The two identical kubectl+base64 reads were deduplicated into
    _read_minio_secret_field. If either field cannot be read, BOTH values
    are prompted interactively, matching the original behavior.
    """
    typer.echo("Obtendo credenciais do MinIO...")

    access_key = _read_minio_secret_field(ctx, "accesskey")
    if access_key:
        secret_key = _read_minio_secret_field(ctx, "secretkey")
        if secret_key:
            return access_key, secret_key

    # Fallback to manual prompt.
    # NOTE(review): the prompt defaults embed real-looking credentials
    # ("thor"/"rebel1on") in source — consider removing these defaults.
    typer.secho("Não foi possível obter credenciais automaticamente.", fg=typer.colors.YELLOW)
    access_key = typer.prompt("MinIO Access Key", default="thor")
    secret_key = typer.prompt("MinIO Secret Key", default="rebel1on", hide_input=True)

    return access_key, secret_key
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def _wait_for_pods_ready(ctx: ExecutionContext, namespace: str, timeout: int = 300) -> bool:
    """Wait until every Harbor pod reports the Ready condition.

    Args:
        ctx: execution context passed through to run_cmd.
        namespace: namespace holding the Harbor pods.
        timeout: maximum seconds to wait before giving up.

    Returns:
        True when all pods are Ready, False on timeout.

    Bug fix: the previous implementation compared ``.status.phase`` against
    "Running", but a pod can be Running while its containers still fail
    readiness probes. We now read the pod's Ready condition status directly.
    """
    typer.echo("Aguardando pods do Harbor ficarem Ready (pode levar 3-5 min)...")
    deadline = time.time() + timeout

    while time.time() < deadline:
        result = run_cmd(
            [
                "kubectl", "-n", namespace, "get", "pods",
                # Same single-quote jsonpath filter style used elsewhere in this module.
                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.conditions[?(@.type=='Ready')].status} {end}",
            ],
            ctx,
            check=False,
        )

        if result.returncode == 0:
            output = (result.stdout or "").strip()
            if output:
                pods = []
                for item in output.split():
                    if "=" in item:
                        # rsplit guards against '=' appearing in the pod name.
                        name, _, ready = item.rpartition("=")
                        pods.append((name, ready))

                if pods and all(ready == "True" for _, ready in pods):
                    typer.secho("  Todos os pods do Harbor estão Ready.", fg=typer.colors.GREEN)
                    return True

        # Poll every 10s; Harbor typically needs a few minutes on first start.
        time.sleep(10)

    typer.secho("  Timeout aguardando Harbor. Verifique: kubectl -n harbor get pods", fg=typer.colors.YELLOW)
    return False
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def _create_harbor_projects(ctx: ExecutionContext, harbor_url: str, admin_password: str) -> None:
    """Create the 'tst' and 'prd' projects through the Harbor REST API.

    Args:
        ctx: execution context for run_cmd.
        harbor_url: base URL of the Harbor instance (http://host:port).
        admin_password: Harbor admin password for basic auth.

    Bug fix: success was previously echoed unconditionally, even when curl
    failed. curl now runs with --fail so HTTP errors produce a non-zero exit
    code, and the result is reported accordingly.
    """
    typer.echo("\nCriando projetos 'tst' e 'prd' no Harbor...")

    projects = [
        {
            "project_name": "tst",
            "metadata": {
                "public": "false",
                "auto_scan": "true",
                "severity": "low",
                "enable_content_trust": "false",
                "prevent_vul": "false",
            },
        },
        {
            "project_name": "prd",
            "metadata": {
                "public": "false",
                "auto_scan": "true",
                "severity": "low",
                "enable_content_trust": "true",   # Content trust in production
                "prevent_vul": "true",            # Block pushes of vulnerable images
                "severity_threshold": "critical",  # Only CRITICAL vulnerabilities block
            },
        },
    ]

    for project in projects:
        project_json = json.dumps(project)

        # NOTE(review): the admin password is passed on the curl command line
        # (-u) and is visible in the process list; consider stdin or --netrc.
        result = run_cmd(
            [
                "curl", "--fail", "-X", "POST",
                f"{harbor_url}/api/v2.0/projects",
                "-H", "Content-Type: application/json",
                "-u", f"admin:{admin_password}",
                "-d", project_json,
                "-k",  # Skip SSL verification (self-signed cert)
            ],
            ctx,
            check=False,
        )

        if result.returncode == 0:
            typer.secho(f"  ✓ Projeto '{project['project_name']}' criado.", fg=typer.colors.GREEN)
        else:
            # A 409 (project already exists) also lands here with --fail.
            typer.secho(
                f"  ✗ Falha ao criar projeto '{project['project_name']}' (já existe ou API indisponível).",
                fg=typer.colors.YELLOW,
            )

        time.sleep(2)
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def _create_retention_policies(ctx: ExecutionContext, harbor_url: str, admin_password: str) -> None:
    """Print the manual steps to configure tag-retention policies.

    Intended policies (configured by the operator via the Harbor UI):
      - TST: keep the last 10 images OR 30 days; cleanup daily at midnight.
      - PRD: keep the last 20 images OR 90 days; cleanup Sundays at 2am.

    Args:
        ctx: execution context (unused; kept for the module's uniform signature).
        harbor_url: Harbor base URL (unused today; kept so this step can later
            be automated via POST {harbor_url}/api/v2.0/retentions).
        admin_password: Harbor admin password (unused today, same reason).

    Fix: earlier revisions built the complete retention-policy JSON payloads
    here but never submitted them to the API — dead code. The unused payloads
    were removed; the intended rules are documented above and the manual
    instructions below are unchanged.
    """
    typer.echo("\nConfigurando políticas de retenção...")

    typer.secho("  ℹ️ Políticas de retenção devem ser configuradas via UI:", fg=typer.colors.CYAN)
    typer.echo("  1. Acesse Harbor UI → Projects")
    typer.echo("  2. Selecione projeto (tst ou prd)")
    typer.echo("  3. Policy → Tag Retention")
    typer.echo("  4. Add Rule:")
    typer.echo("     TST: Manter últimas 10 imagens OU 30 dias")
    typer.echo("     PRD: Manter últimas 20 imagens OU 90 dias")
|
|
315
|
+
|
|
316
|
+
|
|
317
|
+
def _create_robot_accounts(ctx: ExecutionContext, harbor_url: str, admin_password: str) -> None:
    """Print the manual steps to create CI/CD robot accounts.

    Intended accounts (created by the operator via the Harbor UI):
      - robot$cicd-tst: repository push/pull + artifact delete on project 'tst'.
      - robot$cicd-prd: repository push/pull only on project 'prd'.

    Args:
        ctx: execution context (unused; kept for the module's uniform signature).
        harbor_url: Harbor base URL (unused today; kept so this step can later
            be automated via POST /api/v2.0/robots).
        admin_password: Harbor admin password (unused today, same reason).

    Fix: earlier revisions built the full robot-account JSON payloads here but
    never sent them to the API — dead code. The unused payloads were removed;
    the intended permissions are documented above and the manual instructions
    below are unchanged.
    """
    typer.echo("\nCriando robot accounts para CI/CD...")

    typer.secho("  ℹ️ Robot accounts devem ser criados via UI:", fg=typer.colors.CYAN)
    typer.echo("  1. Acesse Harbor UI → Projects → tst/prd")
    typer.echo("  2. Robot Accounts → New Robot Account")
    typer.echo("  3. Nome: cicd-tst / cicd-prd")
    typer.echo("  4. Permissões: Push, Pull, Delete (tst) | Push, Pull (prd)")
    typer.echo("  5. Salvar token gerado no Vault:")
    typer.echo("     kubectl -n vault exec vault-0 -- vault kv put secret/harbor/robot-tst token=<TOKEN>")
|
|
362
|
+
|
|
363
|
+
|
|
364
|
+
def _configure_garbage_collection(ctx: ExecutionContext) -> None:
    """Print how Harbor's automatic garbage collection should be scheduled.

    Purely informational: nothing is changed on the cluster here.
    """
    messages = (
        "\nGarbage collection configurado para rodar:",
        "  - Após cada execução de retention policy",
        "  - Diariamente às 3h (via Harbor scheduler)",
        "\nConfiguração manual via UI:",
        "  Harbor → Administration → Garbage Collection",
        "  Schedule: 0 3 * * * (3h diariamente)",
        "  Delete untagged artifacts: Yes",
    )
    for message in messages:
        typer.echo(message)
|
|
373
|
+
|
|
374
|
+
|
|
375
|
+
def run(ctx: ExecutionContext) -> None:
    """Install and configure Harbor registry backed by MinIO S3 storage.

    Interactive flow: prompts for namespace, MinIO host, NodePort and admin
    password; creates the MinIO buckets; renders a Helm values file; installs
    the official Harbor chart; then (skipped in dry-run) waits for the pods
    and walks through project/retention/robot-account/GC configuration.
    """
    require_root(ctx)
    ensure_tool("kubectl", ctx, install_hint="Instale kubectl ou habilite dry-run.")
    ensure_tool("helm", ctx, install_hint="Instale helm ou habilite dry-run.")

    typer.echo("Instalando Harbor Registry com MinIO backend...")

    harbor_ns = typer.prompt("Namespace para Harbor", default=HARBOR_NAMESPACE)
    node_name = _detect_node_name(ctx)

    # Detect the node's InternalIP (used for externalURL and NodePort access).
    result = run_cmd(
        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].status.addresses[?(@.type=='InternalIP')].address}"],
        ctx,
        check=False,
    )
    # NOTE(review): falls back to a hard-coded LAN address when kubectl fails —
    # confirm this default matches the target environment.
    node_ip = result.stdout.strip() if result.returncode == 0 else "192.168.1.81"

    minio_host = typer.prompt("MinIO host", default=f"{node_ip}:30900")
    access_key, secret_key = _get_minio_credentials(ctx)

    harbor_nodeport = typer.prompt("NodePort para Harbor UI/Registry", default="30880")
    admin_password = typer.prompt("Senha do admin do Harbor", default="Harbor12345", hide_input=True)

    # ========== Harbor ==========
    typer.secho("\n== Harbor Registry ==", fg=typer.colors.CYAN, bold=True)

    # Offer to wipe a previous release before reinstalling.
    if _check_existing_harbor(ctx, harbor_ns):
        cleanup = typer.confirm(
            "Instalacao anterior do Harbor detectada. Limpar antes de reinstalar?",
            default=False,
        )
        if cleanup:
            _uninstall_harbor(ctx, harbor_ns)

    # Create the MinIO buckets Harbor's components store data in
    # (best-effort: --ignore-existing makes this idempotent).
    typer.echo("\nCriando buckets no MinIO para Harbor...")
    for bucket in ["harbor-registry", "harbor-chartmuseum", "harbor-jobservice"]:
        run_cmd(
            ["mc", "mb", "--ignore-existing", f"minio/{bucket}"],
            ctx,
            check=False,
        )

    # Helm values for the official Harbor chart: NodePort exposure without TLS,
    # S3 (MinIO) image storage, and every component pinned to this node with
    # control-plane tolerations (single-node cluster layout).
    harbor_values_yaml = f"""expose:
  type: nodePort
  tls:
    enabled: false
  nodePort:
    name: harbor
    ports:
      http:
        port: 80
        nodePort: {harbor_nodeport}

externalURL: http://{node_ip}:{harbor_nodeport}

persistence:
  enabled: true
  persistentVolumeClaim:
    registry:
      storageClass: ""
      size: 5Gi
    chartmuseum:
      storageClass: ""
      size: 5Gi
    jobservice:
      jobLog:
        storageClass: ""
        size: 1Gi
    database:
      storageClass: ""
      size: 1Gi
    redis:
      storageClass: ""
      size: 1Gi
    trivy:
      storageClass: ""
      size: 5Gi

  # MinIO S3 backend
  imageChartStorage:
    type: s3
    s3:
      region: us-east-1
      bucket: harbor-registry
      accesskey: {access_key}
      secretkey: {secret_key}
      regionendpoint: http://{minio_host}
      encrypt: false
      secure: false
      v4auth: true

chartmuseum:
  enabled: true

# Configuração de admin
harborAdminPassword: "{admin_password}"

# Tolerations e nodeSelector
portal:
  tolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
    - key: node-role.kubernetes.io/master
      operator: Exists
      effect: NoSchedule
  nodeSelector:
    kubernetes.io/hostname: {node_name}
  resources:
    requests:
      memory: 128Mi
      cpu: 100m
    limits:
      memory: 256Mi

core:
  tolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
    - key: node-role.kubernetes.io/master
      operator: Exists
      effect: NoSchedule
  nodeSelector:
    kubernetes.io/hostname: {node_name}
  resources:
    requests:
      memory: 256Mi
      cpu: 200m
    limits:
      memory: 512Mi

jobservice:
  tolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
    - key: node-role.kubernetes.io/master
      operator: Exists
      effect: NoSchedule
  nodeSelector:
    kubernetes.io/hostname: {node_name}
  resources:
    requests:
      memory: 128Mi
      cpu: 100m
    limits:
      memory: 256Mi

registry:
  tolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
    - key: node-role.kubernetes.io/master
      operator: Exists
      effect: NoSchedule
  nodeSelector:
    kubernetes.io/hostname: {node_name}
  resources:
    requests:
      memory: 256Mi
      cpu: 200m
    limits:
      memory: 512Mi

trivy:
  enabled: true
  tolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
    - key: node-role.kubernetes.io/master
      operator: Exists
      effect: NoSchedule
  nodeSelector:
    kubernetes.io/hostname: {node_name}
  resources:
    requests:
      memory: 512Mi
      cpu: 200m
    limits:
      memory: 1Gi

database:
  type: internal
  internal:
    tolerations:
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
    nodeSelector:
      kubernetes.io/hostname: {node_name}
    resources:
      requests:
        memory: 256Mi
        cpu: 100m
      limits:
        memory: 512Mi

redis:
  type: internal
  internal:
    tolerations:
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
    nodeSelector:
      kubernetes.io/hostname: {node_name}
    resources:
      requests:
        memory: 128Mi
        cpu: 100m
      limits:
        memory: 256Mi
"""

    # NOTE(review): the rendered values file contains the MinIO keys and admin
    # password in plain text under /tmp — confirm permissions and cleanup.
    harbor_values_path = Path("/tmp/raijin-harbor-values.yaml")
    write_file(harbor_values_path, harbor_values_yaml, ctx)

    helm_upgrade_install(
        "harbor",
        "harbor",
        harbor_ns,
        ctx,
        repo="harbor",
        repo_url="https://helm.goharbor.io",
        create_namespace=True,
        extra_args=["-f", str(harbor_values_path)],
    )

    if not ctx.dry_run:
        _wait_for_pods_ready(ctx, harbor_ns)

        # Wait for the Harbor API itself to come up after the pods start.
        typer.echo("\nAguardando Harbor API ficar disponível...")
        time.sleep(30)

        harbor_url = f"http://{node_ip}:{harbor_nodeport}"

        # Configure projects, policies and robot accounts.
        _create_harbor_projects(ctx, harbor_url, admin_password)
        _create_retention_policies(ctx, harbor_url, admin_password)
        _create_robot_accounts(ctx, harbor_url, admin_password)
        _configure_garbage_collection(ctx)

    # Final operator-facing summary: access details, created projects,
    # usage examples and remaining manual steps.
    typer.secho("\n✓ Harbor instalado com sucesso!", fg=typer.colors.GREEN, bold=True)

    typer.secho("\n=== Acesso ao Harbor ===", fg=typer.colors.CYAN)
    typer.echo(f"URL: http://{node_ip}:{harbor_nodeport}")
    typer.echo(f"Usuário: admin")
    typer.echo(f"Senha: {admin_password}")

    typer.secho("\n=== Projetos Criados ===", fg=typer.colors.CYAN)
    typer.echo("✓ tst (development/staging)")
    typer.echo("  - Auto-scan habilitado")
    typer.echo("  - Retention: 10 imagens ou 30 dias")
    typer.echo("  - Pull from: develop branch")
    typer.echo("\n✓ prd (production)")
    typer.echo("  - Auto-scan habilitado")
    typer.echo("  - Content trust habilitado")
    typer.echo("  - Block vulnerabilities (critical)")
    typer.echo("  - Retention: 20 imagens ou 90 dias")
    typer.echo("  - Pull from: main/master branch")

    typer.secho("\n=== Como usar ===", fg=typer.colors.CYAN)
    typer.echo("1. Login no Harbor:")
    typer.echo(f"   docker login {node_ip}:{harbor_nodeport}")
    typer.echo(f"   Username: admin")
    typer.echo(f"   Password: {admin_password}")

    typer.echo("\n2. Tag e push de imagem (TST):")
    typer.echo(f"   docker tag myapp:latest {node_ip}:{harbor_nodeport}/tst/myapp:v1.0.0")
    typer.echo(f"   docker push {node_ip}:{harbor_nodeport}/tst/myapp:v1.0.0")

    typer.echo("\n3. Tag e push de imagem (PRD):")
    typer.echo(f"   docker tag myapp:latest {node_ip}:{harbor_nodeport}/prd/myapp:v1.0.0")
    typer.echo(f"   docker push {node_ip}:{harbor_nodeport}/prd/myapp:v1.0.0")

    typer.echo("\n4. Pull de imagem no Kubernetes:")
    typer.echo("   kubectl create secret docker-registry harbor-secret \\")
    typer.echo(f"     --docker-server={node_ip}:{harbor_nodeport} \\")
    typer.echo("     --docker-username=admin \\")
    typer.echo(f"     --docker-password={admin_password}")
    typer.echo("\n   spec:")
    typer.echo("     imagePullSecrets:")
    typer.echo("     - name: harbor-secret")
    typer.echo("     containers:")
    typer.echo(f"     - image: {node_ip}:{harbor_nodeport}/prd/myapp:v1.0.0")

    typer.secho("\n=== Próximos Passos (via UI) ===", fg=typer.colors.YELLOW)
    typer.echo("1. Configurar Robot Accounts (cicd-tst, cicd-prd)")
    typer.echo("2. Ajustar Retention Policies se necessário")
    typer.echo("3. Configurar Webhooks para CI/CD")
    typer.echo("4. Habilitar Content Trust em PRD (cosign/notary)")
    typer.echo("5. Configurar Replication (se multi-cluster)")

    typer.secho("\n⚠️ IMPORTANTE:", fg=typer.colors.YELLOW, bold=True)
    typer.echo("- Imagens em TST: Máximo 10 ou 30 dias (cleanup automático)")
    typer.echo("- Imagens em PRD: Máximo 20 ou 90 dias (cleanup automático)")
    typer.echo("- Garbage collection roda diariamente às 3h")
    typer.echo("- Vulnerability scan automático em todas as imagens")
    typer.echo("- PRD bloqueia push de imagens com vulnerabilidades CRITICAL")
|