iec-builder 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +111 -0
- package/.iec.yaml +5 -0
- package/CLAUDE.md +174 -0
- package/Dockerfile +34 -0
- package/README.md +84 -0
- package/catalog-info.yaml +11 -0
- package/dist/config/env.d.ts +219 -0
- package/dist/config/env.d.ts.map +1 -0
- package/dist/config/env.js +89 -0
- package/dist/config/env.js.map +1 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +148 -0
- package/dist/index.js.map +1 -0
- package/dist/middleware/auth.d.ts +43 -0
- package/dist/middleware/auth.d.ts.map +1 -0
- package/dist/middleware/auth.js +217 -0
- package/dist/middleware/auth.js.map +1 -0
- package/dist/middleware/org-access.d.ts +28 -0
- package/dist/middleware/org-access.d.ts.map +1 -0
- package/dist/middleware/org-access.js +102 -0
- package/dist/middleware/org-access.js.map +1 -0
- package/dist/models/types.d.ts +254 -0
- package/dist/models/types.d.ts.map +1 -0
- package/dist/models/types.js +2 -0
- package/dist/models/types.js.map +1 -0
- package/dist/routes/ai.d.ts +2 -0
- package/dist/routes/ai.d.ts.map +1 -0
- package/dist/routes/ai.js +77 -0
- package/dist/routes/ai.js.map +1 -0
- package/dist/routes/audit.d.ts +2 -0
- package/dist/routes/audit.d.ts.map +1 -0
- package/dist/routes/audit.js +102 -0
- package/dist/routes/audit.js.map +1 -0
- package/dist/routes/builds.d.ts +2 -0
- package/dist/routes/builds.d.ts.map +1 -0
- package/dist/routes/builds.js +262 -0
- package/dist/routes/builds.js.map +1 -0
- package/dist/routes/cluster.d.ts +2 -0
- package/dist/routes/cluster.d.ts.map +1 -0
- package/dist/routes/cluster.js +181 -0
- package/dist/routes/cluster.js.map +1 -0
- package/dist/routes/config.d.ts +2 -0
- package/dist/routes/config.d.ts.map +1 -0
- package/dist/routes/config.js +291 -0
- package/dist/routes/config.js.map +1 -0
- package/dist/routes/databases.d.ts +2 -0
- package/dist/routes/databases.d.ts.map +1 -0
- package/dist/routes/databases.js +161 -0
- package/dist/routes/databases.js.map +1 -0
- package/dist/routes/db-whitelist.d.ts +2 -0
- package/dist/routes/db-whitelist.d.ts.map +1 -0
- package/dist/routes/db-whitelist.js +148 -0
- package/dist/routes/db-whitelist.js.map +1 -0
- package/dist/routes/domains.d.ts +2 -0
- package/dist/routes/domains.d.ts.map +1 -0
- package/dist/routes/domains.js +449 -0
- package/dist/routes/domains.js.map +1 -0
- package/dist/routes/oauth.d.ts +2 -0
- package/dist/routes/oauth.d.ts.map +1 -0
- package/dist/routes/oauth.js +180 -0
- package/dist/routes/oauth.js.map +1 -0
- package/dist/routes/observability.d.ts +2 -0
- package/dist/routes/observability.d.ts.map +1 -0
- package/dist/routes/observability.js +167 -0
- package/dist/routes/observability.js.map +1 -0
- package/dist/routes/orgs.d.ts +2 -0
- package/dist/routes/orgs.d.ts.map +1 -0
- package/dist/routes/orgs.js +270 -0
- package/dist/routes/orgs.js.map +1 -0
- package/dist/routes/platform.d.ts +2 -0
- package/dist/routes/platform.d.ts.map +1 -0
- package/dist/routes/platform.js +107 -0
- package/dist/routes/platform.js.map +1 -0
- package/dist/routes/push.d.ts +2 -0
- package/dist/routes/push.d.ts.map +1 -0
- package/dist/routes/push.js +233 -0
- package/dist/routes/push.js.map +1 -0
- package/dist/routes/rotation.d.ts +3 -0
- package/dist/routes/rotation.d.ts.map +1 -0
- package/dist/routes/rotation.js +154 -0
- package/dist/routes/rotation.js.map +1 -0
- package/dist/routes/services.d.ts +2 -0
- package/dist/routes/services.d.ts.map +1 -0
- package/dist/routes/services.js +246 -0
- package/dist/routes/services.js.map +1 -0
- package/dist/routes/storage.d.ts +2 -0
- package/dist/routes/storage.d.ts.map +1 -0
- package/dist/routes/storage.js +118 -0
- package/dist/routes/storage.js.map +1 -0
- package/dist/routes/users.d.ts +2 -0
- package/dist/routes/users.d.ts.map +1 -0
- package/dist/routes/users.js +183 -0
- package/dist/routes/users.js.map +1 -0
- package/dist/routes/versions.d.ts +2 -0
- package/dist/routes/versions.d.ts.map +1 -0
- package/dist/routes/versions.js +195 -0
- package/dist/routes/versions.js.map +1 -0
- package/dist/routes/webhooks.d.ts +2 -0
- package/dist/routes/webhooks.d.ts.map +1 -0
- package/dist/routes/webhooks.js +334 -0
- package/dist/routes/webhooks.js.map +1 -0
- package/dist/services/__tests__/deploy-pipeline.integration.test.d.ts +2 -0
- package/dist/services/__tests__/deploy-pipeline.integration.test.d.ts.map +1 -0
- package/dist/services/__tests__/deploy-pipeline.integration.test.js +482 -0
- package/dist/services/__tests__/deploy-pipeline.integration.test.js.map +1 -0
- package/dist/services/bio-client.d.ts +68 -0
- package/dist/services/bio-client.d.ts.map +1 -0
- package/dist/services/bio-client.js +110 -0
- package/dist/services/bio-client.js.map +1 -0
- package/dist/services/build-queue.d.ts +7 -0
- package/dist/services/build-queue.d.ts.map +1 -0
- package/dist/services/build-queue.js +114 -0
- package/dist/services/build-queue.js.map +1 -0
- package/dist/services/builder.d.ts +7 -0
- package/dist/services/builder.d.ts.map +1 -0
- package/dist/services/builder.js +1384 -0
- package/dist/services/builder.js.map +1 -0
- package/dist/services/catalog.d.ts +177 -0
- package/dist/services/catalog.d.ts.map +1 -0
- package/dist/services/catalog.js +805 -0
- package/dist/services/catalog.js.map +1 -0
- package/dist/services/catalog.test.d.ts +2 -0
- package/dist/services/catalog.test.d.ts.map +1 -0
- package/dist/services/catalog.test.js +467 -0
- package/dist/services/catalog.test.js.map +1 -0
- package/dist/services/cloudflare.d.ts +43 -0
- package/dist/services/cloudflare.d.ts.map +1 -0
- package/dist/services/cloudflare.js +182 -0
- package/dist/services/cloudflare.js.map +1 -0
- package/dist/services/config-validator.d.ts +28 -0
- package/dist/services/config-validator.d.ts.map +1 -0
- package/dist/services/config-validator.js +68 -0
- package/dist/services/config-validator.js.map +1 -0
- package/dist/services/config-validator.test.d.ts +2 -0
- package/dist/services/config-validator.test.d.ts.map +1 -0
- package/dist/services/config-validator.test.js +151 -0
- package/dist/services/config-validator.test.js.map +1 -0
- package/dist/services/crypto.d.ts +19 -0
- package/dist/services/crypto.d.ts.map +1 -0
- package/dist/services/crypto.js +63 -0
- package/dist/services/crypto.js.map +1 -0
- package/dist/services/database.d.ts +26 -0
- package/dist/services/database.d.ts.map +1 -0
- package/dist/services/database.js +100 -0
- package/dist/services/database.js.map +1 -0
- package/dist/services/db-credential-manager.d.ts +73 -0
- package/dist/services/db-credential-manager.d.ts.map +1 -0
- package/dist/services/db-credential-manager.js +342 -0
- package/dist/services/db-credential-manager.js.map +1 -0
- package/dist/services/db-provisioner.d.ts +57 -0
- package/dist/services/db-provisioner.d.ts.map +1 -0
- package/dist/services/db-provisioner.js +400 -0
- package/dist/services/db-provisioner.js.map +1 -0
- package/dist/services/db-provisioner.test.d.ts +2 -0
- package/dist/services/db-provisioner.test.d.ts.map +1 -0
- package/dist/services/db-provisioner.test.js +141 -0
- package/dist/services/db-provisioner.test.js.map +1 -0
- package/dist/services/db-whitelist.d.ts +58 -0
- package/dist/services/db-whitelist.d.ts.map +1 -0
- package/dist/services/db-whitelist.js +379 -0
- package/dist/services/db-whitelist.js.map +1 -0
- package/dist/services/dependency-resolver.d.ts +58 -0
- package/dist/services/dependency-resolver.d.ts.map +1 -0
- package/dist/services/dependency-resolver.js +180 -0
- package/dist/services/dependency-resolver.js.map +1 -0
- package/dist/services/dependency-resolver.test.d.ts +2 -0
- package/dist/services/dependency-resolver.test.d.ts.map +1 -0
- package/dist/services/dependency-resolver.test.js +195 -0
- package/dist/services/dependency-resolver.test.js.map +1 -0
- package/dist/services/deploy-gate.d.ts +19 -0
- package/dist/services/deploy-gate.d.ts.map +1 -0
- package/dist/services/deploy-gate.js +56 -0
- package/dist/services/deploy-gate.js.map +1 -0
- package/dist/services/deploy-gate.test.d.ts +2 -0
- package/dist/services/deploy-gate.test.d.ts.map +1 -0
- package/dist/services/deploy-gate.test.js +199 -0
- package/dist/services/deploy-gate.test.js.map +1 -0
- package/dist/services/dockerfile-generator.d.ts +31 -0
- package/dist/services/dockerfile-generator.d.ts.map +1 -0
- package/dist/services/dockerfile-generator.js +544 -0
- package/dist/services/dockerfile-generator.js.map +1 -0
- package/dist/services/dockerfile-generator.test.d.ts +2 -0
- package/dist/services/dockerfile-generator.test.d.ts.map +1 -0
- package/dist/services/dockerfile-generator.test.js +144 -0
- package/dist/services/dockerfile-generator.test.js.map +1 -0
- package/dist/services/forgejo.d.ts +58 -0
- package/dist/services/forgejo.d.ts.map +1 -0
- package/dist/services/forgejo.js +131 -0
- package/dist/services/forgejo.js.map +1 -0
- package/dist/services/koko.d.ts +153 -0
- package/dist/services/koko.d.ts.map +1 -0
- package/dist/services/koko.js +260 -0
- package/dist/services/koko.js.map +1 -0
- package/dist/services/kubernetes.d.ts +16 -0
- package/dist/services/kubernetes.d.ts.map +1 -0
- package/dist/services/kubernetes.js +102 -0
- package/dist/services/kubernetes.js.map +1 -0
- package/dist/services/oauth-provisioner.d.ts +30 -0
- package/dist/services/oauth-provisioner.d.ts.map +1 -0
- package/dist/services/oauth-provisioner.js +182 -0
- package/dist/services/oauth-provisioner.js.map +1 -0
- package/dist/services/oauth-provisioner.test.d.ts +2 -0
- package/dist/services/oauth-provisioner.test.d.ts.map +1 -0
- package/dist/services/oauth-provisioner.test.js +349 -0
- package/dist/services/oauth-provisioner.test.js.map +1 -0
- package/dist/services/pod-diagnostics.d.ts +11 -0
- package/dist/services/pod-diagnostics.d.ts.map +1 -0
- package/dist/services/pod-diagnostics.js +201 -0
- package/dist/services/pod-diagnostics.js.map +1 -0
- package/dist/services/rotation-scheduler.d.ts +2 -0
- package/dist/services/rotation-scheduler.d.ts.map +1 -0
- package/dist/services/rotation-scheduler.js +215 -0
- package/dist/services/rotation-scheduler.js.map +1 -0
- package/dist/services/storage-credential-manager.d.ts +43 -0
- package/dist/services/storage-credential-manager.d.ts.map +1 -0
- package/dist/services/storage-credential-manager.js +159 -0
- package/dist/services/storage-credential-manager.js.map +1 -0
- package/dist/services/storage-provisioner.d.ts +32 -0
- package/dist/services/storage-provisioner.d.ts.map +1 -0
- package/dist/services/storage-provisioner.js +136 -0
- package/dist/services/storage-provisioner.js.map +1 -0
- package/dist/services/storage.d.ts +65 -0
- package/dist/services/storage.d.ts.map +1 -0
- package/dist/services/storage.js +204 -0
- package/dist/services/storage.js.map +1 -0
- package/dist/services/troubleshooter.d.ts +22 -0
- package/dist/services/troubleshooter.d.ts.map +1 -0
- package/dist/services/troubleshooter.js +168 -0
- package/dist/services/troubleshooter.js.map +1 -0
- package/dist/services/vault-client.d.ts +114 -0
- package/dist/services/vault-client.d.ts.map +1 -0
- package/dist/services/vault-client.js +411 -0
- package/dist/services/vault-client.js.map +1 -0
- package/dist/utils/logger.d.ts +2 -0
- package/dist/utils/logger.d.ts.map +1 -0
- package/dist/utils/logger.js +6 -0
- package/dist/utils/logger.js.map +1 -0
- package/dist/utils/response.d.ts +13 -0
- package/dist/utils/response.d.ts.map +1 -0
- package/dist/utils/response.js +12 -0
- package/dist/utils/response.js.map +1 -0
- package/docs/registry-migration.md +301 -0
- package/docs/registry-quickstart.md +169 -0
- package/ecosystem.config.cjs +14 -0
- package/findings.md +168 -0
- package/helm/default-service/Chart.yaml +6 -0
- package/helm/default-service/templates/deployment.yaml +97 -0
- package/helm/default-service/templates/ingress.yaml +43 -0
- package/helm/default-service/templates/service.yaml +17 -0
- package/helm/default-service/values.yaml +82 -0
- package/helm/services/iec-builder/Chart.yaml +6 -0
- package/helm/services/iec-builder/templates/_helpers.tpl +61 -0
- package/helm/services/iec-builder/templates/deployment.yaml +73 -0
- package/helm/services/iec-builder/templates/service.yaml +15 -0
- package/helm/services/iec-builder/templates/serviceaccount.yaml +12 -0
- package/helm/services/iec-builder/values.yaml +56 -0
- package/helm/vault-values.yaml +127 -0
- package/package.json +45 -0
- package/progress.md +156 -0
- package/scripts/.vault-init-keys.json +23 -0
- package/scripts/backfill-ownership.ts +113 -0
- package/scripts/finalize-mongo-auth.sh +212 -0
- package/scripts/setup-ipset.sh +107 -0
- package/scripts/setup-mongo-auth.sh +163 -0
- package/scripts/setup-neo4j-auth.sh +62 -0
- package/scripts/setup-redis-auth.sh +55 -0
- package/scripts/setup-registry-secret.sh +71 -0
- package/scripts/setup-vault.sh +308 -0
- package/src/config/env.ts +117 -0
- package/src/index.ts +153 -0
- package/src/middleware/auth.ts +294 -0
- package/src/middleware/org-access.ts +126 -0
- package/src/models/types.ts +288 -0
- package/src/routes/ai.ts +115 -0
- package/src/routes/audit.ts +121 -0
- package/src/routes/builds.ts +320 -0
- package/src/routes/cluster.ts +235 -0
- package/src/routes/config.ts +369 -0
- package/src/routes/databases.ts +201 -0
- package/src/routes/db-whitelist.ts +204 -0
- package/src/routes/domains.ts +547 -0
- package/src/routes/oauth.ts +195 -0
- package/src/routes/observability.ts +205 -0
- package/src/routes/orgs.ts +330 -0
- package/src/routes/platform.ts +134 -0
- package/src/routes/rotation.ts +191 -0
- package/src/routes/services.ts +290 -0
- package/src/routes/storage.ts +153 -0
- package/src/routes/users.ts +235 -0
- package/src/routes/webhooks.ts +384 -0
- package/src/services/__tests__/catalog-storage.test.ts +186 -0
- package/src/services/__tests__/deploy-pipeline.integration.test.ts +624 -0
- package/src/services/__tests__/pod-diagnostics.test.ts +332 -0
- package/src/services/__tests__/storage-credential-manager.test.ts +129 -0
- package/src/services/__tests__/storage-provisioner.test.ts +166 -0
- package/src/services/__tests__/troubleshooter.test.ts +329 -0
- package/src/services/bio-client.ts +189 -0
- package/src/services/build-queue.ts +137 -0
- package/src/services/builder.ts +1800 -0
- package/src/services/catalog.test.ts +1389 -0
- package/src/services/catalog.ts +1187 -0
- package/src/services/cloudflare.ts +259 -0
- package/src/services/config-validator.test.ts +190 -0
- package/src/services/config-validator.ts +108 -0
- package/src/services/crypto.ts +78 -0
- package/src/services/database.ts +122 -0
- package/src/services/db-credential-manager.test.ts +101 -0
- package/src/services/db-credential-manager.ts +447 -0
- package/src/services/db-provisioner.test.ts +602 -0
- package/src/services/db-provisioner.ts +589 -0
- package/src/services/db-whitelist.test.ts +671 -0
- package/src/services/db-whitelist.ts +496 -0
- package/src/services/dependency-resolver.test.ts +677 -0
- package/src/services/dependency-resolver.ts +319 -0
- package/src/services/deploy-gate.test.ts +247 -0
- package/src/services/deploy-gate.ts +75 -0
- package/src/services/dockerfile-generator.test.ts +401 -0
- package/src/services/dockerfile-generator.ts +606 -0
- package/src/services/forgejo.ts +212 -0
- package/src/services/koko.ts +492 -0
- package/src/services/kubernetes.ts +141 -0
- package/src/services/oauth-provisioner.test.ts +477 -0
- package/src/services/oauth-provisioner.ts +286 -0
- package/src/services/pod-diagnostics.ts +261 -0
- package/src/services/rotation-scheduler.ts +293 -0
- package/src/services/storage-credential-manager.ts +223 -0
- package/src/services/storage-provisioner.ts +216 -0
- package/src/services/storage.ts +274 -0
- package/src/services/troubleshooter.ts +208 -0
- package/src/services/vault-client.test.ts +272 -0
- package/src/services/vault-client.ts +587 -0
- package/src/utils/logger.ts +6 -0
- package/src/utils/response.ts +23 -0
- package/task_plan.md +171 -0
- package/tsconfig.json +20 -0
- package/vitest.config.ts +19 -0
|
@@ -0,0 +1,1800 @@
|
|
|
1
|
+
import { exec as execCb } from 'child_process'
|
|
2
|
+
import { createHash } from 'crypto'
|
|
3
|
+
import { promisify } from 'util'
|
|
4
|
+
|
|
5
|
+
const execAsync = promisify(execCb)
|
|
6
|
+
import { simpleGit, SimpleGit } from 'simple-git'
|
|
7
|
+
import { mkdir, rm, access, readFile, readdir } from 'fs/promises'
|
|
8
|
+
import { join, resolve, dirname } from 'path'
|
|
9
|
+
import { fileURLToPath } from 'url'
|
|
10
|
+
import { parse as parseYaml } from 'yaml'
|
|
11
|
+
import { env } from '../config/env.js'
|
|
12
|
+
import { getBuildsCollection, getServicesCollection, publishBuildEvent } from './database.js'
|
|
13
|
+
import { configureDnsForService, buildDnsHostname } from './cloudflare.js'
|
|
14
|
+
import { registerOrUpdateDomainInKoko, registerOrUpdateServiceInKoko, registerDatabaseInKoko, registerAvailableScopes, checkScopeGrant, autoSeedScopeGrant, syncOnboardingToKoko } from './koko.js'
|
|
15
|
+
import { extractTarball } from './storage.js'
|
|
16
|
+
import { parseCatalogInfo, detectFramework, type ParsedCatalog } from './catalog.js'
|
|
17
|
+
import { WalletClient } from '@insureco/wallet'
|
|
18
|
+
import { checkDeployReserve, deriveWalletId, isDeployGateSkipped } from './deploy-gate.js'
|
|
19
|
+
import { resolveDependencies, type ResolveDependenciesResult } from './dependency-resolver.js'
|
|
20
|
+
import { generateDockerfile, extractNextPublicEnvVars, validateNonRootDockerfile, patchCustomDockerfile } from './dockerfile-generator.js'
|
|
21
|
+
import { provisionDatabases, provisionConsumedDatabases, buildConnectionString, type ProvisionResult } from './db-provisioner.js'
|
|
22
|
+
import { provisionOAuthClient } from './oauth-provisioner.js'
|
|
23
|
+
import { getBioClient } from './bio-client.js'
|
|
24
|
+
import { provisionStorage, calculateStorageGasPerMonth } from './storage-provisioner.js'
|
|
25
|
+
import { validateConfigCompleteness, computeConfigDefaults } from './config-validator.js'
|
|
26
|
+
import type { Build, BuildStatus, CredentialRotation, RotationHistoryEntry, DatabaseCredential, DatabaseSharingConfig, StorageCredential, Service, CustomDomainRecord, DeployDiagnostics } from '../models/types.js'
|
|
27
|
+
import { captureDeployDiagnostics } from './pod-diagnostics.js'
|
|
28
|
+
import {
|
|
29
|
+
isVaultEnabled,
|
|
30
|
+
isVaultHealthy,
|
|
31
|
+
ensureMongoDbConnection,
|
|
32
|
+
ensureVaultMongoRole,
|
|
33
|
+
ensureVaultRedisRole,
|
|
34
|
+
ensureVaultNeo4jRole,
|
|
35
|
+
ensureVaultPolicy,
|
|
36
|
+
ensureKubeAuthRole,
|
|
37
|
+
ensureServiceAccount,
|
|
38
|
+
getVaultAgentAnnotations,
|
|
39
|
+
addMinioPolicyRules,
|
|
40
|
+
type VaultAnnotations,
|
|
41
|
+
type VaultDatabaseRoleConfig,
|
|
42
|
+
} from './vault-client.js'
|
|
43
|
+
import { logger } from '../utils/logger.js'
|
|
44
|
+
|
|
45
|
+
/**
|
|
46
|
+
* Provision databases via Vault dynamic credentials.
|
|
47
|
+
* Creates Vault roles, policies, K8s auth bindings, and ServiceAccount.
|
|
48
|
+
* Returns Vault Agent annotations for pod injection.
|
|
49
|
+
* Falls back gracefully — caller should catch errors and fall back to Phase 1.
|
|
50
|
+
*/
|
|
51
|
+
// Database types with working Vault connections.
|
|
52
|
+
// Redis and Neo4j connections are TODO (Phase 2c/2d) — skip them to
|
|
53
|
+
// avoid Vault Agent init loops on non-existent connections.
|
|
54
|
+
const VAULT_SUPPORTED_DB_TYPES = new Set<string>(['mongodb'])
|
|
55
|
+
|
|
56
|
+
async function provisionDatabasesVault(
|
|
57
|
+
serviceName: string,
|
|
58
|
+
environment: string,
|
|
59
|
+
namespace: string,
|
|
60
|
+
databases: ReadonlyArray<{ type: 'mongodb' | 'redis' | 'neo4j'; name?: string }>
|
|
61
|
+
): Promise<VaultAnnotations> {
|
|
62
|
+
// Only provision databases that have working Vault connections
|
|
63
|
+
const vaultDatabases = databases.filter((db) => VAULT_SUPPORTED_DB_TYPES.has(db.type))
|
|
64
|
+
|
|
65
|
+
if (vaultDatabases.length === 0) {
|
|
66
|
+
return {}
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
// 1. Ensure K8s ServiceAccount (needed for Vault K8s auth)
|
|
70
|
+
ensureServiceAccount(serviceName, namespace)
|
|
71
|
+
|
|
72
|
+
// 1b. Ensure Vault database connections exist (idempotent, one-time setup per DB type)
|
|
73
|
+
const hasMongo = vaultDatabases.some((db) => db.type === 'mongodb')
|
|
74
|
+
if (hasMongo && env.DB_MONGODB_ADMIN_URI) {
|
|
75
|
+
const connectionUri = env.DB_MONGODB_ADMIN_URI.replace('localhost', env.DB_MONGODB_HOST)
|
|
76
|
+
await ensureMongoDbConnection('mongodb-tawa', connectionUri)
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
// 2. Ensure Vault roles for each database
|
|
80
|
+
const dbTypes: string[] = []
|
|
81
|
+
for (const db of vaultDatabases) {
|
|
82
|
+
const dbName = db.name || `${serviceName}-${environment}`
|
|
83
|
+
const config: VaultDatabaseRoleConfig = {
|
|
84
|
+
serviceName,
|
|
85
|
+
environment,
|
|
86
|
+
databaseName: dbName,
|
|
87
|
+
role: 'readWrite',
|
|
88
|
+
dbType: db.type,
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
if (db.type === 'mongodb') {
|
|
92
|
+
await ensureVaultMongoRole(config)
|
|
93
|
+
}
|
|
94
|
+
dbTypes.push(db.type)
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
// 3. Ensure Vault policy + K8s auth binding
|
|
98
|
+
await ensureVaultPolicy(serviceName, environment, dbTypes)
|
|
99
|
+
await ensureKubeAuthRole(serviceName, environment, namespace)
|
|
100
|
+
|
|
101
|
+
// 4. Build Vault Agent annotations (only for supported types)
|
|
102
|
+
return getVaultAgentAnnotations(serviceName, environment, vaultDatabases)
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
/**
|
|
106
|
+
* Merge new database credentials with existing ones.
|
|
107
|
+
* Replaces entries matching (type, environment), keeps others.
|
|
108
|
+
* Returns a new array (immutable).
|
|
109
|
+
*/
|
|
110
|
+
function mergeCredentials(
|
|
111
|
+
existing: ReadonlyArray<DatabaseCredential>,
|
|
112
|
+
incoming: ReadonlyArray<DatabaseCredential>
|
|
113
|
+
): DatabaseCredential[] {
|
|
114
|
+
const incomingKeys = new Set(
|
|
115
|
+
incoming.map((c) => `${c.type}:${c.environment}`)
|
|
116
|
+
)
|
|
117
|
+
|
|
118
|
+
const kept = existing.filter(
|
|
119
|
+
(c) => !incomingKeys.has(`${c.type}:${c.environment}`)
|
|
120
|
+
)
|
|
121
|
+
|
|
122
|
+
return [...kept, ...incoming]
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
/**
|
|
126
|
+
* Inject git credentials into clone URLs for known hosts.
|
|
127
|
+
*
|
|
128
|
+
* Handles three cases:
|
|
129
|
+
* 1. Forgejo HTTPS: https://git.insureco.io/org/repo.git
|
|
130
|
+
* → https://forgejo-token:TOKEN@git.insureco.io/org/repo.git
|
|
131
|
+
*
|
|
132
|
+
* 2. GitHub HTTPS: https://github.com/org/repo.git
|
|
133
|
+
* → https://x-access-token:TOKEN@github.com/org/repo.git
|
|
134
|
+
*
|
|
135
|
+
* 3. GitHub SSH: git@github.com:org/repo.git
|
|
136
|
+
* → https://x-access-token:TOKEN@github.com/org/repo.git
|
|
137
|
+
*/
|
|
138
|
+
function injectGitCredentials(repoUrl: string): string {
|
|
139
|
+
// Forgejo — convert SSH to HTTPS and inject token
|
|
140
|
+
if (env.FORGEJO_TOKEN) {
|
|
141
|
+
try {
|
|
142
|
+
const forgejoHost = new URL(env.FORGEJO_URL).host
|
|
143
|
+
|
|
144
|
+
// SSH format: git@git.insureco.io:org/repo.git
|
|
145
|
+
const sshMatch = repoUrl.match(new RegExp(`^git@${forgejoHost.replace('.', '\\.')}:(.+)$`))
|
|
146
|
+
if (sshMatch) {
|
|
147
|
+
return `https://forgejo-token:${env.FORGEJO_TOKEN}@${forgejoHost}/${sshMatch[1]}`
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
// HTTPS format: https://git.insureco.io/org/repo.git
|
|
151
|
+
const repoUrlObj = new URL(repoUrl)
|
|
152
|
+
if (repoUrlObj.host === forgejoHost) {
|
|
153
|
+
return `${repoUrlObj.protocol}//forgejo-token:${env.FORGEJO_TOKEN}@${repoUrlObj.host}${repoUrlObj.pathname}`
|
|
154
|
+
}
|
|
155
|
+
} catch {
|
|
156
|
+
// Not a valid URL and not SSH format, continue
|
|
157
|
+
}
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
// GitHub — convert SSH to HTTPS and inject token
|
|
161
|
+
if (env.GITHUB_TOKEN) {
|
|
162
|
+
// SSH format: git@github.com:org/repo.git
|
|
163
|
+
const sshMatch = repoUrl.match(/^git@github\.com:(.+)$/)
|
|
164
|
+
if (sshMatch) {
|
|
165
|
+
return `https://x-access-token:${env.GITHUB_TOKEN}@github.com/${sshMatch[1]}`
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
// HTTPS format: https://github.com/org/repo.git
|
|
169
|
+
try {
|
|
170
|
+
const repoUrlObj = new URL(repoUrl)
|
|
171
|
+
if (repoUrlObj.host === 'github.com') {
|
|
172
|
+
return `https://x-access-token:${env.GITHUB_TOKEN}@github.com${repoUrlObj.pathname}`
|
|
173
|
+
}
|
|
174
|
+
} catch {
|
|
175
|
+
// Not a valid URL, return as-is
|
|
176
|
+
}
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
return repoUrl
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
/**
|
|
183
|
+
* Extract the organization/owner from a git repo URL.
|
|
184
|
+
* Handles HTTPS (https://github.com/org/repo.git) and SSH (git@github.com:org/repo.git)
|
|
185
|
+
*/
|
|
186
|
+
function extractRepoOrg(repoUrl: string): string | undefined {
|
|
187
|
+
// Try HTTPS format first: https://host[:port]/org/repo.git
|
|
188
|
+
try {
|
|
189
|
+
const url = new URL(repoUrl)
|
|
190
|
+
const parts = url.pathname.split('/').filter(Boolean)
|
|
191
|
+
if (parts.length >= 2) return parts[0]
|
|
192
|
+
} catch {
|
|
193
|
+
// Not a valid URL — try SSH format below
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
// SSH format: git@host:org/repo.git
|
|
197
|
+
const sshMatch = repoUrl.match(/^[^@]+@[^:]+:([^/]+)\//)
|
|
198
|
+
if (sshMatch) return sshMatch[1]
|
|
199
|
+
|
|
200
|
+
return undefined
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
/** Escape a value for use in helm --set (commas and braces need escaping) */
|
|
204
|
+
function escapeHelmValue(value: string): string {
|
|
205
|
+
const str = value ?? ''
|
|
206
|
+
return str.replace(/,/g, '\\,').replace(/\{/g, '\\{').replace(/\}/g, '\\}')
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
/** Escape a value for safe shell interpolation */
|
|
210
|
+
function escapeShellArg(value: string): string {
|
|
211
|
+
const str = value ?? ''
|
|
212
|
+
const sanitized = str.replace(/\0/g, '')
|
|
213
|
+
return `'${sanitized.replace(/'/g, "'\\''")}'`
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
/** Validate that a string is a valid env var key name */
|
|
217
|
+
const ENV_KEY_REGEX = /^[A-Za-z_][A-Za-z0-9_]*$/
|
|
218
|
+
function validateEnvKey(key: string): void {
|
|
219
|
+
if (!ENV_KEY_REGEX.test(key)) {
|
|
220
|
+
throw new Error(`Invalid config key name: ${key}`)
|
|
221
|
+
}
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
/**
 * Shape of an optional `.iec.yaml` file at the repo (or monorepo app) root,
 * loaded by readIecYaml to override per-service build configuration.
 */
interface IecYamlConfig {
  /** Dockerfile path; takes priority over the service's configured dockerfilePath. */
  dockerfile?: string
  /** Docker build context, resolved relative to the app directory. */
  buildContext?: string
  /** Helm chart path — presumably overrides chart auto-discovery; usage not visible in this chunk, confirm against the deploy phase. */
  helmChart?: string
}
|
|
229
|
+
|
|
230
|
+
/**
|
|
231
|
+
* Read .iec.yaml from the app directory to get monorepo build configuration
|
|
232
|
+
*/
|
|
233
|
+
async function readIecYaml(workDir: string, appPath?: string): Promise<IecYamlConfig | null> {
|
|
234
|
+
const configPath = appPath
|
|
235
|
+
? join(workDir, appPath, '.iec.yaml')
|
|
236
|
+
: join(workDir, '.iec.yaml')
|
|
237
|
+
|
|
238
|
+
try {
|
|
239
|
+
await access(configPath)
|
|
240
|
+
const content = await readFile(configPath, 'utf-8')
|
|
241
|
+
const config = parseYaml(content) as IecYamlConfig
|
|
242
|
+
logger.info({ configPath, config }, 'Loaded .iec.yaml config')
|
|
243
|
+
return config
|
|
244
|
+
} catch {
|
|
245
|
+
return null
|
|
246
|
+
}
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
/**
|
|
250
|
+
* Auto-discover the helm chart path within a work directory.
|
|
251
|
+
* Convention: check helm/, then helm/serviceName/, then scan helm subdirectories.
|
|
252
|
+
*/
|
|
253
|
+
async function resolveHelmChartPath(
|
|
254
|
+
workDir: string,
|
|
255
|
+
serviceName: string,
|
|
256
|
+
appPath?: string
|
|
257
|
+
): Promise<string> {
|
|
258
|
+
const baseDir = appPath ? join(workDir, appPath) : workDir
|
|
259
|
+
|
|
260
|
+
// 1. Check helm/ directly (Chart.yaml at root of helm/)
|
|
261
|
+
const helmRoot = join(baseDir, 'helm')
|
|
262
|
+
try {
|
|
263
|
+
await access(join(helmRoot, 'Chart.yaml'))
|
|
264
|
+
return 'helm'
|
|
265
|
+
} catch { /* not here */ }
|
|
266
|
+
|
|
267
|
+
// 2. Convention: helm/<serviceName>/
|
|
268
|
+
const conventionPath = join(helmRoot, serviceName)
|
|
269
|
+
try {
|
|
270
|
+
await access(join(conventionPath, 'Chart.yaml'))
|
|
271
|
+
return `helm/${serviceName}`
|
|
272
|
+
} catch { /* not here */ }
|
|
273
|
+
|
|
274
|
+
// 3. Scan helm/*/ for any Chart.yaml (single chart in helm/)
|
|
275
|
+
try {
|
|
276
|
+
const entries = await readdir(helmRoot, { withFileTypes: true })
|
|
277
|
+
for (const entry of entries) {
|
|
278
|
+
if (entry.isDirectory()) {
|
|
279
|
+
try {
|
|
280
|
+
await access(join(helmRoot, entry.name, 'Chart.yaml'))
|
|
281
|
+
logger.info({ discovered: `helm/${entry.name}` }, 'Auto-discovered helm chart')
|
|
282
|
+
return `helm/${entry.name}`
|
|
283
|
+
} catch { /* no Chart.yaml here */ }
|
|
284
|
+
}
|
|
285
|
+
}
|
|
286
|
+
} catch { /* helm/ doesn't exist or isn't readable */ }
|
|
287
|
+
|
|
288
|
+
// 4. Check chart/ directory (alternative convention)
|
|
289
|
+
try {
|
|
290
|
+
await access(join(baseDir, 'chart', 'Chart.yaml'))
|
|
291
|
+
return 'chart'
|
|
292
|
+
} catch { /* not here */ }
|
|
293
|
+
|
|
294
|
+
// 5. Fallback — use built-in default chart
|
|
295
|
+
const builderDir = dirname(fileURLToPath(import.meta.url))
|
|
296
|
+
const defaultChart = resolve(builderDir, '../../helm/default-service')
|
|
297
|
+
logger.info({ defaultChart }, 'No helm chart found in project, using built-in default')
|
|
298
|
+
return defaultChart
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
export async function executeBuild(buildId: string): Promise<void> {
|
|
302
|
+
const builds = getBuildsCollection()
|
|
303
|
+
const services = getServicesCollection()
|
|
304
|
+
|
|
305
|
+
const build = await builds.findOne({ id: buildId })
|
|
306
|
+
if (!build) {
|
|
307
|
+
logger.error({ buildId }, 'Build not found')
|
|
308
|
+
return
|
|
309
|
+
}
|
|
310
|
+
|
|
311
|
+
const service = await services.findOne({ id: build.serviceId })
|
|
312
|
+
if (!service) {
|
|
313
|
+
await updateBuildStatus(buildId, 'failed', 'Service not found')
|
|
314
|
+
return
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
// Backfill build org from service if missing (handles webhook builds pre-Phase 4.1)
|
|
318
|
+
if (!build.org && service.org) {
|
|
319
|
+
await builds.updateOne(
|
|
320
|
+
{ id: buildId },
|
|
321
|
+
{ $set: { org: service.org, updatedAt: new Date().toISOString() } }
|
|
322
|
+
)
|
|
323
|
+
// Intentional mutation: keep local build in sync with DB update
|
|
324
|
+
build.org = service.org
|
|
325
|
+
}
|
|
326
|
+
|
|
327
|
+
const workDir = join(env.WORKSPACE_DIR, buildId)
|
|
328
|
+
let deployPhaseReached = false
|
|
329
|
+
|
|
330
|
+
try {
|
|
331
|
+
// Clone repository or extract tarball
|
|
332
|
+
await updateBuildStatus(buildId, 'cloning')
|
|
333
|
+
const actualCommitSha = await cloneOrExtract(build.repoUrl, build.branch, build.commitSha, workDir)
|
|
334
|
+
|
|
335
|
+
// Update build with actual commit sha
|
|
336
|
+
await builds.updateOne(
|
|
337
|
+
{ id: buildId },
|
|
338
|
+
{ $set: { commitSha: actualCommitSha, updatedAt: new Date().toISOString() } }
|
|
339
|
+
)
|
|
340
|
+
await appendBuildLog(buildId, `Source ready @ ${actualCommitSha}`)
|
|
341
|
+
|
|
342
|
+
// Read .iec.yaml for monorepo configuration
|
|
343
|
+
const iecConfig = await readIecYaml(workDir, service.appPath)
|
|
344
|
+
if (iecConfig) {
|
|
345
|
+
await appendBuildLog(buildId, `Found .iec.yaml config in ${service.appPath || 'root'}`)
|
|
346
|
+
}
|
|
347
|
+
|
|
348
|
+
// Determine dockerfile path and build context for monorepos
|
|
349
|
+
// Priority: .iec.yaml > service config > defaults
|
|
350
|
+
let dockerfilePath = iecConfig?.dockerfile || service.dockerfilePath || 'Dockerfile'
|
|
351
|
+
// Only treat dockerfilePath as explicit if it's a non-default value.
|
|
352
|
+
// service.dockerfilePath defaults to 'Dockerfile' in the schema, which is not an explicit opt-in.
|
|
353
|
+
const hasExplicitDockerfile = !!(iecConfig?.dockerfile || (service.dockerfilePath && service.dockerfilePath !== 'Dockerfile'))
|
|
354
|
+
let buildContext = workDir
|
|
355
|
+
|
|
356
|
+
// Resolve buildContext from .iec.yaml or service config
|
|
357
|
+
const contextOverride = iecConfig?.buildContext || service.buildContext
|
|
358
|
+
if (contextOverride) {
|
|
359
|
+
const baseDir = service.appPath ? join(workDir, service.appPath) : workDir
|
|
360
|
+
buildContext = resolve(baseDir, contextOverride)
|
|
361
|
+
await appendBuildLog(buildId, `Using build context: ${buildContext} (from ${contextOverride})`)
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
// For monorepos with appPath, resolve dockerfile relative to appPath
|
|
365
|
+
if (service.appPath) {
|
|
366
|
+
if (!dockerfilePath.startsWith('/')) {
|
|
367
|
+
dockerfilePath = join(service.appPath, dockerfilePath)
|
|
368
|
+
}
|
|
369
|
+
}
|
|
370
|
+
|
|
371
|
+
// Always parse catalog-info.yaml — provisioning (databases, OAuth, dependencies)
|
|
372
|
+
// needs it regardless of whether the project ships its own Dockerfile.
|
|
373
|
+
// For monorepos with appPath, look in the app subdirectory first.
|
|
374
|
+
const catalogDir = service.appPath ? join(workDir, service.appPath) : workDir
|
|
375
|
+
let catalog: ParsedCatalog | null = await parseCatalogInfo(catalogDir)
|
|
376
|
+
|
|
377
|
+
if (catalog) {
|
|
378
|
+
await appendBuildLog(buildId, `Detected framework: ${catalog.framework}`)
|
|
379
|
+
}
|
|
380
|
+
|
|
381
|
+
// app-root annotation: tells the builder where package.json lives (e.g., "backend")
|
|
382
|
+
// Sets the Docker build context so COPY commands are relative to the app root.
|
|
383
|
+
// Only applies when no explicit buildContext override exists (.iec.yaml or service config).
|
|
384
|
+
if (catalog?.appRoot && buildContext === workDir) {
|
|
385
|
+
const appRootDir = join(workDir, catalog.appRoot)
|
|
386
|
+
try {
|
|
387
|
+
await access(appRootDir)
|
|
388
|
+
buildContext = appRootDir
|
|
389
|
+
await appendBuildLog(buildId, `Using app root: ${catalog.appRoot}`)
|
|
390
|
+
} catch {
|
|
391
|
+
throw new Error(
|
|
392
|
+
`app-root directory "${catalog.appRoot}" not found in repository. ` +
|
|
393
|
+
`Verify the insureco.io/app-root annotation matches an existing directory.`
|
|
394
|
+
)
|
|
395
|
+
}
|
|
396
|
+
}
|
|
397
|
+
|
|
398
|
+
// Dockerfile strategy:
|
|
399
|
+
// - Default: builder generates an optimized Dockerfile (devs never need one)
|
|
400
|
+
// - Opt-in: power users set insureco.io/custom-dockerfile: 'true' to bring their own
|
|
401
|
+
// - Explicit: .iec.yaml or service config specifies a custom dockerfile path
|
|
402
|
+
let useCustomDockerfile = hasExplicitDockerfile || catalog?.customDockerfile === true
|
|
403
|
+
|
|
404
|
+
// If custom Dockerfile is requested but the file doesn't exist, fall back to auto-generation.
|
|
405
|
+
// This handles services with stale dockerfilePath in the DB or removed Dockerfiles.
|
|
406
|
+
if (useCustomDockerfile) {
|
|
407
|
+
const fullCustomPath = dockerfilePath.startsWith('/')
|
|
408
|
+
? dockerfilePath
|
|
409
|
+
: join(workDir, dockerfilePath)
|
|
410
|
+
try {
|
|
411
|
+
await access(fullCustomPath)
|
|
412
|
+
} catch {
|
|
413
|
+
await appendBuildLog(
|
|
414
|
+
buildId,
|
|
415
|
+
`[warn] Custom Dockerfile not found at ${dockerfilePath} — falling back to auto-generation`
|
|
416
|
+
)
|
|
417
|
+
useCustomDockerfile = false
|
|
418
|
+
}
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
if (env.ENABLE_AUTO_DOCKERFILE === 'true' && !useCustomDockerfile) {
|
|
422
|
+
// Warn if a Dockerfile exists but is being ignored (vibe coder probably generated it by accident)
|
|
423
|
+
try {
|
|
424
|
+
const projectDockerfile = join(buildContext, 'Dockerfile')
|
|
425
|
+
await access(projectDockerfile)
|
|
426
|
+
await appendBuildLog(
|
|
427
|
+
buildId,
|
|
428
|
+
`[warn] Found Dockerfile in project — ignoring in favor of builder-generated Dockerfile. ` +
|
|
429
|
+
`To use your own, add 'insureco.io/custom-dockerfile: "true"' to your catalog-info.yaml annotations.`
|
|
430
|
+
)
|
|
431
|
+
} catch {
|
|
432
|
+
// No Dockerfile in project — expected, nothing to warn about
|
|
433
|
+
}
|
|
434
|
+
|
|
435
|
+
if (catalog) {
|
|
436
|
+
const generatedPath = await generateDockerfile(buildContext, catalog)
|
|
437
|
+
dockerfilePath = generatedPath.replace(buildContext + '/', '')
|
|
438
|
+
await appendBuildLog(buildId, `Generated Dockerfile for ${catalog.framework}`)
|
|
439
|
+
} else {
|
|
440
|
+
// Try to detect framework without catalog
|
|
441
|
+
const detectedFramework = await detectFramework(buildContext)
|
|
442
|
+
if (detectedFramework !== 'unknown') {
|
|
443
|
+
await appendBuildLog(buildId, `Auto-detected framework: ${detectedFramework}`)
|
|
444
|
+
|
|
445
|
+
// Extract NEXT_PUBLIC_* env vars for Next.js
|
|
446
|
+
const envVars = detectedFramework === 'nextjs'
|
|
447
|
+
? await extractNextPublicEnvVars(buildContext)
|
|
448
|
+
: []
|
|
449
|
+
|
|
450
|
+
catalog = {
|
|
451
|
+
name: service.name,
|
|
452
|
+
description: '',
|
|
453
|
+
catalogVersion: '0.1.0',
|
|
454
|
+
framework: detectedFramework,
|
|
455
|
+
nodeVersion: '20',
|
|
456
|
+
buildCommand: 'npm run build',
|
|
457
|
+
startCommand: 'npm start',
|
|
458
|
+
outputDir: detectedFramework === 'nextjs' ? '.next' : 'dist',
|
|
459
|
+
port: 3000,
|
|
460
|
+
healthEndpoint: '/health',
|
|
461
|
+
envVars,
|
|
462
|
+
routes: [],
|
|
463
|
+
cronjobs: [],
|
|
464
|
+
databases: [],
|
|
465
|
+
consumesDatabase: [],
|
|
466
|
+
internalDependencies: [],
|
|
467
|
+
dependencies: [],
|
|
468
|
+
storage: [],
|
|
469
|
+
lifecycle: 'production',
|
|
470
|
+
owner: 'unknown',
|
|
471
|
+
type: 'service',
|
|
472
|
+
podTier: 'nano',
|
|
473
|
+
configDeclarations: [],
|
|
474
|
+
modules: [],
|
|
475
|
+
externalDependencies: [],
|
|
476
|
+
copyPaths: [],
|
|
477
|
+
}
|
|
478
|
+
|
|
479
|
+
const generatedPath = await generateDockerfile(buildContext, catalog)
|
|
480
|
+
dockerfilePath = generatedPath.replace(buildContext + '/', '')
|
|
481
|
+
await appendBuildLog(buildId, `Generated Dockerfile for ${detectedFramework}`)
|
|
482
|
+
}
|
|
483
|
+
}
|
|
484
|
+
} else if (useCustomDockerfile) {
|
|
485
|
+
await appendBuildLog(buildId, `Using custom Dockerfile: ${dockerfilePath}`)
|
|
486
|
+
|
|
487
|
+
// Auto-patch custom Dockerfiles for platform compliance:
|
|
488
|
+
// - Replace non-numeric USER directives with USER 1001 (K8s runAsNonRoot)
|
|
489
|
+
// - Inject Vault entrypoint wrapper if missing (when Vault is enabled)
|
|
490
|
+
const fullCustomPath = dockerfilePath.startsWith('/')
|
|
491
|
+
? dockerfilePath
|
|
492
|
+
: join(buildContext, dockerfilePath)
|
|
493
|
+
const patches = await patchCustomDockerfile(fullCustomPath, env.VAULT_ENABLED === 'true')
|
|
494
|
+
for (const patch of patches) {
|
|
495
|
+
await appendBuildLog(buildId, `[auto-patch] ${patch.message}`)
|
|
496
|
+
}
|
|
497
|
+
}
|
|
498
|
+
|
|
499
|
+
// Deploy gate: check wallet balance before deploying
|
|
500
|
+
if (env.ENABLE_K8S_DEPLOY === 'true' && env.WALLET_URL && catalog) {
|
|
501
|
+
if (isDeployGateSkipped(service.name)) {
|
|
502
|
+
await appendBuildLog(buildId, `Deploy gate skipped: ${service.name} is a platform service`)
|
|
503
|
+
} else {
|
|
504
|
+
const ownerId = catalog.owner
|
|
505
|
+
const walletId = deriveWalletId(ownerId)
|
|
506
|
+
const podTier = catalog.podTier || 'nano'
|
|
507
|
+
|
|
508
|
+
// Auto-create wallet if it doesn't exist (safety net for missed Bio-id registration)
|
|
509
|
+
try {
|
|
510
|
+
const walletClient = new WalletClient({
|
|
511
|
+
baseUrl: env.WALLET_URL,
|
|
512
|
+
serviceKey: env.INTERNAL_SERVICE_KEY,
|
|
513
|
+
retries: 1,
|
|
514
|
+
timeoutMs: 8_000,
|
|
515
|
+
})
|
|
516
|
+
await walletClient.ensureWallet({
|
|
517
|
+
ownerId,
|
|
518
|
+
ownerType: 'organization',
|
|
519
|
+
ownerName: ownerId,
|
|
520
|
+
})
|
|
521
|
+
} catch {
|
|
522
|
+
// fail-open: wallet service may be unavailable
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
const reserveResult = await checkDeployReserve(env.WALLET_URL, walletId, podTier, env.INTERNAL_SERVICE_KEY)
|
|
526
|
+
|
|
527
|
+
// Storage gas adds to the reserve requirement
|
|
528
|
+
const storageGas = calculateStorageGasPerMonth(catalog.storage || [])
|
|
529
|
+
const totalRequired = reserveResult.requiredTokens + (storageGas * 3)
|
|
530
|
+
const totalShortfall = Math.max(0, totalRequired - reserveResult.currentBalance)
|
|
531
|
+
|
|
532
|
+
if (totalShortfall > 0) {
|
|
533
|
+
throw new Error(
|
|
534
|
+
`Deploy gate: insufficient gas reserve. Required: ${totalRequired} tokens ` +
|
|
535
|
+
`(hosting: ${reserveResult.requiredTokens}, storage: ${storageGas * 3}), ` +
|
|
536
|
+
`available: ${reserveResult.currentBalance}. ` +
|
|
537
|
+
`Shortfall: ${totalShortfall} tokens. Top up your wallet to deploy.`
|
|
538
|
+
)
|
|
539
|
+
}
|
|
540
|
+
|
|
541
|
+
const storageNote = storageGas > 0 ? ` (includes ${storageGas} gas/mo storage)` : ''
|
|
542
|
+
await appendBuildLog(buildId, `Deploy gate passed: ${reserveResult.currentBalance}/${reserveResult.requiredTokens} tokens${storageNote}`)
|
|
543
|
+
}
|
|
544
|
+
}
|
|
545
|
+
|
|
546
|
+
// Config preflight: validate required config declarations before building
|
|
547
|
+
if (catalog?.configDeclarations && catalog.configDeclarations.length > 0) {
|
|
548
|
+
const storedConfig = service.config || {}
|
|
549
|
+
const storedSecretKeys = Object.keys(service.secrets || {})
|
|
550
|
+
const validation = validateConfigCompleteness(
|
|
551
|
+
catalog.configDeclarations,
|
|
552
|
+
storedConfig,
|
|
553
|
+
storedSecretKeys,
|
|
554
|
+
)
|
|
555
|
+
|
|
556
|
+
if (!validation.valid) {
|
|
557
|
+
throw new Error(
|
|
558
|
+
`Config preflight failed: missing required config vars: ${validation.missing.join(', ')}. ` +
|
|
559
|
+
`Set them with \`tawa config set\` or \`tawa config push\` before deploying.`
|
|
560
|
+
)
|
|
561
|
+
}
|
|
562
|
+
|
|
563
|
+
await appendBuildLog(buildId, `Config preflight passed (${catalog.configDeclarations.length} declarations checked)`)
|
|
564
|
+
|
|
565
|
+
// Cache declarations on service record for push endpoint
|
|
566
|
+
await services.updateOne(
|
|
567
|
+
{ id: service.id },
|
|
568
|
+
{ $set: { configDeclarations: catalog.configDeclarations, updatedAt: new Date().toISOString() } }
|
|
569
|
+
)
|
|
570
|
+
}
|
|
571
|
+
|
|
572
|
+
// Build output preflight: detect no-op build script with default output-dir
|
|
573
|
+
if (catalog && catalog.outputDir === 'dist') {
|
|
574
|
+
try {
|
|
575
|
+
const pkgPath = join(buildContext, 'package.json')
|
|
576
|
+
const pkg = JSON.parse(await readFile(pkgPath, 'utf-8'))
|
|
577
|
+
const buildScript = pkg?.scripts?.build || ''
|
|
578
|
+
const isNoOp = /^\s*(echo\s|exit\s*0|true|:)\s*/i.test(buildScript)
|
|
579
|
+
|
|
580
|
+
if (isNoOp) {
|
|
581
|
+
throw new Error(
|
|
582
|
+
`Build output preflight failed: build script is a no-op ("${buildScript.trim()}") ` +
|
|
583
|
+
`but output-dir is "dist" (default). The generated Dockerfile will fail copying /app/dist. ` +
|
|
584
|
+
`Set \`insureco.io/output-dir\` to your source directory (e.g. "src") and ` +
|
|
585
|
+
`\`insureco.io/start-command\` accordingly in catalog-info.yaml.`
|
|
586
|
+
)
|
|
587
|
+
}
|
|
588
|
+
} catch (err: any) {
|
|
589
|
+
if (err.message.startsWith('Build output preflight failed')) throw err
|
|
590
|
+
}
|
|
591
|
+
await appendBuildLog(buildId, 'Build output preflight passed')
|
|
592
|
+
}
|
|
593
|
+
|
|
594
|
+
// Validate Dockerfile runs as non-root (skip for static/nginx which needs root)
|
|
595
|
+
if (catalog?.framework !== 'static') {
|
|
596
|
+
const fullDockerfilePath = dockerfilePath.startsWith('/')
|
|
597
|
+
? dockerfilePath
|
|
598
|
+
: join(buildContext, dockerfilePath)
|
|
599
|
+
await validateNonRootDockerfile(fullDockerfilePath)
|
|
600
|
+
await appendBuildLog(buildId, 'Dockerfile non-root validation passed')
|
|
601
|
+
}
|
|
602
|
+
|
|
603
|
+
// Build Docker image
|
|
604
|
+
await updateBuildStatus(buildId, 'building')
|
|
605
|
+
const imageTag = `${env.DOCKER_REGISTRY}/${service.name}:${actualCommitSha.substring(0, 7)}`
|
|
606
|
+
await buildDockerImage(buildContext, imageTag, buildId, dockerfilePath)
|
|
607
|
+
await appendBuildLog(buildId, `Built image: ${imageTag}`)
|
|
608
|
+
|
|
609
|
+
// Push to registry
|
|
610
|
+
await updateBuildStatus(buildId, 'pushing')
|
|
611
|
+
await pushDockerImage(imageTag, buildId)
|
|
612
|
+
await appendBuildLog(buildId, `Pushed image to registry`)
|
|
613
|
+
|
|
614
|
+
// Update build with image tag
|
|
615
|
+
await builds.updateOne(
|
|
616
|
+
{ id: buildId },
|
|
617
|
+
{ $set: { imageTag, updatedAt: new Date().toISOString() } }
|
|
618
|
+
)
|
|
619
|
+
|
|
620
|
+
// Deploy to Kubernetes (optional - skip if not configured)
|
|
621
|
+
if (env.ENABLE_K8S_DEPLOY === 'true') {
|
|
622
|
+
await updateBuildStatus(buildId, 'deploying')
|
|
623
|
+
deployPhaseReached = true
|
|
624
|
+
|
|
625
|
+
// Use environment from build (set by CLI/API) - no more branch inference
|
|
626
|
+
const environment = build.environment
|
|
627
|
+
const namespace = service.namespace || `${service.name}-${environment}`
|
|
628
|
+
|
|
629
|
+
// Ensure namespace exists before any K8s operations (DB secrets, etc.)
|
|
630
|
+
await ensureNamespaceExists(namespace)
|
|
631
|
+
|
|
632
|
+
// Check Vault availability (feature-flagged + health-checked)
|
|
633
|
+
const vaultActive = isVaultEnabled() && await isVaultHealthy()
|
|
634
|
+
let vaultAnnotations: VaultAnnotations = {}
|
|
635
|
+
|
|
636
|
+
// Provision databases if catalog specifies them
|
|
637
|
+
let provisionedEnvVars: Record<string, string> = {}
|
|
638
|
+
let sharingConfigs: DatabaseSharingConfig[] = []
|
|
639
|
+
let dbResult: ProvisionResult = { envVars: {}, credentials: [] }
|
|
640
|
+
if (catalog?.databases && catalog.databases.length > 0) {
|
|
641
|
+
if (vaultActive) {
|
|
642
|
+
// Phase 2: Vault dynamic credentials — Agent sidecar injects DB env vars
|
|
643
|
+
try {
|
|
644
|
+
vaultAnnotations = await provisionDatabasesVault(
|
|
645
|
+
service.name, environment, namespace, catalog.databases
|
|
646
|
+
)
|
|
647
|
+
const vaultCount = catalog.databases.filter((db) => VAULT_SUPPORTED_DB_TYPES.has(db.type)).length
|
|
648
|
+
if (vaultCount > 0) {
|
|
649
|
+
await appendBuildLog(buildId, `Vault: provisioned ${vaultCount} dynamic database role(s)`)
|
|
650
|
+
}
|
|
651
|
+
} catch (vaultError) {
|
|
652
|
+
// Vault provisioning failed — fall back to Phase 1 static credentials for ALL databases
|
|
653
|
+
const msg = vaultError instanceof Error ? vaultError.message : 'Unknown error'
|
|
654
|
+
logger.warn({ error: msg }, 'Vault provisioning failed, falling back to static credentials')
|
|
655
|
+
await appendBuildLog(buildId, `[warn] Vault provisioning failed (${msg}), using static credentials`)
|
|
656
|
+
vaultAnnotations = {}
|
|
657
|
+
}
|
|
658
|
+
|
|
659
|
+
// Provision non-Vault databases (e.g. Redis, Neo4j) via Phase 1 static creds,
|
|
660
|
+
// or ALL databases if Vault provisioning failed above
|
|
661
|
+
const staticDatabases = Object.keys(vaultAnnotations).length > 0
|
|
662
|
+
? catalog.databases.filter((db) => !VAULT_SUPPORTED_DB_TYPES.has(db.type))
|
|
663
|
+
: catalog.databases
|
|
664
|
+
|
|
665
|
+
if (staticDatabases.length > 0) {
|
|
666
|
+
dbResult = await provisionDatabases(
|
|
667
|
+
service.name, environment, namespace,
|
|
668
|
+
staticDatabases, service.databaseCredentials
|
|
669
|
+
)
|
|
670
|
+
provisionedEnvVars = { ...provisionedEnvVars, ...dbResult.envVars }
|
|
671
|
+
}
|
|
672
|
+
} else {
|
|
673
|
+
// Phase 1: Static credentials (fallback when Vault disabled or unhealthy)
|
|
674
|
+
dbResult = await provisionDatabases(
|
|
675
|
+
service.name, environment, namespace,
|
|
676
|
+
catalog.databases, service.databaseCredentials
|
|
677
|
+
)
|
|
678
|
+
provisionedEnvVars = { ...provisionedEnvVars, ...dbResult.envVars }
|
|
679
|
+
}
|
|
680
|
+
|
|
681
|
+
if (dbResult.credentials.length > 0) {
|
|
682
|
+
const mergedCreds = mergeCredentials(service.databaseCredentials || [], dbResult.credentials)
|
|
683
|
+
await services.updateOne(
|
|
684
|
+
{ id: service.id },
|
|
685
|
+
{ $set: { databaseCredentials: mergedCreds, updatedAt: new Date().toISOString() } }
|
|
686
|
+
)
|
|
687
|
+
await appendBuildLog(buildId, `Database credentials provisioned for ${dbResult.credentials.length} database(s)`)
|
|
688
|
+
}
|
|
689
|
+
|
|
690
|
+
if (!vaultActive || Object.keys(vaultAnnotations).length === 0) {
|
|
691
|
+
await appendBuildLog(buildId, `Provisioned ${catalog.databases.length} database(s)`)
|
|
692
|
+
}
|
|
693
|
+
|
|
694
|
+
// Register databases in Koko so koko-db CLI can discover them
|
|
695
|
+
// In Vault mode, dbResult is empty so this falls through to buildConnectionString()
|
|
696
|
+
let registeredCount = 0
|
|
697
|
+
await Promise.all(
|
|
698
|
+
catalog.databases.map(async (db) => {
|
|
699
|
+
const dbName = db.name || `${service.name}-${environment}`
|
|
700
|
+
const credential = dbResult.credentials.find(c => c.databaseName === dbName)
|
|
701
|
+
const connString = credential
|
|
702
|
+
? dbResult.envVars[db.type === 'mongodb' ? 'MONGODB_URI' : db.type === 'redis' ? 'REDIS_URL' : 'NEO4J_URI']
|
|
703
|
+
: buildConnectionString(db.type, service.name, environment, db.name)
|
|
704
|
+
if (connString) {
|
|
705
|
+
const ok = await registerDatabaseInKoko(service.name, dbName, environment, connString, credential?.username)
|
|
706
|
+
if (ok) registeredCount++
|
|
707
|
+
}
|
|
708
|
+
})
|
|
709
|
+
)
|
|
710
|
+
await appendBuildLog(buildId, `Registered ${registeredCount}/${catalog.databases.length} database(s) in Koko`)
|
|
711
|
+
|
|
712
|
+
// Store database sharing config for owner services (used by consumers on their deploy)
|
|
713
|
+
sharingConfigs = catalog.databases
|
|
714
|
+
.filter(db => db.sharedWith && db.sharedWith.length > 0)
|
|
715
|
+
.map(db => ({
|
|
716
|
+
type: db.type,
|
|
717
|
+
databaseName: db.name || `${service.name}-${environment}`,
|
|
718
|
+
sharedWith: db.sharedWith!.map(s => ({ service: s.service, access: s.access })),
|
|
719
|
+
}))
|
|
720
|
+
|
|
721
|
+
if (sharingConfigs.length > 0) {
|
|
722
|
+
await services.updateOne(
|
|
723
|
+
{ id: service.id },
|
|
724
|
+
{ $set: { databaseSharing: sharingConfigs, updatedAt: new Date().toISOString() } }
|
|
725
|
+
)
|
|
726
|
+
await appendBuildLog(buildId, `Database sharing config stored (${sharingConfigs.length} shared database(s))`)
|
|
727
|
+
}
|
|
728
|
+
|
|
729
|
+
// Cache catalog spec on service (used by tawa-web databases page)
|
|
730
|
+
const catalogSpecUpdate = {
|
|
731
|
+
databases: catalog.databases.map(db => ({ type: db.type, name: db.name })),
|
|
732
|
+
storage: catalog.storage?.map(s => ({ tier: s.tier })) ?? [],
|
|
733
|
+
}
|
|
734
|
+
await services.updateOne(
|
|
735
|
+
{ id: service.id },
|
|
736
|
+
{ $set: { catalogSpec: catalogSpecUpdate, updatedAt: new Date().toISOString() } }
|
|
737
|
+
)
|
|
738
|
+
}
|
|
739
|
+
|
|
740
|
+
// Check scope grants before provisioning consumed databases
|
|
741
|
+
if (catalog?.consumesDatabase && catalog.consumesDatabase.length > 0) {
|
|
742
|
+
for (const consumed of catalog.consumesDatabase) {
|
|
743
|
+
const grantResult = await checkScopeGrant(consumed.service, service.name, consumed.type)
|
|
744
|
+
|
|
745
|
+
if (grantResult === null) {
|
|
746
|
+
// Koko unreachable — fail-open during rollout
|
|
747
|
+
await appendBuildLog(buildId, `[warn] Koko unreachable, skipping scope-grant check for ${consumed.service}:${consumed.type}`)
|
|
748
|
+
} else if (grantResult.granted) {
|
|
749
|
+
await appendBuildLog(buildId, `Scope grant approved for ${consumed.service}:${consumed.type}`)
|
|
750
|
+
} else if (grantResult.status === 'none') {
|
|
751
|
+
// No grant exists yet — fail-open during rollout (owner hasn't auto-seeded yet)
|
|
752
|
+
await appendBuildLog(buildId, `[warn] No scope grant found for ${consumed.service}:${consumed.type}, proceeding (rollout period)`)
|
|
753
|
+
} else if (grantResult.status === 'denied' || grantResult.status === 'revoked') {
|
|
754
|
+
throw new Error(
|
|
755
|
+
`Deploy blocked: access to ${consumed.service}'s ${consumed.type} database has been ${grantResult.status}. ` +
|
|
756
|
+
`Request access at https://tawa.insureco.io/console/scopes`
|
|
757
|
+
)
|
|
758
|
+
} else {
|
|
759
|
+
// pending — fail-open during rollout, will be fail-closed later
|
|
760
|
+
await appendBuildLog(buildId, `[warn] Scope grant pending for ${consumed.service}:${consumed.type}, proceeding (rollout period)`)
|
|
761
|
+
}
|
|
762
|
+
}
|
|
763
|
+
}
|
|
764
|
+
|
|
765
|
+
// Provision consumed databases (from other services)
|
|
766
|
+
if (catalog?.consumesDatabase && catalog.consumesDatabase.length > 0) {
|
|
767
|
+
const consumedResult = await provisionConsumedDatabases(
|
|
768
|
+
service.name,
|
|
769
|
+
environment,
|
|
770
|
+
namespace,
|
|
771
|
+
catalog.consumesDatabase,
|
|
772
|
+
service.databaseCredentials || [],
|
|
773
|
+
async (ownerName) => {
|
|
774
|
+
const ownerService = await services.findOne({ name: ownerName })
|
|
775
|
+
if (!ownerService) return null
|
|
776
|
+
return { name: ownerService.name, databaseSharing: ownerService.databaseSharing }
|
|
777
|
+
}
|
|
778
|
+
)
|
|
779
|
+
|
|
780
|
+
provisionedEnvVars = { ...provisionedEnvVars, ...consumedResult.envVars }
|
|
781
|
+
|
|
782
|
+
if (consumedResult.credentials.length > 0) {
|
|
783
|
+
const mergedCreds = mergeCredentials(service.databaseCredentials || [], consumedResult.credentials)
|
|
784
|
+
await services.updateOne(
|
|
785
|
+
{ id: service.id },
|
|
786
|
+
{ $set: { databaseCredentials: mergedCreds, updatedAt: new Date().toISOString() } }
|
|
787
|
+
)
|
|
788
|
+
}
|
|
789
|
+
|
|
790
|
+
await appendBuildLog(buildId, `Provisioned ${catalog.consumesDatabase.length} consumed database(s)`)
|
|
791
|
+
}
|
|
792
|
+
|
|
793
|
+
// Provision object storage (Vault-required, 0.3.0+ catalogs)
|
|
794
|
+
if (catalog?.storage && catalog.storage.length > 0) {
|
|
795
|
+
const storageResult = await provisionStorage(
|
|
796
|
+
service.name,
|
|
797
|
+
environment,
|
|
798
|
+
namespace,
|
|
799
|
+
catalog.storage,
|
|
800
|
+
)
|
|
801
|
+
|
|
802
|
+
// Add MinIO creds path to Vault policy (creates policy if it doesn't exist yet)
|
|
803
|
+
await addMinioPolicyRules(service.name, environment)
|
|
804
|
+
|
|
805
|
+
// If no databases were declared, Vault plumbing (ServiceAccount, K8s auth)
|
|
806
|
+
// hasn't been set up yet — create it now so the pod can authenticate to Vault
|
|
807
|
+
const noDatabases = !catalog?.databases || catalog.databases.length === 0
|
|
808
|
+
if (noDatabases) {
|
|
809
|
+
ensureServiceAccount(service.name, namespace)
|
|
810
|
+
await ensureKubeAuthRole(service.name, environment, namespace)
|
|
811
|
+
|
|
812
|
+
// Set base Vault Agent annotations (normally set by database provisioning)
|
|
813
|
+
const kubeRoleName = `svc-${service.name}-${environment}`
|
|
814
|
+
vaultAnnotations = {
|
|
815
|
+
'vault.hashicorp.com/agent-inject': 'true',
|
|
816
|
+
'vault.hashicorp.com/role': kubeRoleName,
|
|
817
|
+
'vault.hashicorp.com/agent-pre-populate-only': 'false',
|
|
818
|
+
}
|
|
819
|
+
}
|
|
820
|
+
|
|
821
|
+
// Merge Vault annotations for sidecar injection
|
|
822
|
+
vaultAnnotations = { ...vaultAnnotations, ...storageResult.vaultAnnotations }
|
|
823
|
+
|
|
824
|
+
if (storageResult.credential) {
|
|
825
|
+
const existingStorage = service.storageCredentials || []
|
|
826
|
+
const updatedStorage: StorageCredential[] = [
|
|
827
|
+
...existingStorage.filter((c) => c.environment !== environment),
|
|
828
|
+
storageResult.credential,
|
|
829
|
+
]
|
|
830
|
+
|
|
831
|
+
await services.updateOne(
|
|
832
|
+
{ id: service.id },
|
|
833
|
+
{ $set: { storageCredentials: updatedStorage, updatedAt: new Date().toISOString() } }
|
|
834
|
+
)
|
|
835
|
+
}
|
|
836
|
+
|
|
837
|
+
await appendBuildLog(
|
|
838
|
+
buildId,
|
|
839
|
+
`Provisioned ${catalog.storage.length} storage bucket(s) (${storageResult.gasPerMonth} gas/month)`
|
|
840
|
+
)
|
|
841
|
+
}
|
|
842
|
+
|
|
843
|
+
// Resolve unified dependencies (scope grant check + URL injection)
|
|
844
|
+
// This handles both new spec.dependencies (0.4.0+) and legacy internalDependencies/externalDependencies
|
|
845
|
+
const unifiedDeps = catalog?.dependencies || []
|
|
846
|
+
const hasScopedDeps = unifiedDeps.some(d => d.scopes.length > 0)
|
|
847
|
+
let depResult: ResolveDependenciesResult = { envVars: {}, warnings: [], blocked: [], grantResults: [] }
|
|
848
|
+
|
|
849
|
+
if (unifiedDeps.length > 0 && env.KOKO_URL) {
|
|
850
|
+
depResult = await resolveDependencies({
|
|
851
|
+
serviceName: service.name,
|
|
852
|
+
environment,
|
|
853
|
+
dependencies: unifiedDeps,
|
|
854
|
+
kokoUrl: env.KOKO_URL,
|
|
855
|
+
bioUrl: env.BIO_ID_URL,
|
|
856
|
+
bioInternalKey: env.BIO_INTERNAL_KEY || '',
|
|
857
|
+
})
|
|
858
|
+
|
|
859
|
+
provisionedEnvVars = { ...provisionedEnvVars, ...depResult.envVars }
|
|
860
|
+
|
|
861
|
+
for (const warning of depResult.warnings) {
|
|
862
|
+
await appendBuildLog(buildId, `[warn] ${warning}`)
|
|
863
|
+
}
|
|
864
|
+
|
|
865
|
+
// Create scope grant requests in Bio-ID for deps that don't have an active grant.
|
|
866
|
+
// Runs BEFORE the blocked check so that 'none' deps get a grant request created
|
|
867
|
+
// (triggering email notification) before the deploy is blocked.
|
|
868
|
+
if (hasScopedDeps && env.BIO_INTERNAL_KEY) {
|
|
869
|
+
const bioClient = getBioClient()
|
|
870
|
+
const clientId = `${service.name}-${environment}`
|
|
871
|
+
const grantStatusMap = new Map(depResult.grantResults.map(g => [g.service, g.status]))
|
|
872
|
+
|
|
873
|
+
for (const dep of unifiedDeps.filter(d => d.scopes.length > 0)) {
|
|
874
|
+
const status = grantStatusMap.get(dep.service)
|
|
875
|
+
|
|
876
|
+
// Already approved or pending — no need to create a new request
|
|
877
|
+
if (status === 'approved' || status === 'pending') {
|
|
878
|
+
await appendBuildLog(buildId, `Scope grant for ${dep.service} [${dep.scopes.join(', ')}]: ${status}`)
|
|
879
|
+
continue
|
|
880
|
+
}
|
|
881
|
+
|
|
882
|
+
// denied/revoked — already blocked, skip grant creation
|
|
883
|
+
if (status === 'denied' || status === 'revoked') {
|
|
884
|
+
continue
|
|
885
|
+
}
|
|
886
|
+
|
|
887
|
+
// none/unreachable — create a new scope grant request
|
|
888
|
+
try {
|
|
889
|
+
const result = await bioClient.createScopeGrant({
|
|
890
|
+
requestingServiceId: service.name,
|
|
891
|
+
targetServiceId: dep.service,
|
|
892
|
+
requestedScopes: [...dep.scopes],
|
|
893
|
+
credentialType: 'oauth_client',
|
|
894
|
+
credentialId: clientId,
|
|
895
|
+
})
|
|
896
|
+
|
|
897
|
+
if (result.success) {
|
|
898
|
+
await appendBuildLog(buildId, `Scope request created for ${dep.service} [${dep.scopes.join(', ')}] — pending approval`)
|
|
899
|
+
} else {
|
|
900
|
+
await appendBuildLog(buildId, `Scope request for ${dep.service} failed: ${result.error?.message || 'unknown'} (non-blocking)`)
|
|
901
|
+
}
|
|
902
|
+
} catch (error) {
|
|
903
|
+
const message = error instanceof Error ? error.message : 'unknown error'
|
|
904
|
+
await appendBuildLog(buildId, `Scope request for ${dep.service} failed: ${message} (non-blocking)`)
|
|
905
|
+
}
|
|
906
|
+
}
|
|
907
|
+
}
|
|
908
|
+
|
|
909
|
+
if (depResult.blocked.length > 0) {
|
|
910
|
+
const blockMessages = depResult.blocked.join('; ')
|
|
911
|
+
throw new Error(`Deploy blocked: ${blockMessages}`)
|
|
912
|
+
}
|
|
913
|
+
|
|
914
|
+
const directCount = unifiedDeps.filter(d => d.transport === 'direct').length
|
|
915
|
+
const gatewayCount = unifiedDeps.filter(d => d.transport === 'gateway').length
|
|
916
|
+
await appendBuildLog(
|
|
917
|
+
buildId,
|
|
918
|
+
`Resolved ${Object.keys(depResult.envVars).length} dependency URLs ` +
|
|
919
|
+
`(${directCount} direct, ${gatewayCount} gateway, ${depResult.warnings.length} warnings)`
|
|
920
|
+
)
|
|
921
|
+
}
|
|
922
|
+
|
|
923
|
+
// OAuth provisioning (opt-in via spec.auth or scoped dependencies)
|
|
924
|
+
if (catalog?.authMode === 'sso' || catalog?.authMode === 'service-only' || hasScopedDeps) {
|
|
925
|
+
// Collect dependency scopes for OAuth client (approved + unreachable pass; pending/none are now blocked)
|
|
926
|
+
const depScopes = unifiedDeps
|
|
927
|
+
.filter(d => d.scopes.length > 0)
|
|
928
|
+
.flatMap(d => d.scopes)
|
|
929
|
+
const oauthCreds = await provisionOAuthClient(service.name, environment, namespace, service.customDomains, build.requestedById, catalog?.authMode, service.id, service.oauthCredentials, hasScopedDeps, depScopes, build.org || service.org)
|
|
930
|
+
if (oauthCreds) {
|
|
931
|
+
// Resolve Bio-ID's public URL: prefer verified custom domain, fall back to platform hostname
|
|
932
|
+
const bioService = await getServicesCollection().findOne({ name: 'bio' })
|
|
933
|
+
const bioCustomDomain = bioService?.customDomains?.find(
|
|
934
|
+
(d: CustomDomainRecord) => d.dnsVerified && d.environment === environment
|
|
935
|
+
)
|
|
936
|
+
const bioIdUrl = bioCustomDomain
|
|
937
|
+
? `https://${bioCustomDomain.domain}`
|
|
938
|
+
: `https://${buildDnsHostname('bio', environment)}`
|
|
939
|
+
|
|
940
|
+
// Resolve this service's public URL: prefer verified custom domain, fall back to platform hostname
|
|
941
|
+
const svcCustomDomain = (service.customDomains ?? []).find(
|
|
942
|
+
(d: CustomDomainRecord) => d.dnsVerified && d.environment === environment
|
|
943
|
+
)
|
|
944
|
+
const svcPublicUrl = svcCustomDomain
|
|
945
|
+
? `https://${svcCustomDomain.domain}`
|
|
946
|
+
: `https://${buildDnsHostname(service.name, environment)}`
|
|
947
|
+
|
|
948
|
+
provisionedEnvVars = {
|
|
949
|
+
...provisionedEnvVars,
|
|
950
|
+
BIO_CLIENT_ID: oauthCreds.clientId,
|
|
951
|
+
BIO_CLIENT_SECRET: oauthCreds.clientSecret,
|
|
952
|
+
BIO_ID_URL: bioIdUrl,
|
|
953
|
+
BIO_ID_CALLBACK_URL: `${svcPublicUrl}/api/auth/callback`,
|
|
954
|
+
}
|
|
955
|
+
const grantInfo = hasScopedDeps ? `, grants include client_credentials` : ''
|
|
956
|
+
await appendBuildLog(buildId, `OAuth client provisioned (${catalog?.authMode || 'dependencies'}): ${oauthCreds.clientId}, bio: ${bioIdUrl}, callback: ${svcPublicUrl}/api/auth/callback${grantInfo}`)
|
|
957
|
+
}
|
|
958
|
+
} else if (catalog?.authMode === 'none') {
|
|
959
|
+
await appendBuildLog(buildId, 'OAuth skipped (auth.mode: none)')
|
|
960
|
+
} else if (catalog) {
|
|
961
|
+
await appendBuildLog(buildId, 'OAuth skipped (spec.auth not configured)')
|
|
962
|
+
}
|
|
963
|
+
|
|
964
|
+
// Module registration in Bio-ID (opt-in via spec.modules)
|
|
965
|
+
if (catalog?.modules && catalog.modules.length > 0 && env.BIO_INTERNAL_KEY) {
|
|
966
|
+
const bioClient = getBioClient()
|
|
967
|
+
let serviceApiKey: string | undefined
|
|
968
|
+
|
|
969
|
+
for (const mod of catalog.modules) {
|
|
970
|
+
try {
|
|
971
|
+
const result = await bioClient.registerModule({
|
|
972
|
+
moduleId: mod.moduleId,
|
|
973
|
+
name: mod.name,
|
|
974
|
+
description: mod.description,
|
|
975
|
+
serviceId: service.name,
|
|
976
|
+
owner: catalog.owner,
|
|
977
|
+
homepage: mod.homepage,
|
|
978
|
+
scopes: [...mod.scopes],
|
|
979
|
+
defaultScopes: mod.defaultScopes ? [...mod.defaultScopes] : undefined,
|
|
980
|
+
onboarding: mod.onboarding ? { ...mod.onboarding } : undefined,
|
|
981
|
+
})
|
|
982
|
+
if (result.success) {
|
|
983
|
+
await appendBuildLog(buildId, `Module registered in Bio-ID: ${mod.moduleId}`)
|
|
984
|
+
// Capture the per-service API key returned by Bio-ID
|
|
985
|
+
const responseKey = (result as unknown as Record<string, unknown>).serviceApiKey as string | undefined
|
|
986
|
+
if (responseKey) {
|
|
987
|
+
serviceApiKey = responseKey
|
|
988
|
+
}
|
|
989
|
+
} else {
|
|
990
|
+
await appendBuildLog(buildId, `Module registration warning for ${mod.moduleId}: ${result.error?.message || 'unknown error'}`)
|
|
991
|
+
}
|
|
992
|
+
} catch (error) {
|
|
993
|
+
const message = error instanceof Error ? error.message : 'unknown error'
|
|
994
|
+
await appendBuildLog(buildId, `Module registration failed for ${mod.moduleId}: ${message} (non-blocking)`)
|
|
995
|
+
}
|
|
996
|
+
}
|
|
997
|
+
|
|
998
|
+
// Inject the per-service API key (scoped to this service, not the builder's master key)
|
|
999
|
+
if (serviceApiKey) {
|
|
1000
|
+
provisionedEnvVars = {
|
|
1001
|
+
...provisionedEnvVars,
|
|
1002
|
+
BIO_INTERNAL_KEY: serviceApiKey,
|
|
1003
|
+
}
|
|
1004
|
+
await appendBuildLog(buildId, `Service API key provisioned for ${catalog.modules.length} module(s)`)
|
|
1005
|
+
}
|
|
1006
|
+
}
|
|
1007
|
+
|
|
1008
|
+
// Apply config defaults from catalog declarations (lowest priority)
|
|
1009
|
+
if (catalog?.configDeclarations && catalog.configDeclarations.length > 0) {
|
|
1010
|
+
const configDefaults = computeConfigDefaults(
|
|
1011
|
+
catalog.configDeclarations,
|
|
1012
|
+
service.config || {},
|
|
1013
|
+
Object.keys(service.secrets || {}),
|
|
1014
|
+
)
|
|
1015
|
+
if (Object.keys(configDefaults).length > 0) {
|
|
1016
|
+
provisionedEnvVars = { ...configDefaults, ...provisionedEnvVars }
|
|
1017
|
+
await appendBuildLog(buildId, `Applied ${Object.keys(configDefaults).length} config default(s): ${Object.keys(configDefaults).join(', ')}`)
|
|
1018
|
+
}
|
|
1019
|
+
}
|
|
1020
|
+
|
|
1021
|
+
await deployToKubernetes(service, imageTag, environment, workDir, buildId, iecConfig, provisionedEnvVars, catalog?.port, catalog?.healthEndpoint, vaultAnnotations, catalog?.framework)
|
|
1022
|
+
await appendBuildLog(buildId, `Deployed to Kubernetes namespace: ${namespace}`)
|
|
1023
|
+
|
|
1024
|
+
// Register service in Koko after successful deployment
|
|
1025
|
+
// Extract repo name and org from URL (e.g., git@github.com:insurecosys/iec-koko.git -> iec-koko)
|
|
1026
|
+
const repoName = service.repoUrl.split('/').pop()?.replace('.git', '') || service.name
|
|
1027
|
+
const repoOrg = extractRepoOrg(service.repoUrl)
|
|
1028
|
+
|
|
1029
|
+
// Owner priority: build.org (CLI auth) > service.org (stored) > catalog.owner > fallback
|
|
1030
|
+
const serviceOwner = build.org || service.org || catalog?.owner || repoOrg
|
|
1031
|
+
|
|
1032
|
+
const kokoService = await registerOrUpdateServiceInKoko(
|
|
1033
|
+
service.name, // Use service name as ID, not UUID
|
|
1034
|
+
service.name,
|
|
1035
|
+
service.description || `${service.name} service`,
|
|
1036
|
+
repoName,
|
|
1037
|
+
namespace,
|
|
1038
|
+
service.port || 3000,
|
|
1039
|
+
service.healthEndpoint || '/health',
|
|
1040
|
+
service.tags || [],
|
|
1041
|
+
catalog?.routes || [],
|
|
1042
|
+
serviceOwner,
|
|
1043
|
+
repoOrg,
|
|
1044
|
+
{
|
|
1045
|
+
podTier: catalog?.podTier || 'nano',
|
|
1046
|
+
storageTiers: catalog?.storage?.map((s) => s.tier) ?? [],
|
|
1047
|
+
homepage: catalog?.homepage,
|
|
1048
|
+
},
|
|
1049
|
+
)
|
|
1050
|
+
if (kokoService) {
|
|
1051
|
+
await appendBuildLog(buildId, `Service registered in Koko: ${kokoService.id}`)
|
|
1052
|
+
}
|
|
1053
|
+
|
|
1054
|
+
// Register available scopes + auto-seed grants AFTER Koko knows about the service
|
|
1055
|
+
if (sharingConfigs.length > 0) {
|
|
1056
|
+
const scopesRegistered = await registerAvailableScopes(
|
|
1057
|
+
service.name,
|
|
1058
|
+
serviceOwner || service.name,
|
|
1059
|
+
environment,
|
|
1060
|
+
sharingConfigs
|
|
1061
|
+
)
|
|
1062
|
+
if (scopesRegistered > 0) {
|
|
1063
|
+
await appendBuildLog(buildId, `Registered ${scopesRegistered}/${sharingConfigs.length} available scope(s) in Koko`)
|
|
1064
|
+
}
|
|
1065
|
+
|
|
1066
|
+
// Auto-seed approved grants for all listed consumers (migration + backwards compat).
|
|
1067
|
+
// The owner explicitly listed them in sharedWith, so auto-approve is safe.
|
|
1068
|
+
for (const config of sharingConfigs) {
|
|
1069
|
+
for (const consumer of config.sharedWith) {
|
|
1070
|
+
await autoSeedScopeGrant(service.name, consumer.service, config.type, consumer.access)
|
|
1071
|
+
}
|
|
1072
|
+
}
|
|
1073
|
+
}
|
|
1074
|
+
|
|
1075
|
+
// Sync onboarding spec to Koko (idempotent — cleans up if removed from catalog)
|
|
1076
|
+
if (catalog?.onboarding) {
|
|
1077
|
+
const synced = await syncOnboardingToKoko(
|
|
1078
|
+
service.name,
|
|
1079
|
+
catalog.onboarding.routes.map(r => ({
|
|
1080
|
+
pattern: r.pattern,
|
|
1081
|
+
steps: r.steps.map(s => ({ stepId: s.stepId, label: s.label, description: s.description })),
|
|
1082
|
+
}))
|
|
1083
|
+
)
|
|
1084
|
+
if (synced) {
|
|
1085
|
+
await appendBuildLog(buildId, `Onboarding spec synced to Koko (${catalog.onboarding.routes.length} route(s))`)
|
|
1086
|
+
}
|
|
1087
|
+
} else {
|
|
1088
|
+
// No onboarding declared — clean up any previous spec
|
|
1089
|
+
await syncOnboardingToKoko(service.name, undefined)
|
|
1090
|
+
}
|
|
1091
|
+
|
|
1092
|
+
// Backfill service ownership if not yet set (handles services created before ownership tracking)
|
|
1093
|
+
const ownershipUpdates: Record<string, string> = {}
|
|
1094
|
+
if (!service.org && build.org) {
|
|
1095
|
+
ownershipUpdates.org = build.org
|
|
1096
|
+
}
|
|
1097
|
+
if (!service.createdBy && build.requestedBy) {
|
|
1098
|
+
ownershipUpdates.createdBy = build.requestedBy
|
|
1099
|
+
}
|
|
1100
|
+
if (Object.keys(ownershipUpdates).length > 0) {
|
|
1101
|
+
await services.updateOne(
|
|
1102
|
+
{ id: service.id },
|
|
1103
|
+
{ $set: { ...ownershipUpdates, updatedAt: new Date().toISOString() } }
|
|
1104
|
+
)
|
|
1105
|
+
await appendBuildLog(buildId, `Service ownership backfilled: ${Object.keys(ownershipUpdates).join(', ')}`)
|
|
1106
|
+
}
|
|
1107
|
+
|
|
1108
|
+
// Configure DNS after successful deployment
|
|
1109
|
+
if (env.ENABLE_DNS_MANAGEMENT === 'true') {
|
|
1110
|
+
const dnsRecord = await configureDnsForService(service.name, environment)
|
|
1111
|
+
if (dnsRecord) {
|
|
1112
|
+
const hostname = buildDnsHostname(service.name, environment)
|
|
1113
|
+
await appendBuildLog(buildId, `DNS configured: ${hostname} -> ${dnsRecord.content}`)
|
|
1114
|
+
|
|
1115
|
+
// Register domain in Koko service registry
|
|
1116
|
+
const domainBinding = await registerOrUpdateDomainInKoko(
|
|
1117
|
+
hostname,
|
|
1118
|
+
service.id,
|
|
1119
|
+
environment as 'dev' | 'sandbox' | 'uat' | 'prod',
|
|
1120
|
+
'platform',
|
|
1121
|
+
env.CLOUDFLARE_ZONE_ID,
|
|
1122
|
+
dnsRecord.id
|
|
1123
|
+
)
|
|
1124
|
+
if (domainBinding) {
|
|
1125
|
+
await appendBuildLog(buildId, `Domain registered in Koko: ${domainBinding.id}`)
|
|
1126
|
+
}
|
|
1127
|
+
} else {
|
|
1128
|
+
await appendBuildLog(buildId, `DNS configuration skipped (not configured)`)
|
|
1129
|
+
}
|
|
1130
|
+
}
|
|
1131
|
+
// Post-deploy pod health check
|
|
1132
|
+
try {
|
|
1133
|
+
const diagnostics = await captureDeployDiagnostics(service.name, namespace)
|
|
1134
|
+
const hasIssues = diagnostics.pods.some(
|
|
1135
|
+
(p) => (p.phase !== 'Running' && p.phase !== 'Succeeded') || p.restartCount > 0
|
|
1136
|
+
)
|
|
1137
|
+
if (hasIssues) {
|
|
1138
|
+
await appendBuildLog(buildId, `[post-deploy] ${diagnostics.summary}`)
|
|
1139
|
+
for (const pod of diagnostics.pods) {
|
|
1140
|
+
if (pod.phase !== 'Running' && pod.phase !== 'Succeeded') {
|
|
1141
|
+
for (const event of pod.events.slice(-5)) {
|
|
1142
|
+
await appendBuildLog(buildId, `[post-deploy] ${event.type}: ${event.reason} - ${event.message}`)
|
|
1143
|
+
}
|
|
1144
|
+
}
|
|
1145
|
+
}
|
|
1146
|
+
await builds.updateOne({ id: buildId }, { $set: { diagnostics } })
|
|
1147
|
+
} else {
|
|
1148
|
+
await appendBuildLog(buildId, `[post-deploy] ${diagnostics.summary}`)
|
|
1149
|
+
}
|
|
1150
|
+
} catch (diagError) {
|
|
1151
|
+
logger.warn({ buildId, error: diagError }, 'Failed to capture post-deploy diagnostics')
|
|
1152
|
+
}
|
|
1153
|
+
} else {
|
|
1154
|
+
logger.warn({ buildId }, 'ENABLE_K8S_DEPLOY is not true — Kubernetes deployment SKIPPED')
|
|
1155
|
+
await appendBuildLog(buildId, `WARNING: Kubernetes deployment was SKIPPED (ENABLE_K8S_DEPLOY is not 'true'). Image was built and pushed but NOT deployed to the cluster.`)
|
|
1156
|
+
}
|
|
1157
|
+
|
|
1158
|
+
// Mark as completed
|
|
1159
|
+
await updateBuildStatus(buildId, 'completed')
|
|
1160
|
+
await appendBuildLog(buildId, `Build completed successfully`)
|
|
1161
|
+
|
|
1162
|
+
// Update service with last build info + credential rotation clock
|
|
1163
|
+
const now = new Date()
|
|
1164
|
+
const rotationTtlMs = env.ROTATION_TTL_DAYS * 24 * 60 * 60 * 1000
|
|
1165
|
+
const isAutoRotation = build.requestedBy === 'rotation-scheduler@internal.tawa'
|
|
1166
|
+
const isManualRefresh = build.requestedBy?.startsWith('refresh:')
|
|
1167
|
+
const trigger: RotationHistoryEntry['trigger'] = isAutoRotation
|
|
1168
|
+
? 'auto-rotation'
|
|
1169
|
+
: isManualRefresh
|
|
1170
|
+
? 'manual-refresh'
|
|
1171
|
+
: 'deploy'
|
|
1172
|
+
|
|
1173
|
+
const newHistoryEntry: RotationHistoryEntry = {
|
|
1174
|
+
rotatedAt: now.toISOString(),
|
|
1175
|
+
trigger,
|
|
1176
|
+
environment: build.environment,
|
|
1177
|
+
buildId,
|
|
1178
|
+
}
|
|
1179
|
+
|
|
1180
|
+
const existingHistory = service.credentialRotation?.rotationHistory ?? []
|
|
1181
|
+
const credentialRotation: CredentialRotation = {
|
|
1182
|
+
lastRotatedAt: now.toISOString(),
|
|
1183
|
+
nextRotationAt: new Date(now.getTime() + rotationTtlMs).toISOString(),
|
|
1184
|
+
rotationPolicy: service.credentialRotation?.rotationPolicy ?? 'auto',
|
|
1185
|
+
rotationHistory: [newHistoryEntry, ...existingHistory].slice(0, 50),
|
|
1186
|
+
}
|
|
1187
|
+
|
|
1188
|
+
await services.updateOne(
|
|
1189
|
+
{ id: service.id },
|
|
1190
|
+
{
|
|
1191
|
+
$set: {
|
|
1192
|
+
lastBuildId: buildId,
|
|
1193
|
+
lastBuildStatus: 'completed',
|
|
1194
|
+
credentialRotation,
|
|
1195
|
+
updatedAt: now.toISOString(),
|
|
1196
|
+
}
|
|
1197
|
+
}
|
|
1198
|
+
)
|
|
1199
|
+
|
|
1200
|
+
} catch (error) {
|
|
1201
|
+
const errorMessage = error instanceof Error ? error.message : 'Unknown error'
|
|
1202
|
+
logger.error({ buildId, error: errorMessage }, 'Build failed')
|
|
1203
|
+
await updateBuildStatus(buildId, 'failed', errorMessage)
|
|
1204
|
+
await appendBuildLog(buildId, `Build failed: ${errorMessage}`)
|
|
1205
|
+
|
|
1206
|
+
// Capture pod diagnostics if failure occurred during deploy phase
|
|
1207
|
+
if (deployPhaseReached) {
|
|
1208
|
+
try {
|
|
1209
|
+
const namespace = service.namespace || `${service.name}-${build.environment}`
|
|
1210
|
+
const diagnostics = await captureDeployDiagnostics(service.name, namespace)
|
|
1211
|
+
await appendBuildLog(buildId, `[diagnostics] ${diagnostics.summary}`)
|
|
1212
|
+
for (const pod of diagnostics.pods) {
|
|
1213
|
+
await appendBuildLog(buildId, `[diagnostics] Pod ${pod.name}: ${pod.phase} (restarts: ${pod.restartCount})`)
|
|
1214
|
+
for (const event of pod.events.slice(-5)) {
|
|
1215
|
+
await appendBuildLog(buildId, `[diagnostics] ${event.type}: ${event.reason} - ${event.message}`)
|
|
1216
|
+
}
|
|
1217
|
+
if (pod.logs) {
|
|
1218
|
+
await appendBuildLog(buildId, `[diagnostics] Container logs:\n${pod.logs}`)
|
|
1219
|
+
}
|
|
1220
|
+
}
|
|
1221
|
+
await builds.updateOne({ id: buildId }, { $set: { diagnostics } })
|
|
1222
|
+
} catch (diagError) {
|
|
1223
|
+
logger.warn({ buildId, error: diagError }, 'Failed to capture deploy failure diagnostics')
|
|
1224
|
+
}
|
|
1225
|
+
}
|
|
1226
|
+
|
|
1227
|
+
await services.updateOne(
|
|
1228
|
+
{ id: service.id },
|
|
1229
|
+
{
|
|
1230
|
+
$set: {
|
|
1231
|
+
lastBuildId: buildId,
|
|
1232
|
+
lastBuildStatus: 'failed',
|
|
1233
|
+
updatedAt: new Date().toISOString()
|
|
1234
|
+
}
|
|
1235
|
+
}
|
|
1236
|
+
)
|
|
1237
|
+
} finally {
|
|
1238
|
+
// Cleanup workspace
|
|
1239
|
+
try {
|
|
1240
|
+
await rm(workDir, { recursive: true, force: true })
|
|
1241
|
+
} catch {
|
|
1242
|
+
// Ignore cleanup errors
|
|
1243
|
+
}
|
|
1244
|
+
}
|
|
1245
|
+
}
|
|
1246
|
+
|
|
1247
|
+
/**
|
|
1248
|
+
* Clone repository from git or extract tarball from GridFS
|
|
1249
|
+
*/
|
|
1250
|
+
async function cloneOrExtract(repoUrl: string, branch: string, commitSha: string | undefined, workDir: string): Promise<string> {
|
|
1251
|
+
await mkdir(workDir, { recursive: true })
|
|
1252
|
+
|
|
1253
|
+
// Handle GridFS tarballs (from tawa push)
|
|
1254
|
+
if (repoUrl.startsWith('gridfs://')) {
|
|
1255
|
+
const tarballId = repoUrl.replace('gridfs://', '')
|
|
1256
|
+
logger.info({ tarballId, workDir }, 'Extracting tarball from GridFS')
|
|
1257
|
+
await extractTarball(tarballId, workDir)
|
|
1258
|
+
return commitSha || 'push'
|
|
1259
|
+
}
|
|
1260
|
+
|
|
1261
|
+
// Handle self-hosted git (push:// protocol) - tarball already uploaded
|
|
1262
|
+
if (repoUrl.startsWith('push://')) {
|
|
1263
|
+
logger.info({ repoUrl, workDir }, 'Source from push (tarball mode)')
|
|
1264
|
+
return commitSha || 'push'
|
|
1265
|
+
}
|
|
1266
|
+
|
|
1267
|
+
// Standard git clone — inject Forgejo token for private repos
|
|
1268
|
+
const cloneUrl = injectGitCredentials(repoUrl)
|
|
1269
|
+
const git: SimpleGit = simpleGit()
|
|
1270
|
+
await git.clone(cloneUrl, workDir, ['--branch', branch, '--single-branch'])
|
|
1271
|
+
|
|
1272
|
+
const repoGit = simpleGit(workDir)
|
|
1273
|
+
|
|
1274
|
+
// If a specific commit SHA is provided (not "latest" or undefined), checkout that commit
|
|
1275
|
+
if (commitSha && commitSha !== 'latest' && /^[a-f0-9]{7,40}$/i.test(commitSha)) {
|
|
1276
|
+
await repoGit.checkout(commitSha)
|
|
1277
|
+
return commitSha
|
|
1278
|
+
}
|
|
1279
|
+
|
|
1280
|
+
// Otherwise, get the current HEAD commit
|
|
1281
|
+
const log = await repoGit.log({ maxCount: 1 })
|
|
1282
|
+
return log.latest?.hash || 'HEAD'
|
|
1283
|
+
}
|
|
1284
|
+
|
|
1285
|
+
async function buildDockerImage(workDir: string, imageTag: string, buildId: string, dockerfilePath: string = 'Dockerfile'): Promise<void> {
|
|
1286
|
+
const fullDockerfilePath = join(workDir, dockerfilePath)
|
|
1287
|
+
|
|
1288
|
+
logger.info({ imageTag, workDir, dockerfilePath: fullDockerfilePath }, 'Building Docker image')
|
|
1289
|
+
await execAndLog(
|
|
1290
|
+
`docker build -t ${imageTag} -f ${fullDockerfilePath} ${workDir}`,
|
|
1291
|
+
buildId,
|
|
1292
|
+
{ label: 'docker build' }
|
|
1293
|
+
)
|
|
1294
|
+
}
|
|
1295
|
+
|
|
1296
|
+
/**
|
|
1297
|
+
* Authenticate Docker CLI with the container registry.
|
|
1298
|
+
* Credentials are passed via stdin to avoid shell escaping issues.
|
|
1299
|
+
*/
|
|
1300
|
+
async function ensureDockerAuth(buildId: string): Promise<void> {
|
|
1301
|
+
if (!env.DOCKER_REGISTRY_TOKEN || !env.DOCKER_REGISTRY_USER) {
|
|
1302
|
+
logger.warn('DOCKER_REGISTRY_USER or DOCKER_REGISTRY_TOKEN not set — skipping docker login (assuming local registry or pre-authenticated)')
|
|
1303
|
+
return
|
|
1304
|
+
}
|
|
1305
|
+
|
|
1306
|
+
const registryHost = env.DOCKER_REGISTRY.split('/')[0]
|
|
1307
|
+
logger.info({ registryHost, user: env.DOCKER_REGISTRY_USER }, 'Authenticating with Docker registry')
|
|
1308
|
+
|
|
1309
|
+
await execAndLog(
|
|
1310
|
+
`echo "${env.DOCKER_REGISTRY_TOKEN}" | docker login ${registryHost} -u ${env.DOCKER_REGISTRY_USER} --password-stdin`,
|
|
1311
|
+
buildId,
|
|
1312
|
+
{ label: 'docker login' }
|
|
1313
|
+
)
|
|
1314
|
+
}
|
|
1315
|
+
|
|
1316
|
+
async function pushDockerImage(imageTag: string, buildId: string): Promise<void> {
|
|
1317
|
+
// Ensure we're authenticated before pushing
|
|
1318
|
+
await ensureDockerAuth(buildId)
|
|
1319
|
+
|
|
1320
|
+
logger.info({ imageTag }, 'Pushing image to registry')
|
|
1321
|
+
await execAndLog(
|
|
1322
|
+
`docker push ${imageTag}`,
|
|
1323
|
+
buildId,
|
|
1324
|
+
{ label: 'docker push' }
|
|
1325
|
+
)
|
|
1326
|
+
}
|
|
1327
|
+
|
|
1328
|
+
async function ensureNamespaceExists(namespace: string): Promise<void> {
|
|
1329
|
+
try {
|
|
1330
|
+
await execAsync(`kubectl create namespace ${namespace} --dry-run=client -o yaml | kubectl apply -f -`, {
|
|
1331
|
+
encoding: 'utf8',
|
|
1332
|
+
shell: '/bin/bash',
|
|
1333
|
+
})
|
|
1334
|
+
logger.info({ namespace }, 'Namespace ensured')
|
|
1335
|
+
} catch (error) {
|
|
1336
|
+
logger.warn({ namespace, error }, 'Failed to ensure namespace (may already exist)')
|
|
1337
|
+
}
|
|
1338
|
+
}
|
|
1339
|
+
|
|
1340
|
+
/**
 * Deploy a built image to Kubernetes via `helm upgrade --install`.
 *
 * Responsibilities, in order:
 *  1. Resolve the namespace and helm chart path (explicit config wins over
 *     auto-discovery via resolveHelmChartPath).
 *  2. Prepare the namespace: create it, label it for Goldilocks, apply a default
 *     LimitRange, and copy the `insureco` imagePullSecret from `default`.
 *  3. Assemble the helm command: image coordinates, ingress hosts (platform
 *     hostname plus verified custom domains), health probes, env vars
 *     (platform defaults, then managed config, then provisioned vars — later
 *     `--set` flags override earlier ones), managed secrets (materialized as a
 *     K8s Secret, with a content checksum annotation to force pod rollouts),
 *     rolling-update strategy, and Vault agent wiring when annotations exist.
 *  4. Uninstall a previously-failed release, then run the deploy with `--wait`.
 *
 * @param service - Service record (name, namespace, config, secrets, custom domains)
 * @param imageTag - Full image reference `repo:tag`; split on ':' for helm values
 * @param environment - Target environment; used for hostname and namespace suffix
 * @param workDir - Checkout directory; relative chart paths resolve against it
 * @param buildId - Build id used when appending build-log lines
 * @param iecConfig - Optional parsed .iec.yaml (may override the helm chart path)
 * @param extraEnvVars - Provisioned env vars (DB/OAuth); mutated in place for the
 *   'portal' and 'bio' services to add their public-URL variables
 * @param catalogPort - Optional service port override from the catalog
 * @param catalogHealthEndpoint - Optional health probe path from the catalog
 * @param vaultAnnotations - Vault agent pod annotations; non-empty enables Vault wiring
 * @param framework - Catalog framework hint ('static' adjusts securityContext)
 * @throws Error when managed secrets exist but CONFIG_ENCRYPTION_KEY is unset
 */
async function deployToKubernetes(
  service: Service,
  imageTag: string,
  environment: string,
  workDir: string,
  buildId: string,
  iecConfig?: IecYamlConfig | null,
  extraEnvVars?: Record<string, string>,
  catalogPort?: number,
  catalogHealthEndpoint?: string,
  vaultAnnotations?: VaultAnnotations,
  framework?: string
): Promise<void> {

  // Determine namespace: use service.namespace or generate from name-environment
  const namespace = service.namespace || `${service.name}-${environment}`

  // Resolve helm chart path — explicit config takes precedence, otherwise auto-discover
  const explicitChart = iecConfig?.helmChart || service.helmChart
  const helmChartPath = explicitChart
    ? (service.appPath && !explicitChart.startsWith('/') ? join(service.appPath, explicitChart) : explicitChart)
    : await resolveHelmChartPath(workDir, service.name, service.appPath)

  // Absolute chart paths are used as-is; relative ones are anchored at the checkout dir
  const chart = helmChartPath.startsWith('/') ? helmChartPath : join(workDir, helmChartPath)
  logger.info({ helmChartPath, chart }, 'Resolved helm chart path')

  logger.info({ serviceName: service.name, namespace, imageTag, environment }, 'Deploying to Kubernetes')

  // Ensure namespace exists (also called earlier for DB provisioning)
  await ensureNamespaceExists(namespace)

  // Enable Goldilocks resource recommendations for this namespace (best-effort)
  try {
    await execAsync(`kubectl label namespace ${namespace} goldilocks.fairwinds.com/enabled=true --overwrite`, {
      encoding: 'utf8',
      shell: '/bin/bash',
    })
  } catch (error) {
    logger.warn({ namespace, error }, 'Failed to label namespace for Goldilocks')
  }

  // Apply default resource limits to prevent unbounded resource usage (best-effort)
  try {
    const limitRangeYaml = `
apiVersion: v1
kind: LimitRange
metadata:
  name: default-limits
  namespace: ${namespace}
spec:
  limits:
  - default:
      cpu: "500m"
      memory: "512Mi"
    defaultRequest:
      cpu: "50m"
      memory: "64Mi"
    max:
      cpu: "2"
      memory: "2Gi"
    type: Container`
    await execAsync(`echo '${limitRangeYaml}' | kubectl apply -f -`, {
      encoding: 'utf8',
      shell: '/bin/bash',
    })
    logger.info({ namespace }, 'LimitRange applied')
  } catch (error) {
    logger.warn({ namespace, error }, 'Failed to apply LimitRange')
  }

  // Ensure imagePullSecret exists in namespace (copy from default namespace, best-effort)
  try {
    await execAsync(`kubectl get secret insureco -n ${namespace} 2>/dev/null || kubectl get secret insureco -n default -o yaml | sed 's/namespace: default/namespace: ${namespace}/' | kubectl apply -f -`, {
      encoding: 'utf8',
      shell: '/bin/bash'
    })
    logger.info({ namespace }, 'ImagePullSecret ensured')
  } catch (error) {
    logger.warn({ namespace, error }, 'Failed to ensure imagePullSecret')
  }

  // Build hostname based on environment
  const hostname = buildDnsHostname(service.name, environment)

  // Build helm command
  // NOTE(review): splitting on ':' breaks if the registry host itself contains a
  // port (e.g. localhost:5000/app:v1) — confirm imageTag never includes one.
  const imageRepo = imageTag.split(':')[0]
  const imageTagOnly = imageTag.split(':')[1]
  let command = `helm upgrade --install ${service.name} ${chart} --namespace ${namespace} --set image.repository=${imageRepo} --set image.tag=${imageTagOnly}`

  // Override service port if catalog specifies one (e.g., 80 for static/nginx, 3000 for Node.js)
  if (catalogPort) {
    command += ` --set service.port=${catalogPort}`
  }

  // Ensure imagePullSecrets matches the registry secret managed by the builder
  command += ` --set imagePullSecrets[0].name=insureco`

  // Set dynamic hostname for ingress (must include paths to avoid overwriting the whole object)
  // Note: TLS is handled by Cloudflare proxy, not cert-manager
  command += ` --set ingress.enabled=true`
  command += ` --set ingress.className=nginx`
  command += ` --set ingress.hosts[0].host=${hostname}`
  command += ` --set ingress.hosts[0].paths[0].path=/`
  command += ` --set ingress.hosts[0].paths[0].pathType=Prefix`

  // Add verified custom domains as additional ingress hosts.
  // Domains failing this pattern are skipped (defense against shell/helm injection).
  const DOMAIN_RE = /^[a-z0-9]([a-z0-9-]*[a-z0-9])?(\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)+$/
  const verifiedCustomDomains = (service.customDomains || []).filter(
    d => d.dnsVerified && d.environment === environment
  )

  let domainIdx = 0
  for (const customDomain of verifiedCustomDomains) {
    if (!DOMAIN_RE.test(customDomain.domain)) {
      logger.warn({ domain: customDomain.domain }, 'Skipping invalid custom domain')
      continue
    }
    const idx = domainIdx + 1 // [0] is the platform hostname
    command += ` --set ingress.hosts[${idx}].host=${escapeShellArg(customDomain.domain)}`
    command += ` --set ingress.hosts[${idx}].paths[0].path=/`
    command += ` --set ingress.hosts[${idx}].paths[0].pathType=Prefix`
    domainIdx++
  }

  if (verifiedCustomDomains.length > 0) {
    await appendBuildLog(buildId, `Custom domains: ${verifiedCustomDomains.map(d => d.domain).join(', ')}`)
  }

  // Raise the nginx ingress body-size limit for services that declare storage
  // (default 1m is too small for file uploads — Word docs, PDFs, etc.)
  const hasStorage = vaultAnnotations?.['vault.hashicorp.com/agent-inject-secret-storage']
  if (hasStorage) {
    command += ` --set-json 'ingress.annotations={"nginx.ingress.kubernetes.io/proxy-body-size":"10m"}'`
  }

  // Canonical public URL for this service (prefers verified custom domain)
  const primaryDomain = verifiedCustomDomains[0]?.domain
  const serviceUrl = `https://${primaryDomain || hostname}`

  // Set health probe paths from catalog (framework-specific, e.g. /api/health for nextjs)
  // When Vault is active, liveness probe is overridden to exec-based (file check),
  // so only set the readiness httpGet probe. The liveness override happens below.
  const vaultActive = vaultAnnotations && Object.keys(vaultAnnotations).length > 0
  if (catalogHealthEndpoint) {
    if (!vaultActive) {
      command += ` --set livenessProbe.httpGet.path=${catalogHealthEndpoint}`
    }
    command += ` --set readinessProbe.httpGet.path=${catalogHealthEndpoint}`
  }

  // Static uses nginx-unprivileged (non-root, UID 101, port 8080)
  // Override default securityContext (UID 1001) to match nginx image user
  if (framework === 'static') {
    command += ` --set securityContext.runAsUser=101`
    command += ` --set securityContext.fsGroup=101`
    command += ` --set containerSecurityContext.allowPrivilegeEscalation=false`
  }

  // Set environment-specific URLs for portal service (mutates extraEnvVars in place)
  // BIO_ID_URL is now resolved generically by the OAuth provisioner above
  if (service.name === 'portal' && extraEnvVars) {
    extraEnvVars.PORTAL_URL = serviceUrl
    extraEnvVars.NEXT_PUBLIC_BIO_ID_URL = extraEnvVars.BIO_ID_URL || `https://${buildDnsHostname('bio', environment)}`
    logger.info({ serviceName: service.name, portalUrl: serviceUrl }, 'Set portal-specific URLs')
  }

  // Set environment-specific URLs for bio service (OAuth provider)
  // Bio needs to know its own public URL for JWT issuer and email links
  if (service.name === 'bio' && extraEnvVars) {
    extraEnvVars.BIO_ID_URL = serviceUrl
    extraEnvVars.NEXTAUTH_URL = serviceUrl
    extraEnvVars.JWT_ISSUER = serviceUrl
    logger.info({ serviceName: service.name, bioUrl: serviceUrl }, 'Set URLs for bio')
  }

  // Add helm values from service config
  // NOTE(review): key/value are interpolated unescaped here (unlike env vars below)
  // — assumes helmValues content is trusted; confirm it cannot be user-controlled.
  if (service.helmValues) {
    for (const [key, value] of Object.entries(service.helmValues)) {
      command += ` --set ${key}=${value}`
    }
  }

  // Inject platform default env vars (lowest precedence — user config can override)
  // All deployed environments use 'production' — 'development' is for local dev only
  const platformNodeEnv = 'production'
  const platformDefaults: Record<string, string> = {
    NODE_ENV: platformNodeEnv,
    SERVICE_URL: serviceUrl,
  }

  for (const [key, value] of Object.entries(platformDefaults)) {
    command += ` --set env.${key}=${escapeShellArg(value)}`
  }
  logger.info(
    { serviceName: service.name, environment, NODE_ENV: platformNodeEnv },
    'Injecting platform default env vars'
  )

  // Inject managed config vars (from `tawa config set`)
  if (service.config && Object.keys(service.config).length > 0) {
    // Warn if user has set NODE_ENV to a value that conflicts with the deploy target
    const userNodeEnv = service.config.NODE_ENV
    if (userNodeEnv && userNodeEnv !== platformNodeEnv) {
      const warning = `Warning: NODE_ENV is set to "${userNodeEnv}" but deploying to "${environment}" (expected "${platformNodeEnv}"). User override will be used.`
      logger.warn({ serviceName: service.name, userNodeEnv, expected: platformNodeEnv, environment }, warning)
      await appendBuildLog(buildId, `⚠️ ${warning}`)
    }

    for (const [key, value] of Object.entries(service.config)) {
      validateEnvKey(key)
      command += ` --set env.${key}=${escapeShellArg(escapeHelmValue(value))}`
    }
    logger.info({ serviceName: service.name, keys: Object.keys(service.config) }, 'Injecting managed config vars')
  }

  // Inject provisioned env vars (from DB and OAuth provisioners)
  if (extraEnvVars && Object.keys(extraEnvVars).length > 0) {
    for (const [key, value] of Object.entries(extraEnvVars)) {
      validateEnvKey(key)
      command += ` --set env.${key}=${escapeShellArg(escapeHelmValue(value))}`
    }
    logger.info({ serviceName: service.name, keys: Object.keys(extraEnvVars) }, 'Injecting provisioned env vars')
  }

  // Create K8s Secret for managed secrets (from `tawa config set --secret`)
  const podAnnotations: Record<string, string> = {}

  if (service.secrets && Object.keys(service.secrets).length > 0) {
    if (!process.env.CONFIG_ENCRYPTION_KEY) {
      throw new Error('CONFIG_ENCRYPTION_KEY is required to deploy services with managed secrets')
    }
    const { decryptRecord } = await import('./crypto.js')
    const decrypted = decryptRecord(service.secrets)
    const secretName = `${service.name}-managed-secrets`

    // Validate all key names before building shell command
    for (const k of Object.keys(decrypted)) {
      validateEnvKey(k)
    }

    const literals = Object.entries(decrypted)
      .map(([k, v]) => `--from-literal=${k}=${escapeShellArg(v)}`)
      .join(' ')

    await execAndLog(
      `kubectl create secret generic ${secretName} ${literals} --namespace ${namespace} --dry-run=client -o yaml | kubectl apply -f -`,
      buildId,
      { label: 'kubectl secret', shell: '/bin/bash' }
    )

    command += ` --set secretRef=${secretName}`

    // Checksum of secret content forces pod restart when secrets change
    // (otherwise same image tag = no pod rollout = stale secrets)
    const secretHash = createHash('sha256')
      .update(Object.keys(decrypted).sort().map(k => `${k}=${decrypted[k]}`).join('\n'))
      .digest('hex')
      .slice(0, 16)
    podAnnotations['checksum/managed-secrets'] = secretHash

    logger.info({ serviceName: service.name, keys: Object.keys(service.secrets) }, 'Injecting managed secrets')
  }

  // Zero-downtime rolling update strategy (credential rotation compliance)
  command += ` --set strategy.type=RollingUpdate`
  command += ` --set strategy.rollingUpdate.maxSurge=1`
  command += ` --set strategy.rollingUpdate.maxUnavailable=0`

  // Vault Agent annotations for dynamic database credentials
  if (vaultActive) {
    Object.assign(podAnnotations, vaultAnnotations)
    command += ` --set serviceAccountName=${service.name}`

    // Shared volume for Vault credential rotation liveness signaling
    command += ` --set-json 'volumes=[{"name":"vault-signals","emptyDir":{"medium":"Memory"}}]'`
    command += ` --set-json 'volumeMounts=[{"name":"vault-signals","mountPath":"/vault/signals"}]'`

    // Override liveness probe: file-based check instead of httpGet
    // Vault Agent removes /vault/signals/alive when credentials rotate at max_ttl,
    // causing K8s to restart the container with fresh credentials.
    // Explicitly null out httpGet to avoid conflict with chart defaults (custom charts
    // may define livenessProbe.httpGet in values.yaml — Helm merges rather than replaces).
    command += ` --set-json 'livenessProbe={"exec":{"command":["cat","/vault/signals/alive"]},"httpGet":null,"initialDelaySeconds":30,"periodSeconds":10}'`
  }

  // Inject pod annotations (secret checksum + Vault annotations merged)
  if (Object.keys(podAnnotations).length > 0) {
    const annotationsJson = JSON.stringify(podAnnotations)
    command += ` --set-json podAnnotations=${escapeShellArg(annotationsJson)}`
  }

  command += ' --wait --timeout 5m --history-max 5'

  // If the current release is in a failed state, uninstall it first so
  // helm upgrade --install starts clean instead of fighting stacked failures.
  try {
    const { stdout: statusOut } = await execAsync(
      `helm status ${service.name} -n ${namespace} -o json 2>/dev/null`,
      { encoding: 'utf8', shell: '/bin/bash' }
    )
    const releaseStatus = JSON.parse(statusOut)
    if (releaseStatus?.info?.status === 'failed') {
      await execAndLog(
        `helm uninstall ${service.name} -n ${namespace}`,
        buildId,
        { label: 'helm cleanup (failed release)' }
      )
    }
  } catch {
    // No existing release or helm status failed — proceed normally
  }

  await execAndLog(command, buildId, { label: 'helm deploy' })
  logger.info({ serviceName: service.name, namespace, imageTag }, 'Deployed to Kubernetes')
}
|
|
1655
|
+
|
|
1656
|
+
async function updateBuildStatus(buildId: string, status: BuildStatus, error?: string): Promise<void> {
|
|
1657
|
+
const builds = getBuildsCollection()
|
|
1658
|
+
const now = new Date().toISOString()
|
|
1659
|
+
|
|
1660
|
+
const update: Partial<Build> = { status, updatedAt: now }
|
|
1661
|
+
|
|
1662
|
+
if (status === 'cloning') {
|
|
1663
|
+
update.startedAt = now
|
|
1664
|
+
}
|
|
1665
|
+
|
|
1666
|
+
if (status === 'completed' || status === 'failed') {
|
|
1667
|
+
update.completedAt = now
|
|
1668
|
+
}
|
|
1669
|
+
|
|
1670
|
+
if (error) {
|
|
1671
|
+
update.error = error
|
|
1672
|
+
}
|
|
1673
|
+
|
|
1674
|
+
await builds.updateOne({ id: buildId }, { $set: update })
|
|
1675
|
+
await publishBuildEvent(buildId, status, { error })
|
|
1676
|
+
}
|
|
1677
|
+
|
|
1678
|
+
async function appendBuildLog(buildId: string, log: string): Promise<void> {
|
|
1679
|
+
const builds = getBuildsCollection()
|
|
1680
|
+
const timestamp = new Date().toISOString()
|
|
1681
|
+
await builds.updateOne(
|
|
1682
|
+
{ id: buildId },
|
|
1683
|
+
{ $push: { logs: `[${timestamp}] ${log}` } }
|
|
1684
|
+
)
|
|
1685
|
+
}
|
|
1686
|
+
|
|
1687
|
+
/**
|
|
1688
|
+
* Mark builds stuck in active states (cloning/building/pushing/deploying) as failed.
|
|
1689
|
+
* Called on startup to recover from crashes, and periodically to catch orphaned builds.
|
|
1690
|
+
*/
|
|
1691
|
+
export async function recoverStaleBuilds(maxAgeMinutes = 30): Promise<number> {
|
|
1692
|
+
const builds = getBuildsCollection()
|
|
1693
|
+
const cutoff = new Date(Date.now() - maxAgeMinutes * 60 * 1000).toISOString()
|
|
1694
|
+
|
|
1695
|
+
const result = await builds.updateMany(
|
|
1696
|
+
{
|
|
1697
|
+
status: { $in: ['cloning', 'building', 'pushing', 'deploying'] as const },
|
|
1698
|
+
updatedAt: { $lt: cutoff },
|
|
1699
|
+
},
|
|
1700
|
+
{
|
|
1701
|
+
$set: {
|
|
1702
|
+
status: 'failed' as const,
|
|
1703
|
+
error: 'Build timed out (server restart or stale process)',
|
|
1704
|
+
completedAt: new Date().toISOString(),
|
|
1705
|
+
updatedAt: new Date().toISOString(),
|
|
1706
|
+
},
|
|
1707
|
+
}
|
|
1708
|
+
)
|
|
1709
|
+
|
|
1710
|
+
if (result.modifiedCount > 0) {
|
|
1711
|
+
logger.warn({ recovered: result.modifiedCount, maxAgeMinutes }, 'Recovered stale builds')
|
|
1712
|
+
}
|
|
1713
|
+
|
|
1714
|
+
return result.modifiedCount
|
|
1715
|
+
}
|
|
1716
|
+
|
|
1717
|
+
/**
|
|
1718
|
+
* Execute a shell command, capture stdout/stderr, and append output to build logs.
|
|
1719
|
+
* Also pipes output to the process logger for PM2 visibility.
|
|
1720
|
+
* Throws on non-zero exit code with the captured stderr.
|
|
1721
|
+
*/
|
|
1722
|
+
async function execAndLog(
|
|
1723
|
+
command: string,
|
|
1724
|
+
buildId: string,
|
|
1725
|
+
options: { label?: string; shell?: string } = {}
|
|
1726
|
+
): Promise<string> {
|
|
1727
|
+
const { label, shell } = options
|
|
1728
|
+
|
|
1729
|
+
try {
|
|
1730
|
+
const { stdout } = await execAsync(command, {
|
|
1731
|
+
encoding: 'utf8',
|
|
1732
|
+
maxBuffer: 10 * 1024 * 1024,
|
|
1733
|
+
shell: shell || '/bin/bash',
|
|
1734
|
+
})
|
|
1735
|
+
|
|
1736
|
+
// Log significant output lines to build logs (skip blank lines, limit noise)
|
|
1737
|
+
const lines = stdout.split('\n').filter((line: string) => line.trim())
|
|
1738
|
+
if (lines.length > 0) {
|
|
1739
|
+
// For large output (docker build), log last 30 lines to avoid flooding
|
|
1740
|
+
const tail = lines.length > 30 ? lines.slice(-30) : lines
|
|
1741
|
+
const prefix = label ? `[${label}] ` : ''
|
|
1742
|
+
for (const line of tail) {
|
|
1743
|
+
logger.info(line)
|
|
1744
|
+
}
|
|
1745
|
+
await appendBuildLog(buildId, `${prefix}${tail.join('\n')}`)
|
|
1746
|
+
}
|
|
1747
|
+
|
|
1748
|
+
return stdout
|
|
1749
|
+
} catch (error: unknown) {
|
|
1750
|
+
const execError = error as { stdout?: string; stderr?: string; message?: string }
|
|
1751
|
+
const stderr = execError.stderr || execError.message || 'Unknown error'
|
|
1752
|
+
const stdout = execError.stdout || ''
|
|
1753
|
+
|
|
1754
|
+
// Log what we can from the failed command
|
|
1755
|
+
const errorLines = stderr.split('\n').filter((line: string) => line.trim())
|
|
1756
|
+
const outputLines = stdout.split('\n').filter((line: string) => line.trim())
|
|
1757
|
+
const allLines = [...outputLines.slice(-20), ...errorLines.slice(-20)]
|
|
1758
|
+
const prefix = label ? `[${label}] ` : ''
|
|
1759
|
+
|
|
1760
|
+
if (allLines.length > 0) {
|
|
1761
|
+
for (const line of allLines) {
|
|
1762
|
+
logger.error(line)
|
|
1763
|
+
}
|
|
1764
|
+
await appendBuildLog(buildId, `${prefix}${allLines.join('\n')}`)
|
|
1765
|
+
}
|
|
1766
|
+
|
|
1767
|
+
throw error
|
|
1768
|
+
}
|
|
1769
|
+
}
|
|
1770
|
+
|
|
1771
|
+
/**
|
|
1772
|
+
* Determine the environment from the branch name
|
|
1773
|
+
* Branch to environment mapping:
|
|
1774
|
+
* - main, master -> prod
|
|
1775
|
+
* - develop, development -> sandbox
|
|
1776
|
+
* - staging -> uat
|
|
1777
|
+
* - feature/* -> sandbox (default for feature branches)
|
|
1778
|
+
*/
|
|
1779
|
+
function determineEnvironmentFromBranch(branch: string): string {
|
|
1780
|
+
const lowerBranch = branch.toLowerCase()
|
|
1781
|
+
|
|
1782
|
+
if (lowerBranch === 'main' || lowerBranch === 'master') {
|
|
1783
|
+
return 'prod'
|
|
1784
|
+
}
|
|
1785
|
+
if (lowerBranch === 'develop' || lowerBranch === 'development') {
|
|
1786
|
+
return 'sandbox'
|
|
1787
|
+
}
|
|
1788
|
+
if (lowerBranch === 'staging' || lowerBranch === 'uat') {
|
|
1789
|
+
return 'uat'
|
|
1790
|
+
}
|
|
1791
|
+
if (lowerBranch.startsWith('feature/') || lowerBranch.startsWith('feat/')) {
|
|
1792
|
+
return 'sandbox'
|
|
1793
|
+
}
|
|
1794
|
+
if (lowerBranch.startsWith('hotfix/') || lowerBranch.startsWith('release/')) {
|
|
1795
|
+
return 'prod'
|
|
1796
|
+
}
|
|
1797
|
+
|
|
1798
|
+
// Default to sandbox for unknown branches
|
|
1799
|
+
return 'sandbox'
|
|
1800
|
+
}
|