onveloz 0.0.0-beta.17 → 0.0.0-beta.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/dist/index.mjs +1316 -492
  2. package/package.json +3 -1
package/dist/index.mjs CHANGED
@@ -14,6 +14,8 @@ import { createInterface } from "node:readline";
14
14
  import ora from "ora";
15
15
  import { createAuthClient } from "better-auth/client";
16
16
  import { deviceAuthorizationClient } from "better-auth/client/plugins";
17
+ import net from "node:net";
18
+ import WebSocket from "ws";
17
19
  import { link, mkdir, mkdtemp, readdir, rm, stat } from "node:fs/promises";
18
20
  import ignore from "ignore";
19
21
  import tar from "tar";
@@ -51,6 +53,131 @@ const DATABASE_ENGINE_DEFAULTS = {
51
53
  defaultVersion: "7"
52
54
  }
53
55
  };
56
/**
 * Default size tiers for disk-based engines (PostgreSQL, MySQL).
 *
 * Keys ("basico" … "nitro-plus") are the stable identifiers persisted in
 * veloz.json and sent to the API. `cpu`/`memory` are Kubernetes resource
 * quantity strings; `cpuLabel`/`memoryLabel` are the human-readable forms
 * shown in UI selectors.
 */
const DISK_ENGINE_SIZES = {
	basico: {
		label: "Básico",
		cpu: "250m",
		memory: "256Mi",
		cpuLabel: "0.25 vCPU",
		memoryLabel: "256 MB"
	},
	essencial: {
		label: "Essencial",
		cpu: "500m",
		memory: "512Mi",
		cpuLabel: "0.5 vCPU",
		memoryLabel: "512 MB"
	},
	turbo: {
		label: "Turbo",
		cpu: "1",
		memory: "1Gi",
		cpuLabel: "1 vCPU",
		memoryLabel: "1 GB"
	},
	// Quoted key: contains a hyphen, so it is not a valid identifier.
	"turbo-plus": {
		label: "Turbo Plus",
		cpu: "1500m",
		memory: "2Gi",
		cpuLabel: "1.5 vCPU",
		memoryLabel: "2 GB"
	},
	nitro: {
		label: "Nitro",
		cpu: "2",
		memory: "4Gi",
		cpuLabel: "2 vCPU",
		memoryLabel: "4 GB"
	},
	"nitro-plus": {
		label: "Nitro Plus",
		cpu: "4",
		memory: "8Gi",
		cpuLabel: "4 vCPU",
		memoryLabel: "8 GB"
	}
};
101
/**
 * Redis size tiers — memory-heavy since Redis is purely in-memory, CPU stays
 * minimal.
 *
 * Uses the same tier keys as DISK_ENGINE_SIZES but with different cpu/memory
 * values, so the same user-facing tier names map to engine-appropriate
 * resources.
 */
const REDIS_SIZES = {
	basico: {
		label: "Básico",
		cpu: "150m",
		memory: "128Mi",
		cpuLabel: "0.15 vCPU",
		memoryLabel: "128 MB"
	},
	essencial: {
		label: "Essencial",
		cpu: "250m",
		memory: "256Mi",
		cpuLabel: "0.25 vCPU",
		memoryLabel: "256 MB"
	},
	// "turbo" keeps the same CPU as "essencial" — only memory grows.
	turbo: {
		label: "Turbo",
		cpu: "250m",
		memory: "512Mi",
		cpuLabel: "0.25 vCPU",
		memoryLabel: "512 MB"
	},
	"turbo-plus": {
		label: "Turbo Plus",
		cpu: "500m",
		memory: "1Gi",
		cpuLabel: "0.5 vCPU",
		memoryLabel: "1 GB"
	},
	nitro: {
		label: "Nitro",
		cpu: "500m",
		memory: "2Gi",
		cpuLabel: "0.5 vCPU",
		memoryLabel: "2 GB"
	},
	"nitro-plus": {
		label: "Nitro Plus",
		cpu: "1",
		memory: "4Gi",
		cpuLabel: "1 vCPU",
		memoryLabel: "4 GB"
	}
};
146
/** Per-engine size tier maps. */
const DATABASE_SIZES_BY_ENGINE = {
	postgresql: DISK_ENGINE_SIZES,
	mysql: DISK_ENGINE_SIZES,
	redis: REDIS_SIZES
};
/**
 * Flat union of all size keys across engines.
 * Kept for backward compatibility (Zod schemas, Prisma column).
 *
 * NOTE: DISK_ENGINE_SIZES and REDIS_SIZES define the exact same six keys, so
 * this spread keeps the REDIS_SIZES cpu/memory values for every key — the
 * disk-engine values are NOT present in this union. Only rely on this object
 * for the key set, not for resource values.
 */
const DATABASE_SIZES = {
	...DISK_ENGINE_SIZES,
	...REDIS_SIZES
};
/** All unique size keys across all engines (for Zod validation). */
const DATABASE_SIZE_KEYS = [...new Set([...Object.keys(DISK_ENGINE_SIZES), ...Object.keys(REDIS_SIZES)])];
162
/**
 * Look up the size-tier table registered for a database engine.
 * Returns undefined for an engine with no registered tiers.
 */
function getDatabaseSizes(engine) {
	const tiers = DATABASE_SIZES_BY_ENGINE[engine];
	return tiers;
}
166
/**
 * Build the UI selector option list for an engine's size tiers.
 * Each option is { value, label, description } where the description combines
 * the human-readable CPU and memory labels.
 */
function getDatabaseSizeOptions(engine) {
	const tiers = getDatabaseSizes(engine);
	const options = [];
	for (const [value, tier] of Object.entries(tiers)) {
		options.push({
			value,
			label: tier.label,
			description: `${tier.cpuLabel} · ${tier.memoryLabel}`
		});
	}
	return options;
}
/** @deprecated Use getDatabaseSizeOptions(engine) instead. */
const DATABASE_SIZE_OPTIONS = getDatabaseSizeOptions("postgresql");
177
/**
 * Resolve a size tier key from raw cpu/memory resource strings.
 *
 * Bug fix: the previous implementation iterated the flat DATABASE_SIZES
 * union, but both tier maps share the same six keys, so the spread kept only
 * the Redis values — disk-engine combinations such as cpu "1" / memory "1Gi"
 * (disk "turbo") could never match and resolved to null. Searching both maps
 * directly makes every tier reachable; disk tiers take priority where the
 * two maps happen to share a cpu/memory pair, since the mapping is ambiguous
 * without knowing the engine.
 *
 * @param {string} cpu - Kubernetes CPU quantity (e.g. "500m", "1").
 * @param {string} memory - Kubernetes memory quantity (e.g. "512Mi", "1Gi").
 * @returns {string|null} The matching tier key, or null if no tier matches.
 */
function resolveDatabaseSize(cpu, memory) {
	for (const sizes of [DISK_ENGINE_SIZES, REDIS_SIZES]) {
		for (const [key, tier] of Object.entries(sizes)) {
			if (tier.cpu === cpu && tier.memory === memory) return key;
		}
	}
	return null;
}
54
181
 
55
182
  //#endregion
56
183
  //#region ../../packages/config/veloz-config.ts
@@ -114,10 +241,6 @@ const VolumeConfigSchema = z$1.object({
114
241
  ].some((p) => value === p || value.startsWith(p + "/")), "Caminho de montagem não permitido por segurança"),
115
242
  sizeGb: z$1.number().int().min(10).max(100).optional().default(10)
116
243
  });
117
- const DatabaseResourcesSchema = z$1.object({
118
- cpu: z$1.string().regex(/^[0-9]+(\.[0-9]+)?|[0-9]+m$/).default("500m").optional(),
119
- memory: z$1.string().regex(/^[0-9]+(Mi|Gi)$/).default("512Mi").optional()
120
- });
121
244
  const PoolerConfigSchema = z$1.object({
122
245
  enabled: z$1.boolean().default(false),
123
246
  poolMode: z$1.enum([
@@ -128,6 +251,8 @@ const PoolerConfigSchema = z$1.object({
128
251
  defaultPoolSize: z$1.number().int().min(1).max(200).default(20).optional(),
129
252
  maxClientConn: z$1.number().int().min(1).max(1e4).default(100).optional()
130
253
  });
254
/**
 * Size keys accepted by the schema. The original `.map((k) => k)` was a no-op
 * identity map (leftover from a TypeScript tuple cast); a spread copy keeps
 * the defensive-copy intent without the pointless callback.
 */
const LOWERCASE_SIZE_KEYS = [...DATABASE_SIZE_KEYS];
/** Zod enum of all valid database `size` tier keys. */
const DatabaseSizeSchema = z$1.enum(LOWERCASE_SIZE_KEYS);
131
256
  const DatabaseConfigSchema = z$1.object({
132
257
  id: z$1.string().optional(),
133
258
  name: z$1.string().optional(),
@@ -137,8 +262,8 @@ const DatabaseConfigSchema = z$1.object({
137
262
  "redis"
138
263
  ]),
139
264
  version: z$1.string().optional(),
140
- storage: z$1.string().regex(/^[0-9]+(Gi)$/).default("10Gi").optional(),
141
- resources: DatabaseResourcesSchema.optional(),
265
+ storage: z$1.string().regex(/^[0-9]+(Gi)$/).refine((val) => Number.parseInt(val, 10) >= 10, "Tamanho mínimo de storage é 10Gi").optional(),
266
+ size: DatabaseSizeSchema.optional(),
142
267
  pooler: PoolerConfigSchema.optional(),
143
268
  fromTemplate: z$1.string().optional()
144
269
  });
@@ -299,7 +424,10 @@ function loadConfig() {
299
424
  return config;
300
425
  }
301
426
  /**
302
- * Save the veloz.json config to project root
427
+ * Save the veloz.json config to project root.
428
+ *
429
+ * When possible, prefer `patchConfig` to avoid overwriting user-authored
430
+ * fields with Zod-applied defaults.
303
431
  */
304
432
  function saveConfig(config) {
305
433
  const path = getConfigPath();
@@ -310,6 +438,18 @@ function saveConfig(config) {
310
438
  writeFileSync(path, JSON.stringify(configWithSchema, null, 2), "utf-8");
311
439
  }
312
440
  /**
441
+ * Apply targeted patches to veloz.json without running Zod parse.
442
+ * Reads the raw JSON as VelozConfig (type-only cast, no transforms/defaults),
443
+ * applies the patch function, and writes back — preserving user-authored values.
444
+ */
445
+ function patchConfig(patchFn) {
446
+ const path = getConfigPath();
447
+ if (!existsSync(path)) throw new Error(`${getConfigFileName()} not found`);
448
+ const raw = JSON.parse(readFileSync(path, "utf-8"));
449
+ patchFn(raw);
450
+ writeFileSync(path, JSON.stringify(raw, null, 2), "utf-8");
451
+ }
452
+ /**
313
453
  * Require config to exist, throw if not found
314
454
  */
315
455
  function requireConfig() {
@@ -955,6 +1095,7 @@ envGroup.command("set", {
955
1095
  const serviceId = await resolveServiceId(c.options.service);
956
1096
  const client = await getClient();
957
1097
  const pares = c.args.pares.split(/\s+/);
1098
+ const allWarnings = [];
958
1099
  for (const par of pares) {
959
1100
  const eqIndex = par.indexOf("=");
960
1101
  if (eqIndex === -1) {
@@ -967,13 +1108,15 @@ envGroup.command("set", {
967
1108
  spin.stop();
968
1109
  throw new Error("Chave não pode estar vazia.");
969
1110
  }
970
- await client.envVars.set({
1111
+ const result = await client.envVars.set({
971
1112
  serviceId,
972
1113
  key,
973
1114
  value
974
1115
  });
1116
+ if (result.warnings) allWarnings.push(...result.warnings);
975
1117
  }
976
1118
  spin.stop();
1119
+ for (const w of allWarnings) warn(w);
977
1120
  if (pares.length === 1) {
978
1121
  const key = pares[0].slice(0, pares[0].indexOf("="));
979
1122
  success(`Variável ${chalk.bold(key)} definida com sucesso.`);
@@ -1069,11 +1212,12 @@ envGroup.command("import", {
1069
1212
  key: envVar.key
1070
1213
  });
1071
1214
  }
1072
- await client.envVars.setBulk({
1215
+ const result = await client.envVars.setBulk({
1073
1216
  serviceId,
1074
1217
  vars: envVars
1075
1218
  });
1076
1219
  spin.stop();
1220
+ if (result.warnings) for (const w of result.warnings) warn(w);
1077
1221
  success(`${varsCount} variável(is) importada(s) com sucesso!`);
1078
1222
  info("Faça um novo deploy para aplicar as alterações.");
1079
1223
  }
@@ -1233,9 +1377,11 @@ function updateServiceVolumesInConfig(serviceKey, updater) {
1233
1377
  if (!config) return false;
1234
1378
  const currentService = config.services[serviceKey];
1235
1379
  if (!currentService) return false;
1236
- currentService.volumes = updater([...currentService.volumes ?? []]);
1237
- config.updated = (/* @__PURE__ */ new Date()).toISOString();
1238
- saveConfig(config);
1380
+ const updatedVolumes = updater([...currentService.volumes ?? []]);
1381
+ patchConfig((raw) => {
1382
+ if (raw.services[serviceKey]) raw.services[serviceKey].volumes = updatedVolumes;
1383
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
1384
+ });
1239
1385
  return true;
1240
1386
  }
1241
1387
 
@@ -1883,8 +2029,7 @@ dbGroup.command("create", {
1883
2029
  engine: z.string().optional().describe("Engine (postgresql, mysql, redis)"),
1884
2030
  engineVersion: z.string().optional().describe("Versão do engine"),
1885
2031
  storage: z.string().optional().describe("Armazenamento (ex: 10Gi, 20Gi)"),
1886
- cpu: z.string().optional().describe("Limite de CPU (ex: 500m, 1)"),
1887
- memory: z.string().optional().describe("Limite de memória (ex: 512Mi, 1Gi)"),
2032
+ size: z.string().optional().describe("Tier de recursos (basico, essencial, turbo, turbo-plus, nitro, nitro-plus)"),
1888
2033
  pooler: z.boolean().optional().describe("Habilitar PgBouncer (apenas PostgreSQL)")
1889
2034
  }),
1890
2035
  async run(c) {
@@ -1935,30 +2080,25 @@ dbGroup.command("create", {
1935
2080
  engine: validEngine,
1936
2081
  engineVersion: version,
1937
2082
  storage,
1938
- cpuLimit: c.options.cpu,
1939
- memoryLimit: c.options.memory,
2083
+ size: c.options.size,
1940
2084
  poolerEnabled: c.options.pooler
1941
2085
  })
1942
2086
  });
1943
2087
  success(`Banco de dados ${chalk.bold(db.name)} criado! Provisionando...`);
1944
2088
  info("Use 'veloz db credentials " + db.name + "' para ver as credenciais quando estiver pronto.");
1945
- const config = loadConfig();
1946
- if (config) {
1947
- const updatedDatabases = { ...config.databases };
1948
- updatedDatabases[name] = {
1949
- id: db.id,
1950
- engine: validEngine,
1951
- version: version ?? void 0,
1952
- storage: storage ?? void 0,
1953
- ...c.options.cpu || c.options.memory ? { resources: {
1954
- ...c.options.cpu && { cpu: c.options.cpu },
1955
- ...c.options.memory && { memory: c.options.memory }
1956
- } } : {},
1957
- ...c.options.pooler ? { pooler: { enabled: true } } : {}
1958
- };
1959
- config.databases = updatedDatabases;
1960
- config.updated = (/* @__PURE__ */ new Date()).toISOString();
1961
- saveConfig(config);
2089
+ if (loadConfig()) {
2090
+ patchConfig((raw) => {
2091
+ raw.databases ??= {};
2092
+ raw.databases[name] = {
2093
+ id: db.id,
2094
+ engine: validEngine,
2095
+ ...version ? { version } : {},
2096
+ ...storage ? { storage } : {},
2097
+ ...c.options.size ? { size: c.options.size } : {},
2098
+ ...c.options.pooler ? { pooler: { enabled: true } } : {}
2099
+ };
2100
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
2101
+ });
1962
2102
  info(`Adicionado ao ${getConfigFileName()}.`);
1963
2103
  }
1964
2104
  }
@@ -2024,10 +2164,13 @@ dbGroup.command("delete", {
2024
2164
  if (config?.databases) {
2025
2165
  const key = Object.entries(config.databases).find(([k, d]) => d.id === db.id || k === db.name)?.[0];
2026
2166
  if (key) {
2027
- const { [key]: _, ...rest } = config.databases;
2028
- config.databases = Object.keys(rest).length > 0 ? rest : void 0;
2029
- config.updated = (/* @__PURE__ */ new Date()).toISOString();
2030
- saveConfig(config);
2167
+ patchConfig((raw) => {
2168
+ if (raw.databases) {
2169
+ delete raw.databases[key];
2170
+ if (Object.keys(raw.databases).length === 0) raw.databases = void 0;
2171
+ }
2172
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
2173
+ });
2031
2174
  info(`Removido do ${getConfigFileName()}.`);
2032
2175
  }
2033
2176
  }
@@ -2047,6 +2190,291 @@ dbGroup.command("restart", {
2047
2190
  success(`Banco de dados ${chalk.bold(db.name)} reiniciado com sucesso.`);
2048
2191
  }
2049
2192
  });
2193
// `veloz db update` — change size tier, storage, and/or pooler settings of an
// existing database. With no flags in an interactive terminal it walks the
// user through each setting; otherwise only the given flags are applied.
dbGroup.command("update", {
	description: "Atualizar configurações de um banco de dados",
	middleware: [requireAuth],
	args: z.object({ name: z.string().describe("Nome ou ID do banco de dados") }),
	options: z.object({
		size: z.string().optional().describe("Tier de recursos (basico, essencial, turbo, turbo-plus, nitro, nitro-plus)"),
		storage: z.string().optional().describe("Armazenamento (ex: 20Gi, 50Gi)"),
		pooler: z.boolean().optional().describe("Habilitar/desabilitar PgBouncer"),
		poolMode: z.string().optional().describe("Modo do pooler (transaction, session, statement)"),
		poolSize: z.number().optional().describe("Pool size padrão (1-200)"),
		maxConnections: z.number().optional().describe("Máximo de conexões do cliente (1-10000)")
	}),
	async run(c) {
		const db = await resolveDatabaseByName(getProjectId$1(), c.args.name);
		const client = await getClient();
		// Any explicit flag suppresses the interactive wizard below.
		const hasFlags = c.options.size !== void 0 || c.options.storage !== void 0 || c.options.pooler !== void 0 || c.options.poolMode !== void 0 || c.options.poolSize !== void 0 || c.options.maxConnections !== void 0;
		let size = c.options.size;
		let storage = c.options.storage;
		let poolerEnabled = c.options.pooler;
		let poolMode = c.options.poolMode;
		let poolSize = c.options.poolSize;
		let maxConnections = c.options.maxConnections;
		// Interactive wizard: show current values and prompt per setting.
		if (!hasFlags && isInteractive()) {
			const engineSizes = getDatabaseSizes(db.engine ?? "postgresql");
			const engineSizeKeys = Object.keys(engineSizes);
			// Fall back to the raw key (or an em dash) if the tier is unknown/unset.
			const currentSizeLabel = db.size ? engineSizes[db.size]?.label ?? db.size : "—";
			console.log(`\n ${chalk.dim("Tier atual:")} ${currentSizeLabel}`);
			if (await promptConfirm("Alterar tier de recursos?", false)) size = await promptSelect("Novo tier:", engineSizeKeys.map((k) => {
				const s = engineSizes[k];
				const current = k === db.size ? " (atual)" : "";
				return {
					label: `${s.label} — ${s.cpuLabel}, ${s.memoryLabel}${current}`,
					value: k
				};
			}));
			const currentStorageGb = db.storageGb;
			console.log(` ${chalk.dim("Storage atual:")} ${currentStorageGb} GB`);
			if (await promptConfirm("Alterar armazenamento?", false)) {
				// Shrinking is not supported — only offer strictly larger options.
				const storageOptions = STORAGE_OPTIONS.filter((s) => parseInt(s.replace("Gi", ""), 10) > currentStorageGb);
				if (storageOptions.length === 0) info("Já está no armazenamento máximo disponível.");
				else storage = await promptSelect("Novo armazenamento:", storageOptions.map((s) => ({
					label: formatStorageLabel(s),
					value: s
				})));
			}
			// Pooler settings only exist for PostgreSQL (PgBouncer).
			if (db.engine === "postgresql") {
				const poolerStatus = db.poolerEnabled ? "ativado" : "desativado";
				console.log(` ${chalk.dim("Pooler:")} ${poolerStatus}`);
				if (await promptConfirm("Alterar configuração do pooler?", false)) {
					poolerEnabled = await promptConfirm("Habilitar PgBouncer?", db.poolerEnabled);
					if (poolerEnabled) poolMode = await promptSelect("Modo do pool:", [
						{
							label: "Transaction (padrão)",
							value: "transaction"
						},
						{
							label: "Session",
							value: "session"
						},
						{
							label: "Statement",
							value: "statement"
						}
					]);
				}
			}
		}
		// Validate the requested tier against the engine's own tier set
		// (covers both flag-supplied and wizard-selected values).
		const validEngineSizes = getDatabaseSizes(db.engine ?? "postgresql");
		const validSizeKeys = Object.keys(validEngineSizes);
		if (size && !validSizeKeys.includes(size)) throw new Error(`Tier inválido: ${size}. Opções para ${db.engine}: ${validSizeKeys.join(", ")}`);
		let updated = false;
		// 1) Resource tier — only applied when the DB is LIVE and the tier changed.
		if (size && size !== db.size) if (db.databaseStatus !== "LIVE") warn(`Banco de dados não está LIVE — não é possível alterar recursos agora.`);
		else {
			const sizeConfig = validEngineSizes[size];
			await withSpinner({
				text: `Atualizando tier para ${sizeConfig.label} (${sizeConfig.cpuLabel}, ${sizeConfig.memoryLabel})...`,
				fn: () => client.databases.updateResources({
					serviceId: db.id,
					size
				})
			});
			success(`Tier atualizado para ${sizeConfig.label}.`);
			updated = true;
		}
		// 2) Storage — resized via the "data" volume; growth only.
		if (storage) {
			const desiredGb = parseInt(storage.replace("Gi", ""), 10);
			if (isNaN(desiredGb) || desiredGb <= 0) throw new Error(`Storage inválido: ${storage}. Use formato como "20Gi".`);
			if (desiredGb <= db.storageGb) warn(`Storage solicitado (${desiredGb} GB) não é maior que o atual (${db.storageGb} GB). Redução não é suportada.`);
			else {
				const dataVolume = (await client.volumes.list({ serviceId: db.id })).find((v) => v.name === "data");
				if (!dataVolume) warn("Volume de dados não encontrado. O banco pode ainda estar sendo provisionado.");
				else {
					await withSpinner({
						text: `Redimensionando armazenamento de ${db.storageGb} GB para ${desiredGb} GB...`,
						fn: () => client.volumes.update({
							volumeId: dataVolume.id,
							sizeGb: desiredGb
						})
					});
					success(`Armazenamento atualizado para ${desiredGb} GB.`);
					updated = true;
				}
			}
		}
		// 3) Pooler — PostgreSQL-only, requires the DB to be LIVE. Unset fields
		// keep the server-side current value (enabled falls back to db.poolerEnabled).
		if (poolerEnabled !== void 0 || poolMode || poolSize || maxConnections) if (db.engine !== "postgresql") warn(`Connection pooler só é suportado para PostgreSQL.`);
		else if (db.databaseStatus !== "LIVE") warn(`Banco de dados não está LIVE — não é possível alterar o pooler agora.`);
		else {
			await withSpinner({
				text: "Atualizando configuração do pooler...",
				fn: () => client.databases.updatePooler({
					serviceId: db.id,
					enabled: poolerEnabled ?? db.poolerEnabled,
					poolMode,
					defaultPoolSize: poolSize,
					maxClientConn: maxConnections
				})
			});
			success(`Pooler ${poolerEnabled ?? db.poolerEnabled ? "ativado" : "desativado"}.`);
			updated = true;
		}
		if (!updated) {
			info("Nenhuma alteração realizada.");
			return;
		}
		// Mirror the applied changes into veloz.json (raw patch, no Zod defaults).
		if (loadConfig()) {
			patchConfig((raw) => {
				raw.databases ??= {};
				// Match the config entry by id first, then by name; default to name.
				const configKey = Object.entries(raw.databases).find(([k, d]) => d?.id === db.id || k === db.name)?.[0] ?? db.name;
				raw.databases[configKey] ??= { engine: db.engine };
				if (size) raw.databases[configKey].size = size;
				if (storage) raw.databases[configKey].storage = storage;
				if (poolerEnabled !== void 0 || poolMode || poolSize || maxConnections) {
					// Merge over any existing pooler block so untouched fields survive.
					const existing = raw.databases[configKey].pooler ?? { enabled: false };
					raw.databases[configKey].pooler = {
						...existing,
						...poolerEnabled !== void 0 ? { enabled: poolerEnabled } : {},
						...poolMode ? { poolMode } : {},
						...poolSize ? { defaultPoolSize: poolSize } : {},
						...maxConnections ? { maxClientConn: maxConnections } : {}
					};
				}
				raw.updated = (/* @__PURE__ */ new Date()).toISOString();
			});
			info(`Atualizado no ${getConfigFileName()}.`);
		}
	}
});
2340
// `veloz db query` — run a one-off SQL statement (or Redis command) against a
// database and stream the results to the terminal.
dbGroup.command("query", {
	description: "Executar uma consulta no banco de dados",
	middleware: [requireAuth],
	args: z.object({ name: z.string().describe("Nome ou ID do banco de dados") }),
	options: z.object({ query: z.string().optional().describe("Consulta SQL ou comando Redis") }),
	alias: { query: "q" },
	async run(c) {
		const db = await resolveDatabaseByName(getProjectId$1(), c.args.name);
		let query = c.options.query;
		// No --query flag: prompt interactively, or fail in non-interactive mode.
		if (!query) {
			if (!isInteractive()) throw new Error("Use --query ou -q para fornecer a consulta em modo não-interativo.");
			query = await prompt(`Consulta ${ENGINE_DISPLAY[db.engine] ?? db.engine} (ex: ${db.engine === "redis" ? "GET chave" : "SELECT * FROM tabela LIMIT 10"}):`);
			if (!query) throw new Error("Consulta é obrigatória.");
		}
		const client = await getClient();
		console.log(chalk.dim(`\n Executando consulta em ${chalk.bold(db.name)}...\n`));
		// The server streams typed events: "output" (result rows/text),
		// "error" (query errors), and "status" (lifecycle messages).
		const stream = await client.databases.executeQuery({
			serviceId: db.id,
			query
		});
		let hasOutput = false;
		for await (const event of stream) switch (event.type) {
			case "output":
				hasOutput = true;
				console.log(event.content);
				break;
			case "error":
				console.log(chalk.red(event.content));
				break;
			case "status":
				// NOTE(review): completion is detected by matching the exact
				// server-sent status string — confirm it stays in sync with the API.
				if (event.content === "Consulta finalizada.") {
					if (!hasOutput) info("Consulta executada sem retorno.");
					console.log();
				}
				break;
		}
	}
});
2378
/** Conventional default client port per engine, used as the local tunnel port. */
const ENGINE_DEFAULT_PORTS = {
	postgresql: 5432,
	mysql: 3306,
	redis: 6379
};
/** Per-engine CLI hint shown after the tunnel opens; `p` is the local port. */
const ENGINE_CONNECT_HINTS = {
	postgresql: (p) => `psql -h localhost -p ${p}`,
	mysql: (p) => `mysql -h 127.0.0.1 -P ${p}`,
	redis: (p) => `redis-cli -p ${p}`
};
2388
// `veloz db tunnel` — expose a remote database on 127.0.0.1 by relaying each
// local TCP connection over a per-connection WebSocket proxy session.
dbGroup.command("tunnel", {
	description: "Criar túnel local para um banco de dados",
	middleware: [requireAuth],
	args: z.object({ name: z.string().describe("Nome ou ID do banco de dados") }),
	options: z.object({ port: z.number().optional().describe("Porta local (padrão: porta do engine)") }),
	alias: { port: "p" },
	async run(c) {
		const db = await resolveDatabaseByName(getProjectId$1(), c.args.name);
		const client = await getClient();
		// Prefer --port, then the engine's conventional port, then 5432.
		const localPort = c.options.port ?? ENGINE_DEFAULT_PORTS[db.engine] ?? 5432;
		const engineLabel = ENGINE_DISPLAY[db.engine] ?? db.engine;
		const creds = await withSpinner({
			text: "Carregando credenciais...",
			fn: () => client.databases.getCredentials({ serviceId: db.id })
		});
		// Build the connection URL the user can paste into a client.
		let localUrl;
		if (db.engine === "redis") localUrl = creds.password ? `redis://:${creds.password}@127.0.0.1:${localPort}` : `redis://127.0.0.1:${localPort}`;
		else localUrl = `${db.engine === "mysql" ? "mysql" : "postgresql"}://${creds.username}:${creds.password}@127.0.0.1:${localPort}/${creds.database}`;
		// One proxy session + WebSocket per accepted TCP connection.
		const server = net.createServer(async (socket) => {
			let session;
			try {
				session = await client.databases.createTunnelSession({ serviceId: db.id });
			} catch (err) {
				const message = err instanceof Error ? err.message : "Erro desconhecido";
				console.log(chalk.red(` Falha ao criar sessão de túnel: ${message}`));
				socket.destroy();
				return;
			}
			const ws = new WebSocket(`${session.proxyUrl}/connect?token=${session.token}`);
			// Deliver frames as Buffers so they can be written to the socket as-is.
			ws.binaryType = "nodebuffer";
			// Bytes arriving from the client before the WebSocket opens are
			// buffered and flushed on "open" to avoid dropping the handshake.
			let wsReady = false;
			const pendingData = [];
			ws.on("open", () => {
				wsReady = true;
				for (const chunk of pendingData) ws.send(chunk);
				pendingData.length = 0;
			});
			// Remote → local: forward frames to the TCP client.
			ws.on("message", (data) => {
				if (!socket.destroyed) socket.write(data);
			});
			// Tear down the pair when either side closes or errors.
			ws.on("close", () => {
				if (!socket.destroyed) socket.destroy();
			});
			ws.on("error", (err) => {
				console.log(chalk.red(` Erro no túnel: ${err.message}`));
				if (!socket.destroyed) socket.destroy();
			});
			// Local → remote: send immediately once open, buffer before that.
			socket.on("data", (data) => {
				if (wsReady) ws.send(data);
				else pendingData.push(data);
			});
			socket.on("close", () => {
				if (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING) ws.close();
			});
			socket.on("error", () => {
				if (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING) ws.close();
			});
		});
		server.on("error", (err) => {
			if (err.code === "EADDRINUSE") console.log(chalk.red(`\n Porta ${localPort} já está em uso. Use --port para escolher outra.\n`));
			else console.log(chalk.red(`\n Erro ao iniciar servidor local: ${err.message}\n`));
			process.exit(1);
		});
		// Bind loopback-only so the tunnel is not reachable from the network.
		server.listen(localPort, "127.0.0.1", () => {
			console.log();
			success(`Túnel ${engineLabel} ativo para ${chalk.bold(db.name)}`);
			console.log();
			console.log(` ${chalk.dim("Endereço local:")} ${chalk.bold(`127.0.0.1:${localPort}`)}`);
			console.log(` ${chalk.dim("URL de conexão:")} ${chalk.cyan(localUrl)}`);
			const hint = ENGINE_CONNECT_HINTS[db.engine];
			if (hint) console.log(` ${chalk.dim("Conectar com:")} ${chalk.cyan(hint(localPort))}`);
			console.log();
			console.log(chalk.dim(" Pressione Ctrl+C para encerrar o túnel."));
			console.log();
		});
		// Keep the command alive until SIGINT/SIGTERM; the 3s timer guarantees
		// the promise resolves even if server.close() hangs on open connections.
		await new Promise((resolve$1) => {
			const shutdown = () => {
				console.log(chalk.dim("\n Encerrando túnel..."));
				server.close(() => {
					resolve$1();
				});
				setTimeout(() => {
					resolve$1();
				}, 3e3);
			};
			process.on("SIGINT", shutdown);
			process.on("SIGTERM", shutdown);
		});
	}
});
2050
2478
 
2051
2479
  //#endregion
2052
2480
  //#region src/commands/template.ts
@@ -2127,7 +2555,7 @@ templateGroup.command("deploy", {
2127
2555
  projectName: c.options.name
2128
2556
  })
2129
2557
  });
2130
- success(`Template '${template.displayName}' implantado com sucesso!`);
2558
+ success(`Template '${template.displayName}' deploy concluído com sucesso!`);
2131
2559
  console.log(chalk.bold(`\n Projeto: ${result.projectName}`));
2132
2560
  console.log(chalk.dim(` Slug: ${result.projectSlug}`));
2133
2561
  console.log();
@@ -2570,6 +2998,104 @@ async function getFilesToUpload(directory) {
2570
2998
  await walk(directory);
2571
2999
  return files;
2572
3000
  }
3001
/**
 * Create a base tarball from the project directory (no extra files).
 * This is the expensive operation (file walk + gzip) that should happen once;
 * per-service overlay tarballs are derived from the returned handle.
 *
 * @param {string} directory - Project root to archive.
 * @returns {{tarPath: string, tempDir: string, relativePaths: string[], directory: string}}
 * @throws {Error} when the directory yields no uploadable files.
 */
async function createBaseTarball(directory) {
	const workDir = await mkdtemp(join(tmpdir(), "veloz-base-"));
	const archivePath = join(workDir, "source.tar.gz");
	try {
		const absolutePaths = await getFilesToUpload(directory);
		if (absolutePaths.length === 0) throw new Error("No files to upload");
		// Archive entries must be relative to the project root.
		const entries = absolutePaths.map((abs) => relative(directory, abs));
		await tar.create({
			gzip: true,
			file: archivePath,
			cwd: directory
		}, entries);
		return {
			tarPath: archivePath,
			tempDir: workDir,
			relativePaths: entries,
			directory
		};
	} catch (err) {
		// Best-effort cleanup; the original failure is what gets reported.
		await rm(workDir, {
			recursive: true,
			force: true
		}).catch(() => {});
		throw err;
	}
}
3031
/**
 * Create a service-specific tarball. If extraFiles need injection, creates
 * an overlay tarball using hardlinks from the base. Otherwise, returns the
 * base tarball path directly.
 *
 * @param base - Handle returned by createBaseTarball (tarPath, tempDir,
 *   relativePaths, directory).
 * @param extraFiles - Optional [{name, content}] entries to add; entries that
 *   already exist on disk in the project are skipped (user files win).
 * @returns {{tarPath, tempDir}} tempDir is null when the base tarball is
 *   reused unchanged (caller must not delete the base in that case).
 */
async function createServiceTarball(base, extraFiles) {
	// Only inject files the project does not already provide.
	const injectedFiles = [];
	if (extraFiles) for (const file of extraFiles) {
		if (existsSync(join(base.directory, file.name))) continue;
		injectedFiles.push(file);
	}
	// Nothing to inject: the shared base tarball is valid as-is.
	if (injectedFiles.length === 0) return {
		tarPath: base.tarPath,
		tempDir: null
	};
	const tempDir = await mkdtemp(join(tmpdir(), "veloz-overlay-"));
	const tarPath = join(tempDir, "source.tar.gz");
	const stagingDir = join(tempDir, "staging");
	await mkdir(stagingDir, { recursive: true });
	try {
		const relativePaths = [...base.relativePaths];
		// Mirror the base file set into the staging dir via hardlinks —
		// avoids copying file contents for the overlay.
		// NOTE(review): link() requires staging and project on the same
		// filesystem; tmpdir may be a different mount — confirm.
		for (const rel of relativePaths) {
			const src = join(base.directory, rel);
			const dest = join(stagingDir, rel);
			await mkdir(dirname(dest), { recursive: true });
			await link(src, dest);
		}
		// Write the injected files and extend the entry list.
		for (const file of injectedFiles) {
			writeFileSync(join(stagingDir, file.name), file.content);
			if (!relativePaths.includes(file.name)) relativePaths.push(file.name);
		}
		await tar.create({
			gzip: true,
			file: tarPath,
			cwd: stagingDir
		}, relativePaths);
		return {
			tarPath,
			tempDir
		};
	} catch (err) {
		// Best-effort cleanup of the overlay dir; rethrow the original error.
		await rm(tempDir, {
			recursive: true,
			force: true
		}).catch(() => {});
		throw err;
	}
}
3079
/**
 * Upload a pre-built tarball for a deployment, then kick off its build.
 *
 * Fetches a presigned upload URL for the deployment, PUTs the gzip archive to
 * it, and calls startBuild on success.
 *
 * @param deploymentId - Deployment to attach the source archive to.
 * @param tarPath - Local path of the .tar.gz to upload.
 * @throws {Error} when the PUT does not return a 2xx status.
 */
async function uploadTarball(deploymentId, tarPath) {
	const client = await getClient();
	const { uploadUrl } = await client.deployments.getUploadUrl({ deploymentId });
	// Whole-file buffer read; archives are small enough that streaming
	// is not needed here.
	const fileBuffer = readFileSync(tarPath);
	const putResponse = await fetch(uploadUrl, {
		method: "PUT",
		headers: { "Content-Type": "application/gzip" },
		body: fileBuffer
	});
	if (!putResponse.ok) throw new Error(`Upload falhou: ${putResponse.status}`);
	// Only trigger the build once the source is confirmed uploaded.
	await client.deployments.startBuild({ deploymentId });
}
3092
/**
 * Remove the shared base tarball's temp directory.
 * Failures are ignored — a leftover temp dir is harmless and must not
 * break the deploy flow.
 */
async function cleanupTarball(base) {
	try {
		await rm(base.tempDir, {
			recursive: true,
			force: true
		});
	} catch {
		// Best-effort cleanup only.
	}
}
2573
3099
  async function createTarball(directory, extraFiles) {
2574
3100
  const tempDir = await mkdtemp(join(tmpdir(), "veloz-upload-"));
2575
3101
  const tarPath = join(tempDir, "source.tar.gz");
@@ -2649,36 +3175,13 @@ async function calculateDirectorySize(directory) {
2649
3175
  return totalSize;
2650
3176
  }
2651
3177
 
2652
- //#endregion
2653
- //#region src/lib/retry.ts
2654
- async function withRetry(fn, maxRetries = 3) {
2655
- for (let attempt = 0; attempt <= maxRetries; attempt++) try {
2656
- return await fn();
2657
- } catch (error) {
2658
- if (attempt >= maxRetries) throw error;
2659
- const rateLimit = isRateLimitError(error);
2660
- if (rateLimit) {
2661
- const waitMs = Math.min(rateLimit.retryAfterMs, 3e4);
2662
- await new Promise((r) => {
2663
- setTimeout(r, waitMs);
2664
- });
2665
- } else {
2666
- const delay = Math.min(1e3 * Math.pow(2, attempt), 1e4);
2667
- await new Promise((r) => {
2668
- setTimeout(r, delay);
2669
- });
2670
- }
2671
- }
2672
- throw new Error("Max retries exceeded");
2673
- }
2674
-
2675
3178
  //#endregion
2676
3179
  //#region src/lib/deploy-constants.ts
2677
3180
  const statusLabels = {
2678
3181
  QUEUED: "Na fila",
2679
3182
  BUILDING: "Compilando",
2680
- BUILD_FAILED: "Falha na construção",
2681
- DEPLOYING: "Implantando",
3183
+ BUILD_FAILED: "Falha na compilação",
3184
+ DEPLOYING: "Realizando deploy",
2682
3185
  LIVE: "Ativo",
2683
3186
  FAILED: "Falhou",
2684
3187
  CANCELLED: "Cancelado"
@@ -2714,33 +3217,7 @@ const TERMINAL_STATUSES = new Set([
2714
3217
  ]);
2715
3218
 
2716
3219
  //#endregion
2717
- //#region src/lib/deploy-cancel.ts
2718
- const activeDeploymentIds = /* @__PURE__ */ new Set();
2719
- let sigintHandlerRegistered = false;
2720
- function trackDeployment(deploymentId) {
2721
- activeDeploymentIds.add(deploymentId);
2722
- }
2723
- function untrackDeployment(deploymentId) {
2724
- activeDeploymentIds.delete(deploymentId);
2725
- }
2726
- function setupSigintHandler() {
2727
- if (sigintHandlerRegistered) return;
2728
- sigintHandlerRegistered = true;
2729
- process.on("SIGINT", async () => {
2730
- if (activeDeploymentIds.size === 0) process.exit(130);
2731
- if (process.stdout.isTTY) console.log(chalk.yellow("\n\nCancelando deploy(s)..."));
2732
- try {
2733
- const client = await getClient();
2734
- const cancelPromises = Array.from(activeDeploymentIds).map((deploymentId) => client.deployments.cancel({ deploymentId }).catch(() => {}));
2735
- await Promise.all(cancelPromises);
2736
- if (process.stdout.isTTY) console.log(chalk.yellow("Deploy cancelado."));
2737
- } catch {}
2738
- process.exit(130);
2739
- });
2740
- }
2741
-
2742
- //#endregion
2743
- //#region src/lib/deploy-parallel.ts
3220
+ //#region src/lib/deploy-stream.ts
2744
3221
  async function fetchDeployUrls$1(client, serviceId) {
2745
3222
  try {
2746
3223
  return (await client.domains.list({ serviceId })).map((d) => `https://${d.domain}`);
@@ -2764,256 +3241,6 @@ function getFailureHints$1(status) {
2764
3241
  default: return ["Execute 'veloz logs -f' para mais detalhes."];
2765
3242
  }
2766
3243
  }
2767
- function renderProgress(progressMap, prevLineCount) {
2768
- for (let i = 0; i < prevLineCount; i++) process.stdout.write("\x1B[1A\x1B[2K");
2769
- let lineCount = 0;
2770
- for (const [, progress] of progressMap) {
2771
- const icon = statusIcons[progress.status] || chalk.gray("○");
2772
- const label = statusLabels[progress.status] || progress.status;
2773
- process.stdout.write(`${icon} ${chalk.bold(progress.serviceName)}: ${label}\n`);
2774
- lineCount++;
2775
- if (progress.status === "BUILDING" || progress.status === "BUILD_FAILED") {
2776
- const nonEmptyLines = progress.logLines.filter((l) => l.trim());
2777
- if (nonEmptyLines.length > 0) {
2778
- const tail = nonEmptyLines.slice(-3);
2779
- for (const line of tail) {
2780
- process.stdout.write(` ${chalk.dim(line)}\n`);
2781
- lineCount++;
2782
- }
2783
- } else if (progress.status === "BUILDING") {
2784
- process.stdout.write(` ${chalk.dim("Aguardando logs do build...")}\n`);
2785
- lineCount++;
2786
- }
2787
- } else if (progress.status === "QUEUED") {
2788
- process.stdout.write(` ${chalk.dim("Na fila para construção...")}\n`);
2789
- lineCount++;
2790
- }
2791
- }
2792
- return lineCount;
2793
- }
2794
- async function deployServicesInParallel(services) {
2795
- const client = await getClient();
2796
- const mcp = isMcpMode();
2797
- const isTTY = !mcp && process.stdout.isTTY;
2798
- const isGHA = !mcp && process.env.GITHUB_ACTIONS === "true";
2799
- if (mcp) log(`[deploy] Iniciando deploy de ${services.length} serviço(s)`);
2800
- else if (isGHA) process.stdout.write(`Iniciando deploy de ${services.length} serviço(s)\n`);
2801
- else if (isTTY) console.log(chalk.cyan(`\nIniciando deploy de ${services.length} serviço(s)...\n`));
2802
- else process.stdout.write(`Iniciando deploy de ${services.length} serviço(s)\n`);
2803
- setupSigintHandler();
2804
- const progressMap = /* @__PURE__ */ new Map();
2805
- const projectRoot = process.cwd();
2806
- const sizeInBytes = await calculateDirectorySize(projectRoot);
2807
- const sizeMB = Math.round(sizeInBytes / (1024 * 1024) * 10) / 10;
2808
- const deploymentPromises = services.map(async (service) => {
2809
- try {
2810
- const deployment = await withRetry(() => client.deployments.create({
2811
- serviceId: service.serviceId,
2812
- serviceConfig: service.serviceConfig
2813
- }));
2814
- await withRetry(() => uploadSource(deployment.id, projectRoot, service.extraFiles));
2815
- trackDeployment(deployment.id);
2816
- progressMap.set(service.serviceId, {
2817
- serviceName: service.serviceName,
2818
- deploymentId: deployment.id,
2819
- status: "QUEUED",
2820
- logLines: [],
2821
- completed: false,
2822
- success: false
2823
- });
2824
- if (mcp) log(`[deploy] ✓ ${service.serviceName}: Upload concluído (${sizeMB} MB)`);
2825
- else if (isGHA) process.stdout.write(`✓ ${service.serviceName}: Upload concluído (${sizeMB} MB)\n`);
2826
- else if (isTTY) console.log(`${chalk.green("✓")} ${chalk.bold(service.serviceName)}: Upload concluído ${chalk.dim(`(${sizeMB} MB)`)}`);
2827
- else process.stdout.write(`✓ ${service.serviceName}: Upload concluído (${sizeMB} MB)\n`);
2828
- return {
2829
- service,
2830
- deploymentId: deployment.id
2831
- };
2832
- } catch (error) {
2833
- if (mcp) log(`[deploy] ✗ ${service.serviceName}: Falha ao iniciar deploy`);
2834
- else if (isGHA) process.stdout.write(`::error::${service.serviceName}: Falha ao iniciar deploy\n`);
2835
- else if (isTTY) console.log(`${chalk.red("✗")} ${chalk.bold(service.serviceName)}: Falha ao iniciar deploy`);
2836
- else process.stdout.write(`✗ ${service.serviceName}: Falha ao iniciar deploy\n`);
2837
- progressMap.set(service.serviceId, {
2838
- serviceName: service.serviceName,
2839
- deploymentId: "",
2840
- status: "FAILED",
2841
- logLines: [],
2842
- completed: true,
2843
- success: false
2844
- });
2845
- throw error;
2846
- }
2847
- });
2848
- const activeDeployments = (await Promise.allSettled(deploymentPromises)).filter((d) => d.status === "fulfilled").map((d) => d.value);
2849
- if (activeDeployments.length === 0) {
2850
- if (mcp) throw new Error("Todos os deploys falharam ao iniciar.");
2851
- else if (isGHA) process.stdout.write("::error::Todos os deploys falharam ao iniciar.\n");
2852
- else if (isTTY) console.error(chalk.red("\n✗ Todos os deploys falharam ao iniciar."));
2853
- else process.stdout.write("✗ Todos os deploys falharam ao iniciar.\n");
2854
- process.exit(1);
2855
- }
2856
- if (mcp) log(`[deploy] Monitorando ${activeDeployments.length} deploy(s)`);
2857
- else if (isGHA) process.stdout.write(`Monitorando ${activeDeployments.length} deploy(s)\n`);
2858
- else if (isTTY) {
2859
- console.log(chalk.cyan(`\nMonitorando progresso dos deploys:\n`));
2860
- console.log(chalk.dim("─".repeat(50)) + "\n");
2861
- } else process.stdout.write(`Monitorando ${activeDeployments.length} deploy(s)\n`);
2862
- let lineCount = 0;
2863
- if (isTTY) lineCount = renderProgress(progressMap, lineCount);
2864
- const streamPromises = activeDeployments.map(async ({ service, deploymentId }) => {
2865
- try {
2866
- await new Promise((resolve$1) => {
2867
- setTimeout(resolve$1, 1e3);
2868
- });
2869
- const stream = await client.logs.streamBuildLogs({ deploymentId });
2870
- for await (const event of stream) {
2871
- const progress = progressMap.get(service.serviceId);
2872
- if (!progress) continue;
2873
- if (event.type === "status") {
2874
- progress.status = event.content;
2875
- if (TERMINAL_STATUSES.has(event.content)) {
2876
- progress.completed = true;
2877
- progress.success = event.content === "LIVE";
2878
- }
2879
- if (mcp) {
2880
- const label = statusLabels[event.content] || event.content;
2881
- log(`[deploy] [${service.serviceName}] ${label}`);
2882
- } else if (!isTTY) {
2883
- const label = statusLabels[event.content] || event.content;
2884
- process.stdout.write(`[${service.serviceName}] ${label}\n`);
2885
- }
2886
- } else if (event.type === "log") {
2887
- const newLines = event.content.split("\n").filter((l) => l.trim());
2888
- progress.logLines.push(...newLines);
2889
- if (mcp) for (const line of newLines) log(`[build] [${service.serviceName}] ${line}`);
2890
- else if (!isTTY) for (const line of newLines) process.stdout.write(`[${service.serviceName}] ${line}\n`);
2891
- }
2892
- if (isTTY) lineCount = renderProgress(progressMap, lineCount);
2893
- }
2894
- } catch (error) {
2895
- const errorMessage = error instanceof Error ? error.message : String(error);
2896
- const progress = progressMap.get(service.serviceId);
2897
- if (progress && !progress.completed) {
2898
- if (mcp) log(`[deploy] ✗ Erro no streaming de logs para ${service.serviceName}: ${errorMessage}`);
2899
- else if (isGHA) process.stdout.write(`::error::Erro no streaming de logs para ${service.serviceName}: ${errorMessage}\n`);
2900
- else if (!isTTY) process.stdout.write(`✗ Erro no streaming de logs para ${service.serviceName}: ${errorMessage}\n`);
2901
- progress.status = "FAILED";
2902
- progress.completed = true;
2903
- if (isTTY) lineCount = renderProgress(progressMap, lineCount);
2904
- }
2905
- } finally {
2906
- untrackDeployment(deploymentId);
2907
- }
2908
- });
2909
- await Promise.all(streamPromises);
2910
- if (isTTY) renderProgress(progressMap, lineCount);
2911
- const successfulEntries = Array.from(progressMap.entries()).filter(([, p]) => p.success);
2912
- const failedEntries = Array.from(progressMap.entries()).filter(([, p]) => !p.success);
2913
- const urlMap = /* @__PURE__ */ new Map();
2914
- await Promise.all(successfulEntries.map(async ([serviceId]) => {
2915
- const urls = await fetchDeployUrls$1(client, serviceId);
2916
- if (urls.length > 0) urlMap.set(serviceId, urls);
2917
- }));
2918
- if (isTTY) console.log(chalk.dim("\n" + "─".repeat(50)));
2919
- if (successfulEntries.length > 0) if (mcp) {
2920
- log(`[deploy] ✓ ${successfulEntries.length} serviço(s) implantado(s) com sucesso`);
2921
- for (const [serviceId, progress] of successfulEntries) {
2922
- log(`[deploy] ✓ ${progress.serviceName}`);
2923
- for (const url of urlMap.get(serviceId) ?? []) log(`[deploy] ${url}`);
2924
- }
2925
- } else if (isGHA) {
2926
- process.stdout.write(`\n✓ ${successfulEntries.length} serviço(s) implantado(s) com sucesso\n`);
2927
- for (const [serviceId, progress] of successfulEntries) {
2928
- process.stdout.write(` ✓ ${progress.serviceName}\n`);
2929
- for (const url of urlMap.get(serviceId) ?? []) process.stdout.write(` ${url}\n`);
2930
- }
2931
- } else if (isTTY) {
2932
- console.log(chalk.green(`\n✓ ${successfulEntries.length} serviço(s) implantado(s) com sucesso:\n`));
2933
- for (const [serviceId, progress] of successfulEntries) {
2934
- console.log(` ${chalk.green("✓")} ${chalk.bold(progress.serviceName)}`);
2935
- for (const url of urlMap.get(serviceId) ?? []) console.log(` ${chalk.cyan(url)}`);
2936
- }
2937
- } else {
2938
- process.stdout.write(`\n${successfulEntries.length} serviço(s) implantado(s) com sucesso:\n`);
2939
- for (const [serviceId, progress] of successfulEntries) {
2940
- process.stdout.write(` ✓ ${progress.serviceName}\n`);
2941
- for (const url of urlMap.get(serviceId) ?? []) process.stdout.write(` ${url}\n`);
2942
- }
2943
- }
2944
- if (failedEntries.length > 0) if (mcp) {
2945
- log(`[deploy] ✗ ${failedEntries.length} serviço(s) falhou(aram)`);
2946
- for (const [, progress] of failedEntries) {
2947
- log(`[deploy] ✗ ${progress.serviceName} (${progress.status})`);
2948
- const hints = getFailureHints$1(progress.status);
2949
- for (const hint of hints) log(`[deploy] → ${hint}`);
2950
- }
2951
- } else if (isGHA) {
2952
- process.stdout.write(`\n✗ ${failedEntries.length} serviço(s) falhou(aram)\n`);
2953
- for (const [, progress] of failedEntries) {
2954
- process.stdout.write(`::error::${progress.serviceName} falhou (${progress.status})\n`);
2955
- const hints = getFailureHints$1(progress.status);
2956
- for (const hint of hints) process.stdout.write(` ${hint}\n`);
2957
- }
2958
- } else if (isTTY) {
2959
- console.log(chalk.red(`\n✗ ${failedEntries.length} serviço(s) falhou(aram):\n`));
2960
- for (const [, progress] of failedEntries) {
2961
- console.log(` ${chalk.red("✗")} ${chalk.bold(progress.serviceName)} ${chalk.dim(`(${progress.status})`)}`);
2962
- if (progress.logLines.length > 0) {
2963
- console.log(chalk.red(` ${"─".repeat(50)}`));
2964
- console.log(chalk.red.bold(" Logs de build:"));
2965
- console.log(chalk.red(` ${"─".repeat(50)}`));
2966
- for (const line of progress.logLines) if (line.trim()) console.log(` ${chalk.dim(line)}`);
2967
- console.log(chalk.red(` ${"─".repeat(50)}`));
2968
- }
2969
- const hints = getFailureHints$1(progress.status);
2970
- for (const hint of hints) console.log(chalk.yellow(` → ${hint}`));
2971
- }
2972
- } else {
2973
- process.stdout.write(`\n${failedEntries.length} serviço(s) falhou(aram):\n`);
2974
- for (const [, progress] of failedEntries) {
2975
- process.stdout.write(`\n ✗ ${progress.serviceName} (${progress.status})\n`);
2976
- const hints = getFailureHints$1(progress.status);
2977
- for (const hint of hints) process.stdout.write(` → ${hint}\n`);
2978
- }
2979
- }
2980
- if (successfulEntries.length > 0) info("\nUse 'veloz logs -f' para acompanhar os logs de execução.");
2981
- const results = [];
2982
- for (const [serviceId, progress] of progressMap) results.push({
2983
- status: progress.status,
2984
- logs: progress.logLines.filter((l) => l.trim()),
2985
- urls: urlMap.get(serviceId) ?? [],
2986
- serviceName: progress.serviceName
2987
- });
2988
- if (failedEntries.length > 0 && !mcp) process.exit(1);
2989
- return results;
2990
- }
2991
-
2992
- //#endregion
2993
- //#region src/lib/deploy-stream.ts
2994
- async function fetchDeployUrls(client, serviceId) {
2995
- try {
2996
- return (await client.domains.list({ serviceId })).map((d) => `https://${d.domain}`);
2997
- } catch {
2998
- return [];
2999
- }
3000
- }
3001
- function getFailureHints(status) {
3002
- switch (status) {
3003
- case "BUILD_FAILED": return [
3004
- "Verifique os logs de build acima para erros de compilação",
3005
- "Teste o build localmente: rode o comando de build do seu projeto",
3006
- "Use 'veloz config show' para verificar as configurações"
3007
- ];
3008
- case "DEPLOY_FAILED": return [
3009
- "O build passou mas o serviço falhou ao iniciar",
3010
- "Verifique se a porta configurada está correta: 'veloz config show'",
3011
- "Veja os logs de runtime: 'veloz logs -f'"
3012
- ];
3013
- case "CANCELLED": return ["Deploy cancelado. Execute 'veloz deploy' para tentar novamente."];
3014
- default: return ["Execute 'veloz logs -f' para mais detalhes."];
3015
- }
3016
- }
3017
3244
  /** Raw BuildKit line: `#N content` */
3018
3245
  const BUILDKIT_PREFIX_RE = /^#(\d+)\s+(.*)/;
3019
3246
  /** Docker build step: `[stage step/total] COMMAND` */
@@ -3395,7 +3622,7 @@ async function streamDeploymentLogs(deploymentId, serviceId, serviceName) {
3395
3622
  }
3396
3623
  if (isGHA) endGroup();
3397
3624
  if (renderer) renderer.stopSpinner();
3398
- const urls = finalStatus === "LIVE" ? await fetchDeployUrls(client, serviceId) : [];
3625
+ const urls = finalStatus === "LIVE" ? await fetchDeployUrls$1(client, serviceId) : [];
3399
3626
  if (finalStatus === "LIVE") {
3400
3627
  if (buildSpinner) {
3401
3628
  buildSpinner.stop();
@@ -3409,7 +3636,7 @@ async function streamDeploymentLogs(deploymentId, serviceId, serviceName) {
3409
3636
  buildSpinner = null;
3410
3637
  }
3411
3638
  const label = statusLabels[finalStatus] ?? finalStatus;
3412
- const hints = getFailureHints(finalStatus);
3639
+ const hints = getFailureHints$1(finalStatus);
3413
3640
  if (mcp) {
3414
3641
  log(`✗ Deploy finalizou: ${label}`);
3415
3642
  for (const hint of hints) log(` → ${hint}`);
@@ -3439,63 +3666,31 @@ async function streamDeploymentLogs(deploymentId, serviceId, serviceName) {
3439
3666
  serviceName
3440
3667
  };
3441
3668
  }
3442
- /**
3443
- * MCP streaming variant — yields progress events as an async generator.
3444
- * Each yield becomes an MCP `notifications/progress` message if the client
3445
- * provides a progressToken.
3446
- */
3447
- async function* streamDeploymentLogsMcp(deploymentId, serviceId, serviceName) {
3448
- const client = await getClient();
3449
- const allLogLines = [];
3450
- let finalStatus = "";
3451
- yield {
3452
- type: "status",
3453
- message: serviceName ? `Build: ${serviceName}` : "Build iniciando..."
3454
- };
3455
- try {
3456
- const stream = await client.logs.streamBuildLogs({ deploymentId });
3457
- for await (const event of stream) if (event.type === "status") {
3458
- const label = statusLabels[event.content] ?? event.content;
3459
- finalStatus = event.content;
3460
- yield {
3461
- type: "status",
3462
- message: label
3463
- };
3464
- } else if (event.type === "log") {
3465
- const lines = event.content.split("\n").filter((l) => l.trim());
3466
- allLogLines.push(...lines);
3467
- for (const line of lines) yield {
3468
- type: "log",
3469
- message: line
3470
- };
3471
- }
3472
- } catch {
3669
+
3670
+ //#endregion
3671
+ //#region src/lib/deploy-cancel.ts
3672
+ const activeDeploymentIds = /* @__PURE__ */ new Set();
3673
+ let sigintHandlerRegistered = false;
3674
+ function trackDeployment(deploymentId) {
3675
+ activeDeploymentIds.add(deploymentId);
3676
+ }
3677
+ function untrackDeployment(deploymentId) {
3678
+ activeDeploymentIds.delete(deploymentId);
3679
+ }
3680
+ function setupSigintHandler() {
3681
+ if (sigintHandlerRegistered) return;
3682
+ sigintHandlerRegistered = true;
3683
+ process.on("SIGINT", async () => {
3684
+ if (activeDeploymentIds.size === 0) process.exit(130);
3685
+ if (process.stdout.isTTY) console.log(chalk.yellow("\n\nCancelando deploy(s)..."));
3473
3686
  try {
3474
- finalStatus = (await client.deployments.get({ deploymentId })).status;
3475
- try {
3476
- const logs = await client.logs.getBuildLogs({ deploymentId });
3477
- if (logs.buildLogs) {
3478
- const lines = logs.buildLogs.split("\n").filter((l) => l.trim());
3479
- allLogLines.push(...lines);
3480
- for (const line of lines) yield {
3481
- type: "log",
3482
- message: line
3483
- };
3484
- }
3485
- } catch {}
3687
+ const client = await getClient();
3688
+ const cancelPromises = Array.from(activeDeploymentIds).map((deploymentId) => client.deployments.cancel({ deploymentId }).catch(() => {}));
3689
+ await Promise.all(cancelPromises);
3690
+ if (process.stdout.isTTY) console.log(chalk.yellow("Deploy cancelado."));
3486
3691
  } catch {}
3487
- }
3488
- const urls = finalStatus === "LIVE" ? await fetchDeployUrls(client, serviceId) : [];
3489
- yield {
3490
- type: "result",
3491
- message: finalStatus === "LIVE" ? "Deploy concluído" : `Deploy finalizou: ${finalStatus}`,
3492
- data: {
3493
- status: finalStatus,
3494
- logs: allLogLines,
3495
- urls,
3496
- serviceName
3497
- }
3498
- };
3692
+ process.exit(130);
3693
+ });
3499
3694
  }
3500
3695
 
3501
3696
  //#endregion
@@ -3507,7 +3702,7 @@ const LOGO_LINES = [
3507
3702
  ];
3508
3703
  const BRAND_COLOR = "#FF4D00";
3509
3704
  function getVersion() {
3510
- return "0.0.0-beta.17";
3705
+ return "0.0.0-beta.18";
3511
3706
  }
3512
3707
  function printBanner(subtitle) {
3513
3708
  const version = getVersion();
@@ -3529,6 +3724,494 @@ function printBanner(subtitle) {
3529
3724
  console.log();
3530
3725
  }
3531
3726
 
3727
+ //#endregion
3728
+ //#region src/lib/retry.ts
3729
+ async function withRetry(fn, maxRetries = 3) {
3730
+ for (let attempt = 0; attempt <= maxRetries; attempt++) try {
3731
+ return await fn();
3732
+ } catch (error) {
3733
+ if (attempt >= maxRetries) throw error;
3734
+ const rateLimit = isRateLimitError(error);
3735
+ if (rateLimit) {
3736
+ const waitMs = Math.min(rateLimit.retryAfterMs, 3e4);
3737
+ await new Promise((r) => {
3738
+ setTimeout(r, waitMs);
3739
+ });
3740
+ } else {
3741
+ const delay = Math.min(1e3 * Math.pow(2, attempt), 1e4);
3742
+ await new Promise((r) => {
3743
+ setTimeout(r, delay);
3744
+ });
3745
+ }
3746
+ }
3747
+ throw new Error("Max retries exceeded");
3748
+ }
3749
+
3750
+ //#endregion
3751
+ //#region src/lib/deploy-core.ts
3752
+ async function fetchDeployUrls(client, serviceId) {
3753
+ try {
3754
+ return (await client.domains.list({ serviceId })).map((d) => `https://${d.domain}`);
3755
+ } catch {
3756
+ return [];
3757
+ }
3758
+ }
3759
+ /**
3760
+ * Deploy a single service using the full streamDeploymentLogs experience
3761
+ * (BuildProgressRenderer with progress bars, spinner, runtime logs).
3762
+ */
3763
+ async function deploySingleService(service, options) {
3764
+ const { projectRoot } = options;
3765
+ const client = await getClient();
3766
+ const sizeInBytes = await calculateDirectorySize(projectRoot);
3767
+ const sizeMB = Math.round(sizeInBytes / (1024 * 1024) * 10) / 10;
3768
+ const baseTarball = await createBaseTarball(projectRoot);
3769
+ const deployment = await withRetry(() => client.deployments.create({
3770
+ serviceId: service.serviceId,
3771
+ serviceConfig: service.serviceConfig
3772
+ }));
3773
+ const { tarPath, tempDir: overlayTempDir } = await createServiceTarball(baseTarball, service.extraFiles);
3774
+ try {
3775
+ await withRetry(() => uploadTarball(deployment.id, tarPath));
3776
+ } finally {
3777
+ if (overlayTempDir) await rm(overlayTempDir, {
3778
+ recursive: true,
3779
+ force: true
3780
+ }).catch(() => {});
3781
+ await cleanupTarball(baseTarball);
3782
+ }
3783
+ if (sizeMB > 5) info(`Upload concluído (${sizeMB} MB)`);
3784
+ else success(`Upload concluído`);
3785
+ setupSigintHandler();
3786
+ trackDeployment(deployment.id);
3787
+ try {
3788
+ return [await streamDeploymentLogs(deployment.id, service.serviceId, service.serviceName)];
3789
+ } finally {
3790
+ untrackDeployment(deployment.id);
3791
+ }
3792
+ }
3793
+ /**
3794
+ * Deploy multiple services in parallel with progress tracking.
3795
+ */
3796
+ async function deployMultipleServices(services, options) {
3797
+ const { projectRoot, output } = options;
3798
+ const client = await getClient();
3799
+ const isTTY = process.stdout.isTTY && !process.env.GITHUB_ACTIONS;
3800
+ setupSigintHandler();
3801
+ const sizeInBytes = await calculateDirectorySize(projectRoot);
3802
+ const sizeMB = Math.round(sizeInBytes / (1024 * 1024) * 10) / 10;
3803
+ const baseTarball = await createBaseTarball(projectRoot);
3804
+ const progressMap = /* @__PURE__ */ new Map();
3805
+ const deploymentPromises = services.map(async (service) => {
3806
+ try {
3807
+ const deployment = await withRetry(() => client.deployments.create({
3808
+ serviceId: service.serviceId,
3809
+ serviceConfig: service.serviceConfig
3810
+ }));
3811
+ const { tarPath, tempDir: overlayTempDir } = await createServiceTarball(baseTarball, service.extraFiles);
3812
+ try {
3813
+ await withRetry(() => uploadTarball(deployment.id, tarPath));
3814
+ } finally {
3815
+ if (overlayTempDir) await rm(overlayTempDir, {
3816
+ recursive: true,
3817
+ force: true
3818
+ }).catch(() => {});
3819
+ }
3820
+ trackDeployment(deployment.id);
3821
+ progressMap.set(service.serviceId, {
3822
+ serviceName: service.serviceName,
3823
+ status: "QUEUED",
3824
+ logLines: [],
3825
+ completed: false,
3826
+ success: false
3827
+ });
3828
+ output.uploadComplete(service.serviceName, sizeMB);
3829
+ return {
3830
+ service,
3831
+ deploymentId: deployment.id
3832
+ };
3833
+ } catch (error) {
3834
+ output.uploadFailed(service.serviceName);
3835
+ progressMap.set(service.serviceId, {
3836
+ serviceName: service.serviceName,
3837
+ status: "FAILED",
3838
+ logLines: [],
3839
+ completed: true,
3840
+ success: false
3841
+ });
3842
+ throw error;
3843
+ }
3844
+ });
3845
+ const deployments = await Promise.allSettled(deploymentPromises);
3846
+ await cleanupTarball(baseTarball);
3847
+ const activeDeployments = deployments.filter((d) => d.status === "fulfilled").map((d) => d.value);
3848
+ if (activeDeployments.length === 0) {
3849
+ output.allUploadsFailed();
3850
+ process.exit(1);
3851
+ }
3852
+ output.monitoringStart(activeDeployments.length);
3853
+ if (isTTY) output.initProgress(progressMap);
3854
+ const streamPromises = activeDeployments.map(async ({ service, deploymentId }) => {
3855
+ try {
3856
+ await new Promise((resolve$1) => {
3857
+ setTimeout(resolve$1, 1e3);
3858
+ });
3859
+ const stream = await client.logs.streamBuildLogs({ deploymentId });
3860
+ for await (const event of stream) {
3861
+ const progress = progressMap.get(service.serviceId);
3862
+ if (!progress) continue;
3863
+ if (event.type === "status") {
3864
+ progress.status = event.content;
3865
+ if (TERMINAL_STATUSES.has(event.content)) {
3866
+ progress.completed = true;
3867
+ progress.success = event.content === "LIVE";
3868
+ }
3869
+ if (!isTTY) output.statusUpdate(service.serviceName, event.content);
3870
+ } else if (event.type === "log") {
3871
+ const newLines = event.content.split("\n").filter((l) => l.trim());
3872
+ progress.logLines.push(...newLines);
3873
+ if (!isTTY) for (const line of newLines) output.logLine(service.serviceName, line);
3874
+ }
3875
+ if (isTTY) output.renderProgress(progressMap);
3876
+ }
3877
+ } catch (error) {
3878
+ const errorMessage = error instanceof Error ? error.message : String(error);
3879
+ const progress = progressMap.get(service.serviceId);
3880
+ if (progress && !progress.completed) {
3881
+ output.streamError(service.serviceName, errorMessage);
3882
+ progress.status = "FAILED";
3883
+ progress.completed = true;
3884
+ if (isTTY) output.renderProgress(progressMap);
3885
+ }
3886
+ } finally {
3887
+ untrackDeployment(deploymentId);
3888
+ }
3889
+ });
3890
+ await Promise.all(streamPromises);
3891
+ if (isTTY) output.renderProgress(progressMap);
3892
+ const successfulEntries = Array.from(progressMap.entries()).filter(([, p]) => p.success);
3893
+ const failedEntries = Array.from(progressMap.entries()).filter(([, p]) => !p.success);
3894
+ const urlMap = /* @__PURE__ */ new Map();
3895
+ await Promise.all(successfulEntries.map(async ([serviceId]) => {
3896
+ const urls = await fetchDeployUrls(client, serviceId);
3897
+ if (urls.length > 0) urlMap.set(serviceId, urls);
3898
+ }));
3899
+ const successful = successfulEntries.map(([serviceId, p]) => ({
3900
+ serviceId,
3901
+ serviceName: p.serviceName,
3902
+ status: p.status,
3903
+ logLines: p.logLines.filter((l) => l.trim()),
3904
+ urls: urlMap.get(serviceId) ?? []
3905
+ }));
3906
+ const failed = failedEntries.map(([serviceId, p]) => ({
3907
+ serviceId,
3908
+ serviceName: p.serviceName,
3909
+ status: p.status,
3910
+ logLines: p.logLines.filter((l) => l.trim()),
3911
+ urls: []
3912
+ }));
3913
+ output.printSummary(successful, failed);
3914
+ if (successful.length > 0) output.printFollowUp();
3915
+ const results = [];
3916
+ for (const [serviceId, progress] of progressMap) results.push({
3917
+ status: progress.status,
3918
+ logs: progress.logLines.filter((l) => l.trim()),
3919
+ urls: urlMap.get(serviceId) ?? [],
3920
+ serviceName: progress.serviceName
3921
+ });
3922
+ if (failed.length > 0 && !process.env.VELOZ_MCP) process.exit(1);
3923
+ return results;
3924
+ }
3925
+ /**
3926
+ * Unified deploy function for 1-to-N services.
3927
+ *
3928
+ * - Single service: uses the full streamDeploymentLogs experience
3929
+ * (BuildProgressRenderer with progress bars, spinner, runtime logs)
3930
+ * - Multiple services: parallel deploy with compact progress tracking
3931
+ */
3932
+ async function deployServices(services, options) {
3933
+ if (services.length === 1) return deploySingleService(services[0], options);
3934
+ return deployMultipleServices(services, options);
3935
+ }
3936
+
3937
+ //#endregion
3938
+ //#region src/lib/deploy-output-tty.ts
3939
+ var TtyOutput = class {
3940
+ prevLineCount = 0;
3941
+ startDeploy(serviceCount) {
3942
+ console.log(chalk.cyan(`\nIniciando deploy de ${serviceCount} serviço(s)...\n`));
3943
+ }
3944
+ uploadComplete(serviceName, sizeMB) {
3945
+ console.log(`${chalk.green("✓")} ${chalk.bold(serviceName)}: Upload concluído ${chalk.dim(`(${sizeMB} MB)`)}`);
3946
+ }
3947
+ uploadFailed(serviceName) {
3948
+ console.log(`${chalk.red("✗")} ${chalk.bold(serviceName)}: Falha ao iniciar deploy`);
3949
+ }
3950
+ allUploadsFailed() {
3951
+ console.error(chalk.red("\n✗ Todos os deploys falharam ao iniciar."));
3952
+ }
3953
+ monitoringStart(_activeCount) {
3954
+ console.log(chalk.cyan(`\nMonitorando progresso dos deploys:\n`));
3955
+ console.log(chalk.dim("─".repeat(50)) + "\n");
3956
+ }
3957
+ statusUpdate(_serviceName, _status) {}
3958
+ logLine(_serviceName, _line) {}
3959
+ streamError(_serviceName, _error) {}
3960
+ initProgress(entries) {
3961
+ this.prevLineCount = this.doRenderProgress(entries, 0);
3962
+ }
3963
+ renderProgress(entries) {
3964
+ this.prevLineCount = this.doRenderProgress(entries, this.prevLineCount);
3965
+ }
3966
+ doRenderProgress(progressMap, prevLineCount) {
3967
+ for (let i = 0; i < prevLineCount; i++) process.stdout.write("\x1B[1A\x1B[2K");
3968
+ let lineCount = 0;
3969
+ for (const [, progress] of progressMap) {
3970
+ const icon = statusIcons[progress.status] || chalk.gray("○");
3971
+ const label = statusLabels[progress.status] || progress.status;
3972
+ process.stdout.write(`${icon} ${chalk.bold(progress.serviceName)}: ${label}\n`);
3973
+ lineCount++;
3974
+ if (progress.status === "BUILDING" || progress.status === "BUILD_FAILED") {
3975
+ const nonEmptyLines = progress.logLines.filter((l) => l.trim());
3976
+ if (nonEmptyLines.length > 0) {
3977
+ const tail = nonEmptyLines.slice(-3);
3978
+ for (const line of tail) {
3979
+ process.stdout.write(` ${chalk.dim(line)}\n`);
3980
+ lineCount++;
3981
+ }
3982
+ } else if (progress.status === "BUILDING") {
3983
+ process.stdout.write(` ${chalk.dim("Aguardando logs do build...")}\n`);
3984
+ lineCount++;
3985
+ }
3986
+ } else if (progress.status === "QUEUED") {
3987
+ process.stdout.write(` ${chalk.dim("Na fila para compilação...")}\n`);
3988
+ lineCount++;
3989
+ }
3990
+ }
3991
+ return lineCount;
3992
+ }
3993
+ buildStart(_serviceName) {}
3994
+ buildEnd() {}
3995
+ printSummary(successful, failed) {
3996
+ console.log(chalk.dim("\n" + "─".repeat(50)));
3997
+ if (successful.length > 0) {
3998
+ console.log(chalk.green(`\n✓ ${successful.length} serviço(s) com deploy concluído:\n`));
3999
+ for (const entry of successful) {
4000
+ console.log(` ${chalk.green("✓")} ${chalk.bold(entry.serviceName)}`);
4001
+ for (const url of entry.urls) console.log(` ${chalk.cyan(url)}`);
4002
+ }
4003
+ }
4004
+ if (failed.length > 0) {
4005
+ console.log(chalk.red(`\n✗ ${failed.length} serviço(s) falhou(aram):\n`));
4006
+ for (const entry of failed) {
4007
+ console.log(` ${chalk.red("✗")} ${chalk.bold(entry.serviceName)} ${chalk.dim(`(${entry.status})`)}`);
4008
+ if (entry.logLines.length > 0) {
4009
+ console.log(chalk.red(` ${"─".repeat(50)}`));
4010
+ console.log(chalk.red.bold(" Logs de build:"));
4011
+ console.log(chalk.red(` ${"─".repeat(50)}`));
4012
+ for (const line of entry.logLines) if (line.trim()) console.log(` ${chalk.dim(line)}`);
4013
+ console.log(chalk.red(` ${"─".repeat(50)}`));
4014
+ }
4015
+ const hints = getFailureHints(entry.status);
4016
+ for (const hint of hints) console.log(chalk.yellow(` → ${hint}`));
4017
+ }
4018
+ }
4019
+ }
4020
+ printFollowUp() {
4021
+ console.log(chalk.cyan("\nℹ Use 'veloz logs -f' para acompanhar os logs de execução."));
4022
+ }
4023
+ };
4024
+
4025
+ //#endregion
4026
+ //#region src/lib/deploy-output-plain.ts
4027
/**
 * Deploy progress reporter for non-TTY environments: plain, uncolored lines
 * on stdout, no cursor control. Interactive progress hooks are deliberate
 * no-ops. Implements the same reporter interface as the TTY/GHA/MCP variants.
 */
var PlainOutput = class {
  /** Single funnel for every stdout write this reporter makes. */
  #emit(text) {
    process.stdout.write(text);
  }
  startDeploy(count) {
    this.#emit(`Iniciando deploy de ${count} serviço(s)\n`);
  }
  uploadComplete(name, sizeMB) {
    this.#emit(`✓ ${name}: Upload concluído (${sizeMB} MB)\n`);
  }
  uploadFailed(name) {
    this.#emit(`✗ ${name}: Falha ao iniciar deploy\n`);
  }
  allUploadsFailed() {
    this.#emit("✗ Todos os deploys falharam ao iniciar.\n");
  }
  monitoringStart(count) {
    this.#emit(`Monitorando ${count} deploy(s)\n`);
  }
  statusUpdate(name, status) {
    // Fall back to the raw status when no human-readable label is registered.
    this.#emit(`[${name}] ${statusLabels[status] || status}\n`);
  }
  logLine(name, line) {
    this.#emit(`[${name}] ${line}\n`);
  }
  streamError(name, error) {
    this.#emit(`✗ Erro no streaming de logs para ${name}: ${error}\n`);
  }
  // Live progress rendering only makes sense on an interactive terminal.
  initProgress(_entries) {}
  renderProgress(_entries) {}
  buildStart(_serviceName) {}
  buildEnd() {}
  /**
   * Final report: one section listing successes (with their URLs), one
   * listing failures with actionable hints from getFailureHints.
   */
  printSummary(successful, failed) {
    if (successful.length > 0) {
      this.#emit(`\n${successful.length} serviço(s) com deploy concluído:\n`);
      for (const { serviceName, urls } of successful) {
        this.#emit(` ✓ ${serviceName}\n`);
        for (const url of urls) this.#emit(` ${url}\n`);
      }
    }
    if (failed.length > 0) {
      this.#emit(`\n${failed.length} serviço(s) falhou(aram):\n`);
      for (const { serviceName, status } of failed) {
        this.#emit(`\n ✗ ${serviceName} (${status})\n`);
        for (const hint of getFailureHints(status)) this.#emit(` → ${hint}\n`);
      }
    }
  }
  printFollowUp() {
    this.#emit("Use 'veloz logs -f' para acompanhar os logs de execução.\n");
  }
};
4078
+
4079
+ //#endregion
4080
+ //#region src/lib/deploy-output-gha.ts
4081
/**
 * Deploy progress reporter for GitHub Actions runners. Writes plain lines to
 * stdout and uses GHA workflow commands: `::error::` annotations for failures
 * and collapsible log groups (startGroup/endGroup) around build output.
 */
var GhaOutput = class {
  startDeploy(count) {
    process.stdout.write(`Iniciando deploy de ${count} serviço(s)\n`);
  }
  uploadComplete(name, sizeMB) {
    process.stdout.write(`✓ ${name}: Upload concluído (${sizeMB} MB)\n`);
  }
  uploadFailed(name) {
    process.stdout.write(`::error::${name}: Falha ao iniciar deploy\n`);
  }
  allUploadsFailed() {
    process.stdout.write("::error::Todos os deploys falharam ao iniciar.\n");
  }
  monitoringStart(count) {
    process.stdout.write(`Monitorando ${count} deploy(s)\n`);
  }
  statusUpdate(name, status) {
    // Fall back to the raw status when no human-readable label is registered.
    process.stdout.write(`[${name}] ${statusLabels[status] || status}\n`);
  }
  logLine(name, line) {
    process.stdout.write(`[${name}] ${line}\n`);
  }
  streamError(name, error) {
    process.stdout.write(`::error::Erro no streaming de logs para ${name}: ${error}\n`);
  }
  // Live progress rendering is meaningless in CI log output.
  initProgress(_entries) {}
  renderProgress(_entries) {}
  /** Open a collapsible GHA log group for one service's build output. */
  buildStart(serviceName) {
    startGroup(serviceName ? `Build: ${serviceName}` : "Build");
  }
  /** Close the group opened by buildStart. */
  buildEnd() {
    endGroup();
  }
  /**
   * Final report: successes with their URLs; each failure gets a GHA error
   * annotation followed by actionable hints from getFailureHints.
   */
  printSummary(successful, failed) {
    if (successful.length > 0) {
      process.stdout.write(`\n✓ ${successful.length} serviço(s) com deploy concluído\n`);
      for (const { serviceName, urls } of successful) {
        process.stdout.write(` ✓ ${serviceName}\n`);
        for (const url of urls) process.stdout.write(` ${url}\n`);
      }
    }
    if (failed.length > 0) {
      process.stdout.write(`\n✗ ${failed.length} serviço(s) falhou(aram)\n`);
      for (const { serviceName, status } of failed) {
        process.stdout.write(`::error::${serviceName} falhou (${status})\n`);
        for (const hint of getFailureHints(status)) process.stdout.write(` ${hint}\n`);
      }
    }
  }
  printFollowUp() {
    process.stdout.write("Use 'veloz logs -f' para acompanhar os logs de execução.\n");
  }
};
4136
+
4137
+ //#endregion
4138
+ //#region src/lib/deploy-output-mcp.ts
4139
/**
 * Deploy progress reporter for MCP mode: every message is routed through the
 * shared `log` sink with a `[deploy]`/`[build]` prefix instead of stdout, so
 * the MCP transport stays in control of the output channel.
 */
var McpOutput = class {
  startDeploy(count) {
    log(`[deploy] Iniciando deploy de ${count} serviço(s)`);
  }
  uploadComplete(name, sizeMB) {
    log(`[deploy] ✓ ${name}: Upload concluído (${sizeMB} MB)`);
  }
  uploadFailed(name) {
    log(`[deploy] ✗ ${name}: Falha ao iniciar deploy`);
  }
  /** In MCP mode a total failure is surfaced as a thrown error, not a log line. */
  allUploadsFailed() {
    throw new Error("Todos os deploys falharam ao iniciar.");
  }
  monitoringStart(count) {
    log(`[deploy] Monitorando ${count} deploy(s)`);
  }
  statusUpdate(name, status) {
    // Fall back to the raw status when no human-readable label is registered.
    const label = statusLabels[status] || status;
    log(`[deploy] [${name}] ${label}`);
  }
  logLine(name, line) {
    log(`[build] [${name}] ${line}`);
  }
  streamError(name, error) {
    log(`[deploy] ✗ Erro no streaming de logs para ${name}: ${error}`);
  }
  // No interactive progress UI in MCP mode.
  initProgress(_entries) {}
  renderProgress(_entries) {}
  buildStart(_serviceName) {}
  buildEnd() {}
  /**
   * Final report: successes with their URLs; failures with actionable hints
   * from getFailureHints. Everything goes through the `log` sink.
   */
  printSummary(successful, failed) {
    if (successful.length > 0) {
      log(`[deploy] ✓ ${successful.length} serviço(s) com deploy concluído`);
      for (const { serviceName, urls } of successful) {
        log(`[deploy] ✓ ${serviceName}`);
        for (const url of urls) log(`[deploy] ${url}`);
      }
    }
    if (failed.length > 0) {
      log(`[deploy] ✗ ${failed.length} serviço(s) falhou(aram)`);
      for (const { serviceName, status } of failed) {
        log(`[deploy] ✗ ${serviceName} (${status})`);
        for (const hint of getFailureHints(status)) log(`[deploy] → ${hint}`);
      }
    }
  }
  printFollowUp() {
    log("[deploy] Use 'veloz logs -f' para acompanhar os logs de execução.");
  }
};
4189
+
4190
+ //#endregion
4191
+ //#region src/lib/deploy-output.ts
4192
/**
 * Return actionable troubleshooting hints for a terminal deployment status.
 * Unknown statuses fall back to a generic "check the logs" hint. A fresh
 * array is returned on every call, so callers may mutate the result freely.
 */
function getFailureHints(status) {
  // Map (not a plain object) so arbitrary status strings can never collide
  // with prototype properties.
  const hintsByStatus = new Map([
    ["BUILD_FAILED", [
      "Verifique os logs de build acima para erros de compilação",
      "Teste o build localmente: rode o comando de build do seu projeto",
      "Use 'veloz config show' para verificar as configurações"
    ]],
    ["DEPLOY_FAILED", [
      "O build passou mas o serviço falhou ao iniciar",
      "Verifique se a porta configurada está correta: 'veloz config show'",
      "Veja os logs de runtime: 'veloz logs -f'"
    ]],
    ["CANCELLED", ["Deploy cancelado. Execute 'veloz deploy' para tentar novamente."]]
  ]);
  return hintsByStatus.get(status) ?? ["Execute 'veloz logs -f' para mais detalhes."];
}
4208
/**
 * Select the deploy reporter matching the current execution environment,
 * checked in order of specificity: MCP mode, then GitHub Actions, then an
 * interactive TTY, falling back to the plain stdout reporter.
 */
function createDeployOutput() {
  if (isMcpMode()) return new McpOutput();
  const runningInGha = process.env.GITHUB_ACTIONS === "true";
  if (runningInGha) return new GhaOutput();
  return process.stdout.isTTY ? new TtyOutput() : new PlainOutput();
}
4214
+
3532
4215
  //#endregion
3533
4216
  //#region src/lib/deploy-config.ts
3534
4217
  function resolveServiceConf(velozConfig, serviceId) {
@@ -3558,7 +4241,10 @@ function resolveServiceConf(velozConfig, serviceId) {
3558
4241
  nixpkgsArchive: build?.nixpkgsArchive ?? void 0,
3559
4242
  packageManager: build?.packageManager,
3560
4243
  installCommand: build?.installCommand ?? void 0,
3561
- volumes: merged.volumes ?? void 0
4244
+ volumes: merged.volumes?.map((v) => ({
4245
+ ...v,
4246
+ sizeGb: v.sizeGb ?? 10
4247
+ })) ?? void 0
3562
4248
  };
3563
4249
  }
3564
4250
  }
@@ -3986,7 +4672,7 @@ async function autoUpdate() {
3986
4672
  if (process.env.VELOZ_MCP === "true") return;
3987
4673
  const pm = detectPackageManager();
3988
4674
  if (!pm) return;
3989
- const currentVersion = "0.0.0-beta.17";
4675
+ const currentVersion = "0.0.0-beta.18";
3990
4676
  const latestVersion = await fetchLatestVersion();
3991
4677
  if (!latestVersion || latestVersion === currentVersion) return;
3992
4678
  const installCmd = getInstallCommand(pm, latestVersion);
@@ -4018,26 +4704,27 @@ async function provisionDatabases(config, opts) {
4018
4704
  text: "Verificando bancos de dados...",
4019
4705
  fn: () => client.databases.list({ projectId })
4020
4706
  });
4021
- let updatedConfig = { ...config };
4707
+ const idUpdates = {};
4022
4708
  for (const [key, dbConfig] of entries) {
4023
4709
  const existing = serverDatabases.find((d) => dbConfig.id && d.id === dbConfig.id || d.name === key);
4024
4710
  if (existing) {
4025
- if (!dbConfig.id || dbConfig.id !== existing.id) updatedConfig = {
4026
- ...updatedConfig,
4027
- databases: {
4028
- ...updatedConfig.databases,
4029
- [key]: {
4030
- ...dbConfig,
4031
- id: existing.id
4032
- }
4033
- }
4034
- };
4711
+ if (!dbConfig.id || dbConfig.id !== existing.id) idUpdates[key] = existing.id;
4035
4712
  if (existing.databaseStatus === "FAILED") {
4036
4713
  warn(`Banco de dados "${key}" está com status FAILED no servidor.`);
4037
4714
  console.log(chalk.dim(` Use 'veloz db restart ${key}' para tentar reiniciar.`));
4038
4715
  console.log();
4039
- } else if (existing.databaseStatus === "PENDING" || existing.databaseStatus === "PROVISIONING" || existing.databaseStatus === "WAITING_ON_PROVIDER") info(`Banco de dados "${key}" ainda está sendo provisionado (${existing.databaseStatus === "WAITING_ON_PROVIDER" ? "levando mais tempo" : existing.databaseStatus}).`);
4040
- else info(`Banco de dados "${key}" encontrado no servidor (${chalk.green("LIVE")}).`);
4716
+ continue;
4717
+ }
4718
+ if (existing.databaseStatus === "PENDING" || existing.databaseStatus === "PROVISIONING" || existing.databaseStatus === "WAITING_ON_PROVIDER") {
4719
+ info(`Banco de dados "${key}" ainda está sendo provisionado (${existing.databaseStatus === "WAITING_ON_PROVIDER" ? "levando mais tempo" : existing.databaseStatus}).`);
4720
+ continue;
4721
+ }
4722
+ const serviceId = existing.id;
4723
+ const isLive = existing.databaseStatus === "LIVE";
4724
+ await updateDatabaseResources(client, key, dbConfig, existing, serviceId, isLive);
4725
+ await updateDatabasePooler(client, key, dbConfig, existing, serviceId, isLive);
4726
+ await updateDatabaseStorage(client, key, dbConfig, serviceId);
4727
+ info(`Banco de dados "${key}" encontrado no servidor (${chalk.green(existing.databaseStatus ?? "LIVE")}).`);
4041
4728
  continue;
4042
4729
  }
4043
4730
  console.log();
@@ -4063,8 +4750,7 @@ async function provisionDatabases(config, opts) {
4063
4750
  engine: dbConfig.engine,
4064
4751
  engineVersion: dbConfig.version,
4065
4752
  storage: dbConfig.storage,
4066
- cpuLimit: dbConfig.resources?.cpu,
4067
- memoryLimit: dbConfig.resources?.memory,
4753
+ size: dbConfig.size ?? "essencial",
4068
4754
  poolerEnabled: dbConfig.pooler?.enabled,
4069
4755
  poolerPoolMode: dbConfig.pooler?.poolMode,
4070
4756
  poolerDefaultPoolSize: dbConfig.pooler?.defaultPoolSize,
@@ -4072,29 +4758,128 @@ async function provisionDatabases(config, opts) {
4072
4758
  })
4073
4759
  });
4074
4760
  success(`Banco de dados "${key}" criado (provisionando...).`);
4075
- updatedConfig = {
4076
- ...updatedConfig,
4077
- databases: {
4078
- ...updatedConfig.databases,
4079
- [key]: {
4080
- ...dbConfig,
4081
- id: db.id
4082
- }
4083
- }
4084
- };
4761
+ idUpdates[key] = db.id;
4085
4762
  } catch (error) {
4086
4763
  warn(`Falha ao criar banco de dados "${key}": ${error instanceof Error ? error.message : String(error)}`);
4087
4764
  console.log(chalk.dim(` Continuando com o deploy dos serviços...`));
4088
4765
  console.log();
4089
4766
  }
4090
4767
  }
4091
- if (JSON.stringify(updatedConfig.databases) !== JSON.stringify(config.databases)) try {
4092
- saveConfig(updatedConfig);
4768
+ if (Object.keys(idUpdates).length > 0) try {
4769
+ patchConfig((raw) => {
4770
+ raw.databases ??= {};
4771
+ for (const [key, id] of Object.entries(idUpdates)) if (raw.databases[key]) raw.databases[key].id = id;
4772
+ else {
4773
+ const dbConfig = entries.find(([k]) => k === key)?.[1];
4774
+ if (dbConfig) raw.databases[key] = {
4775
+ engine: dbConfig.engine,
4776
+ id
4777
+ };
4778
+ }
4779
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
4780
+ });
4093
4781
  info(`Arquivo veloz.json atualizado com IDs dos bancos de dados.`);
4094
4782
  } catch (error) {
4095
4783
  warn(`Não foi possível atualizar veloz.json: ${error instanceof Error ? error.message : String(error)}`);
4096
4784
  }
4097
- return updatedConfig;
4785
+ const updatedDatabases = { ...databases };
4786
+ for (const [key, id] of Object.entries(idUpdates)) if (updatedDatabases[key]) updatedDatabases[key] = {
4787
+ ...updatedDatabases[key],
4788
+ id
4789
+ };
4790
+ return {
4791
+ ...config,
4792
+ databases: updatedDatabases
4793
+ };
4794
+ }
4795
/**
 * Reconcile the size tier declared in veloz.json with the server's current
 * resources for one database. No-op when no size is declared or it already
 * matches; warns (without throwing) when the change cannot be applied
 * because the database is not LIVE, the tier name is unknown, or the API
 * call fails.
 */
async function updateDatabaseResources(client, key, dbConfig, existing, serviceId, isLive) {
  const wanted = dbConfig.size;
  if (!wanted) return;
  // The server may report a size tier directly; otherwise derive one from
  // its raw CPU/memory limits.
  const current = existing.size ?? resolveDatabaseSize(existing.cpuLimit, existing.memoryLimit);
  if (current === wanted) return;
  if (!isLive) {
    warn(`Banco de dados "${key}" não está LIVE — não é possível alterar recursos agora.`);
    return;
  }
  const tier = DATABASE_SIZES[wanted];
  if (!tier) {
    warn(`Tamanho "${wanted}" inválido para banco de dados "${key}".`);
    return;
  }
  try {
    const applyResize = () => client.databases.updateResources({
      serviceId,
      size: wanted
    });
    await withSpinner({
      text: `Atualizando recursos de "${key}" para ${tier.label} (${tier.cpuLabel}, ${tier.memoryLabel})...`,
      fn: applyResize
    });
    success(`Recursos de "${key}" atualizados para ${tier.label}.`);
  } catch (error) {
    const reason = error instanceof Error ? error.message : String(error);
    warn(`Falha ao atualizar recursos de "${key}": ${reason}`);
  }
}
4825
/**
 * Reconcile the connection-pooler settings declared in veloz.json with the
 * server state for one database. No-op when no pooler block is declared or
 * when both the enabled flag and the pool mode already match. Warns (without
 * throwing) when the database is not LIVE, is not PostgreSQL, or the API
 * call fails.
 */
async function updateDatabasePooler(client, key, dbConfig, existing, serviceId, isLive) {
  const pooler = dbConfig.pooler;
  if (!pooler) return;
  const { enabled, poolMode, defaultPoolSize, maxClientConn } = pooler;
  // An undefined poolMode in the config means "keep whatever the server has".
  const needsUpdate =
    enabled !== existing.poolerEnabled ||
    (poolMode !== undefined && poolMode !== existing.poolerPoolMode);
  if (!needsUpdate) return;
  if (!isLive) {
    warn(`Banco de dados "${key}" não está LIVE — não é possível alterar o pooler agora.`);
    return;
  }
  if (existing.engine !== "postgresql") {
    warn(`Connection pooler só é suportado para PostgreSQL (banco "${key}" usa ${existing.engine}).`);
    return;
  }
  try {
    const applyPooler = () => client.databases.updatePooler({
      serviceId,
      enabled,
      poolMode,
      defaultPoolSize,
      maxClientConn
    });
    await withSpinner({
      text: `Atualizando pooler de "${key}"...`,
      fn: applyPooler
    });
    success(`Pooler de "${key}" ${enabled ? "ativado" : "desativado"}.`);
  } catch (error) {
    const reason = error instanceof Error ? error.message : String(error);
    warn(`Falha ao atualizar pooler de "${key}": ${reason}`);
  }
}
4859
/**
 * Reconcile the storage size declared in veloz.json (e.g. "20Gi") with the
 * database's "data" volume on the server, resizing the volume when the
 * config requests more space. Volumes are only ever grown, never shrunk.
 * All API failures — including the volume listing — are reported as warnings
 * so a storage hiccup never aborts the surrounding deploy flow, matching the
 * best-effort behavior of the sibling resource/pooler updaters.
 */
async function updateDatabaseStorage(client, key, dbConfig, serviceId) {
  const desiredStorage = dbConfig.storage;
  if (!desiredStorage) return;
  // Config uses Kubernetes-style quantities ("10Gi"); strip the suffix and
  // parse with an explicit radix. Non-numeric or non-positive sizes are
  // silently ignored rather than treated as zero.
  const desiredSizeGb = Number.parseInt(desiredStorage.replace("Gi", ""), 10);
  if (Number.isNaN(desiredSizeGb) || desiredSizeGb <= 0) return;
  try {
    const dataVolume = (await client.volumes.list({ serviceId })).find((v) => v.name === "data");
    if (!dataVolume) return;
    // Only grow: shrinking is unsupported and equal sizes need no API call.
    if (desiredSizeGb <= dataVolume.sizeGb) return;
    await withSpinner({
      text: `Redimensionando armazenamento de "${key}" de ${dataVolume.sizeGb} GB para ${desiredSizeGb} GB...`,
      fn: () => client.volumes.update({
        volumeId: dataVolume.id,
        sizeGb: desiredSizeGb
      })
    });
    success(`Armazenamento de "${key}" atualizado para ${desiredSizeGb} GB.`);
  } catch (error) {
    warn(`Falha ao redimensionar armazenamento de "${key}": ${error instanceof Error ? error.message : String(error)}`);
  }
}
4099
4884
  /**
4100
4885
  * Get hint messages about auto-injected DATABASE_URLs for display during env var prompts.
@@ -4205,39 +4990,6 @@ async function triggerDeploy(serviceId, serviceName, preDetection) {
4205
4990
  untrackDeployment(deployment.id);
4206
4991
  }
4207
4992
  }
4208
- /**
4209
- * MCP variant of triggerDeploy — yields progress events as async generator.
4210
- * Upload phase is awaited, then build log streaming is delegated to the
4211
- * streaming generator.
4212
- */
4213
- async function* triggerDeployMcp(serviceId, serviceName) {
4214
- const client = await getClient();
4215
- const serviceConf = resolveServiceConf(loadConfig(), serviceId);
4216
- const extraFiles = prepareExtraFiles(detectLocalRepo(), serviceConf);
4217
- yield {
4218
- type: "status",
4219
- message: "Iniciando deploy..."
4220
- };
4221
- const deployment = await withRetry(() => client.deployments.create({
4222
- serviceId,
4223
- serviceConfig: serviceConf
4224
- }));
4225
- yield {
4226
- type: "status",
4227
- message: "Fazendo upload do código..."
4228
- };
4229
- await withRetry(() => uploadSource(deployment.id, process.cwd(), extraFiles));
4230
- yield {
4231
- type: "status",
4232
- message: "Upload concluído. Aguardando build..."
4233
- };
4234
- trackDeployment(deployment.id);
4235
- try {
4236
- yield* streamDeploymentLogsMcp(deployment.id, serviceId, serviceName);
4237
- } finally {
4238
- untrackDeployment(deployment.id);
4239
- }
4240
- }
4241
4993
  function warnIfEphemeralFsDetected(detection, serviceConf, serviceLabel) {
4242
4994
  if (!detection.usesNodeFs || (serviceConf?.volumes?.length ?? 0) > 0) return;
4243
4995
  warn(`Uso de fs/node:fs detectado${serviceLabel ? ` no serviço ${chalk.bold(serviceLabel)}` : ""}. O filesystem do container é efêmero; configure um volume em veloz.json ou use 'veloz volumes create'.`);
@@ -4248,7 +5000,7 @@ async function maybeConfigurePersistentVolume(serviceConfig, detection, opts, se
4248
5000
  if (!await promptConfirm(`Deseja adicionar um volume persistente para ${serviceLabel}?`, true)) return;
4249
5001
  const name = await prompt(`Nome do volume ${chalk.dim("(data)")}:`) || "data";
4250
5002
  const mountPath = await prompt(`Mount path ${chalk.dim("(/data)")}:`) || "/data";
4251
- const sizeInput = await prompt(`Tamanho em GB ${chalk.dim("(1)")}:`);
5003
+ const sizeInput = await prompt(`Tamanho em GB ${chalk.dim("(10)")}:`);
4252
5004
  const parsedSize = Number.parseInt(sizeInput || "10", 10);
4253
5005
  serviceConfig.volumes = [{
4254
5006
  name,
@@ -4294,7 +5046,12 @@ async function findServicesFromConfig() {
4294
5046
  }
4295
5047
  }
4296
5048
  if (configUpdated) {
4297
- saveConfig(config);
5049
+ const serviceIdMap = {};
5050
+ for (const [key, svc] of Object.entries(config.services)) if (svc.id) serviceIdMap[key] = svc.id;
5051
+ patchConfig((raw) => {
5052
+ for (const [key, id] of Object.entries(serviceIdMap)) if (raw.services[key]) raw.services[key].id = id;
5053
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
5054
+ });
4298
5055
  info(`Arquivo ${getConfigFileName()} atualizado com IDs dos serviços.`);
4299
5056
  }
4300
5057
  }
@@ -4589,13 +5346,15 @@ async function createServiceFlow(projectId, projectName, repoName, opts = {}) {
4589
5346
  await promptEnvVars(service$1.id, serviceDetection.envVars.map((v) => v.key), config$1);
4590
5347
  }
4591
5348
  config$1 = await provisionDatabases(config$1, { yes: opts.yes ?? false });
4592
- await deployServicesInParallel(createdServices.map(({ service: service$1, app }) => ({
5349
+ await deployServices(createdServices.map(({ service: service$1, app }) => ({
4593
5350
  serviceId: service$1.id,
4594
5351
  serviceName: app.name,
4595
- path: resolve(process.cwd(), app.root),
4596
5352
  serviceConfig: resolveServiceConf(config$1, service$1.id),
4597
5353
  extraFiles: prepareExtraFiles(detectLocalRepo(app.root), { rootDirectory: app.root })
4598
- })));
5354
+ })), {
5355
+ projectRoot: process.cwd(),
5356
+ output: createDeployOutput()
5357
+ });
4599
5358
  return createdServices[createdServices.length - 1]?.service.id || "";
4600
5359
  }
4601
5360
  const fw = detection.framework;
@@ -4735,13 +5494,10 @@ async function createEnvironmentFlow(rawConfig, envName, opts) {
4735
5494
  },
4736
5495
  services: envServices
4737
5496
  };
4738
- saveConfig({
4739
- ...rawConfig,
4740
- environments: {
4741
- ...rawConfig.environments,
4742
- [envName]: envOverride
4743
- },
4744
- updated: (/* @__PURE__ */ new Date()).toISOString()
5497
+ patchConfig((raw) => {
5498
+ raw.environments ??= {};
5499
+ raw.environments[envName] = envOverride;
5500
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
4745
5501
  });
4746
5502
  success(`Ambiente "${envName}" salvo em ${getConfigFileName()}`);
4747
5503
  setActiveEnv(envName);
@@ -4862,8 +5618,11 @@ async function addServiceFlow(existingConfig, opts) {
4862
5618
  };
4863
5619
  await maybeConfigurePersistentVolume(updatedConfig$1.services[app.root], detectLocalRepo(app.root), opts, app.name);
4864
5620
  }
4865
- updatedConfig$1.updated = (/* @__PURE__ */ new Date()).toISOString();
4866
- saveConfig(updatedConfig$1);
5621
+ const newServices = Object.fromEntries(createdServices.map(({ app }) => [app.root, updatedConfig$1.services[app.root]]));
5622
+ patchConfig((raw) => {
5623
+ for (const [key, value] of Object.entries(newServices)) if (value) raw.services[key] = value;
5624
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
5625
+ });
4867
5626
  info(`Arquivo ${getConfigFileName()} atualizado com ${createdServices.length} novo(s) serviço(s).`);
4868
5627
  if (!opts.yes) for (const { service: service$1, app } of createdServices) {
4869
5628
  console.log(chalk.cyan(`\n── Configurando variáveis: ${app.name} ──\n`));
@@ -4871,13 +5630,15 @@ async function addServiceFlow(existingConfig, opts) {
4871
5630
  await promptEnvVars(service$1.id, serviceDetection.envVars.map((v) => v.key), updatedConfig$1);
4872
5631
  }
4873
5632
  await provisionDatabases(updatedConfig$1, { yes: opts.yes ?? false });
4874
- await deployServicesInParallel(createdServices.map(({ service: service$1, app }) => ({
5633
+ await deployServices(createdServices.map(({ service: service$1, app }) => ({
4875
5634
  serviceId: service$1.id,
4876
5635
  serviceName: app.name,
4877
- path: resolve(process.cwd(), app.root),
4878
5636
  serviceConfig: resolveServiceConf(updatedConfig$1, service$1.id),
4879
5637
  extraFiles: prepareExtraFiles(detectLocalRepo(app.root), { rootDirectory: app.root })
4880
- })));
5638
+ })), {
5639
+ projectRoot: process.cwd(),
5640
+ output: createDeployOutput()
5641
+ });
4881
5642
  return;
4882
5643
  }
4883
5644
  const fw = detection.framework;
@@ -4964,7 +5725,11 @@ async function addServiceFlow(existingConfig, opts) {
4964
5725
  updated: (/* @__PURE__ */ new Date()).toISOString()
4965
5726
  };
4966
5727
  await maybeConfigurePersistentVolume(updatedConfig.services[serviceKey], detection, opts, service.name);
4967
- saveConfig(updatedConfig);
5728
+ const newServiceEntry = updatedConfig.services[serviceKey];
5729
+ patchConfig((raw) => {
5730
+ raw.services[serviceKey] = newServiceEntry;
5731
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
5732
+ });
4968
5733
  info(`Arquivo ${getConfigFileName()} atualizado.`);
4969
5734
  await provisionDatabases(updatedConfig, { yes: opts.yes ?? false });
4970
5735
  await triggerDeploy(service.id, service.name);
@@ -5019,17 +5784,32 @@ function registerDeploy(cli$1) {
5019
5784
  async function* mcpDeployFlow(opts) {
5020
5785
  const configuredServices = await findServicesFromConfig();
5021
5786
  if (configuredServices.length === 0) throw new Error("Nenhum serviço configurado. Execute 'veloz deploy' no terminal para configurar o projeto primeiro.");
5787
+ const currentConfig = loadConfig();
5788
+ if (currentConfig) await provisionDatabases(currentConfig, { yes: true });
5789
+ let servicesToProcess = configuredServices;
5022
5790
  if (opts.service) {
5023
5791
  const found = configuredServices.find((s) => s.key === opts.service || s.serviceName.toLowerCase() === opts.service.toLowerCase() || s.serviceId === opts.service);
5024
5792
  if (!found) {
5793
+ if ((currentConfig?.databases ? Object.keys(currentConfig.databases).find((k) => k === opts.service || k.toLowerCase() === opts.service.toLowerCase()) : void 0) && currentConfig) {
5794
+ await provisionDatabases(currentConfig, { yes: true });
5795
+ return;
5796
+ }
5025
5797
  const available = configuredServices.map((s) => `${s.key} (${s.serviceName})`).join(", ");
5026
- throw new Error(`Serviço '${opts.service}' não encontrado. Disponíveis: ${available}`);
5798
+ const dbNames = currentConfig?.databases ? Object.keys(currentConfig.databases) : [];
5799
+ const hint = dbNames.length > 0 ? `\nBancos de dados: ${dbNames.join(", ")} (use 'veloz db update' para gerenciar)` : "";
5800
+ throw new Error(`Serviço '${opts.service}' não encontrado. Disponíveis: ${available}${hint}`);
5027
5801
  }
5028
- yield* triggerDeployMcp(found.serviceId, found.serviceName);
5029
- return;
5802
+ servicesToProcess = [found];
5030
5803
  }
5031
- if (configuredServices.length === 1) yield* triggerDeployMcp(configuredServices[0].serviceId, configuredServices[0].serviceName);
5032
- else for (const svc of configuredServices) yield* triggerDeployMcp(svc.serviceId, svc.serviceName);
5804
+ const results = await deployServices(await computeExtraFilesForServices(servicesToProcess), {
5805
+ projectRoot: process.cwd(),
5806
+ output: createDeployOutput()
5807
+ });
5808
+ for (const result of results) yield {
5809
+ type: "result",
5810
+ message: result.status === "LIVE" ? "Deploy concluído" : `Deploy finalizou: ${result.status}`,
5811
+ data: result
5812
+ };
5033
5813
  }
5034
5814
  async function cliDeployFlow(opts) {
5035
5815
  if (opts.verbose) process.env.VELOZ_VERBOSE = "true";
@@ -5064,10 +5844,21 @@ async function cliDeployFlow(opts) {
5064
5844
  if (opts.service) {
5065
5845
  const found = configuredServices.find((s) => s.key === opts.service || s.serviceName.toLowerCase() === opts.service.toLowerCase() || s.serviceId === opts.service);
5066
5846
  if (!found) {
5847
+ const dbMatch = currentConfig?.databases ? Object.keys(currentConfig.databases).find((k) => k === opts.service || k.toLowerCase() === opts.service.toLowerCase()) : void 0;
5848
+ if (dbMatch && currentConfig) {
5849
+ info(`"${dbMatch}" é um banco de dados. Aplicando alterações de configuração...`);
5850
+ await provisionDatabases(currentConfig, { yes: opts.yes ?? false });
5851
+ return;
5852
+ }
5067
5853
  const available = configuredServices.map((s) => ` • ${s.key} (${s.serviceName})`).join("\n");
5068
- throw new Error(`Serviço '${opts.service}' não encontrado.\n\nServiços disponíveis:\n${available}`);
5854
+ const dbNames = currentConfig?.databases ? Object.keys(currentConfig.databases) : [];
5855
+ const dbHint = dbNames.length > 0 ? `\n\nBancos de dados:\n${dbNames.map((n) => ` • ${n} (banco de dados)`).join("\n")}` : "";
5856
+ throw new Error(`Serviço '${opts.service}' não encontrado.\n\nServiços disponíveis:\n${available}${dbHint}`);
5069
5857
  }
5070
- await triggerDeploy(found.serviceId, found.serviceName);
5858
+ await deployServices(await computeExtraFilesForServices([found]), {
5859
+ projectRoot: process.cwd(),
5860
+ output: createDeployOutput()
5861
+ });
5071
5862
  return;
5072
5863
  }
5073
5864
  if (opts.all || opts.yes || configuredServices.length === 1) {
@@ -5083,8 +5874,10 @@ async function cliDeployFlow(opts) {
5083
5874
  return;
5084
5875
  }
5085
5876
  }
5086
- if (configuredServices.length === 1) await triggerDeploy(configuredServices[0].serviceId, configuredServices[0].serviceName);
5087
- else await deployServicesInParallel(await computeExtraFilesForServices(configuredServices));
5877
+ await deployServices(await computeExtraFilesForServices(configuredServices), {
5878
+ projectRoot: process.cwd(),
5879
+ output: createDeployOutput()
5880
+ });
5088
5881
  return;
5089
5882
  }
5090
5883
  console.log(chalk.bold("\nServiços disponíveis:\n"));
@@ -5100,8 +5893,11 @@ async function cliDeployFlow(opts) {
5100
5893
  info("Nenhum serviço selecionado.");
5101
5894
  return;
5102
5895
  }
5103
- if (selectedServices.length === 1) await triggerDeploy(selectedServices[0].serviceId, selectedServices[0].serviceName);
5104
- else await deployServicesInParallel(await computeExtraFilesForServices(selectedServices));
5896
+ await deployServices(await computeExtraFilesForServices(selectedServices), {
5897
+ projectRoot: process.cwd(),
5898
+ output: createDeployOutput()
5899
+ });
5900
+ return;
5105
5901
  }
5106
5902
  if (!isGitRepo()) throw new Error("Este diretório não é um repositório git. Inicialize com `git init` e adicione um remote.");
5107
5903
  info("Detectando repositório git...");
@@ -5402,7 +6198,35 @@ function registerPull(cli$1) {
5402
6198
  else config = buildFreshConfig(project, services, databases);
5403
6199
  if (existingConfig && !c.options.force && isInteractive()) config = await pruneRemovedEntries(config, existingConfig, services, databases);
5404
6200
  config.updated = (/* @__PURE__ */ new Date()).toISOString();
5405
- saveConfig(config);
6201
+ if (existingConfig && !c.options.force) patchConfig((raw) => {
6202
+ raw.project = config.project;
6203
+ for (const [key, svc] of Object.entries(config.services)) if (raw.services[key]) {
6204
+ raw.services[key].id = svc.id;
6205
+ raw.services[key].name = svc.name;
6206
+ } else raw.services[key] = svc;
6207
+ for (const key of Object.keys(raw.services)) if (!config.services[key]) delete raw.services[key];
6208
+ const mergedDatabases = config.databases ?? {};
6209
+ if (Object.keys(mergedDatabases).length > 0) {
6210
+ raw.databases ??= {};
6211
+ for (const [key, db] of Object.entries(mergedDatabases)) if (raw.databases[key]) {
6212
+ raw.databases[key].id = db.id;
6213
+ raw.databases[key].name = db.name;
6214
+ raw.databases[key].engine = db.engine;
6215
+ if (db.version) raw.databases[key].version = db.version;
6216
+ } else {
6217
+ raw.databases[key] = {
6218
+ id: db.id,
6219
+ name: db.name,
6220
+ engine: db.engine
6221
+ };
6222
+ if (db.version) raw.databases[key].version = db.version;
6223
+ }
6224
+ for (const key of Object.keys(raw.databases)) if (!mergedDatabases[key]) delete raw.databases[key];
6225
+ if (Object.keys(raw.databases).length === 0) raw.databases = void 0;
6226
+ }
6227
+ raw.updated = config.updated;
6228
+ });
6229
+ else saveConfig(config);
5406
6230
  success(`${getConfigFileName()} atualizado com sucesso.`);
5407
6231
  const svcCount = Object.keys(config.services).length;
5408
6232
  const dbCount = Object.keys(config.databases ?? {}).length;
@@ -5502,7 +6326,7 @@ async function pruneRemovedEntries(config, existingConfig, services, databases)
5502
6326
  //#region src/index.ts
5503
6327
  if (process.argv.includes("--mcp")) process.env.VELOZ_MCP = "true";
5504
6328
  const cli = Cli.create("veloz", {
5505
- version: "0.0.0-beta.17",
6329
+ version: "0.0.0-beta.18",
5506
6330
  description: "CLI da plataforma Veloz — deploy rápido para o Brasil",
5507
6331
  env: z.object({ VELOZ_ENV: z.string().optional().describe("Ambiente alvo (ex: preview, staging)") })
5508
6332
  });