veloz 0.0.0-beta.16 → 0.0.0-beta.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.mjs +1396 -534
  2. package/package.json +3 -1
package/dist/index.mjs CHANGED
@@ -14,6 +14,8 @@ import { createInterface } from "node:readline";
14
14
  import ora from "ora";
15
15
  import { createAuthClient } from "better-auth/client";
16
16
  import { deviceAuthorizationClient } from "better-auth/client/plugins";
17
+ import net from "node:net";
18
+ import WebSocket from "ws";
17
19
  import { link, mkdir, mkdtemp, readdir, rm, stat } from "node:fs/promises";
18
20
  import ignore from "ignore";
19
21
  import tar from "tar";
@@ -51,6 +53,131 @@ const DATABASE_ENGINE_DEFAULTS = {
51
53
  defaultVersion: "7"
52
54
  }
53
55
  };
56
+ /** Default size tiers for disk-based engines (PostgreSQL, MySQL). */
57
+ const DISK_ENGINE_SIZES = {
58
+ basico: {
59
+ label: "Básico",
60
+ cpu: "250m",
61
+ memory: "256Mi",
62
+ cpuLabel: "0.25 vCPU",
63
+ memoryLabel: "256 MB"
64
+ },
65
+ essencial: {
66
+ label: "Essencial",
67
+ cpu: "500m",
68
+ memory: "512Mi",
69
+ cpuLabel: "0.5 vCPU",
70
+ memoryLabel: "512 MB"
71
+ },
72
+ turbo: {
73
+ label: "Turbo",
74
+ cpu: "1",
75
+ memory: "1Gi",
76
+ cpuLabel: "1 vCPU",
77
+ memoryLabel: "1 GB"
78
+ },
79
+ "turbo-plus": {
80
+ label: "Turbo Plus",
81
+ cpu: "1500m",
82
+ memory: "2Gi",
83
+ cpuLabel: "1.5 vCPU",
84
+ memoryLabel: "2 GB"
85
+ },
86
+ nitro: {
87
+ label: "Nitro",
88
+ cpu: "2",
89
+ memory: "4Gi",
90
+ cpuLabel: "2 vCPU",
91
+ memoryLabel: "4 GB"
92
+ },
93
+ "nitro-plus": {
94
+ label: "Nitro Plus",
95
+ cpu: "4",
96
+ memory: "8Gi",
97
+ cpuLabel: "4 vCPU",
98
+ memoryLabel: "8 GB"
99
+ }
100
+ };
101
+ /** Redis size tiers — memory-heavy since Redis is purely in-memory, CPU stays minimal. */
102
+ const REDIS_SIZES = {
103
+ basico: {
104
+ label: "Básico",
105
+ cpu: "150m",
106
+ memory: "128Mi",
107
+ cpuLabel: "0.15 vCPU",
108
+ memoryLabel: "128 MB"
109
+ },
110
+ essencial: {
111
+ label: "Essencial",
112
+ cpu: "250m",
113
+ memory: "256Mi",
114
+ cpuLabel: "0.25 vCPU",
115
+ memoryLabel: "256 MB"
116
+ },
117
+ turbo: {
118
+ label: "Turbo",
119
+ cpu: "250m",
120
+ memory: "512Mi",
121
+ cpuLabel: "0.25 vCPU",
122
+ memoryLabel: "512 MB"
123
+ },
124
+ "turbo-plus": {
125
+ label: "Turbo Plus",
126
+ cpu: "500m",
127
+ memory: "1Gi",
128
+ cpuLabel: "0.5 vCPU",
129
+ memoryLabel: "1 GB"
130
+ },
131
+ nitro: {
132
+ label: "Nitro",
133
+ cpu: "500m",
134
+ memory: "2Gi",
135
+ cpuLabel: "0.5 vCPU",
136
+ memoryLabel: "2 GB"
137
+ },
138
+ "nitro-plus": {
139
+ label: "Nitro Plus",
140
+ cpu: "1",
141
+ memory: "4Gi",
142
+ cpuLabel: "1 vCPU",
143
+ memoryLabel: "4 GB"
144
+ }
145
+ };
146
+ /** Per-engine size tier maps. */
147
+ const DATABASE_SIZES_BY_ENGINE = {
148
+ postgresql: DISK_ENGINE_SIZES,
149
+ mysql: DISK_ENGINE_SIZES,
150
+ redis: REDIS_SIZES
151
+ };
152
+ /**
153
+ * Flat union of all size keys across engines.
154
+ * Kept for backward compatibility (Zod schemas, Prisma column).
155
+ */
156
+ const DATABASE_SIZES = {
157
+ ...DISK_ENGINE_SIZES,
158
+ ...REDIS_SIZES
159
+ };
160
+ /** All unique size keys across all engines (for Zod validation). */
161
+ const DATABASE_SIZE_KEYS = [...new Set([...Object.keys(DISK_ENGINE_SIZES), ...Object.keys(REDIS_SIZES)])];
162
+ /** Get size tiers for a specific engine. */
163
+ function getDatabaseSizes(engine) {
164
+ return DATABASE_SIZES_BY_ENGINE[engine];
165
+ }
166
+ /** Get size option list for a specific engine (for UI selectors). */
167
+ function getDatabaseSizeOptions(engine) {
168
+ const sizes = getDatabaseSizes(engine);
169
+ return Object.entries(sizes).map(([key, tier]) => ({
170
+ value: key,
171
+ label: tier.label,
172
+ description: `${tier.cpuLabel} · ${tier.memoryLabel}`
173
+ }));
174
+ }
175
+ /** @deprecated Use getDatabaseSizeOptions(engine) instead. */
176
+ const DATABASE_SIZE_OPTIONS = getDatabaseSizeOptions("postgresql");
177
+ function resolveDatabaseSize(cpu, memory) {
178
+ for (const [key, tier] of Object.entries(DATABASE_SIZES)) if (tier.cpu === cpu && tier.memory === memory) return key;
179
+ return null;
180
+ }
54
181
 
55
182
  //#endregion
56
183
  //#region ../../packages/config/veloz-config.ts
@@ -63,17 +190,22 @@ const PackageManagerSchema = z$1.enum([
63
190
  "bun",
64
191
  "auto"
65
192
  ]);
193
+ const BuildMethodSchema = z$1.enum(["nixpacks", "dockerfile"]);
66
194
  const BuildConfigSchema = z$1.object({
195
+ method: BuildMethodSchema.default("nixpacks").optional(),
67
196
  command: z$1.string().nullable().optional(),
68
197
  nodeVersion: z$1.string().regex(/^[0-9]+(\.[0-9]+){0,2}(\.x)?$/).default("20").optional(),
69
198
  nixpkgsArchive: z$1.string().regex(/^[a-f0-9]{40}$/).optional(),
70
199
  packageManager: PackageManagerSchema.default("auto").optional(),
71
200
  installCommand: z$1.string().nullable().optional(),
72
201
  outputDir: z$1.string().nullable().optional(),
73
- aptPackages: z$1.array(z$1.string().regex(/^[a-z0-9][a-z0-9.+-]+$/, "Nome de pacote inválido")).optional()
202
+ aptPackages: z$1.array(z$1.string().regex(/^[a-z0-9][a-z0-9.+-]+$/, "Nome de pacote inválido")).optional(),
203
+ dockerfile: z$1.string().optional(),
204
+ context: z$1.string().optional()
74
205
  });
75
206
  const RuntimeConfigSchema = z$1.object({
76
207
  command: z$1.string().nullable().optional(),
208
+ preStartCommand: z$1.string().nullable().optional(),
77
209
  port: z$1.number().min(1).max(65535).default(3e3).optional(),
78
210
  fsGroup: z$1.number().int().min(0).max(65534).default(1001).optional(),
79
211
  healthCheck: z$1.object({
@@ -109,10 +241,6 @@ const VolumeConfigSchema = z$1.object({
109
241
  ].some((p) => value === p || value.startsWith(p + "/")), "Caminho de montagem não permitido por segurança"),
110
242
  sizeGb: z$1.number().int().min(10).max(100).optional().default(10)
111
243
  });
112
- const DatabaseResourcesSchema = z$1.object({
113
- cpu: z$1.string().regex(/^[0-9]+(\.[0-9]+)?|[0-9]+m$/).default("500m").optional(),
114
- memory: z$1.string().regex(/^[0-9]+(Mi|Gi)$/).default("512Mi").optional()
115
- });
116
244
  const PoolerConfigSchema = z$1.object({
117
245
  enabled: z$1.boolean().default(false),
118
246
  poolMode: z$1.enum([
@@ -123,6 +251,8 @@ const PoolerConfigSchema = z$1.object({
123
251
  defaultPoolSize: z$1.number().int().min(1).max(200).default(20).optional(),
124
252
  maxClientConn: z$1.number().int().min(1).max(1e4).default(100).optional()
125
253
  });
254
+ const LOWERCASE_SIZE_KEYS = DATABASE_SIZE_KEYS.map((k) => k);
255
+ const DatabaseSizeSchema = z$1.enum(LOWERCASE_SIZE_KEYS);
126
256
  const DatabaseConfigSchema = z$1.object({
127
257
  id: z$1.string().optional(),
128
258
  name: z$1.string().optional(),
@@ -132,13 +262,13 @@ const DatabaseConfigSchema = z$1.object({
132
262
  "redis"
133
263
  ]),
134
264
  version: z$1.string().optional(),
135
- storage: z$1.string().regex(/^[0-9]+(Gi)$/).default("10Gi").optional(),
136
- resources: DatabaseResourcesSchema.optional(),
265
+ storage: z$1.string().regex(/^[0-9]+(Gi)$/).refine((val) => Number.parseInt(val, 10) >= 10, "Tamanho mínimo de storage é 10Gi").optional(),
266
+ size: DatabaseSizeSchema.optional(),
137
267
  pooler: PoolerConfigSchema.optional(),
138
268
  fromTemplate: z$1.string().optional()
139
269
  });
140
270
  const ServiceConfigSchema = z$1.object({
141
- id: z$1.string(),
271
+ id: z$1.string().optional(),
142
272
  name: z$1.string(),
143
273
  type: ServiceTypeSchema.default("web"),
144
274
  root: z$1.string().default(".").optional(),
@@ -294,7 +424,10 @@ function loadConfig() {
294
424
  return config;
295
425
  }
296
426
  /**
297
- * Save the veloz.json config to project root
427
+ * Save the veloz.json config to project root.
428
+ *
429
+ * When possible, prefer `patchConfig` to avoid overwriting user-authored
430
+ * fields with Zod-applied defaults.
298
431
  */
299
432
  function saveConfig(config) {
300
433
  const path = getConfigPath();
@@ -305,6 +438,18 @@ function saveConfig(config) {
305
438
  writeFileSync(path, JSON.stringify(configWithSchema, null, 2), "utf-8");
306
439
  }
307
440
  /**
441
+ * Apply targeted patches to veloz.json without running Zod parse.
442
+ * Reads the raw JSON as VelozConfig (type-only cast, no transforms/defaults),
443
+ * applies the patch function, and writes back — preserving user-authored values.
444
+ */
445
+ function patchConfig(patchFn) {
446
+ const path = getConfigPath();
447
+ if (!existsSync(path)) throw new Error(`${getConfigFileName()} not found`);
448
+ const raw = JSON.parse(readFileSync(path, "utf-8"));
449
+ patchFn(raw);
450
+ writeFileSync(path, JSON.stringify(raw, null, 2), "utf-8");
451
+ }
452
+ /**
308
453
  * Require config to exist, throw if not found
309
454
  */
310
455
  function requireConfig() {
@@ -811,6 +956,10 @@ function throwNotFound(flag, entries) {
811
956
  * 3. Default from `veloz use` → remembered choice
812
957
  * 4. Interactive prompt → last resort
813
958
  */
959
+ function requireServiceId(service) {
960
+ if (!service.id) throw new Error(`Serviço "${service.name}" não possui ID. Execute 'veloz deploy' para vincular o serviço.`);
961
+ return service;
962
+ }
814
963
  async function resolveService(serviceFlag) {
815
964
  const config = requireConfig();
816
965
  const entries = Object.entries(config.services);
@@ -821,7 +970,7 @@ async function resolveService(serviceFlag) {
821
970
  const [key, service] = found;
822
971
  return {
823
972
  key,
824
- service: mergeServiceWithDefaults(service, config.defaults),
973
+ service: requireServiceId(mergeServiceWithDefaults(service, config.defaults)),
825
974
  config
826
975
  };
827
976
  }
@@ -829,7 +978,7 @@ async function resolveService(serviceFlag) {
829
978
  const [key, service] = entries[0];
830
979
  return {
831
980
  key,
832
- service: mergeServiceWithDefaults(service, config.defaults),
981
+ service: requireServiceId(mergeServiceWithDefaults(service, config.defaults)),
833
982
  config
834
983
  };
835
984
  }
@@ -838,7 +987,7 @@ async function resolveService(serviceFlag) {
838
987
  const service = config.services[defaultKey];
839
988
  return {
840
989
  key: defaultKey,
841
- service: mergeServiceWithDefaults(service, config.defaults),
990
+ service: requireServiceId(mergeServiceWithDefaults(service, config.defaults)),
842
991
  config
843
992
  };
844
993
  }
@@ -848,13 +997,12 @@ async function resolveService(serviceFlag) {
848
997
  })));
849
998
  return {
850
999
  key: selectedKey,
851
- service: mergeServiceWithDefaults(config.services[selectedKey], config.defaults),
1000
+ service: requireServiceId(mergeServiceWithDefaults(config.services[selectedKey], config.defaults)),
852
1001
  config
853
1002
  };
854
1003
  }
855
1004
  async function resolveServiceId(serviceFlag) {
856
1005
  const { service } = await resolveService(serviceFlag);
857
- if (!service.id) throw new Error(`Serviço "${service.name}" não possui ID. Execute 'veloz deploy' para vincular o serviço.`);
858
1006
  return service.id;
859
1007
  }
860
1008
  function resolveAllServices(serviceFlag) {
@@ -947,6 +1095,7 @@ envGroup.command("set", {
947
1095
  const serviceId = await resolveServiceId(c.options.service);
948
1096
  const client = await getClient();
949
1097
  const pares = c.args.pares.split(/\s+/);
1098
+ const allWarnings = [];
950
1099
  for (const par of pares) {
951
1100
  const eqIndex = par.indexOf("=");
952
1101
  if (eqIndex === -1) {
@@ -959,13 +1108,15 @@ envGroup.command("set", {
959
1108
  spin.stop();
960
1109
  throw new Error("Chave não pode estar vazia.");
961
1110
  }
962
- await client.envVars.set({
1111
+ const result = await client.envVars.set({
963
1112
  serviceId,
964
1113
  key,
965
1114
  value
966
1115
  });
1116
+ if (result.warnings) allWarnings.push(...result.warnings);
967
1117
  }
968
1118
  spin.stop();
1119
+ for (const w of allWarnings) warn(w);
969
1120
  if (pares.length === 1) {
970
1121
  const key = pares[0].slice(0, pares[0].indexOf("="));
971
1122
  success(`Variável ${chalk.bold(key)} definida com sucesso.`);
@@ -1061,11 +1212,12 @@ envGroup.command("import", {
1061
1212
  key: envVar.key
1062
1213
  });
1063
1214
  }
1064
- await client.envVars.setBulk({
1215
+ const result = await client.envVars.setBulk({
1065
1216
  serviceId,
1066
1217
  vars: envVars
1067
1218
  });
1068
1219
  spin.stop();
1220
+ if (result.warnings) for (const w of result.warnings) warn(w);
1069
1221
  success(`${varsCount} variável(is) importada(s) com sucesso!`);
1070
1222
  info("Faça um novo deploy para aplicar as alterações.");
1071
1223
  }
@@ -1225,9 +1377,11 @@ function updateServiceVolumesInConfig(serviceKey, updater) {
1225
1377
  if (!config) return false;
1226
1378
  const currentService = config.services[serviceKey];
1227
1379
  if (!currentService) return false;
1228
- currentService.volumes = updater([...currentService.volumes ?? []]);
1229
- config.updated = (/* @__PURE__ */ new Date()).toISOString();
1230
- saveConfig(config);
1380
+ const updatedVolumes = updater([...currentService.volumes ?? []]);
1381
+ patchConfig((raw) => {
1382
+ if (raw.services[serviceKey]) raw.services[serviceKey].volumes = updatedVolumes;
1383
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
1384
+ });
1231
1385
  return true;
1232
1386
  }
1233
1387
 
@@ -1454,6 +1608,7 @@ function printServiceConfig(service) {
1454
1608
  console.log(` ${chalk.bold("Root Dir:")} ${formatValue(service.rootDirectory || "/")}`);
1455
1609
  console.log(` ${chalk.bold("Build Command:")} ${formatValue(service.buildCommand)}`);
1456
1610
  console.log(` ${chalk.bold("Start Command:")} ${formatValue(service.startCommand)}`);
1611
+ console.log(` ${chalk.bold("Pre-Start Cmd:")} ${formatValue(service.preStartCommand)}`);
1457
1612
  console.log(` ${chalk.bold("Porta:")} ${formatValue(service.port)}`);
1458
1613
  console.log(` ${chalk.bold("Instâncias:")} ${formatValue(service.instanceCount)}`);
1459
1614
  console.log(` ${chalk.bold("CPU Limit:")} ${formatValue(service.cpuLimit)}`);
@@ -1471,6 +1626,7 @@ configGroup.command("show", {
1471
1626
  rootDirectory: z.string().nullable(),
1472
1627
  buildCommand: z.string().nullable(),
1473
1628
  startCommand: z.string().nullable(),
1629
+ preStartCommand: z.string().nullable(),
1474
1630
  port: z.number().nullable(),
1475
1631
  instanceCount: z.number().nullable(),
1476
1632
  cpuLimit: z.string().nullable(),
@@ -1499,6 +1655,7 @@ configGroup.command("show", {
1499
1655
  rootDirectory: svc.rootDirectory ?? null,
1500
1656
  buildCommand: svc.buildCommand ?? null,
1501
1657
  startCommand: svc.startCommand ?? null,
1658
+ preStartCommand: svc.preStartCommand ?? null,
1502
1659
  port: svc.port ?? null,
1503
1660
  instanceCount: svc.instanceCount ?? null,
1504
1661
  cpuLimit: svc.cpuLimit ?? null,
@@ -1513,6 +1670,7 @@ configGroup.command("set", {
1513
1670
  name: z.string().optional().describe("Nome do serviço"),
1514
1671
  build: z.string().optional().describe("Comando de build"),
1515
1672
  start: z.string().optional().describe("Comando de start"),
1673
+ preStart: z.string().optional().describe("Comando executado antes de iniciar o serviço (ex: migrations)"),
1516
1674
  port: z.string().optional().describe("Porta do serviço"),
1517
1675
  root: z.string().optional().describe("Diretório raiz"),
1518
1676
  instances: z.string().optional().describe("Número de instâncias"),
@@ -1536,6 +1694,7 @@ configGroup.command("set", {
1536
1694
  if (c.options.name) updates.name = c.options.name;
1537
1695
  if (c.options.build !== void 0) updates.buildCommand = c.options.build === "none" ? null : c.options.build;
1538
1696
  if (c.options.start !== void 0) updates.startCommand = c.options.start === "none" ? null : c.options.start;
1697
+ if (c.options.preStart !== void 0) updates.preStartCommand = c.options.preStart === "none" ? null : c.options.preStart;
1539
1698
  if (c.options.port) updates.port = parseInt(c.options.port, 10);
1540
1699
  if (c.options.root !== void 0) updates.rootDirectory = c.options.root === "/" ? null : c.options.root;
1541
1700
  if (c.options.instances) updates.instanceCount = parseInt(c.options.instances, 10);
@@ -1584,6 +1743,8 @@ configGroup.command("edit", {
1584
1743
  if (buildCmd) updates.buildCommand = buildCmd === "none" ? null : buildCmd;
1585
1744
  const startCmd = await prompt(`Start command ${chalk.dim(`(${svc.startCommand || "—"})`)}: `);
1586
1745
  if (startCmd) updates.startCommand = startCmd === "none" ? null : startCmd;
1746
+ const preStartCmd = await prompt(`Pre-start command ${chalk.dim(`(${svc.preStartCommand || "—"})`)}: `);
1747
+ if (preStartCmd) updates.preStartCommand = preStartCmd === "none" ? null : preStartCmd;
1587
1748
  const port = await prompt(`Porta ${chalk.dim(`(${svc.port})`)}: `);
1588
1749
  if (port) updates.port = parseInt(port, 10);
1589
1750
  const rootDir = await prompt(`Diretório raiz ${chalk.dim(`(${svc.rootDirectory || "/"})`)}: `);
@@ -1615,6 +1776,7 @@ configGroup.command("reset", {
1615
1776
  options: z.object({
1616
1777
  build: z.boolean().default(false).describe("Resetar comando de build"),
1617
1778
  start: z.boolean().default(false).describe("Resetar comando de start"),
1779
+ preStart: z.boolean().default(false).describe("Resetar comando de pre-start"),
1618
1780
  all: z.boolean().default(false).describe("Resetar todas as configurações opcionais"),
1619
1781
  service: z.string().optional().describe("Serviço alvo (chave ou nome)")
1620
1782
  }),
@@ -1625,13 +1787,15 @@ configGroup.command("reset", {
1625
1787
  if (c.options.all) {
1626
1788
  updates.buildCommand = null;
1627
1789
  updates.startCommand = null;
1790
+ updates.preStartCommand = null;
1628
1791
  updates.rootDirectory = null;
1629
1792
  } else {
1630
1793
  if (c.options.build) updates.buildCommand = null;
1631
1794
  if (c.options.start) updates.startCommand = null;
1795
+ if (c.options.preStart) updates.preStartCommand = null;
1632
1796
  }
1633
1797
  if (Object.keys(updates).length === 0) {
1634
- warn("Especifique o que resetar: --build, --start, ou --all");
1798
+ warn("Especifique o que resetar: --build, --start, --pre-start, ou --all");
1635
1799
  return;
1636
1800
  }
1637
1801
  await withSpinner({
@@ -1865,8 +2029,7 @@ dbGroup.command("create", {
1865
2029
  engine: z.string().optional().describe("Engine (postgresql, mysql, redis)"),
1866
2030
  engineVersion: z.string().optional().describe("Versão do engine"),
1867
2031
  storage: z.string().optional().describe("Armazenamento (ex: 10Gi, 20Gi)"),
1868
- cpu: z.string().optional().describe("Limite de CPU (ex: 500m, 1)"),
1869
- memory: z.string().optional().describe("Limite de memória (ex: 512Mi, 1Gi)"),
2032
+ size: z.string().optional().describe("Tier de recursos (basico, essencial, turbo, turbo-plus, nitro, nitro-plus)"),
1870
2033
  pooler: z.boolean().optional().describe("Habilitar PgBouncer (apenas PostgreSQL)")
1871
2034
  }),
1872
2035
  async run(c) {
@@ -1917,30 +2080,25 @@ dbGroup.command("create", {
1917
2080
  engine: validEngine,
1918
2081
  engineVersion: version,
1919
2082
  storage,
1920
- cpuLimit: c.options.cpu,
1921
- memoryLimit: c.options.memory,
2083
+ size: c.options.size,
1922
2084
  poolerEnabled: c.options.pooler
1923
2085
  })
1924
2086
  });
1925
2087
  success(`Banco de dados ${chalk.bold(db.name)} criado! Provisionando...`);
1926
2088
  info("Use 'veloz db credentials " + db.name + "' para ver as credenciais quando estiver pronto.");
1927
- const config = loadConfig();
1928
- if (config) {
1929
- const updatedDatabases = { ...config.databases };
1930
- updatedDatabases[name] = {
1931
- id: db.id,
1932
- engine: validEngine,
1933
- version: version ?? void 0,
1934
- storage: storage ?? void 0,
1935
- ...c.options.cpu || c.options.memory ? { resources: {
1936
- ...c.options.cpu && { cpu: c.options.cpu },
1937
- ...c.options.memory && { memory: c.options.memory }
1938
- } } : {},
1939
- ...c.options.pooler ? { pooler: { enabled: true } } : {}
1940
- };
1941
- config.databases = updatedDatabases;
1942
- config.updated = (/* @__PURE__ */ new Date()).toISOString();
1943
- saveConfig(config);
2089
+ if (loadConfig()) {
2090
+ patchConfig((raw) => {
2091
+ raw.databases ??= {};
2092
+ raw.databases[name] = {
2093
+ id: db.id,
2094
+ engine: validEngine,
2095
+ ...version ? { version } : {},
2096
+ ...storage ? { storage } : {},
2097
+ ...c.options.size ? { size: c.options.size } : {},
2098
+ ...c.options.pooler ? { pooler: { enabled: true } } : {}
2099
+ };
2100
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
2101
+ });
1944
2102
  info(`Adicionado ao ${getConfigFileName()}.`);
1945
2103
  }
1946
2104
  }
@@ -2006,10 +2164,13 @@ dbGroup.command("delete", {
2006
2164
  if (config?.databases) {
2007
2165
  const key = Object.entries(config.databases).find(([k, d]) => d.id === db.id || k === db.name)?.[0];
2008
2166
  if (key) {
2009
- const { [key]: _, ...rest } = config.databases;
2010
- config.databases = Object.keys(rest).length > 0 ? rest : void 0;
2011
- config.updated = (/* @__PURE__ */ new Date()).toISOString();
2012
- saveConfig(config);
2167
+ patchConfig((raw) => {
2168
+ if (raw.databases) {
2169
+ delete raw.databases[key];
2170
+ if (Object.keys(raw.databases).length === 0) raw.databases = void 0;
2171
+ }
2172
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
2173
+ });
2013
2174
  info(`Removido do ${getConfigFileName()}.`);
2014
2175
  }
2015
2176
  }
@@ -2029,6 +2190,291 @@ dbGroup.command("restart", {
2029
2190
  success(`Banco de dados ${chalk.bold(db.name)} reiniciado com sucesso.`);
2030
2191
  }
2031
2192
  });
2193
+ dbGroup.command("update", {
2194
+ description: "Atualizar configurações de um banco de dados",
2195
+ middleware: [requireAuth],
2196
+ args: z.object({ name: z.string().describe("Nome ou ID do banco de dados") }),
2197
+ options: z.object({
2198
+ size: z.string().optional().describe("Tier de recursos (basico, essencial, turbo, turbo-plus, nitro, nitro-plus)"),
2199
+ storage: z.string().optional().describe("Armazenamento (ex: 20Gi, 50Gi)"),
2200
+ pooler: z.boolean().optional().describe("Habilitar/desabilitar PgBouncer"),
2201
+ poolMode: z.string().optional().describe("Modo do pooler (transaction, session, statement)"),
2202
+ poolSize: z.number().optional().describe("Pool size padrão (1-200)"),
2203
+ maxConnections: z.number().optional().describe("Máximo de conexões do cliente (1-10000)")
2204
+ }),
2205
+ async run(c) {
2206
+ const db = await resolveDatabaseByName(getProjectId$1(), c.args.name);
2207
+ const client = await getClient();
2208
+ const hasFlags = c.options.size !== void 0 || c.options.storage !== void 0 || c.options.pooler !== void 0 || c.options.poolMode !== void 0 || c.options.poolSize !== void 0 || c.options.maxConnections !== void 0;
2209
+ let size = c.options.size;
2210
+ let storage = c.options.storage;
2211
+ let poolerEnabled = c.options.pooler;
2212
+ let poolMode = c.options.poolMode;
2213
+ let poolSize = c.options.poolSize;
2214
+ let maxConnections = c.options.maxConnections;
2215
+ if (!hasFlags && isInteractive()) {
2216
+ const engineSizes = getDatabaseSizes(db.engine ?? "postgresql");
2217
+ const engineSizeKeys = Object.keys(engineSizes);
2218
+ const currentSizeLabel = db.size ? engineSizes[db.size]?.label ?? db.size : "—";
2219
+ console.log(`\n ${chalk.dim("Tier atual:")} ${currentSizeLabel}`);
2220
+ if (await promptConfirm("Alterar tier de recursos?", false)) size = await promptSelect("Novo tier:", engineSizeKeys.map((k) => {
2221
+ const s = engineSizes[k];
2222
+ const current = k === db.size ? " (atual)" : "";
2223
+ return {
2224
+ label: `${s.label} — ${s.cpuLabel}, ${s.memoryLabel}${current}`,
2225
+ value: k
2226
+ };
2227
+ }));
2228
+ const currentStorageGb = db.storageGb;
2229
+ console.log(` ${chalk.dim("Storage atual:")} ${currentStorageGb} GB`);
2230
+ if (await promptConfirm("Alterar armazenamento?", false)) {
2231
+ const storageOptions = STORAGE_OPTIONS.filter((s) => parseInt(s.replace("Gi", ""), 10) > currentStorageGb);
2232
+ if (storageOptions.length === 0) info("Já está no armazenamento máximo disponível.");
2233
+ else storage = await promptSelect("Novo armazenamento:", storageOptions.map((s) => ({
2234
+ label: formatStorageLabel(s),
2235
+ value: s
2236
+ })));
2237
+ }
2238
+ if (db.engine === "postgresql") {
2239
+ const poolerStatus = db.poolerEnabled ? "ativado" : "desativado";
2240
+ console.log(` ${chalk.dim("Pooler:")} ${poolerStatus}`);
2241
+ if (await promptConfirm("Alterar configuração do pooler?", false)) {
2242
+ poolerEnabled = await promptConfirm("Habilitar PgBouncer?", db.poolerEnabled);
2243
+ if (poolerEnabled) poolMode = await promptSelect("Modo do pool:", [
2244
+ {
2245
+ label: "Transaction (padrão)",
2246
+ value: "transaction"
2247
+ },
2248
+ {
2249
+ label: "Session",
2250
+ value: "session"
2251
+ },
2252
+ {
2253
+ label: "Statement",
2254
+ value: "statement"
2255
+ }
2256
+ ]);
2257
+ }
2258
+ }
2259
+ }
2260
+ const validEngineSizes = getDatabaseSizes(db.engine ?? "postgresql");
2261
+ const validSizeKeys = Object.keys(validEngineSizes);
2262
+ if (size && !validSizeKeys.includes(size)) throw new Error(`Tier inválido: ${size}. Opções para ${db.engine}: ${validSizeKeys.join(", ")}`);
2263
+ let updated = false;
2264
+ if (size && size !== db.size) if (db.databaseStatus !== "LIVE") warn(`Banco de dados não está LIVE — não é possível alterar recursos agora.`);
2265
+ else {
2266
+ const sizeConfig = validEngineSizes[size];
2267
+ await withSpinner({
2268
+ text: `Atualizando tier para ${sizeConfig.label} (${sizeConfig.cpuLabel}, ${sizeConfig.memoryLabel})...`,
2269
+ fn: () => client.databases.updateResources({
2270
+ serviceId: db.id,
2271
+ size
2272
+ })
2273
+ });
2274
+ success(`Tier atualizado para ${sizeConfig.label}.`);
2275
+ updated = true;
2276
+ }
2277
+ if (storage) {
2278
+ const desiredGb = parseInt(storage.replace("Gi", ""), 10);
2279
+ if (isNaN(desiredGb) || desiredGb <= 0) throw new Error(`Storage inválido: ${storage}. Use formato como "20Gi".`);
2280
+ if (desiredGb <= db.storageGb) warn(`Storage solicitado (${desiredGb} GB) não é maior que o atual (${db.storageGb} GB). Redução não é suportada.`);
2281
+ else {
2282
+ const dataVolume = (await client.volumes.list({ serviceId: db.id })).find((v) => v.name === "data");
2283
+ if (!dataVolume) warn("Volume de dados não encontrado. O banco pode ainda estar sendo provisionado.");
2284
+ else {
2285
+ await withSpinner({
2286
+ text: `Redimensionando armazenamento de ${db.storageGb} GB para ${desiredGb} GB...`,
2287
+ fn: () => client.volumes.update({
2288
+ volumeId: dataVolume.id,
2289
+ sizeGb: desiredGb
2290
+ })
2291
+ });
2292
+ success(`Armazenamento atualizado para ${desiredGb} GB.`);
2293
+ updated = true;
2294
+ }
2295
+ }
2296
+ }
2297
+ if (poolerEnabled !== void 0 || poolMode || poolSize || maxConnections) if (db.engine !== "postgresql") warn(`Connection pooler só é suportado para PostgreSQL.`);
2298
+ else if (db.databaseStatus !== "LIVE") warn(`Banco de dados não está LIVE — não é possível alterar o pooler agora.`);
2299
+ else {
2300
+ await withSpinner({
2301
+ text: "Atualizando configuração do pooler...",
2302
+ fn: () => client.databases.updatePooler({
2303
+ serviceId: db.id,
2304
+ enabled: poolerEnabled ?? db.poolerEnabled,
2305
+ poolMode,
2306
+ defaultPoolSize: poolSize,
2307
+ maxClientConn: maxConnections
2308
+ })
2309
+ });
2310
+ success(`Pooler ${poolerEnabled ?? db.poolerEnabled ? "ativado" : "desativado"}.`);
2311
+ updated = true;
2312
+ }
2313
+ if (!updated) {
2314
+ info("Nenhuma alteração realizada.");
2315
+ return;
2316
+ }
2317
+ if (loadConfig()) {
2318
+ patchConfig((raw) => {
2319
+ raw.databases ??= {};
2320
+ const configKey = Object.entries(raw.databases).find(([k, d]) => d?.id === db.id || k === db.name)?.[0] ?? db.name;
2321
+ raw.databases[configKey] ??= { engine: db.engine };
2322
+ if (size) raw.databases[configKey].size = size;
2323
+ if (storage) raw.databases[configKey].storage = storage;
2324
+ if (poolerEnabled !== void 0 || poolMode || poolSize || maxConnections) {
2325
+ const existing = raw.databases[configKey].pooler ?? { enabled: false };
2326
+ raw.databases[configKey].pooler = {
2327
+ ...existing,
2328
+ ...poolerEnabled !== void 0 ? { enabled: poolerEnabled } : {},
2329
+ ...poolMode ? { poolMode } : {},
2330
+ ...poolSize ? { defaultPoolSize: poolSize } : {},
2331
+ ...maxConnections ? { maxClientConn: maxConnections } : {}
2332
+ };
2333
+ }
2334
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
2335
+ });
2336
+ info(`Atualizado no ${getConfigFileName()}.`);
2337
+ }
2338
+ }
2339
+ });
2340
+ dbGroup.command("query", {
2341
+ description: "Executar uma consulta no banco de dados",
2342
+ middleware: [requireAuth],
2343
+ args: z.object({ name: z.string().describe("Nome ou ID do banco de dados") }),
2344
+ options: z.object({ query: z.string().optional().describe("Consulta SQL ou comando Redis") }),
2345
+ alias: { query: "q" },
2346
+ async run(c) {
2347
+ const db = await resolveDatabaseByName(getProjectId$1(), c.args.name);
2348
+ let query = c.options.query;
2349
+ if (!query) {
2350
+ if (!isInteractive()) throw new Error("Use --query ou -q para fornecer a consulta em modo não-interativo.");
2351
+ query = await prompt(`Consulta ${ENGINE_DISPLAY[db.engine] ?? db.engine} (ex: ${db.engine === "redis" ? "GET chave" : "SELECT * FROM tabela LIMIT 10"}):`);
2352
+ if (!query) throw new Error("Consulta é obrigatória.");
2353
+ }
2354
+ const client = await getClient();
2355
+ console.log(chalk.dim(`\n Executando consulta em ${chalk.bold(db.name)}...\n`));
2356
+ const stream = await client.databases.executeQuery({
2357
+ serviceId: db.id,
2358
+ query
2359
+ });
2360
+ let hasOutput = false;
2361
+ for await (const event of stream) switch (event.type) {
2362
+ case "output":
2363
+ hasOutput = true;
2364
+ console.log(event.content);
2365
+ break;
2366
+ case "error":
2367
+ console.log(chalk.red(event.content));
2368
+ break;
2369
+ case "status":
2370
+ if (event.content === "Consulta finalizada.") {
2371
+ if (!hasOutput) info("Consulta executada sem retorno.");
2372
+ console.log();
2373
+ }
2374
+ break;
2375
+ }
2376
+ }
2377
+ });
2378
+ const ENGINE_DEFAULT_PORTS = {
2379
+ postgresql: 5432,
2380
+ mysql: 3306,
2381
+ redis: 6379
2382
+ };
2383
+ const ENGINE_CONNECT_HINTS = {
2384
+ postgresql: (p) => `psql -h localhost -p ${p}`,
2385
+ mysql: (p) => `mysql -h 127.0.0.1 -P ${p}`,
2386
+ redis: (p) => `redis-cli -p ${p}`
2387
+ };
2388
+ dbGroup.command("tunnel", {
2389
+ description: "Criar túnel local para um banco de dados",
2390
+ middleware: [requireAuth],
2391
+ args: z.object({ name: z.string().describe("Nome ou ID do banco de dados") }),
2392
+ options: z.object({ port: z.number().optional().describe("Porta local (padrão: porta do engine)") }),
2393
+ alias: { port: "p" },
2394
+ async run(c) {
2395
+ const db = await resolveDatabaseByName(getProjectId$1(), c.args.name);
2396
+ const client = await getClient();
2397
+ const localPort = c.options.port ?? ENGINE_DEFAULT_PORTS[db.engine] ?? 5432;
2398
+ const engineLabel = ENGINE_DISPLAY[db.engine] ?? db.engine;
2399
+ const creds = await withSpinner({
2400
+ text: "Carregando credenciais...",
2401
+ fn: () => client.databases.getCredentials({ serviceId: db.id })
2402
+ });
2403
+ let localUrl;
2404
+ if (db.engine === "redis") localUrl = creds.password ? `redis://:${creds.password}@127.0.0.1:${localPort}` : `redis://127.0.0.1:${localPort}`;
2405
+ else localUrl = `${db.engine === "mysql" ? "mysql" : "postgresql"}://${creds.username}:${creds.password}@127.0.0.1:${localPort}/${creds.database}`;
2406
+ const server = net.createServer(async (socket) => {
2407
+ let session;
2408
+ try {
2409
+ session = await client.databases.createTunnelSession({ serviceId: db.id });
2410
+ } catch (err) {
2411
+ const message = err instanceof Error ? err.message : "Erro desconhecido";
2412
+ console.log(chalk.red(` Falha ao criar sessão de túnel: ${message}`));
2413
+ socket.destroy();
2414
+ return;
2415
+ }
2416
+ const ws = new WebSocket(`${session.proxyUrl}/connect?token=${session.token}`);
2417
+ ws.binaryType = "nodebuffer";
2418
+ let wsReady = false;
2419
+ const pendingData = [];
2420
+ ws.on("open", () => {
2421
+ wsReady = true;
2422
+ for (const chunk of pendingData) ws.send(chunk);
2423
+ pendingData.length = 0;
2424
+ });
2425
+ ws.on("message", (data) => {
2426
+ if (!socket.destroyed) socket.write(data);
2427
+ });
2428
+ ws.on("close", () => {
2429
+ if (!socket.destroyed) socket.destroy();
2430
+ });
2431
+ ws.on("error", (err) => {
2432
+ console.log(chalk.red(` Erro no túnel: ${err.message}`));
2433
+ if (!socket.destroyed) socket.destroy();
2434
+ });
2435
+ socket.on("data", (data) => {
2436
+ if (wsReady) ws.send(data);
2437
+ else pendingData.push(data);
2438
+ });
2439
+ socket.on("close", () => {
2440
+ if (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING) ws.close();
2441
+ });
2442
+ socket.on("error", () => {
2443
+ if (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING) ws.close();
2444
+ });
2445
+ });
2446
+ server.on("error", (err) => {
2447
+ if (err.code === "EADDRINUSE") console.log(chalk.red(`\n Porta ${localPort} já está em uso. Use --port para escolher outra.\n`));
2448
+ else console.log(chalk.red(`\n Erro ao iniciar servidor local: ${err.message}\n`));
2449
+ process.exit(1);
2450
+ });
2451
+ server.listen(localPort, "127.0.0.1", () => {
2452
+ console.log();
2453
+ success(`Túnel ${engineLabel} ativo para ${chalk.bold(db.name)}`);
2454
+ console.log();
2455
+ console.log(` ${chalk.dim("Endereço local:")} ${chalk.bold(`127.0.0.1:${localPort}`)}`);
2456
+ console.log(` ${chalk.dim("URL de conexão:")} ${chalk.cyan(localUrl)}`);
2457
+ const hint = ENGINE_CONNECT_HINTS[db.engine];
2458
+ if (hint) console.log(` ${chalk.dim("Conectar com:")} ${chalk.cyan(hint(localPort))}`);
2459
+ console.log();
2460
+ console.log(chalk.dim(" Pressione Ctrl+C para encerrar o túnel."));
2461
+ console.log();
2462
+ });
2463
+ await new Promise((resolve$1) => {
2464
+ const shutdown = () => {
2465
+ console.log(chalk.dim("\n Encerrando túnel..."));
2466
+ server.close(() => {
2467
+ resolve$1();
2468
+ });
2469
+ setTimeout(() => {
2470
+ resolve$1();
2471
+ }, 3e3);
2472
+ };
2473
+ process.on("SIGINT", shutdown);
2474
+ process.on("SIGTERM", shutdown);
2475
+ });
2476
+ }
2477
+ });
2032
2478
 
2033
2479
  //#endregion
2034
2480
  //#region src/commands/template.ts
@@ -2109,7 +2555,7 @@ templateGroup.command("deploy", {
2109
2555
  projectName: c.options.name
2110
2556
  })
2111
2557
  });
2112
- success(`Template '${template.displayName}' implantado com sucesso!`);
2558
+ success(`Template '${template.displayName}' deploy concluído com sucesso!`);
2113
2559
  console.log(chalk.bold(`\n Projeto: ${result.projectName}`));
2114
2560
  console.log(chalk.dim(` Slug: ${result.projectSlug}`));
2115
2561
  console.log();
@@ -2247,7 +2693,7 @@ function registerLink(cli$1) {
2247
2693
  },
2248
2694
  services: services.map(([key, service]) => ({
2249
2695
  key,
2250
- id: service.id,
2696
+ id: service.id ?? "",
2251
2697
  name: service.name,
2252
2698
  type: service.type
2253
2699
  }))
@@ -2284,13 +2730,16 @@ function detectFramework(pkgJsonStr, pm) {
2284
2730
  ...pkg.dependencies,
2285
2731
  ...pkg.devDependencies
2286
2732
  };
2733
+ const scripts = pkg.scripts ?? {};
2287
2734
  const hasReact = !!allDeps["react"];
2735
+ const buildCmd = scripts.build ? pmRun(pm, "build") : null;
2736
+ const startCmd = scripts.start ? pmRun(pm, "start") : null;
2288
2737
  if (allDeps["next"]) return {
2289
2738
  name: "nextjs",
2290
2739
  label: "Next.js",
2291
2740
  type: "WEB",
2292
- buildCommand: pmRun(pm, "build"),
2293
- startCommand: pmRun(pm, "start"),
2741
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2742
+ startCommand: startCmd ?? pmRun(pm, "start"),
2294
2743
  outputDir: ".next",
2295
2744
  port: 3e3
2296
2745
  };
@@ -2298,8 +2747,8 @@ function detectFramework(pkgJsonStr, pm) {
2298
2747
  name: "nuxt",
2299
2748
  label: "Nuxt",
2300
2749
  type: "WEB",
2301
- buildCommand: pmRun(pm, "build"),
2302
- startCommand: pmRun(pm, "start"),
2750
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2751
+ startCommand: startCmd ?? pmRun(pm, "start"),
2303
2752
  outputDir: ".output",
2304
2753
  port: 3e3
2305
2754
  };
@@ -2307,8 +2756,8 @@ function detectFramework(pkgJsonStr, pm) {
2307
2756
  name: "remix",
2308
2757
  label: "Remix",
2309
2758
  type: "WEB",
2310
- buildCommand: pmRun(pm, "build"),
2311
- startCommand: pmRun(pm, "start"),
2759
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2760
+ startCommand: startCmd ?? pmRun(pm, "start"),
2312
2761
  outputDir: "build",
2313
2762
  port: 3e3
2314
2763
  };
@@ -2316,7 +2765,7 @@ function detectFramework(pkgJsonStr, pm) {
2316
2765
  name: "astro",
2317
2766
  label: "Astro",
2318
2767
  type: "STATIC",
2319
- buildCommand: pmRun(pm, "build"),
2768
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2320
2769
  startCommand: null,
2321
2770
  outputDir: "dist",
2322
2771
  port: 3e3
@@ -2325,8 +2774,8 @@ function detectFramework(pkgJsonStr, pm) {
2325
2774
  name: "sveltekit",
2326
2775
  label: "SvelteKit",
2327
2776
  type: "WEB",
2328
- buildCommand: pmRun(pm, "build"),
2329
- startCommand: pmRun(pm, "preview"),
2777
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2778
+ startCommand: scripts.preview ? pmRun(pm, "preview") : startCmd ?? pmRun(pm, "start"),
2330
2779
  outputDir: "build",
2331
2780
  port: 3e3
2332
2781
  };
@@ -2334,7 +2783,7 @@ function detectFramework(pkgJsonStr, pm) {
2334
2783
  name: "gatsby",
2335
2784
  label: "Gatsby",
2336
2785
  type: "STATIC",
2337
- buildCommand: pmRun(pm, "build"),
2786
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2338
2787
  startCommand: null,
2339
2788
  outputDir: "public",
2340
2789
  port: 3e3
@@ -2343,8 +2792,8 @@ function detectFramework(pkgJsonStr, pm) {
2343
2792
  name: "angular",
2344
2793
  label: "Angular",
2345
2794
  type: "WEB",
2346
- buildCommand: pmRun(pm, "build"),
2347
- startCommand: pmRun(pm, "start"),
2795
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2796
+ startCommand: startCmd ?? pmRun(pm, "start"),
2348
2797
  outputDir: "dist",
2349
2798
  port: 4200
2350
2799
  };
@@ -2352,8 +2801,8 @@ function detectFramework(pkgJsonStr, pm) {
2352
2801
  name: "hono",
2353
2802
  label: "Hono",
2354
2803
  type: "WEB",
2355
- buildCommand: pmRun(pm, "build"),
2356
- startCommand: pmRun(pm, "start"),
2804
+ buildCommand: buildCmd,
2805
+ startCommand: startCmd,
2357
2806
  outputDir: null,
2358
2807
  port: 3e3
2359
2808
  };
@@ -2361,8 +2810,8 @@ function detectFramework(pkgJsonStr, pm) {
2361
2810
  name: "express",
2362
2811
  label: "Express",
2363
2812
  type: "WEB",
2364
- buildCommand: pmRun(pm, "build"),
2365
- startCommand: pmRun(pm, "start"),
2813
+ buildCommand: buildCmd,
2814
+ startCommand: startCmd,
2366
2815
  outputDir: null,
2367
2816
  port: 3e3
2368
2817
  };
@@ -2370,8 +2819,8 @@ function detectFramework(pkgJsonStr, pm) {
2370
2819
  name: "fastify",
2371
2820
  label: "Fastify",
2372
2821
  type: "WEB",
2373
- buildCommand: pmRun(pm, "build"),
2374
- startCommand: pmRun(pm, "start"),
2822
+ buildCommand: buildCmd,
2823
+ startCommand: startCmd,
2375
2824
  outputDir: null,
2376
2825
  port: 3e3
2377
2826
  };
@@ -2379,8 +2828,8 @@ function detectFramework(pkgJsonStr, pm) {
2379
2828
  name: "nestjs",
2380
2829
  label: "NestJS",
2381
2830
  type: "WEB",
2382
- buildCommand: pmRun(pm, "build"),
2383
- startCommand: pmRun(pm, "start:prod"),
2831
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2832
+ startCommand: scripts["start:prod"] ? pmRun(pm, "start:prod") : startCmd ?? pmRun(pm, "start:prod"),
2384
2833
  outputDir: "dist",
2385
2834
  port: 3e3
2386
2835
  };
@@ -2388,7 +2837,7 @@ function detectFramework(pkgJsonStr, pm) {
2388
2837
  name: hasReact ? "vite-react" : "vite",
2389
2838
  label: hasReact ? "Vite + React" : "Vite",
2390
2839
  type: "STATIC",
2391
- buildCommand: pmRun(pm, "build"),
2840
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2392
2841
  startCommand: null,
2393
2842
  outputDir: "dist",
2394
2843
  port: 3e3
@@ -2397,17 +2846,17 @@ function detectFramework(pkgJsonStr, pm) {
2397
2846
  name: "cra",
2398
2847
  label: "Create React App",
2399
2848
  type: "STATIC",
2400
- buildCommand: pmRun(pm, "build"),
2849
+ buildCommand: buildCmd ?? pmRun(pm, "build"),
2401
2850
  startCommand: null,
2402
2851
  outputDir: "build",
2403
2852
  port: 3e3
2404
2853
  };
2405
- if (pkg.scripts?.build || pkg.scripts?.start) return {
2854
+ if (buildCmd || startCmd) return {
2406
2855
  name: "node",
2407
2856
  label: "Node.js",
2408
2857
  type: "WEB",
2409
- buildCommand: pkg.scripts?.build ? pmRun(pm, "build") : "",
2410
- startCommand: pkg.scripts?.start ? pmRun(pm, "start") : "node index.js",
2858
+ buildCommand: buildCmd ?? "",
2859
+ startCommand: startCmd,
2411
2860
  outputDir: "dist",
2412
2861
  port: 3e3
2413
2862
  };
@@ -2549,6 +2998,104 @@ async function getFilesToUpload(directory) {
2549
2998
  await walk(directory);
2550
2999
  return files;
2551
3000
  }
3001
+ /**
3002
+ * Create a base tarball from the project directory (no extra files).
3003
+ * This is the expensive operation (file walk + gzip) that should happen once.
3004
+ */
3005
+ async function createBaseTarball(directory) {
3006
+ const tempDir = await mkdtemp(join(tmpdir(), "veloz-base-"));
3007
+ const tarPath = join(tempDir, "source.tar.gz");
3008
+ try {
3009
+ const files = await getFilesToUpload(directory);
3010
+ if (files.length === 0) throw new Error("No files to upload");
3011
+ const relativePaths = files.map((f) => relative(directory, f));
3012
+ await tar.create({
3013
+ gzip: true,
3014
+ file: tarPath,
3015
+ cwd: directory
3016
+ }, relativePaths);
3017
+ return {
3018
+ tarPath,
3019
+ tempDir,
3020
+ relativePaths,
3021
+ directory
3022
+ };
3023
+ } catch (err) {
3024
+ await rm(tempDir, {
3025
+ recursive: true,
3026
+ force: true
3027
+ }).catch(() => {});
3028
+ throw err;
3029
+ }
3030
+ }
3031
+ /**
3032
+ * Create a service-specific tarball. If extraFiles need injection, creates
3033
+ * an overlay tarball using hardlinks from the base. Otherwise, returns the
3034
+ * base tarball path directly.
3035
+ */
3036
+ async function createServiceTarball(base, extraFiles) {
3037
+ const injectedFiles = [];
3038
+ if (extraFiles) for (const file of extraFiles) {
3039
+ if (existsSync(join(base.directory, file.name))) continue;
3040
+ injectedFiles.push(file);
3041
+ }
3042
+ if (injectedFiles.length === 0) return {
3043
+ tarPath: base.tarPath,
3044
+ tempDir: null
3045
+ };
3046
+ const tempDir = await mkdtemp(join(tmpdir(), "veloz-overlay-"));
3047
+ const tarPath = join(tempDir, "source.tar.gz");
3048
+ const stagingDir = join(tempDir, "staging");
3049
+ await mkdir(stagingDir, { recursive: true });
3050
+ try {
3051
+ const relativePaths = [...base.relativePaths];
3052
+ for (const rel of relativePaths) {
3053
+ const src = join(base.directory, rel);
3054
+ const dest = join(stagingDir, rel);
3055
+ await mkdir(dirname(dest), { recursive: true });
3056
+ await link(src, dest);
3057
+ }
3058
+ for (const file of injectedFiles) {
3059
+ writeFileSync(join(stagingDir, file.name), file.content);
3060
+ if (!relativePaths.includes(file.name)) relativePaths.push(file.name);
3061
+ }
3062
+ await tar.create({
3063
+ gzip: true,
3064
+ file: tarPath,
3065
+ cwd: stagingDir
3066
+ }, relativePaths);
3067
+ return {
3068
+ tarPath,
3069
+ tempDir
3070
+ };
3071
+ } catch (err) {
3072
+ await rm(tempDir, {
3073
+ recursive: true,
3074
+ force: true
3075
+ }).catch(() => {});
3076
+ throw err;
3077
+ }
3078
+ }
3079
+ /** Upload a pre-built tarball for a deployment. */
3080
+ async function uploadTarball(deploymentId, tarPath) {
3081
+ const client = await getClient();
3082
+ const { uploadUrl } = await client.deployments.getUploadUrl({ deploymentId });
3083
+ const fileBuffer = readFileSync(tarPath);
3084
+ const putResponse = await fetch(uploadUrl, {
3085
+ method: "PUT",
3086
+ headers: { "Content-Type": "application/gzip" },
3087
+ body: fileBuffer
3088
+ });
3089
+ if (!putResponse.ok) throw new Error(`Upload falhou: ${putResponse.status}`);
3090
+ await client.deployments.startBuild({ deploymentId });
3091
+ }
3092
+ /** Clean up shared tarball temp directory. */
3093
+ async function cleanupTarball(base) {
3094
+ await rm(base.tempDir, {
3095
+ recursive: true,
3096
+ force: true
3097
+ }).catch(() => {});
3098
+ }
2552
3099
  async function createTarball(directory, extraFiles) {
2553
3100
  const tempDir = await mkdtemp(join(tmpdir(), "veloz-upload-"));
2554
3101
  const tarPath = join(tempDir, "source.tar.gz");
@@ -2628,36 +3175,13 @@ async function calculateDirectorySize(directory) {
2628
3175
  return totalSize;
2629
3176
  }
2630
3177
 
2631
- //#endregion
2632
- //#region src/lib/retry.ts
2633
- async function withRetry(fn, maxRetries = 3) {
2634
- for (let attempt = 0; attempt <= maxRetries; attempt++) try {
2635
- return await fn();
2636
- } catch (error) {
2637
- if (attempt >= maxRetries) throw error;
2638
- const rateLimit = isRateLimitError(error);
2639
- if (rateLimit) {
2640
- const waitMs = Math.min(rateLimit.retryAfterMs, 3e4);
2641
- await new Promise((r) => {
2642
- setTimeout(r, waitMs);
2643
- });
2644
- } else {
2645
- const delay = Math.min(1e3 * Math.pow(2, attempt), 1e4);
2646
- await new Promise((r) => {
2647
- setTimeout(r, delay);
2648
- });
2649
- }
2650
- }
2651
- throw new Error("Max retries exceeded");
2652
- }
2653
-
2654
3178
  //#endregion
2655
3179
  //#region src/lib/deploy-constants.ts
2656
3180
  const statusLabels = {
2657
3181
  QUEUED: "Na fila",
2658
3182
  BUILDING: "Compilando",
2659
- BUILD_FAILED: "Falha na construção",
2660
- DEPLOYING: "Implantando",
3183
+ BUILD_FAILED: "Falha na compilação",
3184
+ DEPLOYING: "Realizando deploy",
2661
3185
  LIVE: "Ativo",
2662
3186
  FAILED: "Falhou",
2663
3187
  CANCELLED: "Cancelado"
@@ -2693,33 +3217,7 @@ const TERMINAL_STATUSES = new Set([
2693
3217
  ]);
2694
3218
 
2695
3219
  //#endregion
2696
- //#region src/lib/deploy-cancel.ts
2697
- const activeDeploymentIds = /* @__PURE__ */ new Set();
2698
- let sigintHandlerRegistered = false;
2699
- function trackDeployment(deploymentId) {
2700
- activeDeploymentIds.add(deploymentId);
2701
- }
2702
- function untrackDeployment(deploymentId) {
2703
- activeDeploymentIds.delete(deploymentId);
2704
- }
2705
- function setupSigintHandler() {
2706
- if (sigintHandlerRegistered) return;
2707
- sigintHandlerRegistered = true;
2708
- process.on("SIGINT", async () => {
2709
- if (activeDeploymentIds.size === 0) process.exit(130);
2710
- if (process.stdout.isTTY) console.log(chalk.yellow("\n\nCancelando deploy(s)..."));
2711
- try {
2712
- const client = await getClient();
2713
- const cancelPromises = Array.from(activeDeploymentIds).map((deploymentId) => client.deployments.cancel({ deploymentId }).catch(() => {}));
2714
- await Promise.all(cancelPromises);
2715
- if (process.stdout.isTTY) console.log(chalk.yellow("Deploy cancelado."));
2716
- } catch {}
2717
- process.exit(130);
2718
- });
2719
- }
2720
-
2721
- //#endregion
2722
- //#region src/lib/deploy-parallel.ts
3220
+ //#region src/lib/deploy-stream.ts
2723
3221
  async function fetchDeployUrls$1(client, serviceId) {
2724
3222
  try {
2725
3223
  return (await client.domains.list({ serviceId })).map((d) => `https://${d.domain}`);
@@ -2743,256 +3241,6 @@ function getFailureHints$1(status) {
2743
3241
  default: return ["Execute 'veloz logs -f' para mais detalhes."];
2744
3242
  }
2745
3243
  }
2746
- function renderProgress(progressMap, prevLineCount) {
2747
- for (let i = 0; i < prevLineCount; i++) process.stdout.write("\x1B[1A\x1B[2K");
2748
- let lineCount = 0;
2749
- for (const [, progress] of progressMap) {
2750
- const icon = statusIcons[progress.status] || chalk.gray("○");
2751
- const label = statusLabels[progress.status] || progress.status;
2752
- process.stdout.write(`${icon} ${chalk.bold(progress.serviceName)}: ${label}\n`);
2753
- lineCount++;
2754
- if (progress.status === "BUILDING" || progress.status === "BUILD_FAILED") {
2755
- const nonEmptyLines = progress.logLines.filter((l) => l.trim());
2756
- if (nonEmptyLines.length > 0) {
2757
- const tail = nonEmptyLines.slice(-3);
2758
- for (const line of tail) {
2759
- process.stdout.write(` ${chalk.dim(line)}\n`);
2760
- lineCount++;
2761
- }
2762
- } else if (progress.status === "BUILDING") {
2763
- process.stdout.write(` ${chalk.dim("Aguardando logs do build...")}\n`);
2764
- lineCount++;
2765
- }
2766
- } else if (progress.status === "QUEUED") {
2767
- process.stdout.write(` ${chalk.dim("Na fila para construção...")}\n`);
2768
- lineCount++;
2769
- }
2770
- }
2771
- return lineCount;
2772
- }
2773
- async function deployServicesInParallel(services) {
2774
- const client = await getClient();
2775
- const mcp = isMcpMode();
2776
- const isTTY = !mcp && process.stdout.isTTY;
2777
- const isGHA = !mcp && process.env.GITHUB_ACTIONS === "true";
2778
- if (mcp) log(`[deploy] Iniciando deploy de ${services.length} serviço(s)`);
2779
- else if (isGHA) process.stdout.write(`Iniciando deploy de ${services.length} serviço(s)\n`);
2780
- else if (isTTY) console.log(chalk.cyan(`\nIniciando deploy de ${services.length} serviço(s)...\n`));
2781
- else process.stdout.write(`Iniciando deploy de ${services.length} serviço(s)\n`);
2782
- setupSigintHandler();
2783
- const progressMap = /* @__PURE__ */ new Map();
2784
- const projectRoot = process.cwd();
2785
- const sizeInBytes = await calculateDirectorySize(projectRoot);
2786
- const sizeMB = Math.round(sizeInBytes / (1024 * 1024) * 10) / 10;
2787
- const deploymentPromises = services.map(async (service) => {
2788
- try {
2789
- const deployment = await withRetry(() => client.deployments.create({
2790
- serviceId: service.serviceId,
2791
- serviceConfig: service.serviceConfig
2792
- }));
2793
- await withRetry(() => uploadSource(deployment.id, projectRoot, service.extraFiles));
2794
- trackDeployment(deployment.id);
2795
- progressMap.set(service.serviceId, {
2796
- serviceName: service.serviceName,
2797
- deploymentId: deployment.id,
2798
- status: "QUEUED",
2799
- logLines: [],
2800
- completed: false,
2801
- success: false
2802
- });
2803
- if (mcp) log(`[deploy] ✓ ${service.serviceName}: Upload concluído (${sizeMB} MB)`);
2804
- else if (isGHA) process.stdout.write(`✓ ${service.serviceName}: Upload concluído (${sizeMB} MB)\n`);
2805
- else if (isTTY) console.log(`${chalk.green("✓")} ${chalk.bold(service.serviceName)}: Upload concluído ${chalk.dim(`(${sizeMB} MB)`)}`);
2806
- else process.stdout.write(`✓ ${service.serviceName}: Upload concluído (${sizeMB} MB)\n`);
2807
- return {
2808
- service,
2809
- deploymentId: deployment.id
2810
- };
2811
- } catch (error) {
2812
- if (mcp) log(`[deploy] ✗ ${service.serviceName}: Falha ao iniciar deploy`);
2813
- else if (isGHA) process.stdout.write(`::error::${service.serviceName}: Falha ao iniciar deploy\n`);
2814
- else if (isTTY) console.log(`${chalk.red("✗")} ${chalk.bold(service.serviceName)}: Falha ao iniciar deploy`);
2815
- else process.stdout.write(`✗ ${service.serviceName}: Falha ao iniciar deploy\n`);
2816
- progressMap.set(service.serviceId, {
2817
- serviceName: service.serviceName,
2818
- deploymentId: "",
2819
- status: "FAILED",
2820
- logLines: [],
2821
- completed: true,
2822
- success: false
2823
- });
2824
- throw error;
2825
- }
2826
- });
2827
- const activeDeployments = (await Promise.allSettled(deploymentPromises)).filter((d) => d.status === "fulfilled").map((d) => d.value);
2828
- if (activeDeployments.length === 0) {
2829
- if (mcp) throw new Error("Todos os deploys falharam ao iniciar.");
2830
- else if (isGHA) process.stdout.write("::error::Todos os deploys falharam ao iniciar.\n");
2831
- else if (isTTY) console.error(chalk.red("\n✗ Todos os deploys falharam ao iniciar."));
2832
- else process.stdout.write("✗ Todos os deploys falharam ao iniciar.\n");
2833
- process.exit(1);
2834
- }
2835
- if (mcp) log(`[deploy] Monitorando ${activeDeployments.length} deploy(s)`);
2836
- else if (isGHA) process.stdout.write(`Monitorando ${activeDeployments.length} deploy(s)\n`);
2837
- else if (isTTY) {
2838
- console.log(chalk.cyan(`\nMonitorando progresso dos deploys:\n`));
2839
- console.log(chalk.dim("─".repeat(50)) + "\n");
2840
- } else process.stdout.write(`Monitorando ${activeDeployments.length} deploy(s)\n`);
2841
- let lineCount = 0;
2842
- if (isTTY) lineCount = renderProgress(progressMap, lineCount);
2843
- const streamPromises = activeDeployments.map(async ({ service, deploymentId }) => {
2844
- try {
2845
- await new Promise((resolve$1) => {
2846
- setTimeout(resolve$1, 1e3);
2847
- });
2848
- const stream = await client.logs.streamBuildLogs({ deploymentId });
2849
- for await (const event of stream) {
2850
- const progress = progressMap.get(service.serviceId);
2851
- if (!progress) continue;
2852
- if (event.type === "status") {
2853
- progress.status = event.content;
2854
- if (TERMINAL_STATUSES.has(event.content)) {
2855
- progress.completed = true;
2856
- progress.success = event.content === "LIVE";
2857
- }
2858
- if (mcp) {
2859
- const label = statusLabels[event.content] || event.content;
2860
- log(`[deploy] [${service.serviceName}] ${label}`);
2861
- } else if (!isTTY) {
2862
- const label = statusLabels[event.content] || event.content;
2863
- process.stdout.write(`[${service.serviceName}] ${label}\n`);
2864
- }
2865
- } else if (event.type === "log") {
2866
- const newLines = event.content.split("\n").filter((l) => l.trim());
2867
- progress.logLines.push(...newLines);
2868
- if (mcp) for (const line of newLines) log(`[build] [${service.serviceName}] ${line}`);
2869
- else if (!isTTY) for (const line of newLines) process.stdout.write(`[${service.serviceName}] ${line}\n`);
2870
- }
2871
- if (isTTY) lineCount = renderProgress(progressMap, lineCount);
2872
- }
2873
- } catch (error) {
2874
- const errorMessage = error instanceof Error ? error.message : String(error);
2875
- const progress = progressMap.get(service.serviceId);
2876
- if (progress && !progress.completed) {
2877
- if (mcp) log(`[deploy] ✗ Erro no streaming de logs para ${service.serviceName}: ${errorMessage}`);
2878
- else if (isGHA) process.stdout.write(`::error::Erro no streaming de logs para ${service.serviceName}: ${errorMessage}\n`);
2879
- else if (!isTTY) process.stdout.write(`✗ Erro no streaming de logs para ${service.serviceName}: ${errorMessage}\n`);
2880
- progress.status = "FAILED";
2881
- progress.completed = true;
2882
- if (isTTY) lineCount = renderProgress(progressMap, lineCount);
2883
- }
2884
- } finally {
2885
- untrackDeployment(deploymentId);
2886
- }
2887
- });
2888
- await Promise.all(streamPromises);
2889
- if (isTTY) renderProgress(progressMap, lineCount);
2890
- const successfulEntries = Array.from(progressMap.entries()).filter(([, p]) => p.success);
2891
- const failedEntries = Array.from(progressMap.entries()).filter(([, p]) => !p.success);
2892
- const urlMap = /* @__PURE__ */ new Map();
2893
- await Promise.all(successfulEntries.map(async ([serviceId]) => {
2894
- const urls = await fetchDeployUrls$1(client, serviceId);
2895
- if (urls.length > 0) urlMap.set(serviceId, urls);
2896
- }));
2897
- if (isTTY) console.log(chalk.dim("\n" + "─".repeat(50)));
2898
- if (successfulEntries.length > 0) if (mcp) {
2899
- log(`[deploy] ✓ ${successfulEntries.length} serviço(s) implantado(s) com sucesso`);
2900
- for (const [serviceId, progress] of successfulEntries) {
2901
- log(`[deploy] ✓ ${progress.serviceName}`);
2902
- for (const url of urlMap.get(serviceId) ?? []) log(`[deploy] ${url}`);
2903
- }
2904
- } else if (isGHA) {
2905
- process.stdout.write(`\n✓ ${successfulEntries.length} serviço(s) implantado(s) com sucesso\n`);
2906
- for (const [serviceId, progress] of successfulEntries) {
2907
- process.stdout.write(` ✓ ${progress.serviceName}\n`);
2908
- for (const url of urlMap.get(serviceId) ?? []) process.stdout.write(` ${url}\n`);
2909
- }
2910
- } else if (isTTY) {
2911
- console.log(chalk.green(`\n✓ ${successfulEntries.length} serviço(s) implantado(s) com sucesso:\n`));
2912
- for (const [serviceId, progress] of successfulEntries) {
2913
- console.log(` ${chalk.green("✓")} ${chalk.bold(progress.serviceName)}`);
2914
- for (const url of urlMap.get(serviceId) ?? []) console.log(` ${chalk.cyan(url)}`);
2915
- }
2916
- } else {
2917
- process.stdout.write(`\n${successfulEntries.length} serviço(s) implantado(s) com sucesso:\n`);
2918
- for (const [serviceId, progress] of successfulEntries) {
2919
- process.stdout.write(` ✓ ${progress.serviceName}\n`);
2920
- for (const url of urlMap.get(serviceId) ?? []) process.stdout.write(` ${url}\n`);
2921
- }
2922
- }
2923
- if (failedEntries.length > 0) if (mcp) {
2924
- log(`[deploy] ✗ ${failedEntries.length} serviço(s) falhou(aram)`);
2925
- for (const [, progress] of failedEntries) {
2926
- log(`[deploy] ✗ ${progress.serviceName} (${progress.status})`);
2927
- const hints = getFailureHints$1(progress.status);
2928
- for (const hint of hints) log(`[deploy] → ${hint}`);
2929
- }
2930
- } else if (isGHA) {
2931
- process.stdout.write(`\n✗ ${failedEntries.length} serviço(s) falhou(aram)\n`);
2932
- for (const [, progress] of failedEntries) {
2933
- process.stdout.write(`::error::${progress.serviceName} falhou (${progress.status})\n`);
2934
- const hints = getFailureHints$1(progress.status);
2935
- for (const hint of hints) process.stdout.write(` ${hint}\n`);
2936
- }
2937
- } else if (isTTY) {
2938
- console.log(chalk.red(`\n✗ ${failedEntries.length} serviço(s) falhou(aram):\n`));
2939
- for (const [, progress] of failedEntries) {
2940
- console.log(` ${chalk.red("✗")} ${chalk.bold(progress.serviceName)} ${chalk.dim(`(${progress.status})`)}`);
2941
- if (progress.logLines.length > 0) {
2942
- console.log(chalk.red(` ${"─".repeat(50)}`));
2943
- console.log(chalk.red.bold(" Logs de build:"));
2944
- console.log(chalk.red(` ${"─".repeat(50)}`));
2945
- for (const line of progress.logLines) if (line.trim()) console.log(` ${chalk.dim(line)}`);
2946
- console.log(chalk.red(` ${"─".repeat(50)}`));
2947
- }
2948
- const hints = getFailureHints$1(progress.status);
2949
- for (const hint of hints) console.log(chalk.yellow(` → ${hint}`));
2950
- }
2951
- } else {
2952
- process.stdout.write(`\n${failedEntries.length} serviço(s) falhou(aram):\n`);
2953
- for (const [, progress] of failedEntries) {
2954
- process.stdout.write(`\n ✗ ${progress.serviceName} (${progress.status})\n`);
2955
- const hints = getFailureHints$1(progress.status);
2956
- for (const hint of hints) process.stdout.write(` → ${hint}\n`);
2957
- }
2958
- }
2959
- if (successfulEntries.length > 0) info("\nUse 'veloz logs -f' para acompanhar os logs de execução.");
2960
- const results = [];
2961
- for (const [serviceId, progress] of progressMap) results.push({
2962
- status: progress.status,
2963
- logs: progress.logLines.filter((l) => l.trim()),
2964
- urls: urlMap.get(serviceId) ?? [],
2965
- serviceName: progress.serviceName
2966
- });
2967
- if (failedEntries.length > 0 && !mcp) process.exit(1);
2968
- return results;
2969
- }
2970
-
2971
- //#endregion
2972
- //#region src/lib/deploy-stream.ts
2973
- async function fetchDeployUrls(client, serviceId) {
2974
- try {
2975
- return (await client.domains.list({ serviceId })).map((d) => `https://${d.domain}`);
2976
- } catch {
2977
- return [];
2978
- }
2979
- }
2980
- function getFailureHints(status) {
2981
- switch (status) {
2982
- case "BUILD_FAILED": return [
2983
- "Verifique os logs de build acima para erros de compilação",
2984
- "Teste o build localmente: rode o comando de build do seu projeto",
2985
- "Use 'veloz config show' para verificar as configurações"
2986
- ];
2987
- case "DEPLOY_FAILED": return [
2988
- "O build passou mas o serviço falhou ao iniciar",
2989
- "Verifique se a porta configurada está correta: 'veloz config show'",
2990
- "Veja os logs de runtime: 'veloz logs -f'"
2991
- ];
2992
- case "CANCELLED": return ["Deploy cancelado. Execute 'veloz deploy' para tentar novamente."];
2993
- default: return ["Execute 'veloz logs -f' para mais detalhes."];
2994
- }
2995
- }
2996
3244
  /** Raw BuildKit line: `#N content` */
2997
3245
  const BUILDKIT_PREFIX_RE = /^#(\d+)\s+(.*)/;
2998
3246
  /** Docker build step: `[stage step/total] COMMAND` */
@@ -3374,7 +3622,7 @@ async function streamDeploymentLogs(deploymentId, serviceId, serviceName) {
3374
3622
  }
3375
3623
  if (isGHA) endGroup();
3376
3624
  if (renderer) renderer.stopSpinner();
3377
- const urls = finalStatus === "LIVE" ? await fetchDeployUrls(client, serviceId) : [];
3625
+ const urls = finalStatus === "LIVE" ? await fetchDeployUrls$1(client, serviceId) : [];
3378
3626
  if (finalStatus === "LIVE") {
3379
3627
  if (buildSpinner) {
3380
3628
  buildSpinner.stop();
@@ -3388,7 +3636,7 @@ async function streamDeploymentLogs(deploymentId, serviceId, serviceName) {
3388
3636
  buildSpinner = null;
3389
3637
  }
3390
3638
  const label = statusLabels[finalStatus] ?? finalStatus;
3391
- const hints = getFailureHints(finalStatus);
3639
+ const hints = getFailureHints$1(finalStatus);
3392
3640
  if (mcp) {
3393
3641
  log(`✗ Deploy finalizou: ${label}`);
3394
3642
  for (const hint of hints) log(` → ${hint}`);
@@ -3418,63 +3666,31 @@ async function streamDeploymentLogs(deploymentId, serviceId, serviceName) {
3418
3666
  serviceName
3419
3667
  };
3420
3668
  }
3421
- /**
3422
- * MCP streaming variant — yields progress events as an async generator.
3423
- * Each yield becomes an MCP `notifications/progress` message if the client
3424
- * provides a progressToken.
3425
- */
3426
- async function* streamDeploymentLogsMcp(deploymentId, serviceId, serviceName) {
3427
- const client = await getClient();
3428
- const allLogLines = [];
3429
- let finalStatus = "";
3430
- yield {
3431
- type: "status",
3432
- message: serviceName ? `Build: ${serviceName}` : "Build iniciando..."
3433
- };
3434
- try {
3435
- const stream = await client.logs.streamBuildLogs({ deploymentId });
3436
- for await (const event of stream) if (event.type === "status") {
3437
- const label = statusLabels[event.content] ?? event.content;
3438
- finalStatus = event.content;
3439
- yield {
3440
- type: "status",
3441
- message: label
3442
- };
3443
- } else if (event.type === "log") {
3444
- const lines = event.content.split("\n").filter((l) => l.trim());
3445
- allLogLines.push(...lines);
3446
- for (const line of lines) yield {
3447
- type: "log",
3448
- message: line
3449
- };
3450
- }
3451
- } catch {
3669
+
3670
+ //#endregion
3671
+ //#region src/lib/deploy-cancel.ts
3672
+ const activeDeploymentIds = /* @__PURE__ */ new Set();
3673
+ let sigintHandlerRegistered = false;
3674
+ function trackDeployment(deploymentId) {
3675
+ activeDeploymentIds.add(deploymentId);
3676
+ }
3677
+ function untrackDeployment(deploymentId) {
3678
+ activeDeploymentIds.delete(deploymentId);
3679
+ }
3680
+ function setupSigintHandler() {
3681
+ if (sigintHandlerRegistered) return;
3682
+ sigintHandlerRegistered = true;
3683
+ process.on("SIGINT", async () => {
3684
+ if (activeDeploymentIds.size === 0) process.exit(130);
3685
+ if (process.stdout.isTTY) console.log(chalk.yellow("\n\nCancelando deploy(s)..."));
3452
3686
  try {
3453
- finalStatus = (await client.deployments.get({ deploymentId })).status;
3454
- try {
3455
- const logs = await client.logs.getBuildLogs({ deploymentId });
3456
- if (logs.buildLogs) {
3457
- const lines = logs.buildLogs.split("\n").filter((l) => l.trim());
3458
- allLogLines.push(...lines);
3459
- for (const line of lines) yield {
3460
- type: "log",
3461
- message: line
3462
- };
3463
- }
3464
- } catch {}
3687
+ const client = await getClient();
3688
+ const cancelPromises = Array.from(activeDeploymentIds).map((deploymentId) => client.deployments.cancel({ deploymentId }).catch(() => {}));
3689
+ await Promise.all(cancelPromises);
3690
+ if (process.stdout.isTTY) console.log(chalk.yellow("Deploy cancelado."));
3465
3691
  } catch {}
3466
- }
3467
- const urls = finalStatus === "LIVE" ? await fetchDeployUrls(client, serviceId) : [];
3468
- yield {
3469
- type: "result",
3470
- message: finalStatus === "LIVE" ? "Deploy concluído" : `Deploy finalizou: ${finalStatus}`,
3471
- data: {
3472
- status: finalStatus,
3473
- logs: allLogLines,
3474
- urls,
3475
- serviceName
3476
- }
3477
- };
3692
+ process.exit(130);
3693
+ });
3478
3694
  }
3479
3695
 
3480
3696
  //#endregion
@@ -3486,7 +3702,7 @@ const LOGO_LINES = [
3486
3702
  ];
3487
3703
  const BRAND_COLOR = "#FF4D00";
3488
3704
  function getVersion() {
3489
- return "0.0.0-beta.16";
3705
+ return "0.0.0-beta.18";
3490
3706
  }
3491
3707
  function printBanner(subtitle) {
3492
3708
  const version = getVersion();
@@ -3508,29 +3724,527 @@ function printBanner(subtitle) {
3508
3724
  console.log();
3509
3725
  }
3510
3726
 
3727
+ //#endregion
3728
+ //#region src/lib/retry.ts
3729
/**
 * Run `fn`, retrying on failure up to `maxRetries` additional times.
 * Rate-limit errors wait the server-suggested delay (capped at 30s);
 * any other error backs off exponentially (1s, 2s, 4s..., capped at 10s).
 * The last error is rethrown once the retry budget is exhausted.
 */
async function withRetry(fn, maxRetries = 3) {
  const sleep = (ms) => new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
  let attempt = 0;
  while (attempt <= maxRetries) {
    try {
      return await fn();
    } catch (error) {
      if (attempt >= maxRetries) throw error;
      const rateLimit = isRateLimitError(error);
      const waitMs = rateLimit ? Math.min(rateLimit.retryAfterMs, 3e4) : Math.min(1e3 * 2 ** attempt, 1e4);
      await sleep(waitMs);
    }
    attempt++;
  }
  // Unreachable: the loop either returns or rethrows on the final attempt.
  throw new Error("Max retries exceeded");
}
3749
+
3750
+ //#endregion
3751
+ //#region src/lib/deploy-core.ts
3752
/**
 * Resolve the public HTTPS URLs attached to a service via its domain list.
 * Any lookup failure degrades to an empty array instead of propagating.
 */
async function fetchDeployUrls(client, serviceId) {
  try {
    const domains = await client.domains.list({ serviceId });
    const urls = [];
    for (const entry of domains) urls.push(`https://${entry.domain}`);
    return urls;
  } catch {
    return [];
  }
}
3759
+ /**
3760
+ * Deploy a single service using the full streamDeploymentLogs experience
3761
+ * (BuildProgressRenderer with progress bars, spinner, runtime logs).
3762
+ */
3763
/**
 * Deploy exactly one service using the rich single-deploy experience
 * (streamDeploymentLogs renders progress bars, spinner and runtime logs).
 * Returns a one-element array so single and multi deploys share a shape.
 */
async function deploySingleService(service, options) {
  const { projectRoot } = options;
  const client = await getClient();
  // Size feeds only the post-upload message below.
  const totalBytes = await calculateDirectorySize(projectRoot);
  const sizeMB = Math.round(totalBytes / (1024 * 1024) * 10) / 10;
  const baseTarball = await createBaseTarball(projectRoot);
  const deployment = await withRetry(() => client.deployments.create({
    serviceId: service.serviceId,
    serviceConfig: service.serviceConfig
  }));
  const { tarPath, tempDir: overlayTempDir } = await createServiceTarball(baseTarball, service.extraFiles);
  try {
    await withRetry(() => uploadTarball(deployment.id, tarPath));
  } finally {
    // Always clean up the overlay dir and base tarball, even on upload failure.
    if (overlayTempDir) await rm(overlayTempDir, {
      recursive: true,
      force: true
    }).catch(() => {});
    await cleanupTarball(baseTarball);
  }
  if (sizeMB > 5) info(`Upload concluído (${sizeMB} MB)`);
  else success(`Upload concluído`);
  setupSigintHandler();
  trackDeployment(deployment.id);
  try {
    const outcome = await streamDeploymentLogs(deployment.id, service.serviceId, service.serviceName);
    return [outcome];
  } finally {
    untrackDeployment(deployment.id);
  }
}
3793
+ /**
3794
+ * Deploy multiple services in parallel with progress tracking.
3795
+ */
3796
/**
 * Deploy several services in parallel against one shared source tarball.
 *
 * Phase 1: create a deployment and upload a per-service overlay tarball.
 * Phase 2: stream build logs for every deployment that started, updating a
 * per-service progress map and rendering via the injected `output` strategy.
 * Exits with code 1 when no upload succeeded, or — outside MCP mode — when
 * at least one service ended in a non-LIVE state.
 */
async function deployMultipleServices(services, options) {
  const { projectRoot, output } = options;
  const client = await getClient();
  const isTTY = process.stdout.isTTY && !process.env.GITHUB_ACTIONS;
  setupSigintHandler();
  const totalBytes = await calculateDirectorySize(projectRoot);
  const sizeMB = Math.round(totalBytes / (1024 * 1024) * 10) / 10;
  const baseTarball = await createBaseTarball(projectRoot);
  const progressMap = new Map();
  // Phase 1: create + upload each deployment concurrently.
  const uploadTasks = services.map(async (service) => {
    try {
      const deployment = await withRetry(() => client.deployments.create({
        serviceId: service.serviceId,
        serviceConfig: service.serviceConfig
      }));
      const { tarPath, tempDir: overlayTempDir } = await createServiceTarball(baseTarball, service.extraFiles);
      try {
        await withRetry(() => uploadTarball(deployment.id, tarPath));
      } finally {
        if (overlayTempDir) await rm(overlayTempDir, {
          recursive: true,
          force: true
        }).catch(() => {});
      }
      trackDeployment(deployment.id);
      progressMap.set(service.serviceId, {
        serviceName: service.serviceName,
        status: "QUEUED",
        logLines: [],
        completed: false,
        success: false
      });
      output.uploadComplete(service.serviceName, sizeMB);
      return {
        service,
        deploymentId: deployment.id
      };
    } catch (error) {
      output.uploadFailed(service.serviceName);
      progressMap.set(service.serviceId, {
        serviceName: service.serviceName,
        status: "FAILED",
        logLines: [],
        completed: true,
        success: false
      });
      throw error;
    }
  });
  const settled = await Promise.allSettled(uploadTasks);
  await cleanupTarball(baseTarball);
  const activeDeployments = settled.filter((d) => d.status === "fulfilled").map((d) => d.value);
  if (activeDeployments.length === 0) {
    output.allUploadsFailed();
    process.exit(1);
  }
  output.monitoringStart(activeDeployments.length);
  if (isTTY) output.initProgress(progressMap);
  // Phase 2: follow build logs for every deployment that actually started.
  const streamTasks = activeDeployments.map(async ({ service, deploymentId }) => {
    try {
      // Small grace period so the server has the deployment registered.
      await new Promise((done) => {
        setTimeout(done, 1e3);
      });
      const stream = await client.logs.streamBuildLogs({ deploymentId });
      for await (const event of stream) {
        const entry = progressMap.get(service.serviceId);
        if (!entry) continue;
        if (event.type === "status") {
          entry.status = event.content;
          if (TERMINAL_STATUSES.has(event.content)) {
            entry.completed = true;
            entry.success = event.content === "LIVE";
          }
          if (!isTTY) output.statusUpdate(service.serviceName, event.content);
        } else if (event.type === "log") {
          const freshLines = event.content.split("\n").filter((l) => l.trim());
          entry.logLines.push(...freshLines);
          if (!isTTY) for (const line of freshLines) output.logLine(service.serviceName, line);
        }
        if (isTTY) output.renderProgress(progressMap);
      }
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      const entry = progressMap.get(service.serviceId);
      if (entry && !entry.completed) {
        output.streamError(service.serviceName, errorMessage);
        entry.status = "FAILED";
        entry.completed = true;
        if (isTTY) output.renderProgress(progressMap);
      }
    } finally {
      untrackDeployment(deploymentId);
    }
  });
  await Promise.all(streamTasks);
  if (isTTY) output.renderProgress(progressMap);
  const successfulEntries = Array.from(progressMap.entries()).filter(([, p]) => p.success);
  const failedEntries = Array.from(progressMap.entries()).filter(([, p]) => !p.success);
  const urlMap = new Map();
  await Promise.all(successfulEntries.map(async ([serviceId]) => {
    const urls = await fetchDeployUrls(client, serviceId);
    if (urls.length > 0) urlMap.set(serviceId, urls);
  }));
  const successful = successfulEntries.map(([serviceId, p]) => ({
    serviceId,
    serviceName: p.serviceName,
    status: p.status,
    logLines: p.logLines.filter((l) => l.trim()),
    urls: urlMap.get(serviceId) ?? []
  }));
  const failed = failedEntries.map(([serviceId, p]) => ({
    serviceId,
    serviceName: p.serviceName,
    status: p.status,
    logLines: p.logLines.filter((l) => l.trim()),
    urls: []
  }));
  output.printSummary(successful, failed);
  if (successful.length > 0) output.printFollowUp();
  const results = [];
  for (const [serviceId, entry] of progressMap) results.push({
    status: entry.status,
    logs: entry.logLines.filter((l) => l.trim()),
    urls: urlMap.get(serviceId) ?? [],
    serviceName: entry.serviceName
  });
  if (failed.length > 0 && !process.env.VELOZ_MCP) process.exit(1);
  return results;
}
3925
+ /**
3926
+ * Unified deploy function for 1-to-N services.
3927
+ *
3928
+ * - Single service: uses the full streamDeploymentLogs experience
3929
+ * (BuildProgressRenderer with progress bars, spinner, runtime logs)
3930
+ * - Multiple services: parallel deploy with compact progress tracking
3931
+ */
3932
/**
 * Unified deploy entry point for 1-to-N services.
 *
 * A single service gets the full streamDeploymentLogs experience; two or
 * more are deployed in parallel with compact progress tracking.
 */
async function deployServices(services, options) {
  return services.length === 1 ? deploySingleService(services[0], options) : deployMultipleServices(services, options);
}
3936
+
3937
+ //#endregion
3938
+ //#region src/lib/deploy-output-tty.ts
3939
/** Interactive-terminal output: colored, in-place progress re-rendering. */
var TtyOutput = class {
  // Number of lines printed by the previous render, to be erased next time.
  prevLineCount = 0;
  startDeploy(serviceCount) {
    console.log(chalk.cyan(`\nIniciando deploy de ${serviceCount} serviço(s)...\n`));
  }
  uploadComplete(serviceName, sizeMB) {
    console.log(`${chalk.green("✓")} ${chalk.bold(serviceName)}: Upload concluído ${chalk.dim(`(${sizeMB} MB)`)}`);
  }
  uploadFailed(serviceName) {
    console.log(`${chalk.red("✗")} ${chalk.bold(serviceName)}: Falha ao iniciar deploy`);
  }
  allUploadsFailed() {
    console.error(chalk.red("\n✗ Todos os deploys falharam ao iniciar."));
  }
  monitoringStart(_activeCount) {
    console.log(chalk.cyan(`\nMonitorando progresso dos deploys:\n`));
    console.log(chalk.dim("─".repeat(50)) + "\n");
  }
  statusUpdate(_serviceName, _status) {}
  logLine(_serviceName, _line) {}
  streamError(_serviceName, _error) {}
  initProgress(entries) {
    this.prevLineCount = this.doRenderProgress(entries, 0);
  }
  renderProgress(entries) {
    this.prevLineCount = this.doRenderProgress(entries, this.prevLineCount);
  }
  /**
   * Erase the previous render, then print one status line per service plus a
   * small tail of build logs (or a placeholder) under active/queued entries.
   * Returns the number of lines written so the next call can erase them.
   */
  doRenderProgress(progressMap, prevLineCount) {
    // Cursor-up + erase-line for each previously printed row.
    let toErase = prevLineCount;
    while (toErase-- > 0) process.stdout.write("\x1B[1A\x1B[2K");
    let lineCount = 0;
    for (const [, progress] of progressMap) {
      const icon = statusIcons[progress.status] || chalk.gray("○");
      const label = statusLabels[progress.status] || progress.status;
      process.stdout.write(`${icon} ${chalk.bold(progress.serviceName)}: ${label}\n`);
      lineCount++;
      const isBuildPhase = progress.status === "BUILDING" || progress.status === "BUILD_FAILED";
      if (isBuildPhase) {
        const nonEmptyLines = progress.logLines.filter((l) => l.trim());
        if (nonEmptyLines.length > 0) {
          // Show only the last three build-log lines to keep the view compact.
          for (const line of nonEmptyLines.slice(-3)) {
            process.stdout.write(`  ${chalk.dim(line)}\n`);
            lineCount++;
          }
        } else if (progress.status === "BUILDING") {
          process.stdout.write(`  ${chalk.dim("Aguardando logs do build...")}\n`);
          lineCount++;
        }
      } else if (progress.status === "QUEUED") {
        process.stdout.write(`  ${chalk.dim("Na fila para compilação...")}\n`);
        lineCount++;
      }
    }
    return lineCount;
  }
  buildStart(_serviceName) {}
  buildEnd() {}
  printSummary(successful, failed) {
    console.log(chalk.dim("\n" + "─".repeat(50)));
    if (successful.length > 0) {
      console.log(chalk.green(`\n✓ ${successful.length} serviço(s) com deploy concluído:\n`));
      for (const entry of successful) {
        console.log(`  ${chalk.green("✓")} ${chalk.bold(entry.serviceName)}`);
        for (const url of entry.urls) console.log(`    ${chalk.cyan(url)}`);
      }
    }
    if (failed.length > 0) {
      console.log(chalk.red(`\n✗ ${failed.length} serviço(s) falhou(aram):\n`));
      for (const entry of failed) {
        console.log(`  ${chalk.red("✗")} ${chalk.bold(entry.serviceName)} ${chalk.dim(`(${entry.status})`)}`);
        if (entry.logLines.length > 0) {
          console.log(chalk.red(`  ${"─".repeat(50)}`));
          console.log(chalk.red.bold("  Logs de build:"));
          console.log(chalk.red(`  ${"─".repeat(50)}`));
          for (const line of entry.logLines) if (line.trim()) console.log(`    ${chalk.dim(line)}`);
          console.log(chalk.red(`  ${"─".repeat(50)}`));
        }
        for (const hint of getFailureHints(entry.status)) console.log(chalk.yellow(`  → ${hint}`));
      }
    }
  }
  printFollowUp() {
    console.log(chalk.cyan("\nℹ Use 'veloz logs -f' para acompanhar os logs de execução."));
  }
};
4024
+
4025
+ //#endregion
4026
+ //#region src/lib/deploy-output-plain.ts
4027
/** Non-interactive output: one plain, uncolored line per event. */
var PlainOutput = class {
  // Write a single newline-terminated line to stdout.
  #line(text) {
    process.stdout.write(`${text}\n`);
  }
  startDeploy(serviceCount) {
    this.#line(`Iniciando deploy de ${serviceCount} serviço(s)`);
  }
  uploadComplete(serviceName, sizeMB) {
    this.#line(`✓ ${serviceName}: Upload concluído (${sizeMB} MB)`);
  }
  uploadFailed(serviceName) {
    this.#line(`✗ ${serviceName}: Falha ao iniciar deploy`);
  }
  allUploadsFailed() {
    this.#line("✗ Todos os deploys falharam ao iniciar.");
  }
  monitoringStart(activeCount) {
    this.#line(`Monitorando ${activeCount} deploy(s)`);
  }
  statusUpdate(serviceName, status) {
    this.#line(`[${serviceName}] ${statusLabels[status] || status}`);
  }
  logLine(serviceName, line) {
    this.#line(`[${serviceName}] ${line}`);
  }
  streamError(serviceName, error) {
    this.#line(`✗ Erro no streaming de logs para ${serviceName}: ${error}`);
  }
  initProgress(_entries) {}
  renderProgress(_entries) {}
  buildStart(_serviceName) {}
  buildEnd() {}
  printSummary(successful, failed) {
    if (successful.length > 0) {
      this.#line(`\n${successful.length} serviço(s) com deploy concluído:`);
      for (const entry of successful) {
        this.#line(`  ✓ ${entry.serviceName}`);
        for (const url of entry.urls) this.#line(`    ${url}`);
      }
    }
    if (failed.length > 0) {
      this.#line(`\n${failed.length} serviço(s) falhou(aram):`);
      for (const entry of failed) {
        this.#line(`\n  ✗ ${entry.serviceName} (${entry.status})`);
        for (const hint of getFailureHints(entry.status)) this.#line(`    → ${hint}`);
      }
    }
  }
  printFollowUp() {
    this.#line("Use 'veloz logs -f' para acompanhar os logs de execução.");
  }
};
4078
+
4079
+ //#endregion
4080
+ //#region src/lib/deploy-output-gha.ts
4081
/** GitHub Actions output: plain lines plus ::error:: workflow annotations. */
var GhaOutput = class {
  // Write a single newline-terminated line to stdout.
  #line(text) {
    process.stdout.write(`${text}\n`);
  }
  startDeploy(serviceCount) {
    this.#line(`Iniciando deploy de ${serviceCount} serviço(s)`);
  }
  uploadComplete(serviceName, sizeMB) {
    this.#line(`✓ ${serviceName}: Upload concluído (${sizeMB} MB)`);
  }
  uploadFailed(serviceName) {
    this.#line(`::error::${serviceName}: Falha ao iniciar deploy`);
  }
  allUploadsFailed() {
    this.#line("::error::Todos os deploys falharam ao iniciar.");
  }
  monitoringStart(activeCount) {
    this.#line(`Monitorando ${activeCount} deploy(s)`);
  }
  statusUpdate(serviceName, status) {
    this.#line(`[${serviceName}] ${statusLabels[status] || status}`);
  }
  logLine(serviceName, line) {
    this.#line(`[${serviceName}] ${line}`);
  }
  streamError(serviceName, error) {
    this.#line(`::error::Erro no streaming de logs para ${serviceName}: ${error}`);
  }
  initProgress(_entries) {}
  renderProgress(_entries) {}
  buildStart(serviceName) {
    // Collapse each service's build logs into a foldable workflow group.
    startGroup(serviceName ? `Build: ${serviceName}` : "Build");
  }
  buildEnd() {
    endGroup();
  }
  printSummary(successful, failed) {
    if (successful.length > 0) {
      this.#line(`\n✓ ${successful.length} serviço(s) com deploy concluído`);
      for (const entry of successful) {
        this.#line(`  ✓ ${entry.serviceName}`);
        for (const url of entry.urls) this.#line(`    ${url}`);
      }
    }
    if (failed.length > 0) {
      this.#line(`\n✗ ${failed.length} serviço(s) falhou(aram)`);
      for (const entry of failed) {
        this.#line(`::error::${entry.serviceName} falhou (${entry.status})`);
        for (const hint of getFailureHints(entry.status)) this.#line(`  ${hint}`);
      }
    }
  }
  printFollowUp() {
    this.#line("Use 'veloz logs -f' para acompanhar os logs de execução.");
  }
};
4136
+
4137
+ //#endregion
4138
+ //#region src/lib/deploy-output-mcp.ts
4139
/** MCP-mode output: routes every event through the shared logger. */
var McpOutput = class {
  // Emit a deploy-channel line through the MCP logger.
  #deploy(message) {
    log(`[deploy] ${message}`);
  }
  startDeploy(serviceCount) {
    this.#deploy(`Iniciando deploy de ${serviceCount} serviço(s)`);
  }
  uploadComplete(serviceName, sizeMB) {
    this.#deploy(`✓ ${serviceName}: Upload concluído (${sizeMB} MB)`);
  }
  uploadFailed(serviceName) {
    this.#deploy(`✗ ${serviceName}: Falha ao iniciar deploy`);
  }
  allUploadsFailed() {
    // In MCP mode a total failure is surfaced as an exception, not text.
    throw new Error("Todos os deploys falharam ao iniciar.");
  }
  monitoringStart(activeCount) {
    this.#deploy(`Monitorando ${activeCount} deploy(s)`);
  }
  statusUpdate(serviceName, status) {
    this.#deploy(`[${serviceName}] ${statusLabels[status] || status}`);
  }
  logLine(serviceName, line) {
    log(`[build] [${serviceName}] ${line}`);
  }
  streamError(serviceName, error) {
    this.#deploy(`✗ Erro no streaming de logs para ${serviceName}: ${error}`);
  }
  initProgress(_entries) {}
  renderProgress(_entries) {}
  buildStart(_serviceName) {}
  buildEnd() {}
  printSummary(successful, failed) {
    if (successful.length > 0) {
      this.#deploy(`✓ ${successful.length} serviço(s) com deploy concluído`);
      for (const entry of successful) {
        this.#deploy(`✓ ${entry.serviceName}`);
        for (const url of entry.urls) this.#deploy(`${url}`);
      }
    }
    if (failed.length > 0) {
      this.#deploy(`✗ ${failed.length} serviço(s) falhou(aram)`);
      for (const entry of failed) {
        this.#deploy(`✗ ${entry.serviceName} (${entry.status})`);
        for (const hint of getFailureHints(entry.status)) this.#deploy(`→ ${hint}`);
      }
    }
  }
  printFollowUp() {
    this.#deploy("Use 'veloz logs -f' para acompanhar os logs de execução.");
  }
};
4189
+
4190
+ //#endregion
4191
+ //#region src/lib/deploy-output.ts
4192
/**
 * Map a terminal deployment status to actionable next-step hints shown
 * under a failed service in the deploy summary.
 */
function getFailureHints(status) {
  const hintsByStatus = {
    BUILD_FAILED: [
      "Verifique os logs de build acima para erros de compilação",
      "Teste o build localmente: rode o comando de build do seu projeto",
      "Use 'veloz config show' para verificar as configurações"
    ],
    DEPLOY_FAILED: [
      "O build passou mas o serviço falhou ao iniciar",
      "Verifique se a porta configurada está correta: 'veloz config show'",
      "Veja os logs de runtime: 'veloz logs -f'"
    ],
    CANCELLED: ["Deploy cancelado. Execute 'veloz deploy' para tentar novamente."]
  };
  // Object.hasOwn guards against prototype keys masquerading as statuses.
  if (Object.hasOwn(hintsByStatus, status)) return hintsByStatus[status];
  return ["Execute 'veloz logs -f' para mais detalhes."];
}
4208
/**
 * Pick the deploy output strategy for the current environment:
 * MCP mode → logger; GitHub Actions → annotations; interactive terminal →
 * live re-rendering; anything else → plain line output.
 */
function createDeployOutput() {
  if (isMcpMode()) return new McpOutput();
  if (process.env.GITHUB_ACTIONS === "true") return new GhaOutput();
  return process.stdout.isTTY ? new TtyOutput() : new PlainOutput();
}
4214
+
3511
4215
  //#endregion
3512
4216
  //#region src/lib/deploy-config.ts
3513
4217
/**
 * Translate a veloz.json service entry (matched by id) into the flat
 * service-config shape the deployments API expects. Returns undefined when
 * there is no config or no entry with the given id.
 */
function resolveServiceConf(velozConfig, serviceId) {
  if (!velozConfig) return void 0;
  const conf = Object.values(velozConfig.services).find((c) => c.id === serviceId);
  if (!conf) return void 0;
  const merged = mergeServiceWithDefaults(conf, velozConfig.defaults);
  const build = merged.build;
  const isDockerfile = build?.method === "dockerfile";
  return {
    type: merged.type?.toUpperCase(),
    branch: merged.branch,
    buildCommand: build?.command ?? void 0,
    startCommand: merged.runtime?.command ?? void 0,
    preStartCommand: merged.runtime?.preStartCommand ?? void 0,
    port: merged.runtime?.port ?? void 0,
    rootDirectory: merged.root,
    // Dockerfile builds carry their own build context; nixpacks builds don't.
    docker: isDockerfile ? {
      dockerfile: build.dockerfile ?? "Dockerfile",
      context: build.context ?? merged.root ?? "."
    } : void 0,
    instanceCount: merged.resources?.instances ?? void 0,
    cpuLimit: merged.resources?.cpu ?? void 0,
    memoryLimit: merged.resources?.memory ?? void 0,
    healthCheckPath: merged.runtime?.healthCheck?.path ?? null,
    aptPackages: build?.aptPackages ?? void 0,
    nodeVersion: build?.nodeVersion ?? void 0,
    nixpkgsArchive: build?.nixpkgsArchive ?? void 0,
    packageManager: build?.packageManager,
    installCommand: build?.installCommand ?? void 0,
    // Volumes default to 10 GB when no explicit size was configured.
    volumes: merged.volumes?.map((v) => ({
      ...v,
      sizeGb: v.sizeGb ?? 10
    })) ?? void 0
  };
}
@@ -3958,7 +4672,7 @@ async function autoUpdate() {
3958
4672
  if (process.env.VELOZ_MCP === "true") return;
3959
4673
  const pm = detectPackageManager();
3960
4674
  if (!pm) return;
3961
- const currentVersion = "0.0.0-beta.16";
4675
+ const currentVersion = "0.0.0-beta.18";
3962
4676
  const latestVersion = await fetchLatestVersion();
3963
4677
  if (!latestVersion || latestVersion === currentVersion) return;
3964
4678
  const installCmd = getInstallCommand(pm, latestVersion);
@@ -3990,26 +4704,27 @@ async function provisionDatabases(config, opts) {
3990
4704
  text: "Verificando bancos de dados...",
3991
4705
  fn: () => client.databases.list({ projectId })
3992
4706
  });
3993
- let updatedConfig = { ...config };
4707
+ const idUpdates = {};
3994
4708
  for (const [key, dbConfig] of entries) {
3995
4709
  const existing = serverDatabases.find((d) => dbConfig.id && d.id === dbConfig.id || d.name === key);
3996
4710
  if (existing) {
3997
- if (!dbConfig.id || dbConfig.id !== existing.id) updatedConfig = {
3998
- ...updatedConfig,
3999
- databases: {
4000
- ...updatedConfig.databases,
4001
- [key]: {
4002
- ...dbConfig,
4003
- id: existing.id
4004
- }
4005
- }
4006
- };
4711
+ if (!dbConfig.id || dbConfig.id !== existing.id) idUpdates[key] = existing.id;
4007
4712
  if (existing.databaseStatus === "FAILED") {
4008
4713
  warn(`Banco de dados "${key}" está com status FAILED no servidor.`);
4009
4714
  console.log(chalk.dim(` Use 'veloz db restart ${key}' para tentar reiniciar.`));
4010
4715
  console.log();
4011
- } else if (existing.databaseStatus === "PENDING" || existing.databaseStatus === "PROVISIONING" || existing.databaseStatus === "WAITING_ON_PROVIDER") info(`Banco de dados "${key}" ainda está sendo provisionado (${existing.databaseStatus === "WAITING_ON_PROVIDER" ? "levando mais tempo" : existing.databaseStatus}).`);
4012
- else info(`Banco de dados "${key}" encontrado no servidor (${chalk.green("LIVE")}).`);
4716
+ continue;
4717
+ }
4718
+ if (existing.databaseStatus === "PENDING" || existing.databaseStatus === "PROVISIONING" || existing.databaseStatus === "WAITING_ON_PROVIDER") {
4719
+ info(`Banco de dados "${key}" ainda está sendo provisionado (${existing.databaseStatus === "WAITING_ON_PROVIDER" ? "levando mais tempo" : existing.databaseStatus}).`);
4720
+ continue;
4721
+ }
4722
+ const serviceId = existing.id;
4723
+ const isLive = existing.databaseStatus === "LIVE";
4724
+ await updateDatabaseResources(client, key, dbConfig, existing, serviceId, isLive);
4725
+ await updateDatabasePooler(client, key, dbConfig, existing, serviceId, isLive);
4726
+ await updateDatabaseStorage(client, key, dbConfig, serviceId);
4727
+ info(`Banco de dados "${key}" encontrado no servidor (${chalk.green(existing.databaseStatus ?? "LIVE")}).`);
4013
4728
  continue;
4014
4729
  }
4015
4730
  console.log();
@@ -4035,8 +4750,7 @@ async function provisionDatabases(config, opts) {
4035
4750
  engine: dbConfig.engine,
4036
4751
  engineVersion: dbConfig.version,
4037
4752
  storage: dbConfig.storage,
4038
- cpuLimit: dbConfig.resources?.cpu,
4039
- memoryLimit: dbConfig.resources?.memory,
4753
+ size: dbConfig.size ?? "essencial",
4040
4754
  poolerEnabled: dbConfig.pooler?.enabled,
4041
4755
  poolerPoolMode: dbConfig.pooler?.poolMode,
4042
4756
  poolerDefaultPoolSize: dbConfig.pooler?.defaultPoolSize,
@@ -4044,29 +4758,128 @@ async function provisionDatabases(config, opts) {
4044
4758
  })
4045
4759
  });
4046
4760
  success(`Banco de dados "${key}" criado (provisionando...).`);
4047
- updatedConfig = {
4048
- ...updatedConfig,
4049
- databases: {
4050
- ...updatedConfig.databases,
4051
- [key]: {
4052
- ...dbConfig,
4053
- id: db.id
4054
- }
4055
- }
4056
- };
4761
+ idUpdates[key] = db.id;
4057
4762
  } catch (error) {
4058
4763
  warn(`Falha ao criar banco de dados "${key}": ${error instanceof Error ? error.message : String(error)}`);
4059
4764
  console.log(chalk.dim(` Continuando com o deploy dos serviços...`));
4060
4765
  console.log();
4061
4766
  }
4062
4767
  }
4063
- if (JSON.stringify(updatedConfig.databases) !== JSON.stringify(config.databases)) try {
4064
- saveConfig(updatedConfig);
4768
+ if (Object.keys(idUpdates).length > 0) try {
4769
+ patchConfig((raw) => {
4770
+ raw.databases ??= {};
4771
+ for (const [key, id] of Object.entries(idUpdates)) if (raw.databases[key]) raw.databases[key].id = id;
4772
+ else {
4773
+ const dbConfig = entries.find(([k]) => k === key)?.[1];
4774
+ if (dbConfig) raw.databases[key] = {
4775
+ engine: dbConfig.engine,
4776
+ id
4777
+ };
4778
+ }
4779
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
4780
+ });
4065
4781
  info(`Arquivo veloz.json atualizado com IDs dos bancos de dados.`);
4066
4782
  } catch (error) {
4067
4783
  warn(`Não foi possível atualizar veloz.json: ${error instanceof Error ? error.message : String(error)}`);
4068
4784
  }
4069
- return updatedConfig;
4785
+ const updatedDatabases = { ...databases };
4786
+ for (const [key, id] of Object.entries(idUpdates)) if (updatedDatabases[key]) updatedDatabases[key] = {
4787
+ ...updatedDatabases[key],
4788
+ id
4789
+ };
4790
+ return {
4791
+ ...config,
4792
+ databases: updatedDatabases
4793
+ };
4794
+ }
4795
+ /**
4796
+ * Compare desired size tier from config against the server state and call
4797
+ * updateResources if they differ.
4798
+ */
4799
/**
 * Reconcile the database size tier declared in config with the server state,
 * calling updateResources only when they differ and the database is LIVE.
 * All failures are reported as warnings; nothing is thrown.
 */
async function updateDatabaseResources(client, key, dbConfig, existing, serviceId, isLive) {
  const desiredSize = dbConfig.size;
  if (!desiredSize) return;
  // Fall back to deriving the current tier from raw limits when the server
  // does not report an explicit size.
  const currentSize = existing.size ?? resolveDatabaseSize(existing.cpuLimit, existing.memoryLimit);
  if (currentSize === desiredSize) return;
  if (!isLive) {
    warn(`Banco de dados "${key}" não está LIVE — não é possível alterar recursos agora.`);
    return;
  }
  const sizeConfig = DATABASE_SIZES[desiredSize];
  if (!sizeConfig) {
    warn(`Tamanho "${desiredSize}" inválido para banco de dados "${key}".`);
    return;
  }
  try {
    await withSpinner({
      text: `Atualizando recursos de "${key}" para ${sizeConfig.label} (${sizeConfig.cpuLabel}, ${sizeConfig.memoryLabel})...`,
      fn: () => client.databases.updateResources({
        serviceId,
        size: desiredSize
      })
    });
    success(`Recursos de "${key}" atualizados para ${sizeConfig.label}.`);
  } catch (error) {
    warn(`Falha ao atualizar recursos de "${key}": ${error instanceof Error ? error.message : String(error)}`);
  }
}
4825
+ /**
4826
+ * Compare desired pooler settings from config against the server state and call
4827
+ * updatePooler if they differ.
4828
+ */
4829
/**
 * Reconcile the connection-pooler settings declared in config with the server
 * state. Only PostgreSQL databases support a pooler; changes require the
 * database to be LIVE. Failures degrade to warnings.
 */
async function updateDatabasePooler(client, key, dbConfig, existing, serviceId, isLive) {
  const desiredPooler = dbConfig.pooler;
  if (!desiredPooler) return;
  const enabledChanged = desiredPooler.enabled !== existing.poolerEnabled;
  const poolModeChanged = desiredPooler.poolMode !== void 0 && desiredPooler.poolMode !== existing.poolerPoolMode;
  // Nothing to reconcile when both knobs already match.
  if (!enabledChanged && !poolModeChanged) return;
  if (!isLive) {
    warn(`Banco de dados "${key}" não está LIVE — não é possível alterar o pooler agora.`);
    return;
  }
  if (existing.engine !== "postgresql") {
    warn(`Connection pooler só é suportado para PostgreSQL (banco "${key}" usa ${existing.engine}).`);
    return;
  }
  try {
    await withSpinner({
      text: `Atualizando pooler de "${key}"...`,
      fn: () => client.databases.updatePooler({
        serviceId,
        enabled: desiredPooler.enabled,
        poolMode: desiredPooler.poolMode,
        defaultPoolSize: desiredPooler.defaultPoolSize,
        maxClientConn: desiredPooler.maxClientConn
      })
    });
    success(`Pooler de "${key}" ${desiredPooler.enabled ? "ativado" : "desativado"}.`);
  } catch (error) {
    warn(`Falha ao atualizar pooler de "${key}": ${error instanceof Error ? error.message : String(error)}`);
  }
}
4859
+ /**
4860
+ * Compare desired storage size from config against the server's volume and call
4861
+ * volumes.update to resize if the config requests a larger volume.
4862
+ */
4863
/**
 * Grow the database's "data" volume when the config asks for more storage
 * than the server currently provides. Shrinking is never attempted, and
 * resize failures degrade to a warning instead of aborting the deploy.
 *
 * Idiom fix: use Number.parseInt / Number.isNaN instead of the coercing
 * globals (behavior is identical here since the input is a string).
 */
async function updateDatabaseStorage(client, key, dbConfig, serviceId) {
  const desiredStorage = dbConfig.storage;
  if (!desiredStorage) return;
  // Config expresses storage as e.g. "20Gi"; extract the integer GB count.
  const desiredSizeGb = Number.parseInt(desiredStorage.replace("Gi", ""), 10);
  if (Number.isNaN(desiredSizeGb) || desiredSizeGb <= 0) return;
  const dataVolume = (await client.volumes.list({ serviceId })).find((v) => v.name === "data");
  if (!dataVolume) return;
  // Only ever grow the volume; equal or smaller requests are no-ops.
  if (desiredSizeGb <= dataVolume.sizeGb) return;
  try {
    await withSpinner({
      text: `Redimensionando armazenamento de "${key}" de ${dataVolume.sizeGb} GB para ${desiredSizeGb} GB...`,
      fn: () => client.volumes.update({
        volumeId: dataVolume.id,
        sizeGb: desiredSizeGb
      })
    });
    success(`Armazenamento de "${key}" atualizado para ${desiredSizeGb} GB.`);
  } catch (error) {
    warn(`Falha ao redimensionar armazenamento de "${key}": ${error instanceof Error ? error.message : String(error)}`);
  }
}
4883
  }
4071
4884
  /**
4072
4885
  * Get hint messages about auto-injected DATABASE_URLs for display during env var prompts.
@@ -4098,6 +4911,14 @@ const SERVICE_TYPE_LABELS = {
4098
4911
  * the server will generate one with nixpacks.
4099
4912
  */
4100
4913
  function prepareExtraFiles(_detection, serviceConfig) {
4914
+ if (serviceConfig?.docker) {
4915
+ const dockerfilePath = resolve(process.cwd(), serviceConfig.docker.dockerfile);
4916
+ if (!existsSync(dockerfilePath)) throw new Error(`Dockerfile não encontrado: ${serviceConfig.docker.dockerfile}`);
4917
+ return [{
4918
+ name: "Dockerfile",
4919
+ content: readFileSync(dockerfilePath, "utf-8")
4920
+ }];
4921
+ }
4101
4922
  if (existsSync(resolve(process.cwd(), "Dockerfile"))) return [];
4102
4923
  const rootDir = serviceConfig?.rootDirectory || ".";
4103
4924
  const serviceDockerfilePath = resolve(process.cwd(), rootDir, "Dockerfile");
@@ -4169,39 +4990,6 @@ async function triggerDeploy(serviceId, serviceName, preDetection) {
4169
4990
  untrackDeployment(deployment.id);
4170
4991
  }
4171
4992
  }
4172
- /**
4173
- * MCP variant of triggerDeploy — yields progress events as async generator.
4174
- * Upload phase is awaited, then build log streaming is delegated to the
4175
- * streaming generator.
4176
- */
4177
- async function* triggerDeployMcp(serviceId, serviceName) {
4178
- const client = await getClient();
4179
- const serviceConf = resolveServiceConf(loadConfig(), serviceId);
4180
- const extraFiles = prepareExtraFiles(detectLocalRepo(), serviceConf);
4181
- yield {
4182
- type: "status",
4183
- message: "Iniciando deploy..."
4184
- };
4185
- const deployment = await withRetry(() => client.deployments.create({
4186
- serviceId,
4187
- serviceConfig: serviceConf
4188
- }));
4189
- yield {
4190
- type: "status",
4191
- message: "Fazendo upload do código..."
4192
- };
4193
- await withRetry(() => uploadSource(deployment.id, process.cwd(), extraFiles));
4194
- yield {
4195
- type: "status",
4196
- message: "Upload concluído. Aguardando build..."
4197
- };
4198
- trackDeployment(deployment.id);
4199
- try {
4200
- yield* streamDeploymentLogsMcp(deployment.id, serviceId, serviceName);
4201
- } finally {
4202
- untrackDeployment(deployment.id);
4203
- }
4204
- }
4205
4993
  function warnIfEphemeralFsDetected(detection, serviceConf, serviceLabel) {
4206
4994
  if (!detection.usesNodeFs || (serviceConf?.volumes?.length ?? 0) > 0) return;
4207
4995
  warn(`Uso de fs/node:fs detectado${serviceLabel ? ` no serviço ${chalk.bold(serviceLabel)}` : ""}. O filesystem do container é efêmero; configure um volume em veloz.json ou use 'veloz volumes create'.`);
@@ -4212,7 +5000,7 @@ async function maybeConfigurePersistentVolume(serviceConfig, detection, opts, se
4212
5000
  if (!await promptConfirm(`Deseja adicionar um volume persistente para ${serviceLabel}?`, true)) return;
4213
5001
  const name = await prompt(`Nome do volume ${chalk.dim("(data)")}:`) || "data";
4214
5002
  const mountPath = await prompt(`Mount path ${chalk.dim("(/data)")}:`) || "/data";
4215
- const sizeInput = await prompt(`Tamanho em GB ${chalk.dim("(1)")}:`);
5003
+ const sizeInput = await prompt(`Tamanho em GB ${chalk.dim("(10)")}:`);
4216
5004
  const parsedSize = Number.parseInt(sizeInput || "10", 10);
4217
5005
  serviceConfig.volumes = [{
4218
5006
  name,
@@ -4258,7 +5046,12 @@ async function findServicesFromConfig() {
4258
5046
  }
4259
5047
  }
4260
5048
  if (configUpdated) {
4261
- saveConfig(config);
5049
+ const serviceIdMap = {};
5050
+ for (const [key, svc] of Object.entries(config.services)) if (svc.id) serviceIdMap[key] = svc.id;
5051
+ patchConfig((raw) => {
5052
+ for (const [key, id] of Object.entries(serviceIdMap)) if (raw.services[key]) raw.services[key].id = id;
5053
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
5054
+ });
4262
5055
  info(`Arquivo ${getConfigFileName()} atualizado com IDs dos serviços.`);
4263
5056
  }
4264
5057
  }
@@ -4452,6 +5245,7 @@ async function createServiceFlow(projectId, projectName, repoName, opts = {}) {
4452
5245
  const allApps = detection.monorepoApps.map((a) => ({
4453
5246
  name: a.name,
4454
5247
  root: a.path,
5248
+ type: a.framework?.type ?? "WEB",
4455
5249
  framework: a.framework?.name ?? null,
4456
5250
  buildCommand: a.framework?.buildCommand ?? null,
4457
5251
  startCommand: a.framework?.startCommand ?? null,
@@ -4517,7 +5311,7 @@ async function createServiceFlow(projectId, projectName, repoName, opts = {}) {
4517
5311
  fn: () => withRetry(() => client.services.create({
4518
5312
  projectId,
4519
5313
  name: app.name,
4520
- type: "WEB",
5314
+ type: app.type,
4521
5315
  branch,
4522
5316
  rootDirectory: app.root,
4523
5317
  buildCommand: app.buildCommand ?? void 0,
@@ -4552,13 +5346,15 @@ async function createServiceFlow(projectId, projectName, repoName, opts = {}) {
4552
5346
  await promptEnvVars(service$1.id, serviceDetection.envVars.map((v) => v.key), config$1);
4553
5347
  }
4554
5348
  config$1 = await provisionDatabases(config$1, { yes: opts.yes ?? false });
4555
- await deployServicesInParallel(createdServices.map(({ service: service$1, app }) => ({
5349
+ await deployServices(createdServices.map(({ service: service$1, app }) => ({
4556
5350
  serviceId: service$1.id,
4557
5351
  serviceName: app.name,
4558
- path: resolve(process.cwd(), app.root),
4559
5352
  serviceConfig: resolveServiceConf(config$1, service$1.id),
4560
5353
  extraFiles: prepareExtraFiles(detectLocalRepo(app.root), { rootDirectory: app.root })
4561
- })));
5354
+ })), {
5355
+ projectRoot: process.cwd(),
5356
+ output: createDeployOutput()
5357
+ });
4562
5358
  return createdServices[createdServices.length - 1]?.service.id || "";
4563
5359
  }
4564
5360
  const fw = detection.framework;
@@ -4698,13 +5494,10 @@ async function createEnvironmentFlow(rawConfig, envName, opts) {
4698
5494
  },
4699
5495
  services: envServices
4700
5496
  };
4701
- saveConfig({
4702
- ...rawConfig,
4703
- environments: {
4704
- ...rawConfig.environments,
4705
- [envName]: envOverride
4706
- },
4707
- updated: (/* @__PURE__ */ new Date()).toISOString()
5497
+ patchConfig((raw) => {
5498
+ raw.environments ??= {};
5499
+ raw.environments[envName] = envOverride;
5500
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
4708
5501
  });
4709
5502
  success(`Ambiente "${envName}" salvo em ${getConfigFileName()}`);
4710
5503
  setActiveEnv(envName);
@@ -4730,6 +5523,7 @@ async function addServiceFlow(existingConfig, opts) {
4730
5523
  const availableApps = detection.monorepoApps.map((a) => ({
4731
5524
  name: a.name,
4732
5525
  root: a.path,
5526
+ type: a.framework?.type ?? "WEB",
4733
5527
  framework: a.framework?.name ?? null,
4734
5528
  buildCommand: a.framework?.buildCommand ?? null,
4735
5529
  startCommand: a.framework?.startCommand ?? null,
@@ -4797,7 +5591,7 @@ async function addServiceFlow(existingConfig, opts) {
4797
5591
  fn: () => withRetry(() => client.services.create({
4798
5592
  projectId,
4799
5593
  name: app.name,
4800
- type: "WEB",
5594
+ type: app.type,
4801
5595
  branch,
4802
5596
  rootDirectory: app.root,
4803
5597
  buildCommand: app.buildCommand ?? void 0,
@@ -4824,8 +5618,11 @@ async function addServiceFlow(existingConfig, opts) {
4824
5618
  };
4825
5619
  await maybeConfigurePersistentVolume(updatedConfig$1.services[app.root], detectLocalRepo(app.root), opts, app.name);
4826
5620
  }
4827
- updatedConfig$1.updated = (/* @__PURE__ */ new Date()).toISOString();
4828
- saveConfig(updatedConfig$1);
5621
+ const newServices = Object.fromEntries(createdServices.map(({ app }) => [app.root, updatedConfig$1.services[app.root]]));
5622
+ patchConfig((raw) => {
5623
+ for (const [key, value] of Object.entries(newServices)) if (value) raw.services[key] = value;
5624
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
5625
+ });
4829
5626
  info(`Arquivo ${getConfigFileName()} atualizado com ${createdServices.length} novo(s) serviço(s).`);
4830
5627
  if (!opts.yes) for (const { service: service$1, app } of createdServices) {
4831
5628
  console.log(chalk.cyan(`\n── Configurando variáveis: ${app.name} ──\n`));
@@ -4833,13 +5630,15 @@ async function addServiceFlow(existingConfig, opts) {
4833
5630
  await promptEnvVars(service$1.id, serviceDetection.envVars.map((v) => v.key), updatedConfig$1);
4834
5631
  }
4835
5632
  await provisionDatabases(updatedConfig$1, { yes: opts.yes ?? false });
4836
- await deployServicesInParallel(createdServices.map(({ service: service$1, app }) => ({
5633
+ await deployServices(createdServices.map(({ service: service$1, app }) => ({
4837
5634
  serviceId: service$1.id,
4838
5635
  serviceName: app.name,
4839
- path: resolve(process.cwd(), app.root),
4840
5636
  serviceConfig: resolveServiceConf(updatedConfig$1, service$1.id),
4841
5637
  extraFiles: prepareExtraFiles(detectLocalRepo(app.root), { rootDirectory: app.root })
4842
- })));
5638
+ })), {
5639
+ projectRoot: process.cwd(),
5640
+ output: createDeployOutput()
5641
+ });
4843
5642
  return;
4844
5643
  }
4845
5644
  const fw = detection.framework;
@@ -4926,7 +5725,11 @@ async function addServiceFlow(existingConfig, opts) {
4926
5725
  updated: (/* @__PURE__ */ new Date()).toISOString()
4927
5726
  };
4928
5727
  await maybeConfigurePersistentVolume(updatedConfig.services[serviceKey], detection, opts, service.name);
4929
- saveConfig(updatedConfig);
5728
+ const newServiceEntry = updatedConfig.services[serviceKey];
5729
+ patchConfig((raw) => {
5730
+ raw.services[serviceKey] = newServiceEntry;
5731
+ raw.updated = (/* @__PURE__ */ new Date()).toISOString();
5732
+ });
4930
5733
  info(`Arquivo ${getConfigFileName()} atualizado.`);
4931
5734
  await provisionDatabases(updatedConfig, { yes: opts.yes ?? false });
4932
5735
  await triggerDeploy(service.id, service.name);
@@ -4981,17 +5784,32 @@ function registerDeploy(cli$1) {
4981
5784
  async function* mcpDeployFlow(opts) {
4982
5785
  const configuredServices = await findServicesFromConfig();
4983
5786
  if (configuredServices.length === 0) throw new Error("Nenhum serviço configurado. Execute 'veloz deploy' no terminal para configurar o projeto primeiro.");
5787
+ const currentConfig = loadConfig();
5788
+ if (currentConfig) await provisionDatabases(currentConfig, { yes: true });
5789
+ let servicesToProcess = configuredServices;
4984
5790
  if (opts.service) {
4985
5791
  const found = configuredServices.find((s) => s.key === opts.service || s.serviceName.toLowerCase() === opts.service.toLowerCase() || s.serviceId === opts.service);
4986
5792
  if (!found) {
5793
+ if ((currentConfig?.databases ? Object.keys(currentConfig.databases).find((k) => k === opts.service || k.toLowerCase() === opts.service.toLowerCase()) : void 0) && currentConfig) {
5794
+ await provisionDatabases(currentConfig, { yes: true });
5795
+ return;
5796
+ }
4987
5797
  const available = configuredServices.map((s) => `${s.key} (${s.serviceName})`).join(", ");
4988
- throw new Error(`Serviço '${opts.service}' não encontrado. Disponíveis: ${available}`);
5798
+ const dbNames = currentConfig?.databases ? Object.keys(currentConfig.databases) : [];
5799
+ const hint = dbNames.length > 0 ? `\nBancos de dados: ${dbNames.join(", ")} (use 'veloz db update' para gerenciar)` : "";
5800
+ throw new Error(`Serviço '${opts.service}' não encontrado. Disponíveis: ${available}${hint}`);
4989
5801
  }
4990
- yield* triggerDeployMcp(found.serviceId, found.serviceName);
4991
- return;
5802
+ servicesToProcess = [found];
4992
5803
  }
4993
- if (configuredServices.length === 1) yield* triggerDeployMcp(configuredServices[0].serviceId, configuredServices[0].serviceName);
4994
- else for (const svc of configuredServices) yield* triggerDeployMcp(svc.serviceId, svc.serviceName);
5804
+ const results = await deployServices(await computeExtraFilesForServices(servicesToProcess), {
5805
+ projectRoot: process.cwd(),
5806
+ output: createDeployOutput()
5807
+ });
5808
+ for (const result of results) yield {
5809
+ type: "result",
5810
+ message: result.status === "LIVE" ? "Deploy concluído" : `Deploy finalizou: ${result.status}`,
5811
+ data: result
5812
+ };
4995
5813
  }
4996
5814
  async function cliDeployFlow(opts) {
4997
5815
  if (opts.verbose) process.env.VELOZ_VERBOSE = "true";
@@ -5026,10 +5844,21 @@ async function cliDeployFlow(opts) {
5026
5844
  if (opts.service) {
5027
5845
  const found = configuredServices.find((s) => s.key === opts.service || s.serviceName.toLowerCase() === opts.service.toLowerCase() || s.serviceId === opts.service);
5028
5846
  if (!found) {
5847
+ const dbMatch = currentConfig?.databases ? Object.keys(currentConfig.databases).find((k) => k === opts.service || k.toLowerCase() === opts.service.toLowerCase()) : void 0;
5848
+ if (dbMatch && currentConfig) {
5849
+ info(`"${dbMatch}" é um banco de dados. Aplicando alterações de configuração...`);
5850
+ await provisionDatabases(currentConfig, { yes: opts.yes ?? false });
5851
+ return;
5852
+ }
5029
5853
  const available = configuredServices.map((s) => ` • ${s.key} (${s.serviceName})`).join("\n");
5030
- throw new Error(`Serviço '${opts.service}' não encontrado.\n\nServiços disponíveis:\n${available}`);
5854
+ const dbNames = currentConfig?.databases ? Object.keys(currentConfig.databases) : [];
5855
+ const dbHint = dbNames.length > 0 ? `\n\nBancos de dados:\n${dbNames.map((n) => ` • ${n} (banco de dados)`).join("\n")}` : "";
5856
+ throw new Error(`Serviço '${opts.service}' não encontrado.\n\nServiços disponíveis:\n${available}${dbHint}`);
5031
5857
  }
5032
- await triggerDeploy(found.serviceId, found.serviceName);
5858
+ await deployServices(await computeExtraFilesForServices([found]), {
5859
+ projectRoot: process.cwd(),
5860
+ output: createDeployOutput()
5861
+ });
5033
5862
  return;
5034
5863
  }
5035
5864
  if (opts.all || opts.yes || configuredServices.length === 1) {
@@ -5045,8 +5874,10 @@ async function cliDeployFlow(opts) {
5045
5874
  return;
5046
5875
  }
5047
5876
  }
5048
- if (configuredServices.length === 1) await triggerDeploy(configuredServices[0].serviceId, configuredServices[0].serviceName);
5049
- else await deployServicesInParallel(await computeExtraFilesForServices(configuredServices));
5877
+ await deployServices(await computeExtraFilesForServices(configuredServices), {
5878
+ projectRoot: process.cwd(),
5879
+ output: createDeployOutput()
5880
+ });
5050
5881
  return;
5051
5882
  }
5052
5883
  console.log(chalk.bold("\nServiços disponíveis:\n"));
@@ -5062,8 +5893,11 @@ async function cliDeployFlow(opts) {
5062
5893
  info("Nenhum serviço selecionado.");
5063
5894
  return;
5064
5895
  }
5065
- if (selectedServices.length === 1) await triggerDeploy(selectedServices[0].serviceId, selectedServices[0].serviceName);
5066
- else await deployServicesInParallel(await computeExtraFilesForServices(selectedServices));
5896
+ await deployServices(await computeExtraFilesForServices(selectedServices), {
5897
+ projectRoot: process.cwd(),
5898
+ output: createDeployOutput()
5899
+ });
5900
+ return;
5067
5901
  }
5068
5902
  if (!isGitRepo()) throw new Error("Este diretório não é um repositório git. Inicialize com `git init` e adicione um remote.");
5069
5903
  info("Detectando repositório git...");
@@ -5364,7 +6198,35 @@ function registerPull(cli$1) {
5364
6198
  else config = buildFreshConfig(project, services, databases);
5365
6199
  if (existingConfig && !c.options.force && isInteractive()) config = await pruneRemovedEntries(config, existingConfig, services, databases);
5366
6200
  config.updated = (/* @__PURE__ */ new Date()).toISOString();
5367
- saveConfig(config);
6201
+ if (existingConfig && !c.options.force) patchConfig((raw) => {
6202
+ raw.project = config.project;
6203
+ for (const [key, svc] of Object.entries(config.services)) if (raw.services[key]) {
6204
+ raw.services[key].id = svc.id;
6205
+ raw.services[key].name = svc.name;
6206
+ } else raw.services[key] = svc;
6207
+ for (const key of Object.keys(raw.services)) if (!config.services[key]) delete raw.services[key];
6208
+ const mergedDatabases = config.databases ?? {};
6209
+ if (Object.keys(mergedDatabases).length > 0) {
6210
+ raw.databases ??= {};
6211
+ for (const [key, db] of Object.entries(mergedDatabases)) if (raw.databases[key]) {
6212
+ raw.databases[key].id = db.id;
6213
+ raw.databases[key].name = db.name;
6214
+ raw.databases[key].engine = db.engine;
6215
+ if (db.version) raw.databases[key].version = db.version;
6216
+ } else {
6217
+ raw.databases[key] = {
6218
+ id: db.id,
6219
+ name: db.name,
6220
+ engine: db.engine
6221
+ };
6222
+ if (db.version) raw.databases[key].version = db.version;
6223
+ }
6224
+ for (const key of Object.keys(raw.databases)) if (!mergedDatabases[key]) delete raw.databases[key];
6225
+ if (Object.keys(raw.databases).length === 0) raw.databases = void 0;
6226
+ }
6227
+ raw.updated = config.updated;
6228
+ });
6229
+ else saveConfig(config);
5368
6230
  success(`${getConfigFileName()} atualizado com sucesso.`);
5369
6231
  const svcCount = Object.keys(config.services).length;
5370
6232
  const dbCount = Object.keys(config.databases ?? {}).length;
@@ -5464,7 +6326,7 @@ async function pruneRemovedEntries(config, existingConfig, services, databases)
5464
6326
  //#region src/index.ts
5465
6327
  if (process.argv.includes("--mcp")) process.env.VELOZ_MCP = "true";
5466
6328
  const cli = Cli.create("veloz", {
5467
- version: "0.0.0-beta.16",
6329
+ version: "0.0.0-beta.18",
5468
6330
  description: "CLI da plataforma Veloz — deploy rápido para o Brasil",
5469
6331
  env: z.object({ VELOZ_ENV: z.string().optional().describe("Ambiente alvo (ex: preview, staging)") })
5470
6332
  });