veloz 0.0.0-beta.20 → 0.0.0-beta.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.mjs +348 -209
  2. package/package.json +1 -1
package/dist/index.mjs CHANGED
@@ -179,6 +179,56 @@ function resolveDatabaseSize(cpu, memory, engine) {
179
179
  for (const [key, tier] of Object.entries(sizes)) if (tier.cpu === cpu && tier.memory === memory) return key;
180
180
  return null;
181
181
  }
182
+ const SERVICE_SIZES = {
183
+ basico: {
184
+ label: "Básico",
185
+ cpu: "100m",
186
+ memory: "256Mi",
187
+ cpuLabel: "0.1 vCPU",
188
+ memoryLabel: "256 MB"
189
+ },
190
+ essencial: {
191
+ label: "Essencial",
192
+ cpu: "500m",
193
+ memory: "512Mi",
194
+ cpuLabel: "0.5 vCPU",
195
+ memoryLabel: "512 MB"
196
+ },
197
+ turbo: {
198
+ label: "Turbo",
199
+ cpu: "1",
200
+ memory: "1Gi",
201
+ cpuLabel: "1 vCPU",
202
+ memoryLabel: "1 GB"
203
+ },
204
+ "turbo-plus": {
205
+ label: "Turbo Plus",
206
+ cpu: "1500m",
207
+ memory: "2Gi",
208
+ cpuLabel: "1.5 vCPU",
209
+ memoryLabel: "2 GB"
210
+ },
211
+ nitro: {
212
+ label: "Nitro",
213
+ cpu: "2",
214
+ memory: "4Gi",
215
+ cpuLabel: "2 vCPU",
216
+ memoryLabel: "4 GB"
217
+ },
218
+ "nitro-plus": {
219
+ label: "Nitro Plus",
220
+ cpu: "4",
221
+ memory: "8Gi",
222
+ cpuLabel: "4 vCPU",
223
+ memoryLabel: "8 GB"
224
+ }
225
+ };
226
+ const SERVICE_SIZE_KEYS = Object.keys(SERVICE_SIZES);
227
+ /** Reverse-lookup: find service size key from cpu/memory pair. */
228
+ function resolveServiceSizeFromResources(cpu, memory) {
229
+ for (const [key, tier] of Object.entries(SERVICE_SIZES)) if (tier.cpu === cpu && tier.memory === memory) return key;
230
+ return null;
231
+ }
182
232
 
183
233
  //#endregion
184
234
  //#region ../../packages/config/veloz-config.ts
@@ -215,17 +265,20 @@ const RuntimeConfigSchema = z$1.object({
215
265
  timeout: z$1.number().default(10).optional()
216
266
  }).optional()
217
267
  });
268
+ const SERVICE_SIZE_ENUM = SERVICE_SIZE_KEYS.map((k) => k);
269
+ const ServiceSizeSchema = z$1.enum(SERVICE_SIZE_ENUM);
218
270
  const ResourcesSchema = z$1.object({
219
271
  instances: z$1.number().min(1).max(10).default(1).optional(),
220
- cpu: z$1.string().regex(/^[0-9]+(\.[0-9]+)?|[0-9]+m$/).default("500m").optional(),
221
- memory: z$1.string().regex(/^[0-9]+(Mi|Gi)$/).default("512Mi").optional(),
272
+ size: ServiceSizeSchema.optional(),
273
+ cpu: z$1.string().optional(),
274
+ memory: z$1.string().optional(),
222
275
  autoscale: z$1.object({
223
276
  enabled: z$1.boolean().default(false).optional(),
224
277
  minInstances: z$1.number().min(1).default(1).optional(),
225
278
  maxInstances: z$1.number().min(1).max(20).default(3).optional(),
226
279
  targetCPU: z$1.number().min(10).max(90).default(70).optional()
227
280
  }).optional()
228
- });
281
+ }).transform(({ cpu: _cpu, memory: _memory, ...rest }) => rest);
229
282
  const EnvVarDefinitionSchema = z$1.object({
230
283
  description: z$1.string().optional(),
231
284
  required: z$1.boolean().default(false).optional(),
@@ -1698,8 +1751,10 @@ function printServiceConfig(service) {
1698
1751
  console.log(` ${chalk.bold("Pre-Start Cmd:")} ${formatValue(service.preStartCommand)}`);
1699
1752
  console.log(` ${chalk.bold("Porta:")} ${formatValue(service.port)}`);
1700
1753
  console.log(` ${chalk.bold("Instâncias:")} ${formatValue(service.instanceCount)}`);
1701
- console.log(` ${chalk.bold("CPU Limit:")} ${formatValue(service.cpuLimit)}`);
1702
- console.log(` ${chalk.bold("Memory Limit:")} ${formatValue(service.memoryLimit)}`);
1754
+ const size = service.size;
1755
+ const sizeTier = size ? SERVICE_SIZES[size] : null;
1756
+ const sizeDisplay = sizeTier ? `${size} (${sizeTier.cpuLabel} · ${sizeTier.memoryLabel})` : size;
1757
+ console.log(` ${chalk.bold("Tamanho:")} ${formatValue(sizeDisplay)}`);
1703
1758
  }
1704
1759
  const configGroup = Cli.create("config", { description: "Gerenciar configuração de serviços" });
1705
1760
  configGroup.command("show", {
@@ -1716,8 +1771,7 @@ configGroup.command("show", {
1716
1771
  preStartCommand: z.string().nullable(),
1717
1772
  port: z.number().nullable(),
1718
1773
  instanceCount: z.number().nullable(),
1719
- cpuLimit: z.string().nullable(),
1720
- memoryLimit: z.string().nullable()
1774
+ size: z.string().nullable()
1721
1775
  })),
1722
1776
  async run(c) {
1723
1777
  const { services } = resolveAllServices(c.options.service);
@@ -1745,8 +1799,7 @@ configGroup.command("show", {
1745
1799
  preStartCommand: svc.preStartCommand ?? null,
1746
1800
  port: svc.port ?? null,
1747
1801
  instanceCount: svc.instanceCount ?? null,
1748
- cpuLimit: svc.cpuLimit ?? null,
1749
- memoryLimit: svc.memoryLimit ?? null
1802
+ size: svc.size ?? null
1750
1803
  }));
1751
1804
  }
1752
1805
  });
@@ -1761,8 +1814,7 @@ configGroup.command("set", {
1761
1814
  port: z.string().optional().describe("Porta do serviço"),
1762
1815
  root: z.string().optional().describe("Diretório raiz"),
1763
1816
  instances: z.string().optional().describe("Número de instâncias"),
1764
- cpu: z.string().optional().describe("Limite de CPU (ex: 500m, 1)"),
1765
- memory: z.string().optional().describe("Limite de memória (ex: 512Mi, 1Gi)"),
1817
+ size: z.string().optional().describe("Tamanho (basico, essencial, turbo, turbo-plus, nitro, nitro-plus)"),
1766
1818
  branch: z.string().optional().describe("Branch do Git"),
1767
1819
  service: z.string().optional().describe("Serviço alvo (chave ou nome)")
1768
1820
  }),
@@ -1785,8 +1837,7 @@ configGroup.command("set", {
1785
1837
  if (c.options.port) updates.port = parseInt(c.options.port, 10);
1786
1838
  if (c.options.root !== void 0) updates.rootDirectory = c.options.root === "/" ? null : c.options.root;
1787
1839
  if (c.options.instances) updates.instanceCount = parseInt(c.options.instances, 10);
1788
- if (c.options.cpu) updates.cpuLimit = c.options.cpu;
1789
- if (c.options.memory) updates.memoryLimit = c.options.memory;
1840
+ if (c.options.size) updates.size = c.options.size;
1790
1841
  if (c.options.branch) updates.branch = c.options.branch;
1791
1842
  if (Object.keys(updates).length === 0) {
1792
1843
  warn("Nenhuma configuração fornecida para atualizar.");
@@ -1839,10 +1890,13 @@ configGroup.command("edit", {
1839
1890
  if (rootDir) updates.rootDirectory = rootDir === "/" ? null : rootDir;
1840
1891
  const instances = await prompt(`Número de instâncias ${chalk.dim(`(${svc.instanceCount})`)}: `);
1841
1892
  if (instances) updates.instanceCount = parseInt(instances, 10);
1842
- const cpu = await prompt(`Limite de CPU ${chalk.dim(`(${svc.cpuLimit})`)}: `);
1843
- if (cpu) updates.cpuLimit = cpu;
1844
- const memory = await prompt(`Limite de memória ${chalk.dim(`(${svc.memoryLimit})`)}: `);
1845
- if (memory) updates.memoryLimit = memory;
1893
+ const currentSize = svc.size;
1894
+ const sizeOptions = Object.entries(SERVICE_SIZES).map(([key, tier]) => ({
1895
+ value: key,
1896
+ label: `${tier.label} — ${tier.cpuLabel} · ${tier.memoryLabel}`
1897
+ }));
1898
+ const selectedSize = await promptSelect(`Tamanho ${chalk.dim(`(${currentSize ?? "—"})`)}`, sizeOptions);
1899
+ if (selectedSize && selectedSize !== currentSize) updates.size = selectedSize;
1846
1900
  if (Object.keys(updates).length === 0) {
1847
1901
  info("Nenhuma alteração realizada.");
1848
1902
  return;
@@ -3200,71 +3254,146 @@ logsGroup.command("search", {
3200
3254
  const QUERY_HELP_SECTIONS = [
3201
3255
  {
3202
3256
  title: "Filtros básicos",
3203
- content: ` palavra Busca por palavra em qualquer campo
3204
- "frase exata" Busca por frase exata
3205
- exact("texto") Match exato (sem tokenização)
3206
- prefix("iní") Busca por prefixo
3207
- ~"regex" Expressão regular (RE2 syntax)`
3257
+ content: ` palavra Busca por palavra (tokenizada)
3258
+ "frase exata" Busca por frase exata
3259
+ exact("texto") Match exato (sem tokenização)
3260
+ iní* Busca por prefixo (wildcard)
3261
+ i("texto") Busca case-insensitive
3262
+ ~"regex" Expressão regular (RE2 syntax)
3263
+ * Todos os logs`
3208
3264
  },
3209
3265
  {
3210
3266
  title: "Filtros por campo",
3211
- content: ` campo:valor Match exato no campo
3212
- campo:~"regex" Regex no campo
3213
- campo:* Campo existe (não vazio)`
3267
+ content: ` campo:valor Match por palavra no campo
3268
+ campo:exact("val") Match exato no campo
3269
+ campo:iní* Prefixo no campo (wildcard)
3270
+ campo:i("VAL") Case-insensitive no campo
3271
+ campo:re("regex") Regex no campo
3272
+ campo:~"regex" Regex no campo (atalho)
3273
+ campo:* Campo existe (não vazio)`
3214
3274
  },
3215
3275
  {
3216
3276
  title: "Filtros por tempo",
3217
- content: ` _time:5m Últimos 5 minutos
3218
- _time:1h Última hora
3219
- _time:24h Últimas 24 horas
3220
- _time:[início, fim] Intervalo ISO 8601`
3277
+ content: ` _time:5m Últimos 5 minutos
3278
+ _time:1h Última hora
3279
+ _time:24h Últimas 24 horas
3280
+ _time:1d Último dia
3281
+ _time:1w Última semana`
3221
3282
  },
3222
3283
  {
3223
3284
  title: "Operadores lógicos",
3224
- content: ` termo1 termo2 AND implícito (espaço)
3225
- termo1 OR termo2 OR explícito
3226
- NOT termo Negação
3227
- -termo Negação (atalho)
3228
- !termo Negação (atalho)`
3285
+ content: ` A B AND implícito (espaço)
3286
+ A AND B AND explícito
3287
+ (A OR B) OR (use parênteses para agrupar)
3288
+ NOT A Negação
3289
+ -A Negação (atalho)
3290
+ !A Negação (atalho)`
3229
3291
  },
3230
3292
  {
3231
- title: "Pipes",
3232
- content: ` | stats count() by (campo) Agregar por campo
3233
- | sort by (campo) desc Ordenar resultados
3234
- | uniq by (campo) Valores únicos
3235
- | top N by (campo) Top N por campo
3236
- | fields campo1, campo2 Selecionar campos
3237
- | limit N Limitar resultados
3238
- | extract "padrão" Extrair campos com regex
3239
- | unpack_json Expandir JSON embarcado
3240
- | math expr Calcular expressões
3241
- | format "template" Formatar saída
3242
- | filter condição Filtrar pós-processamento
3243
- | replace ("de", "para") Substituir texto
3244
- | replace_regexp ("re", "sub") Substituir com regex`
3293
+ title: "Pipes — Transformação",
3294
+ content: ` | fields a, b Selecionar campos
3295
+ | delete a, b Remover campos
3296
+ | rename a as b Renomear campo
3297
+ | copy a as b Copiar campo
3298
+ | limit N Limitar resultados
3299
+ | offset N Pular N resultados
3300
+ | sort by (campo) desc Ordenar resultados
3301
+ | uniq by (campo) Valores únicos
3302
+ | top N by (campo) Top N por campo
3303
+ | filter condição Filtrar pós-processamento
3304
+ | unpack_json Expandir JSON embarcado
3305
+ | extract "a<campo>b" Extrair campos via padrão
3306
+ | len(campo) as x Tamanho de um campo
3307
+ | format "<a>/<b>" Formatar _msg via template
3308
+ | replace ("de", "para") Substituir texto
3309
+ | replace_regexp ("re", "sub") Substituir via regex
3310
+ | math expr Calcular expressões
3311
+ | drop_empty_fields Remover campos vazios
3312
+ | field_names Listar nomes de campos
3313
+ | field_values campo Listar valores de um campo`
3245
3314
  },
3246
3315
  {
3247
- title: "Funções de agregação (stats)",
3248
- content: ` count() Contagem
3249
- sum(campo) Soma
3250
- avg(campo) Média
3251
- min(campo) Mínimo
3252
- max(campo) Máximo
3253
- median(campo) Mediana
3254
- quantile(N, campo) Percentil
3255
- count_uniq(campo) Valores únicos
3256
- uniq_values(campo) Lista valores únicos
3257
- rate(campo) Taxa por segundo`
3316
+ title: "Pipes Agregação (stats)",
3317
+ content: ` Sintaxe: | stats by (campo) func() as alias
3318
+
3319
+ count() Contagem de logs
3320
+ count_uniq(campo) Contagem de valores únicos
3321
+ uniq_values(campo) Lista de valores únicos
3322
+ values(campo) Todos os valores
3323
+ sum(campo) Soma
3324
+ avg(campo) Média
3325
+ min(campo) Mínimo
3326
+ max(campo) Máximo
3327
+ median(campo) Mediana
3328
+ quantile(N, campo) Percentil (0.0 a 1.0)
3329
+ rate() Taxa por segundo
3330
+ rate_sum(campo) Taxa da soma por segundo
3331
+ sum_len(campo) Soma dos tamanhos
3332
+ row_any() Exemplo por grupo
3333
+ row_min(campo) Linha com menor valor
3334
+ row_max(campo) Linha com maior valor
3335
+
3336
+ Importante: by() vem ANTES das funções
3337
+ Correto: | stats by (campo) count()
3338
+ Errado: | stats count() by (campo)`
3339
+ },
3340
+ {
3341
+ title: "Logs JSON (unpack_json / extract)",
3342
+ content: ` Os logs são armazenados como texto bruto em _msg.
3343
+ Para consultar campos dentro de logs JSON, use unpack_json
3344
+ para extrair os campos como colunas consultáveis.
3345
+
3346
+ | unpack_json
3347
+ Expande o JSON de _msg em campos de primeiro nível.
3348
+ Após isso, você pode filtrar, agregar e selecionar
3349
+ os campos extraídos normalmente.
3350
+
3351
+ | extract "<prefixo><campo><sufixo>"
3352
+ Extrai campos via padrão de texto (não precisa ser JSON).
3353
+ Exemplo: | extract "level=<level>"
3354
+
3355
+ Dica: unpack_json só extrai o primeiro nível do JSON.
3356
+ Campos aninhados continuam como strings JSON.`
3357
+ },
3358
+ {
3359
+ title: "Encadeamento de pipes",
3360
+ content: ` Pipes podem ser encadeados com | para criar pipelines.
3361
+ O resultado de cada pipe alimenta o próximo.
3362
+
3363
+ | stats by (x) count() as n | sort by (n) desc
3364
+ | stats by (x) count() as n | filter n:>100
3365
+ | stats by (x) count() as n | math n / 60 as per_min
3366
+ | unpack_json | fields method, status | limit 10
3367
+ | unpack_json | stats by (level) count()`
3258
3368
  },
3259
3369
  {
3260
3370
  title: "Exemplos práticos",
3261
- content: ` error Logs com "error"
3262
- "connection refused" Frase exata
3263
- error NOT timeout Erros exceto timeouts
3264
- ~"status=[45]\\d{2}" Status 4xx/5xx via regex
3265
- _time:1h error | stats count() by (level) Contagem de erros por nível na última hora
3266
- _time:24h | top 10 by (message) Top 10 mensagens do dia
3267
- error | sort by (_time) desc | limit 50 Últimos 50 erros`
3371
+ content: ` Busca simples:
3372
+ error Logs com "error"
3373
+ "connection refused" Frase exata
3374
+ error NOT timeout Erros exceto timeouts
3375
+ i("warning") Case-insensitive
3376
+ ~"status=[45]\\d{2}" Regex (RE2)
3377
+
3378
+ Agregação (by() vem antes das funções):
3379
+ | stats count() Total de logs
3380
+ | stats by (pod) count() Contagem por pod
3381
+ | stats count() as n, min(_time), max(_time)
3382
+ | top 5 by (pod) Top 5 pods por volume
3383
+
3384
+ Trabalhando com logs JSON:
3385
+ | unpack_json | fields level, msg
3386
+ | unpack_json | stats by (level) count()
3387
+ | unpack_json | filter status:~"[45]\\d{2}"
3388
+ | unpack_json | stats by (method) count() as n | sort by (n) desc
3389
+
3390
+ Pipelines compostos:
3391
+ | stats by (pod) count() as n | filter n:>100 | sort by (n) desc
3392
+ | sort by (_time) desc | limit 50
3393
+ | unpack_json | stats by (method, status) count()
3394
+
3395
+ Referência completa:
3396
+ https://docs.victoriametrics.com/victorialogs/logsql/`
3268
3397
  }
3269
3398
  ];
3270
3399
  logsGroup.command("query-help", {
@@ -3286,6 +3415,7 @@ logsGroup.command("query-help", {
3286
3415
  console.log();
3287
3416
  }
3288
3417
  console.log(chalk.dim(" Uso: veloz logs search <consulta> [--start ISO] [--end ISO] [--limit N]"));
3418
+ console.log(chalk.dim(" Docs: https://docs.victoriametrics.com/victorialogs/logsql/"));
3289
3419
  console.log();
3290
3420
  }
3291
3421
  return { sections: QUERY_HELP_SECTIONS };
@@ -3896,54 +4026,6 @@ async function createBaseTarball(directory) {
3896
4026
  throw err;
3897
4027
  }
3898
4028
  }
3899
- /**
3900
- * Create a service-specific tarball. If extraFiles need injection, creates
3901
- * an overlay tarball using hardlinks from the base. Otherwise, returns the
3902
- * base tarball path directly.
3903
- */
3904
- async function createServiceTarball(base, extraFiles) {
3905
- const injectedFiles = [];
3906
- if (extraFiles) for (const file of extraFiles) {
3907
- if (existsSync(join(base.directory, file.name))) continue;
3908
- injectedFiles.push(file);
3909
- }
3910
- if (injectedFiles.length === 0) return {
3911
- tarPath: base.tarPath,
3912
- tempDir: null
3913
- };
3914
- const tempDir = await mkdtemp(join(tmpdir(), "veloz-overlay-"));
3915
- const tarPath = join(tempDir, "source.tar.gz");
3916
- const stagingDir = join(tempDir, "staging");
3917
- await mkdir(stagingDir, { recursive: true });
3918
- try {
3919
- const relativePaths = [...base.relativePaths];
3920
- for (const rel of relativePaths) {
3921
- const src = join(base.directory, rel);
3922
- const dest = join(stagingDir, rel);
3923
- await mkdir(dirname(dest), { recursive: true });
3924
- await link(src, dest);
3925
- }
3926
- for (const file of injectedFiles) {
3927
- writeFileSync(join(stagingDir, file.name), file.content);
3928
- if (!relativePaths.includes(file.name)) relativePaths.push(file.name);
3929
- }
3930
- await tar.create({
3931
- gzip: true,
3932
- file: tarPath,
3933
- cwd: stagingDir
3934
- }, relativePaths);
3935
- return {
3936
- tarPath,
3937
- tempDir
3938
- };
3939
- } catch (err) {
3940
- await rm(tempDir, {
3941
- recursive: true,
3942
- force: true
3943
- }).catch(() => {});
3944
- throw err;
3945
- }
3946
- }
3947
4029
  /** Upload a pre-built tarball for a deployment. */
3948
4030
  async function uploadTarball(deploymentId, tarPath) {
3949
4031
  const client = await getClient();
@@ -4159,7 +4241,9 @@ function cleanDisplayLine(text) {
4159
4241
  return text;
4160
4242
  }
4161
4243
  function parseBuildLine(raw) {
4162
- const trimmed = raw.trim();
4244
+ let trimmed = raw.trim();
4245
+ const tsPrefix = TIMESTAMP_RE.exec(trimmed);
4246
+ if (tsPrefix) trimmed = tsPrefix[2];
4163
4247
  const bkMatch = BUILDKIT_PREFIX_RE.exec(trimmed);
4164
4248
  if (bkMatch) {
4165
4249
  const stepNum = parseInt(bkMatch[1], 10);
@@ -4206,10 +4290,9 @@ function parseBuildLine(raw) {
4206
4290
  text: ""
4207
4291
  };
4208
4292
  }
4209
- const tsMatch = TIMESTAMP_RE.exec(trimmed);
4210
- if (tsMatch) return {
4293
+ if (tsPrefix) return {
4211
4294
  kind: "platform",
4212
- message: tsMatch[2]
4295
+ message: trimmed
4213
4296
  };
4214
4297
  return {
4215
4298
  kind: "other",
@@ -4257,6 +4340,8 @@ var BuildProgressRenderer = class {
4257
4340
  spinnerText = "Aguardando início do build...";
4258
4341
  /** External ora spinner reference — used to pause/resume during runtime log output */
4259
4342
  externalSpinner = null;
4343
+ /** Pending render scheduled via setTimeout for batching rapid updates */
4344
+ pendingRender = null;
4260
4345
  constructor(serviceName) {
4261
4346
  this.serviceName = serviceName;
4262
4347
  this.startSpinner();
@@ -4269,13 +4354,17 @@ var BuildProgressRenderer = class {
4269
4354
  this.spinnerInterval = setInterval(() => {
4270
4355
  this.spinnerFrame = (this.spinnerFrame + 1) % SPINNER_FRAMES.length;
4271
4356
  this.render();
4272
- }, 80);
4357
+ }, 120);
4273
4358
  }
4274
4359
  stopSpinner() {
4275
4360
  if (this.spinnerInterval) {
4276
4361
  clearInterval(this.spinnerInterval);
4277
4362
  this.spinnerInterval = null;
4278
4363
  }
4364
+ if (this.pendingRender) {
4365
+ clearTimeout(this.pendingRender);
4366
+ this.pendingRender = null;
4367
+ }
4279
4368
  }
4280
4369
  setBuilding() {
4281
4370
  this.phase = "building";
@@ -4341,7 +4430,7 @@ var BuildProgressRenderer = class {
4341
4430
  case "output":
4342
4431
  case "other": break;
4343
4432
  }
4344
- this.render();
4433
+ this.scheduleRender();
4345
4434
  }
4346
4435
  printRuntimeLine(line) {
4347
4436
  const parsed = parseBuildLine(line);
@@ -4362,18 +4451,30 @@ var BuildProgressRenderer = class {
4362
4451
  isStageComplete(stage) {
4363
4452
  return stage.steps.size >= stage.total;
4364
4453
  }
4454
+ /**
4455
+ * Schedule a render on the next tick — batches rapid processLine() calls
4456
+ * so we don't re-render for every single log line in a burst.
4457
+ */
4458
+ scheduleRender() {
4459
+ if (this.pendingRender) return;
4460
+ this.pendingRender = setTimeout(() => {
4461
+ this.pendingRender = null;
4462
+ this.render();
4463
+ }, 0);
4464
+ }
4365
4465
  render() {
4366
- if (this.renderLineCount > 0) process.stdout.write(`\x1b[${this.renderLineCount}A\x1b[J`);
4466
+ const buf = [];
4467
+ if (this.renderLineCount > 0) buf.push(`\x1b[${this.renderLineCount}A\x1b[J`);
4367
4468
  let lines = 0;
4368
4469
  const label = this.serviceName ? `BUILD ${chalk.dim(`(${this.serviceName})`)}` : "BUILD";
4369
- process.stdout.write(`${chalk.cyan.bold(` ${label}`)}\n`);
4470
+ buf.push(`${chalk.cyan.bold(` ${label}`)}\n`);
4370
4471
  lines++;
4371
4472
  for (const msg of this.platformMessages) {
4372
- process.stdout.write(` ${msg}\n`);
4473
+ buf.push(` ${msg}\n`);
4373
4474
  lines++;
4374
4475
  }
4375
4476
  if (this.stageOrder.length > 0) {
4376
- process.stdout.write("\n");
4477
+ buf.push("\n");
4377
4478
  lines++;
4378
4479
  }
4379
4480
  const maxNameLen = Math.max(...this.stageOrder.map((n) => n.length), 4);
@@ -4385,31 +4486,34 @@ var BuildProgressRenderer = class {
4385
4486
  const allDone = complete && !allCached;
4386
4487
  const bar = renderProgressBar(stage.steps.size, stage.total, allCached, allDone);
4387
4488
  const paddedName = chalk.bold(stageName.padEnd(maxNameLen));
4388
- process.stdout.write(` ${paddedName} ${bar}\n`);
4489
+ buf.push(` ${paddedName} ${bar}\n`);
4389
4490
  lines++;
4390
4491
  const sortedSteps = [...stage.steps.entries()].sort((a, b) => a[0] - b[0]);
4492
+ const spinnerChar = SPINNER_FRAMES[this.spinnerFrame];
4391
4493
  for (const [stepNum, command] of sortedSteps) {
4392
4494
  let stepStatus = "";
4393
4495
  for (const [bkNum, dockerStep] of stage.stepNumMap.entries()) if (dockerStep === stepNum) {
4394
4496
  if (stage.cachedStepNums.has(bkNum)) stepStatus = ` ${BRAND("◆")}`;
4395
4497
  else if (stage.doneStepNums.has(bkNum)) stepStatus = ` ${chalk.green("✓")}`;
4498
+ else stepStatus = ` ${chalk.cyan(spinnerChar)}`;
4396
4499
  break;
4397
4500
  }
4398
- process.stdout.write(` ${command}${stepStatus}\n`);
4501
+ buf.push(` ${command}${stepStatus}\n`);
4399
4502
  lines++;
4400
4503
  }
4401
4504
  if (i < this.stageOrder.length - 1) {
4402
- process.stdout.write("\n");
4505
+ buf.push("\n");
4403
4506
  lines++;
4404
4507
  }
4405
4508
  }
4406
4509
  if (this.spinnerInterval) {
4407
4510
  if (!(this.stageOrder.length > 0 && this.stageOrder.every((name) => this.isStageComplete(this.stages.get(name))))) {
4408
4511
  const frame = SPINNER_FRAMES[this.spinnerFrame];
4409
- process.stdout.write(`\n ${chalk.cyan(frame)} ${this.spinnerText}\n`);
4512
+ buf.push(`\n ${chalk.cyan(frame)} ${this.spinnerText}\n`);
4410
4513
  lines += 2;
4411
4514
  }
4412
4515
  }
4516
+ process.stdout.write(buf.join(""));
4413
4517
  this.renderLineCount = lines;
4414
4518
  }
4415
4519
  };
@@ -4571,7 +4675,7 @@ const LOGO_LINES = [
4571
4675
  ];
4572
4676
  const BRAND_COLOR = "#FF4D00";
4573
4677
  function getVersion() {
4574
- return "0.0.0-beta.20";
4678
+ return "0.0.0-beta.22";
4575
4679
  }
4576
4680
  function printBanner(subtitle) {
4577
4681
  const version = getVersion();
@@ -4626,6 +4730,21 @@ async function fetchDeployUrls(client, serviceId) {
4626
4730
  }
4627
4731
  }
4628
4732
  /**
4733
+ * Upload tarball to a shared S3 key and return the objectKey.
4734
+ * All deployments in this batch will reference the same source.
4735
+ */
4736
+ async function uploadSharedSource(client, projectId, tarPath) {
4737
+ const { uploadUrl, objectKey } = await client.deployments.getSharedUploadUrl({ projectId });
4738
+ const fileBuffer = readFileSync(tarPath);
4739
+ const putResponse = await fetch(uploadUrl, {
4740
+ method: "PUT",
4741
+ headers: { "Content-Type": "application/gzip" },
4742
+ body: fileBuffer
4743
+ });
4744
+ if (!putResponse.ok) throw new Error(`Upload falhou: ${putResponse.status}`);
4745
+ return objectKey;
4746
+ }
4747
+ /**
4629
4748
  * Deploy a single service using the full streamDeploymentLogs experience
4630
4749
  * (BuildProgressRenderer with progress bars, spinner, runtime logs).
4631
4750
  */
@@ -4639,14 +4758,9 @@ async function deploySingleService(service, options) {
4639
4758
  serviceId: service.serviceId,
4640
4759
  serviceConfig: service.serviceConfig
4641
4760
  }));
4642
- const { tarPath, tempDir: overlayTempDir } = await createServiceTarball(baseTarball, service.extraFiles);
4643
4761
  try {
4644
- await withRetry(() => uploadTarball(deployment.id, tarPath));
4762
+ await withRetry(() => uploadTarball(deployment.id, baseTarball.tarPath));
4645
4763
  } finally {
4646
- if (overlayTempDir) await rm(overlayTempDir, {
4647
- recursive: true,
4648
- force: true
4649
- }).catch(() => {});
4650
4764
  await cleanupTarball(baseTarball);
4651
4765
  }
4652
4766
  if (sizeMB > 5) info(`Upload concluído (${sizeMB} MB)`);
@@ -4661,15 +4775,22 @@ async function deploySingleService(service, options) {
4661
4775
  }
4662
4776
  /**
4663
4777
  * Deploy multiple services in parallel with progress tracking.
4778
+ * Uploads the source tarball once and shares it across all deployments.
4664
4779
  */
4665
4780
  async function deployMultipleServices(services, options) {
4666
- const { projectRoot, output } = options;
4781
+ const { projectRoot, projectId, output } = options;
4667
4782
  const client = await getClient();
4668
4783
  const isTTY = process.stdout.isTTY && !process.env.GITHUB_ACTIONS;
4669
4784
  setupSigintHandler();
4670
4785
  const sizeInBytes = await calculateDirectorySize(projectRoot);
4671
4786
  const sizeMB = Math.round(sizeInBytes / (1024 * 1024) * 10) / 10;
4672
4787
  const baseTarball = await createBaseTarball(projectRoot);
4788
+ let sharedObjectKey;
4789
+ try {
4790
+ sharedObjectKey = await withRetry(() => uploadSharedSource(client, projectId, baseTarball.tarPath));
4791
+ } finally {
4792
+ await cleanupTarball(baseTarball);
4793
+ }
4673
4794
  const progressMap = /* @__PURE__ */ new Map();
4674
4795
  const deploymentPromises = services.map(async (service) => {
4675
4796
  try {
@@ -4677,15 +4798,10 @@ async function deployMultipleServices(services, options) {
4677
4798
  serviceId: service.serviceId,
4678
4799
  serviceConfig: service.serviceConfig
4679
4800
  }));
4680
- const { tarPath, tempDir: overlayTempDir } = await createServiceTarball(baseTarball, service.extraFiles);
4681
- try {
4682
- await withRetry(() => uploadTarball(deployment.id, tarPath));
4683
- } finally {
4684
- if (overlayTempDir) await rm(overlayTempDir, {
4685
- recursive: true,
4686
- force: true
4687
- }).catch(() => {});
4688
- }
4801
+ await withRetry(() => client.deployments.startBuild({
4802
+ deploymentId: deployment.id,
4803
+ objectKey: sharedObjectKey
4804
+ }));
4689
4805
  trackDeployment(deployment.id);
4690
4806
  progressMap.set(service.serviceId, {
4691
4807
  serviceName: service.serviceName,
@@ -4711,9 +4827,7 @@ async function deployMultipleServices(services, options) {
4711
4827
  throw error;
4712
4828
  }
4713
4829
  });
4714
- const deployments = await Promise.allSettled(deploymentPromises);
4715
- await cleanupTarball(baseTarball);
4716
- const activeDeployments = deployments.filter((d) => d.status === "fulfilled").map((d) => d.value);
4830
+ const activeDeployments = (await Promise.allSettled(deploymentPromises)).filter((d) => d.status === "fulfilled").map((d) => d.value);
4717
4831
  if (activeDeployments.length === 0) {
4718
4832
  output.allUploadsFailed();
4719
4833
  process.exit(1);
@@ -4797,6 +4911,7 @@ async function deployMultipleServices(services, options) {
4797
4911
  * - Single service: uses the full streamDeploymentLogs experience
4798
4912
  * (BuildProgressRenderer with progress bars, spinner, runtime logs)
4799
4913
  * - Multiple services: parallel deploy with compact progress tracking
4914
+ * (single upload, shared source tarball)
4800
4915
  */
4801
4916
  async function deployServices(services, options) {
4802
4917
  if (services.length === 1) return deploySingleService(services[0], options);
@@ -4833,30 +4948,32 @@ var TtyOutput = class {
4833
4948
  this.prevLineCount = this.doRenderProgress(entries, this.prevLineCount);
4834
4949
  }
4835
4950
  doRenderProgress(progressMap, prevLineCount) {
4836
- for (let i = 0; i < prevLineCount; i++) process.stdout.write("\x1B[1A\x1B[2K");
4951
+ const buf = [];
4952
+ if (prevLineCount > 0) buf.push(`\x1b[${prevLineCount}A\x1b[J`);
4837
4953
  let lineCount = 0;
4838
4954
  for (const [, progress] of progressMap) {
4839
4955
  const icon = statusIcons[progress.status] || chalk.gray("○");
4840
4956
  const label = statusLabels[progress.status] || progress.status;
4841
- process.stdout.write(`${icon} ${chalk.bold(progress.serviceName)}: ${label}\n`);
4957
+ buf.push(`${icon} ${chalk.bold(progress.serviceName)}: ${label}\n`);
4842
4958
  lineCount++;
4843
4959
  if (progress.status === "BUILDING" || progress.status === "BUILD_FAILED") {
4844
4960
  const nonEmptyLines = progress.logLines.filter((l) => l.trim());
4845
4961
  if (nonEmptyLines.length > 0) {
4846
4962
  const tail = nonEmptyLines.slice(-3);
4847
4963
  for (const line of tail) {
4848
- process.stdout.write(` ${chalk.dim(line)}\n`);
4964
+ buf.push(` ${chalk.dim(line)}\n`);
4849
4965
  lineCount++;
4850
4966
  }
4851
4967
  } else if (progress.status === "BUILDING") {
4852
- process.stdout.write(` ${chalk.dim("Aguardando logs do build...")}\n`);
4968
+ buf.push(` ${chalk.dim("Aguardando logs do build...")}\n`);
4853
4969
  lineCount++;
4854
4970
  }
4855
4971
  } else if (progress.status === "QUEUED") {
4856
- process.stdout.write(` ${chalk.dim("Na fila para compilação...")}\n`);
4972
+ buf.push(` ${chalk.dim("Na fila para compilação...")}\n`);
4857
4973
  lineCount++;
4858
4974
  }
4859
4975
  }
4976
+ process.stdout.write(buf.join(""));
4860
4977
  return lineCount;
4861
4978
  }
4862
4979
  buildStart(_serviceName) {}
@@ -5088,7 +5205,6 @@ function resolveServiceConf(velozConfig, serviceId) {
5088
5205
  for (const [, conf] of Object.entries(velozConfig.services)) if (conf.id === serviceId) {
5089
5206
  const merged = mergeServiceWithDefaults(conf, velozConfig.defaults);
5090
5207
  const build = merged.build;
5091
- const isDockerfile = build?.method === "dockerfile";
5092
5208
  return {
5093
5209
  type: merged.type?.toUpperCase(),
5094
5210
  branch: merged.branch,
@@ -5096,14 +5212,9 @@ function resolveServiceConf(velozConfig, serviceId) {
5096
5212
  startCommand: merged.runtime?.command ?? void 0,
5097
5213
  preStartCommand: merged.runtime?.preStartCommand ?? void 0,
5098
5214
  port: merged.runtime?.port ?? void 0,
5099
- rootDirectory: merged.root,
5100
- docker: isDockerfile ? {
5101
- dockerfile: build.dockerfile ?? "Dockerfile",
5102
- context: build.context ?? merged.root ?? "."
5103
- } : void 0,
5215
+ rootDirectory: merged.root ?? "/",
5104
5216
  instanceCount: merged.resources?.instances ?? void 0,
5105
- cpuLimit: merged.resources?.cpu ?? void 0,
5106
- memoryLimit: merged.resources?.memory ?? void 0,
5217
+ size: merged.resources?.size ?? void 0,
5107
5218
  healthCheckPath: merged.runtime?.healthCheck?.path ?? null,
5108
5219
  aptPackages: build?.aptPackages ?? void 0,
5109
5220
  nodeVersion: build?.nodeVersion ?? void 0,
@@ -5117,6 +5228,55 @@ function resolveServiceConf(velozConfig, serviceId) {
5117
5228
  };
5118
5229
  }
5119
5230
  }
5231
+ function migrateResources(resources, label) {
5232
+ if (!resources.cpu && !resources.memory) return false;
5233
+ if (resources.size) {
5234
+ delete resources.cpu;
5235
+ delete resources.memory;
5236
+ return true;
5237
+ }
5238
+ if (resources.cpu && resources.memory) {
5239
+ const size = resolveServiceSizeFromResources(resources.cpu, resources.memory);
5240
+ if (size) {
5241
+ resources.size = size;
5242
+ delete resources.cpu;
5243
+ delete resources.memory;
5244
+ info(`${label}: recursos migrados para size "${size}"`);
5245
+ return true;
5246
+ }
5247
+ }
5248
+ warn(`${label}: campos "cpu"/"memory" removidos. Use "size" (basico, essencial, turbo, turbo-plus, nitro, nitro-plus).`);
5249
+ delete resources.cpu;
5250
+ delete resources.memory;
5251
+ return true;
5252
+ }
5253
+ /**
5254
+ * Migrate veloz.json from legacy cpu/memory fields to named size tiers.
5255
+ * Rewrites the file in-place if changes are made.
5256
+ */
5257
+ function migrateResourcesConfig() {
5258
+ const path = getConfigPath();
5259
+ let raw;
5260
+ try {
5261
+ raw = readFileSync(path, "utf-8");
5262
+ } catch {
5263
+ return;
5264
+ }
5265
+ const config = JSON.parse(raw);
5266
+ let changed = false;
5267
+ if (config.defaults?.resources) {
5268
+ if (migrateResources(config.defaults.resources, "defaults")) changed = true;
5269
+ }
5270
+ if (config.services) {
5271
+ for (const [key, svc] of Object.entries(config.services)) if (svc.resources) {
5272
+ if (migrateResources(svc.resources, `serviço "${key}"`)) changed = true;
5273
+ }
5274
+ }
5275
+ if (changed) {
5276
+ writeFileSync(path, JSON.stringify(config, null, 2), "utf-8");
5277
+ info("veloz.json atualizado para o novo formato de recursos.");
5278
+ }
5279
+ }
5120
5280
 
5121
5281
  //#endregion
5122
5282
  //#region src/lib/deploy-checks.ts
@@ -5541,7 +5701,7 @@ async function autoUpdate() {
5541
5701
  if (process.env.VELOZ_MCP === "true") return;
5542
5702
  const pm = detectPackageManager();
5543
5703
  if (!pm) return;
5544
- const currentVersion = "0.0.0-beta.20";
5704
+ const currentVersion = "0.0.0-beta.22";
5545
5705
  const latestVersion = await fetchLatestVersion();
5546
5706
  if (!latestVersion || latestVersion === currentVersion) return;
5547
5707
  const installCmd = getInstallCommand(pm, latestVersion);
@@ -5774,31 +5934,8 @@ const SERVICE_TYPE_LABELS = {
5774
5934
  WORKER: "Worker",
5775
5935
  DATABASE: "Banco de Dados"
5776
5936
  };
5777
- /**
5778
- * If a Dockerfile exists in a subdirectory (rootDirectory), copy it to tar root
5779
- * so BuildKit can find it. If no Dockerfile exists anywhere, return nothing —
5780
- * the server will generate one with nixpacks.
5781
- */
5782
- function prepareExtraFiles(_detection, serviceConfig) {
5783
- if (serviceConfig?.docker) {
5784
- const dockerfilePath = resolve(process.cwd(), serviceConfig.docker.dockerfile);
5785
- if (existsSync(dockerfilePath)) return [{
5786
- name: "Dockerfile",
5787
- content: readFileSync(dockerfilePath, "utf-8")
5788
- }];
5789
- }
5790
- if (existsSync(resolve(process.cwd(), "Dockerfile"))) return [];
5791
- const rootDir = serviceConfig?.rootDirectory || ".";
5792
- const serviceDockerfilePath = resolve(process.cwd(), rootDir, "Dockerfile");
5793
- if (rootDir !== "." && existsSync(serviceDockerfilePath)) return [{
5794
- name: "Dockerfile",
5795
- content: readFileSync(serviceDockerfilePath, "utf-8")
5796
- }];
5797
- return [];
5798
- }
5799
- async function computeExtraFilesForServices(services) {
5937
+ function prepareServicesForDeploy(services) {
5800
5938
  const velozConfig = loadConfig();
5801
- const results = [];
5802
5939
  const allWarnings = [];
5803
5940
  for (const svc of services) {
5804
5941
  const warnings = runPreDeployChecks(resolveServiceConf(velozConfig, svc.serviceId)?.rootDirectory || ".");
@@ -5811,17 +5948,15 @@ async function computeExtraFilesForServices(services) {
5811
5948
  console.log(chalk.yellow(`\n ${chalk.bold(service)}:`));
5812
5949
  printDeployWarnings(warnings);
5813
5950
  }
5814
- for (const svc of services) {
5951
+ return services.map((svc) => {
5815
5952
  const serviceConf = resolveServiceConf(velozConfig, svc.serviceId);
5816
- const detection = detectLocalRepo(serviceConf?.rootDirectory || ".");
5817
- warnIfEphemeralFsDetected(detection, serviceConf, svc.serviceName);
5818
- results.push({
5819
- ...svc,
5820
- serviceConfig: serviceConf,
5821
- extraFiles: prepareExtraFiles(detection, serviceConf)
5822
- });
5823
- }
5824
- return results;
5953
+ warnIfEphemeralFsDetected(detectLocalRepo(serviceConf?.rootDirectory || "."), serviceConf, svc.serviceName);
5954
+ return {
5955
+ serviceId: svc.serviceId,
5956
+ serviceName: svc.serviceName,
5957
+ serviceConfig: serviceConf
5958
+ };
5959
+ });
5825
5960
  }
5826
5961
  async function triggerDeploy(serviceId, serviceName, preDetection) {
5827
5962
  const sizeInBytes = await calculateDirectorySize(process.cwd());
@@ -5829,9 +5964,7 @@ async function triggerDeploy(serviceId, serviceName, preDetection) {
5829
5964
  const client = await getClient();
5830
5965
  const velozConfig = loadConfig();
5831
5966
  const serviceConf = resolveServiceConf(velozConfig, serviceId);
5832
- const detection = preDetection ?? detectLocalRepo();
5833
- warnIfEphemeralFsDetected(detection, serviceConf, serviceName ?? void 0);
5834
- const extraFiles = prepareExtraFiles(detection, serviceConf);
5967
+ warnIfEphemeralFsDetected(preDetection ?? detectLocalRepo(), serviceConf, serviceName ?? void 0);
5835
5968
  const warnings = runPreDeployChecks(serviceConf?.rootDirectory || ".");
5836
5969
  if (warnings.length > 0) printDeployWarnings(warnings);
5837
5970
  const spinUpload = spinner(serviceName ? `Fazendo upload ${chalk.bold(serviceName)}...` : "Fazendo upload do código...");
@@ -5842,7 +5975,7 @@ async function triggerDeploy(serviceId, serviceName, preDetection) {
5842
5975
  serviceConfig: serviceConf
5843
5976
  }));
5844
5977
  spinUpload.text = "Fazendo upload do código...";
5845
- await withRetry(() => uploadSource(deployment.id, process.cwd(), extraFiles));
5978
+ await withRetry(() => uploadSource(deployment.id, process.cwd()));
5846
5979
  spinUpload.stop();
5847
5980
  success(serviceName ? `Upload de ${chalk.bold(serviceName)} concluído` : "Upload concluído");
5848
5981
  setupSigintHandler();
@@ -6217,10 +6350,10 @@ async function createServiceFlow(projectId, projectName, repoName, opts = {}) {
6217
6350
  await deployServices(createdServices.map(({ service: service$1, app }) => ({
6218
6351
  serviceId: service$1.id,
6219
6352
  serviceName: app.name,
6220
- serviceConfig: resolveServiceConf(config$1, service$1.id),
6221
- extraFiles: prepareExtraFiles(detectLocalRepo(app.root), { rootDirectory: app.root })
6353
+ serviceConfig: resolveServiceConf(config$1, service$1.id)
6222
6354
  })), {
6223
6355
  projectRoot: process.cwd(),
6356
+ projectId: config$1.project.id,
6224
6357
  output: createDeployOutput()
6225
6358
  });
6226
6359
  return createdServices[createdServices.length - 1]?.service.id || "";
@@ -6501,10 +6634,10 @@ async function addServiceFlow(existingConfig, opts) {
6501
6634
  await deployServices(createdServices.map(({ service: service$1, app }) => ({
6502
6635
  serviceId: service$1.id,
6503
6636
  serviceName: app.name,
6504
- serviceConfig: resolveServiceConf(updatedConfig$1, service$1.id),
6505
- extraFiles: prepareExtraFiles(detectLocalRepo(app.root), { rootDirectory: app.root })
6637
+ serviceConfig: resolveServiceConf(updatedConfig$1, service$1.id)
6506
6638
  })), {
6507
6639
  projectRoot: process.cwd(),
6640
+ projectId: updatedConfig$1.project.id,
6508
6641
  output: createDeployOutput()
6509
6642
  });
6510
6643
  return;
@@ -6650,6 +6783,7 @@ function registerDeploy(cli$1) {
6650
6783
  });
6651
6784
  }
6652
6785
  async function* mcpDeployFlow(opts) {
6786
+ migrateResourcesConfig();
6653
6787
  const configuredServices = await findServicesFromConfig();
6654
6788
  if (configuredServices.length === 0) throw new Error("Nenhum serviço configurado. Execute 'veloz deploy' no terminal para configurar o projeto primeiro.");
6655
6789
  const currentConfig = loadConfig();
@@ -6669,8 +6803,9 @@ async function* mcpDeployFlow(opts) {
6669
6803
  }
6670
6804
  servicesToProcess = [found];
6671
6805
  }
6672
- const results = await deployServices(await computeExtraFilesForServices(servicesToProcess), {
6806
+ const results = await deployServices(await prepareServicesForDeploy(servicesToProcess), {
6673
6807
  projectRoot: process.cwd(),
6808
+ projectId: currentConfig.project.id,
6674
6809
  output: createDeployOutput()
6675
6810
  });
6676
6811
  for (const result of results) yield {
@@ -6685,6 +6820,7 @@ async function cliDeployFlow(opts) {
6685
6820
  await autoUpdate();
6686
6821
  await requireAuth$1({ nonInteractive: opts.yes });
6687
6822
  await resolveOrgIfNeeded(opts.yes);
6823
+ migrateResourcesConfig();
6688
6824
  const activeEnv = getActiveEnv();
6689
6825
  if (activeEnv) {
6690
6826
  const rawConfig = loadRawConfig();
@@ -6723,8 +6859,9 @@ async function cliDeployFlow(opts) {
6723
6859
  const dbHint = dbNames.length > 0 ? `\n\nBancos de dados:\n${dbNames.map((n) => ` • ${n} (banco de dados)`).join("\n")}` : "";
6724
6860
  throw new Error(`Serviço '${opts.service}' não encontrado.\n\nServiços disponíveis:\n${available}${dbHint}`);
6725
6861
  }
6726
- await deployServices(await computeExtraFilesForServices([found]), {
6862
+ await deployServices(await prepareServicesForDeploy([found]), {
6727
6863
  projectRoot: process.cwd(),
6864
+ projectId: currentConfig.project.id,
6728
6865
  output: createDeployOutput()
6729
6866
  });
6730
6867
  return;
@@ -6742,8 +6879,9 @@ async function cliDeployFlow(opts) {
6742
6879
  return;
6743
6880
  }
6744
6881
  }
6745
- await deployServices(await computeExtraFilesForServices(configuredServices), {
6882
+ await deployServices(await prepareServicesForDeploy(configuredServices), {
6746
6883
  projectRoot: process.cwd(),
6884
+ projectId: currentConfig.project.id,
6747
6885
  output: createDeployOutput()
6748
6886
  });
6749
6887
  return;
@@ -6761,8 +6899,9 @@ async function cliDeployFlow(opts) {
6761
6899
  info("Nenhum serviço selecionado.");
6762
6900
  return;
6763
6901
  }
6764
- await deployServices(await computeExtraFilesForServices(selectedServices), {
6902
+ await deployServices(await prepareServicesForDeploy(selectedServices), {
6765
6903
  projectRoot: process.cwd(),
6904
+ projectId: currentConfig.project.id,
6766
6905
  output: createDeployOutput()
6767
6906
  });
6768
6907
  return;
@@ -7117,7 +7256,7 @@ async function pruneRemovedEntries(config, existingConfig, services, databases)
7117
7256
  //#region src/index.ts
7118
7257
  if (process.argv.includes("--mcp")) process.env.VELOZ_MCP = "true";
7119
7258
  const cli = Cli.create("veloz", {
7120
- version: "0.0.0-beta.20",
7259
+ version: "0.0.0-beta.22",
7121
7260
  description: "CLI da plataforma Veloz — deploy rápido para o Brasil",
7122
7261
  env: z.object({ VELOZ_ENV: z.string().optional().describe("Ambiente alvo (ex: preview, staging)") })
7123
7262
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "onveloz",
3
- "version": "0.0.0-beta.20",
3
+ "version": "0.0.0-beta.22",
4
4
  "description": "CLI da plataforma Veloz — deploy rápido para o Brasil",
5
5
  "keywords": [
6
6
  "brasil",