@aws/ml-container-creator 0.2.1 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/bin/cli.js +88 -86
  2. package/config/bootstrap-stack.json +211 -0
  3. package/config/parameter-schema.json +88 -0
  4. package/infra/ci-harness/bin/ci-harness.ts +26 -0
  5. package/infra/ci-harness/buildspec.yml +352 -0
  6. package/infra/ci-harness/cdk.json +27 -0
  7. package/infra/ci-harness/lambda/scanner/index.ts +199 -0
  8. package/infra/ci-harness/lib/ci-harness-stack.ts +609 -0
  9. package/infra/ci-harness/package-lock.json +3979 -0
  10. package/infra/ci-harness/package.json +32 -0
  11. package/infra/ci-harness/tsconfig.json +38 -0
  12. package/package.json +13 -3
  13. package/src/app.js +318 -318
  14. package/src/copy-tpl.js +19 -19
  15. package/src/lib/asset-manager.js +74 -74
  16. package/src/lib/aws-profile-parser.js +45 -45
  17. package/src/lib/bootstrap-command-handler.js +560 -547
  18. package/src/lib/bootstrap-config.js +45 -45
  19. package/src/lib/ci-register-helpers.js +19 -19
  20. package/src/lib/ci-report-helpers.js +37 -37
  21. package/src/lib/ci-stage-helpers.js +49 -49
  22. package/src/lib/comment-generator.js +4 -4
  23. package/src/lib/config-manager.js +105 -105
  24. package/src/lib/deployment-config-resolver.js +10 -10
  25. package/src/lib/deployment-registry.js +153 -153
  26. package/src/lib/engine-prefix-resolver.js +8 -8
  27. package/src/lib/key-value-parser.js +6 -6
  28. package/src/lib/manifest-cli.js +108 -108
  29. package/src/lib/prompt-runner.js +224 -224
  30. package/src/lib/prompts.js +121 -121
  31. package/src/lib/registry-command-handler.js +174 -174
  32. package/src/lib/registry-loader.js +52 -52
  33. package/src/lib/sensitive-redactor.js +9 -9
  34. package/src/lib/template-engine.js +1 -1
  35. package/src/lib/template-manager.js +62 -62
  36. package/src/prompt-adapter.js +18 -18
@@ -35,7 +35,7 @@ function loadInstanceTypeRegistry() {
  ? entry.accelerator || entry.hardware
  : 'None',
  useCase: entry.notes || entry.tags?.join(', ') || '',
- category: entry.category || 'cpu',
+ category: entry.category || 'cpu'
  };
  }
  return registry;
@@ -192,8 +192,8 @@ const enginePrompts = [
  { name: 'TensorFlow', value: 'tensorflow' }
  ],
  when: (answers) => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- return architecture === 'http'
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ return architecture === 'http';
  }
  }
  ];
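
The `when` guard above is the first instance of a derivation pattern that recurs throughout this file: the composite deploymentConfig answer is split into an architecture prefix and a backend suffix. A minimal standalone sketch of that split, using config values that appear elsewhere in this diff:

// Illustrative sketch only; mirrors the derivation used in the prompts above.
const deriveParts = (answers) => {
  const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
  const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');
  return { architecture, backend };
};

deriveParts({ deploymentConfig: 'triton-fil' });
// => { architecture: 'triton', backend: 'fil' }
deriveParts({ deploymentConfig: 'transformers-tensorrt-llm' });
// => { architecture: 'transformers', backend: 'tensorrt-llm' }; slice(1).join('-') keeps multi-part backends intact
deriveParts({ architecture: 'http' });
// => { architecture: 'http', backend: undefined }; explicit answers take precedence
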
@@ -245,75 +245,75 @@ const modelFormatPrompts = [
  message: 'In which format is your model serialized?',
  choices: (answers) => {
  // Derive architecture from deploymentConfig
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  // For http architecture, use engine to determine formats
  if (architecture === 'http') {
- const engine = answers.engine
+ const engine = answers.engine;
  const formatMap = {
  'xgboost': ['json', 'model', 'ubj'],
  'sklearn': ['pkl', 'joblib'],
  'tensorflow': ['keras', 'h5', 'SavedModel']
- }
- return formatMap[engine] || []
+ };
+ return formatMap[engine] || [];
  }

  // For triton architecture, use backend-specific formats
  if (architecture === 'triton') {
  // FIL backend has multiple format choices
  if (backend === 'fil') {
- return ['xgboost_json', 'xgboost_ubj', 'lightgbm_txt']
+ return ['xgboost_json', 'xgboost_ubj', 'lightgbm_txt'];
  }
  // Python backend has multiple format choices
  if (backend === 'python') {
- return ['pkl', 'joblib', 'custom']
+ return ['pkl', 'joblib', 'custom'];
  }
  // Other Triton backends have auto-set formats (handled in when clause)
- return []
+ return [];
  }

  // Legacy support for old format (should not be reached with new configs)
- const framework = answers.framework || architecture
+ const framework = answers.framework || architecture;
  const formatMap = {
  'xgboost': ['json', 'model', 'ubj'],
  'sklearn': ['pkl', 'joblib'],
  'tensorflow': ['keras', 'h5', 'SavedModel']
- }
- return formatMap[framework] || []
+ };
+ return formatMap[framework] || [];
  },
  when: answers => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  // Skip for transformers (they use HF Hub)
  if (architecture === 'transformers') {
- return false
+ return false;
  }

  // Skip for diffusors (they use HF Hub)
  if (architecture === 'diffusors') {
- return false
+ return false;
  }

  // For http architecture, always show
  if (architecture === 'http') {
- return true
+ return true;
  }

  // For triton architecture, only show for backends with multiple format choices
  if (architecture === 'triton') {
  // FIL and Python backends have multiple format choices
  if (backend === 'fil' || backend === 'python') {
- return true
+ return true;
  }
  // Other backends have auto-set formats
- return false
+ return false;
  }

  // Legacy support
- const framework = answers.framework || architecture
- return framework !== 'transformers'
+ const framework = answers.framework || architecture;
+ return framework !== 'transformers';
  }
  },
  {
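
For the http path above, the format choices come from a plain lookup on the selected engine; unknown engines fall back to an empty list. The same map in isolation:

// Same engine-to-format map as in the choices callback above.
const formatMap = {
  'xgboost': ['json', 'model', 'ubj'],
  'sklearn': ['pkl', 'joblib'],
  'tensorflow': ['keras', 'h5', 'SavedModel']
};

formatMap['sklearn'];          // ['pkl', 'joblib']
formatMap['pytorch'] || [];    // [] for engines the map does not know about
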
@@ -323,55 +323,55 @@ const modelFormatPrompts = [
  choices: (answers) => {
  // Use MCP model-picker choices when available
  if (answers._mcpModelChoices && answers._mcpModelChoices.length > 0) {
- return [...answers._mcpModelChoices, 'Custom (enter manually)']
+ return [...answers._mcpModelChoices, 'Custom (enter manually)'];
  }
  // Fallback to hardcoded defaults based on architecture
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
  if (architecture === 'diffusors') {
  return [
  'stabilityai/stable-diffusion-3.5-medium',
  'black-forest-labs/FLUX.1-schnell',
  'black-forest-labs/FLUX.1-dev',
  'Custom (enter manually)'
- ]
+ ];
  }
  return [
  'openai/gpt-oss-20b',
  'meta-llama/Llama-3.2-3B-Instruct',
  'meta-llama/Llama-3.2-1B-Instruct',
  'Custom (enter manually)'
- ]
+ ];
  },
  default: (answers) => {
  if (answers._mcpModelChoices && answers._mcpModelChoices.length > 0) {
- return answers._mcpModelChoices[0]
+ return answers._mcpModelChoices[0];
  }
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
  if (architecture === 'diffusors') {
- return 'stabilityai/stable-diffusion-3.5-medium'
+ return 'stabilityai/stable-diffusion-3.5-medium';
  }
- return 'openai/gpt-oss-20b'
+ return 'openai/gpt-oss-20b';
  },
  when: answers => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  // Show for transformers architecture
  if (architecture === 'transformers') {
- return true
+ return true;
  }

  // Show for diffusors architecture (reuse HuggingFace model selection)
  if (architecture === 'diffusors') {
- return true
+ return true;
  }

  // Show for Triton LLM backends (vllm, tensorrtllm)
  if (architecture === 'triton' && (backend === 'vllm' || backend === 'tensorrtllm')) {
- return true
+ return true;
  }

- return false
+ return false;
  }
  },
  {
@@ -380,34 +380,34 @@ const modelFormatPrompts = [
  message: 'Enter the model path:',
  validate: (input) => {
  if (!input || input.trim() === '') {
- return 'Model name is required'
+ return 'Model name is required';
  }
  // Basic validation - must contain a slash (org/model, hub/model, s3://path, etc.)
  if (!input.includes('/')) {
- return 'Please use the full model path (e.g., microsoft/DialoGPT-medium, jumpstart-hub://my-hub/my-model)'
+ return 'Please use the full model path (e.g., microsoft/DialoGPT-medium, jumpstart-hub://my-hub/my-model)';
  }
- return true
+ return true;
  },
  when: answers => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  // Show for transformers with custom model selection
  if (architecture === 'transformers' && answers.modelName === 'Custom (enter manually)') {
- return true
+ return true;
  }

  // Show for diffusors with custom model selection
  if (architecture === 'diffusors' && answers.modelName === 'Custom (enter manually)') {
- return true
+ return true;
  }

  // Show for Triton LLM backends with custom model selection
  if (architecture === 'triton' && (backend === 'vllm' || backend === 'tensorrtllm') && answers.modelName === 'Custom (enter manually)') {
- return true
+ return true;
  }

- return false
+ return false;
  }
  }
  ];
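
The validate callback in this hunk only requires a non-empty value containing a slash; any org/model, hub/model, or s3:// style path passes. A short sketch of that rule on sample inputs (the helper name and the second input are illustrative):

// Same rule as the model-path prompt above, extracted for illustration.
const validateModelPath = (input) => {
  if (!input || input.trim() === '') {
    return 'Model name is required';
  }
  if (!input.includes('/')) {
    return 'Please use the full model path (e.g., microsoft/DialoGPT-medium, jumpstart-hub://my-hub/my-model)';
  }
  return true;
};

validateModelPath('meta-llama/Llama-3.2-1B-Instruct');   // true
validateModelPath('my-model');                            // returns the "full model path" error string
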
@@ -433,8 +433,8 @@ const modelLoadStrategyPrompts = [
  ],
  default: 'runtime',
  when: (answers) => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- return architecture === 'transformers' || architecture === 'diffusors'
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ return architecture === 'transformers' || architecture === 'diffusors';
  }
  }
  ];
@@ -476,58 +476,58 @@ const hfTokenPrompts = [
  name: 'hfToken',
  message: 'HuggingFace token (enter token, "$HF_TOKEN" for env var, or leave empty):',
  when: (answers) => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  // Prompt for transformers architecture
- const isTransformers = architecture === 'transformers'
+ const isTransformers = architecture === 'transformers';

  // Prompt for diffusors architecture (uses HuggingFace Hub)
- const isDiffusors = architecture === 'diffusors'
+ const isDiffusors = architecture === 'diffusors';

  // Prompt for Triton LLM backends (vllm, tensorrtllm)
  // Requirements: 9.1, 9.2
- const isTritonLlm = architecture === 'triton' && (backend === 'vllm' || backend === 'tensorrtllm')
+ const isTritonLlm = architecture === 'triton' && (backend === 'vllm' || backend === 'tensorrtllm');

  if (!isTransformers && !isDiffusors && !isTritonLlm) {
- return false
+ return false;
  }

  // Skip HF token prompt for non-HuggingFace model sources
  // (S3, JumpStart, Private Hub, Registry models don't need HF auth)
- const modelSource = answers.modelSource
+ const modelSource = answers.modelSource;
  if (modelSource && modelSource !== 'huggingface') {
- return false
+ return false;
  }

  // Display security warning before prompting
- console.log('\n🔐 HuggingFace Authentication')
- console.log(' Many models (e.g. Llama, Mistral) are gated and require a token.')
- console.log('⚠️ Security Note: The token will be baked into the Docker image.')
- console.log(' Anyone with access to the image can extract the token using \'docker inspect\'.')
- console.log(' For CI/CD pipelines, use "$HF_TOKEN" to reference an environment variable.')
- console.log(' This keeps the token out of the image and allows rotation without rebuilding.\n')
+ console.log('\n🔐 HuggingFace Authentication');
+ console.log(' Many models (e.g. Llama, Mistral) are gated and require a token.');
+ console.log('⚠️ Security Note: The token will be baked into the Docker image.');
+ console.log(' Anyone with access to the image can extract the token using \'docker inspect\'.');
+ console.log(' For CI/CD pipelines, use "$HF_TOKEN" to reference an environment variable.');
+ console.log(' This keeps the token out of the image and allows rotation without rebuilding.\n');

- return true
+ return true;
  },
  validate: (input) => {
  // Empty is valid (not all models require auth)
  if (!input || input.trim() === '') {
- return true
+ return true;
  }

  // $HF_TOKEN reference is valid
  if (input.trim() === '$HF_TOKEN') {
- return true
+ return true;
  }

  // Direct token should start with hf_ (warning only, not blocking)
  if (!input.startsWith('hf_')) {
- console.warn('\n⚠️ Warning: HuggingFace tokens typically start with "hf_"')
- console.warn(' If this is intentional, you can ignore this warning.')
+ console.warn('\n⚠️ Warning: HuggingFace tokens typically start with "hf_"');
+ console.warn(' If this is intentional, you can ignore this warning.');
  }

- return true // Always return true (non-blocking validation)
+ return true; // Always return true (non-blocking validation)
  }
  }
  ];
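
The hfToken prompt accepts either a literal token or the placeholder string "$HF_TOKEN" and never blocks on validation. How the placeholder is resolved afterwards is outside this hunk; a hedged sketch of one way a build step could handle it (resolveHfToken and its env handling are assumptions, not code from this package):

// Hypothetical helper, not part of this package: one way to resolve the
// "$HF_TOKEN" placeholder collected by the prompt at build time.
function resolveHfToken(answerValue) {
  if (!answerValue || answerValue.trim() === '') {
    return undefined;                    // no auth needed for ungated models
  }
  if (answerValue.trim() === '$HF_TOKEN') {
    return process.env.HF_TOKEN;         // keeps the literal token out of the image
  }
  return answerValue.trim();             // literal token supplied interactively
}
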
@@ -538,42 +538,42 @@ const ngcApiKeyPrompts = [
  name: 'ngcApiKey',
  message: 'NVIDIA NGC API key (enter key, "$NGC_API_KEY" for env var, or leave empty):',
  when: (answers) => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  // Never prompt for NGC key for Triton configs (public images)
  // Requirements: 9.2
  if (architecture === 'triton') {
- return false
+ return false;
  }

  // Never prompt for NGC key for diffusors configs (public Docker Hub images)
  if (architecture === 'diffusors') {
- return false
+ return false;
  }

  // Only prompt for transformers-tensorrt-llm
  if (architecture === 'transformers' && backend === 'tensorrt-llm') {
- console.log('\n🔐 NVIDIA NGC Authentication')
- console.log(' TensorRT-LLM base images are hosted on NVIDIA NGC and require an API key.')
- console.log(' 1. Create account at: https://ngc.nvidia.com/')
- console.log(' 2. Generate API key in account settings')
- console.log(' For CI/CD pipelines, use "$NGC_API_KEY" to reference an environment variable.\n')
- return true
+ console.log('\n🔐 NVIDIA NGC Authentication');
+ console.log(' TensorRT-LLM base images are hosted on NVIDIA NGC and require an API key.');
+ console.log(' 1. Create account at: https://ngc.nvidia.com/');
+ console.log(' 2. Generate API key in account settings');
+ console.log(' For CI/CD pipelines, use "$NGC_API_KEY" to reference an environment variable.\n');
+ return true;
  }

- return false
+ return false;
  },
  validate: (input) => {
  if (!input || input.trim() === '') {
- return true
+ return true;
  }

  if (input.trim() === '$NGC_API_KEY') {
- return true
+ return true;
  }

- return true
+ return true;
  }
  }
  ];
@@ -585,31 +585,31 @@ const modulePrompts = [
  message: 'Include sample Abalone classifier?',
  default: false,
  when: (answers) => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  // Never for transformers
  if (architecture === 'transformers') {
- return false
+ return false;
  }

  // Never for diffusors (diffusion models cannot be trained inline)
  if (architecture === 'diffusors') {
- return false
+ return false;
  }

  // For Triton, check if backend supports sample model
  if (architecture === 'triton') {
  // Triton LLM backends don't support sample model
  if (backend === 'vllm' || backend === 'tensorrtllm' || backend === 'pytorch') {
- return false
+ return false;
  }
  // Other Triton backends support sample model
- return true
+ return true;
  }

  // For http architecture, always show
- return true
+ return true;
  }
  },
  {
@@ -617,31 +617,31 @@ const modulePrompts = [
  name: 'testTypes',
  message: 'Test type?',
  choices: (answers) => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  // Transformers and Triton LLM backends only support hosted endpoint tests
  if (architecture === 'transformers') {
- return ['hosted-model-endpoint']
+ return ['hosted-model-endpoint'];
  }
  if (architecture === 'triton' && (backend === 'vllm' || backend === 'tensorrtllm')) {
- return ['hosted-model-endpoint']
+ return ['hosted-model-endpoint'];
  }

- return ['local-model-cli', 'local-model-server', 'hosted-model-endpoint']
+ return ['local-model-cli', 'local-model-server', 'hosted-model-endpoint'];
  },
  default: (answers) => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
- const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-')
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
+ const backend = answers.backend || answers.deploymentConfig?.split('-').slice(1).join('-');

  if (architecture === 'transformers') {
- return ['hosted-model-endpoint']
+ return ['hosted-model-endpoint'];
  }
  if (architecture === 'triton' && (backend === 'vllm' || backend === 'tensorrtllm')) {
- return ['hosted-model-endpoint']
+ return ['hosted-model-endpoint'];
  }

- return ['local-model-cli', 'local-model-server', 'hosted-model-endpoint']
+ return ['local-model-cli', 'local-model-server', 'hosted-model-endpoint'];
  }
  }
  ];
@@ -662,13 +662,13 @@ const infraRegionAndTargetPrompts = [
  message: 'Target AWS region?',
  choices: (answers) => {
  // If a bootstrap profile set a region, include it in choices
- const bootstrapRegion = answers._bootstrapRegion
- const choices = ['us-east-1']
+ const bootstrapRegion = answers._bootstrapRegion;
+ const choices = ['us-east-1'];
  if (bootstrapRegion && bootstrapRegion !== 'us-east-1') {
- choices.unshift({ name: `${bootstrapRegion} (from bootstrap profile)`, value: bootstrapRegion })
+ choices.unshift({ name: `${bootstrapRegion} (from bootstrap profile)`, value: bootstrapRegion });
  }
- choices.push({ name: 'Custom...', value: 'custom' })
- return choices
+ choices.push({ name: 'Custom...', value: 'custom' });
+ return choices;
  },
  default: (answers) => answers._bootstrapRegion || 'us-east-1'
  },
@@ -1045,16 +1045,16 @@ const destinationPrompts = [
  */
  function formatImageChoices(entries, isTransformer) {
  return entries.map(entry => {
- const cuda = entry.labels.cuda_version || '-'
- const python = entry.labels.python_version || '-'
- const date = entry.created.slice(0, 10)
+ const cuda = entry.labels.cuda_version || '-';
+ const python = entry.labels.python_version || '-';
+ const date = entry.created.slice(0, 10);

  const name = isTransformer
  ? `${entry.repository.padEnd(30)} ${entry.tag.padEnd(16)} ${entry.architecture.padEnd(7)} ${cuda.padEnd(6)} ${python.padEnd(8)} ${date}`
- : `${entry.repository.padEnd(30)} ${entry.tag.padEnd(16)} ${entry.architecture.padEnd(7)} ${python.padEnd(8)} ${date}`
+ : `${entry.repository.padEnd(30)} ${entry.tag.padEnd(16)} ${entry.architecture.padEnd(7)} ${python.padEnd(8)} ${date}`;

- return { name, value: entry.image }
- })
+ return { name, value: entry.image };
+ });
  }

  /**
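
formatImageChoices builds fixed-width labels with String.prototype.padEnd so the repository, tag, architecture, Python version, and date columns line up in the prompt list. A minimal sketch with a made-up registry entry (field names mirror the ones used above):

// Entry values are invented for illustration.
const entry = {
  repository: 'my-team/python-base',
  tag: '3.11-slim',
  architecture: 'x86_64',
  labels: { python_version: '3.11' },
  created: '2024-05-01T12:00:00Z',
  image: 'my-team/python-base:3.11-slim'
};

const python = entry.labels.python_version || '-';
const date = entry.created.slice(0, 10);   // '2024-05-01'
const name = `${entry.repository.padEnd(30)} ${entry.tag.padEnd(16)} ${entry.architecture.padEnd(7)} ${python.padEnd(8)} ${date}`;
console.log({ name, value: entry.image }); // column-aligned label plus the image reference as the value
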
@@ -1068,12 +1068,12 @@ const baseImageSearchPrompts = [
  message: '🔌 Search for a Python base image (e.g. "3.11", "3.10", or leave empty for all):',
  default: '',
  when: (answers) => {
- const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0]
+ const architecture = answers.architecture || answers.deploymentConfig?.split('-')[0];
  // Skip for transformers (uses model-server images) and triton (uses NGC images)
- return architecture !== 'transformers' && architecture !== 'triton'
+ return architecture !== 'transformers' && architecture !== 'triton';
  }
  }
- ]
+ ];

  /**
  * Base image selection prompt (all frameworks)
@@ -1085,11 +1085,11 @@ const baseImagePrompts = [
  name: 'baseImage',
  message: 'Select base container image:',
  choices: (answers) => {
- const mcpChoices = answers._mcpBaseImageChoices || []
- return [...mcpChoices, { name: 'Custom (enter your own)', value: 'custom' }]
+ const mcpChoices = answers._mcpBaseImageChoices || [];
+ return [...mcpChoices, { name: 'Custom (enter your own)', value: 'custom' }];
  },
  when: (answers) => {
- return answers._mcpBaseImageChoices && answers._mcpBaseImageChoices.length > 0
+ return answers._mcpBaseImageChoices && answers._mcpBaseImageChoices.length > 0;
  }
  },
  {
@@ -1098,17 +1098,17 @@ const baseImagePrompts = [
  message: 'Enter custom base container image (e.g. myrepo/myimage:v1):',
  validate: (input) => {
  if (!input || input.trim() === '') {
- return 'Base image is required'
+ return 'Base image is required';
  }
- const pattern = /^[a-zA-Z0-9][a-zA-Z0-9._\-\/]*(:[a-zA-Z0-9._\-]+)?$/
+ const pattern = /^[a-zA-Z0-9][a-zA-Z0-9._\-/]*(:[a-zA-Z0-9._-]+)?$/;
  if (!pattern.test(input.trim())) {
- return 'Invalid image format. Expected: [registry/]repository[:tag]'
+ return 'Invalid image format. Expected: [registry/]repository[:tag]';
  }
- return true
+ return true;
  },
  when: (answers) => answers.baseImage === 'custom'
  }
- ]
+ ];

  export {
  deploymentConfigPrompts,
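
The regex change in this final hunk drops redundant escapes (\/ in the first character class and \- in the tag class) without changing what the pattern accepts: an image reference of the form [registry/]repository[:tag]. A quick check against sample references (the image names are illustrative):

// Same pattern as the custom base-image validator above; inputs are illustrative.
const pattern = /^[a-zA-Z0-9][a-zA-Z0-9._\-/]*(:[a-zA-Z0-9._-]+)?$/;

pattern.test('myrepo/myimage:v1');        // true
pattern.test('python:3.11-slim');         // true
pattern.test(':no-repository');           // false, must start with an alphanumeric character
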