@salesforce/plugin-agent 1.27.3 → 1.27.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
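
For orientation, the command definitions added in this diff describe an Agent Script authoring workflow: generate an agent spec YAML file, generate an authoring bundle from it, then publish the bundle to an org. Below is a minimal sketch of that flow using only flags that appear in the manifest hunks that follow; the `sf` binary name and the example values (file names, labels, API names) are illustrative assumptions, not part of the diff.

    # 1. Generate an agent spec YAML file ("agent generate agent-spec").
    sf agent generate agent-spec --type customer \
      --role "Field customer complaints and manage employee schedules." \
      --company-name "Coral Cloud Resorts" \
      --output-file specs/resortManagerAgent.yaml

    # 2. Generate an authoring bundle (aiAuthoringBundle) from the spec ("agent generate authoring-bundle").
    sf agent generate authoring-bundle --spec specs/resortManagerAgent.yaml --name "Resort Manager"

    # 3. Edit the generated Agent Script (.agent) file, optionally converse with it using "agent preview",
    #    then publish the bundle to the org ("agent publish authoring-bundle").
    sf agent publish authoring-bundle --api-name Resort_Manager
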
@@ -339,14 +339,16 @@
339
339
  "preview:agent"
340
340
  ]
341
341
  },
342
- "agent:test:create": {
342
+ "agent:generate:agent-spec": {
343
343
  "aliases": [],
344
344
  "args": {},
345
- "description": "To run this command, you must have an agent test spec file, which is a YAML file that lists the test cases for testing a specific agent. Use the \"agent generate test-spec\" CLI command to generate a test spec file. Then specify the file to this command with the --spec flag, or run this command with no flags to be prompted.\n\nWhen this command completes, your org contains the new agent test, which you can view and edit using the Testing Center UI. This command also retrieves the metadata component (AiEvaluationDefinition) associated with the new test to your local Salesforce DX project and displays its filename.\n\nAfter you've created the test in the org, use the \"agent test run\" command to run it.",
345
+ "description": "An agent spec is a YAML-formatted file that contains basic information about the agent, such as its role, company description, and an AI-generated list of topics based on this information. Topics define the range of jobs your agent can handle.\n\nUse flags, such as --role and --company-description, to provide details about your company and the role that the agent plays in your company. If you prefer, you can also be prompted for the basic information; use --full-interview to be prompted for all required and optional properties. Upon command execution, the large language model (LLM) associated with your org uses the provided information to generate a list of topics for the agent. Because the LLM uses the company and role information to generate the topics, we recommend that you provide accurate, complete, and specific details so the LLM generates the best and most relevant topics. Once generated, you can edit the spec file; for example, you can remove topics that don't apply or change a topic's description.\n\nYou can also iterate the spec generation process by using the --spec flag to pass an existing agent spec file to this command, and then using the --role, --company-description, etc, flags to refine your agent properties. Iteratively improving the description of your agent allows the LLM to generate progressively better topics.\n\nYou can also specify other agent properties, such as a custom prompt template, how to ground the prompt template to add context to the agent's prompts, the tone of the prompts, and the username of a user in the org to assign to the agent.\n\nWhen your agent spec is ready, generate an authoring bundle from it by passing the spec file to the --spec flag of the \"agent generate authoring-bundle\" CLI command. An authoring bundle is a metadata type that contains an Agent Script file, which is the blueprint for an agent. (While not recommended, you can also use the agent spec file to immediately create an agent with the \"agent create\" command. We don't recommend this workflow because these types of agents don't use Agent Script, and are thus less flexible and more difficult to maintain.)",
346
346
  "examples": [
347
- "Create an agent test interactively and be prompted for the test spec and API name of the test in the org; use the default org:\n<%= config.bin %> <%= command.id %>",
348
- "Create an agent test and use flags to specify all required information; if a test with same API name already exists in the org, overwrite it without confirmation. Use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --force-overwrite --target-org my-org",
349
- "Preview what the agent test metadata (AiEvaluationDefinition) looks like without deploying it to your default org:\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --preview"
347
+ "Generate an agent spec in the default location and use flags to specify the agent properties, such as its role and your company details; use your default org:\n<%= config.bin %> <%= command.id %> --type customer --role \"Field customer complaints and manage employee schedules.\" --company-name \"Coral Cloud Resorts\" --company-description \"Provide customers with exceptional destination activities, unforgettable experiences, and reservation services.\"",
348
+ "Generate an agent spec by being prompted for the required agent properties and generate a maxiumum of 5 topics; write the generated file to the \"specs/resortManagerSpec.yaml\" file and use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --max-topics 5 --output-file specs/resortManagerAgent.yaml --target-org my-org",
349
+ "Be prompted for all required and optional agent properties; use your default org:\n<%= config.bin %> <%= command.id %> --full-interview",
350
+ "Specify an existing agent spec file called \"specs/resortManagerAgent.yaml\", and then overwrite it with a new version that contains newly AI-generated topics based on the updated role information passed in with the --role flag:\n<%= config.bin %> <%= command.id %> --spec specs/resortManagerAgent.yaml --output-file specs/resortManagerAgent.yaml --role \"Field customer complaints, manage employee schedules, and ensure all resort operations are running smoothly\"",
351
+ "Specify that the conversational tone of the agent is formal and to attach the \"resortmanager@myorg.com\" username to it; be prompted for the required properties and use your default org:\n<%= config.bin %> <%= command.id %> --tone formal --agent-user resortmanager@myorg.com"
350
352
  ],
351
353
  "flags": {
352
354
  "json": {
@@ -364,20 +366,6 @@
364
366
  "multiple": false,
365
367
  "type": "option"
366
368
  },
367
- "api-name": {
368
- "name": "api-name",
369
- "summary": "API name of the new test; the API name must not exist in the org.",
370
- "hasDynamicHelp": false,
371
- "multiple": false,
372
- "type": "option"
373
- },
374
- "spec": {
375
- "name": "spec",
376
- "summary": "Path to the test spec YAML file.",
377
- "hasDynamicHelp": false,
378
- "multiple": false,
379
- "type": "option"
380
- },
381
369
  "target-org": {
382
370
  "char": "o",
383
371
  "name": "target-org",
@@ -395,53 +383,164 @@
395
383
  "multiple": false,
396
384
  "type": "option"
397
385
  },
398
- "preview": {
399
- "name": "preview",
400
- "summary": "Preview the test metadata file (AiEvaluationDefinition) without deploying to your org.",
386
+ "type": {
387
+ "name": "type",
388
+ "summary": "Type of agent to create. Internal types are copilots used internally by your company and customer types are the agents you create for your customers.",
389
+ "hasDynamicHelp": false,
390
+ "multiple": false,
391
+ "options": [
392
+ "customer",
393
+ "internal"
394
+ ],
395
+ "type": "option"
396
+ },
397
+ "role": {
398
+ "name": "role",
399
+ "summary": "Role of the agent.",
400
+ "hasDynamicHelp": false,
401
+ "multiple": false,
402
+ "type": "option"
403
+ },
404
+ "company-name": {
405
+ "name": "company-name",
406
+ "summary": "Name of your company.",
407
+ "hasDynamicHelp": false,
408
+ "multiple": false,
409
+ "type": "option"
410
+ },
411
+ "company-description": {
412
+ "name": "company-description",
413
+ "summary": "Description of your company.",
414
+ "hasDynamicHelp": false,
415
+ "multiple": false,
416
+ "type": "option"
417
+ },
418
+ "company-website": {
419
+ "name": "company-website",
420
+ "summary": "Website URL of your company.",
421
+ "hasDynamicHelp": false,
422
+ "multiple": false,
423
+ "type": "option"
424
+ },
425
+ "max-topics": {
426
+ "name": "max-topics",
427
+ "summary": "Maximum number of topics to generate in the agent spec; default is 5.",
428
+ "hasDynamicHelp": false,
429
+ "multiple": false,
430
+ "type": "option"
431
+ },
432
+ "agent-user": {
433
+ "name": "agent-user",
434
+ "summary": "Username of a user in your org to assign to your agent; determines what your agent can access and do.",
435
+ "hasDynamicHelp": false,
436
+ "multiple": false,
437
+ "type": "option"
438
+ },
439
+ "enrich-logs": {
440
+ "name": "enrich-logs",
441
+ "summary": "Adds agent conversation data to event logs so you can view all agent session activity in one place.",
442
+ "hasDynamicHelp": false,
443
+ "multiple": false,
444
+ "options": [
445
+ "true",
446
+ "false"
447
+ ],
448
+ "type": "option"
449
+ },
450
+ "tone": {
451
+ "name": "tone",
452
+ "summary": "Conversational style of the agent, such as how it expresses your brand personality in its messages through word choice, punctuation, and sentence structure.",
453
+ "hasDynamicHelp": false,
454
+ "multiple": false,
455
+ "options": [
456
+ "formal",
457
+ "casual",
458
+ "neutral"
459
+ ],
460
+ "type": "option"
461
+ },
462
+ "spec": {
463
+ "name": "spec",
464
+ "summary": "Agent spec file, in YAML format, to use as input to the command.",
465
+ "hasDynamicHelp": false,
466
+ "multiple": false,
467
+ "type": "option"
468
+ },
469
+ "output-file": {
470
+ "name": "output-file",
471
+ "summary": "Path for the generated YAML agent spec file; can be an absolute or relative path.",
472
+ "default": "specs/agentSpec.yaml",
473
+ "hasDynamicHelp": false,
474
+ "multiple": false,
475
+ "type": "option"
476
+ },
477
+ "full-interview": {
478
+ "name": "full-interview",
479
+ "summary": "Prompt for both required and optional flags.",
401
480
  "allowNo": false,
402
481
  "type": "boolean"
403
482
  },
483
+ "prompt-template": {
484
+ "name": "prompt-template",
485
+ "summary": "API name of a customized prompt template to use instead of the default prompt template.",
486
+ "hasDynamicHelp": false,
487
+ "multiple": false,
488
+ "type": "option"
489
+ },
490
+ "grounding-context": {
491
+ "dependsOn": [
492
+ "prompt-template"
493
+ ],
494
+ "name": "grounding-context",
495
+ "summary": "Context information and personalization that's added to your prompts when using a custom prompt template.",
496
+ "hasDynamicHelp": false,
497
+ "multiple": false,
498
+ "type": "option"
499
+ },
404
500
  "force-overwrite": {
405
501
  "name": "force-overwrite",
406
- "summary": "Don't prompt for confirmation when overwriting an existing test (based on API name) in your org.",
502
+ "summary": "Don't prompt the user to confirm that an existing spec file will be overwritten.",
407
503
  "allowNo": false,
408
504
  "type": "boolean"
409
505
  }
410
506
  },
411
507
  "hasDynamicHelp": true,
412
508
  "hiddenAliases": [],
413
- "id": "agent:test:create",
509
+ "id": "agent:generate:agent-spec",
414
510
  "pluginAlias": "@salesforce/plugin-agent",
415
511
  "pluginName": "@salesforce/plugin-agent",
416
512
  "pluginType": "core",
417
513
  "strict": true,
418
- "summary": "Create an agent test in your org using a local test spec YAML file.",
514
+ "summary": "Generate an agent spec, which is a YAML file that captures what an agent can do.",
419
515
  "enableJsonFlag": true,
516
+ "requiresProject": true,
420
517
  "isESM": true,
421
518
  "relativePath": [
422
519
  "lib",
423
520
  "commands",
424
521
  "agent",
425
- "test",
426
- "create.js"
522
+ "generate",
523
+ "agent-spec.js"
427
524
  ],
428
525
  "aliasPermutations": [],
429
526
  "permutations": [
430
- "agent:test:create",
431
- "test:agent:create",
432
- "test:create:agent",
433
- "agent:create:test",
434
- "create:agent:test",
435
- "create:test:agent"
527
+ "agent:generate:agent-spec",
528
+ "generate:agent:agent-spec",
529
+ "generate:agent-spec:agent",
530
+ "agent:agent-spec:generate",
531
+ "agent-spec:agent:generate",
532
+ "agent-spec:generate:agent"
436
533
  ]
437
534
  },
438
- "agent:test:list": {
535
+ "agent:generate:authoring-bundle": {
439
536
  "aliases": [],
440
537
  "args": {},
441
- "description": "The command outputs a table with the name (API name) of each test along with its unique ID and the date it was created in the org.",
538
+ "description": "Authoring bundles are metadata components that contain an agent's Agent Script file. The Agent Script file is the agent's blueprint; it fully describes what the agent can do using the Agent Script language.\n\nUse this command to generate a new authoring bundle based on an agent spec YAML file, which you create with the \"agent generate agent-spec\" command. The agent spec YAML file is a high-level description of the agent; it describes its essence rather than exactly what it can do. The resulting Agent Script file is customized to reflect what's in the agent spec file. You can also create an authoring bundle without an agent spec file by specifying the \"--no-spec\" flag; in this case, the resulting Agent Script file is just the default boilerplate.\n\nThe metadata type for authoring bundles is aiAuthoringBundle, which consist of a standard \"<bundle-api-name>.bundle-meta.xml\" metadata file and the Agent Script file (with extension \".agent\"). When you run this command, the new authoring bundle is generated in the force-app/main/default/aiAuthoringBundles/<bundle-api-name> directory. Use the --output-dir flag to generate them elsewhere.\n\nAfter you generate the initial authoring bundle, code the Agent Script file so your agent behaves exactly as you want. The Agent Script file generated by this command is just a first draft of your agent! Interactively test the agent by conversing with it using the \"agent preview\" command. Then publish the agent to your org with the \"agent publish authoring-bundle\" command.\n\nThis command requires an org because it uses it to access an LLM for generating the Agent Script file.",
442
539
  "examples": [
443
- "List the agent tests in your default org:\n<%= config.bin %> <%= command.id %>",
444
- "List the agent tests in an org with alias \"my-org\"\"\n<%= config.bin %> <%= command.id %> --target-org my-org"
540
+ "Generate an authoring bundle by being prompted for all required values, such as the agent spec YAML file, the bundle name, and the API name; use your default org:\n<%= config.bin %> <%= command.id %>",
541
+ "Generate an authoring bundle without using an agent spec file; give the bundle the label \"My Authoring Bundle\" and use your default org:\n<%= config.bin %> <%= command.id %> --no-spec --name \"My Authoring Bundle\"",
542
+ "Generate an authoring bundle from the \"specs/agentSpec.yaml\" agent spec YAML file and give it the label \"My Authoring Bundle\"; use your default org:\n<%= config.bin %> <%= command.id %> --spec specs/agentSpec.yaml --name \"My Authoring Bundle\"",
543
+ "Similar to previous example, but generate the authoring bundle files in the \"other-package-dir/main/default\" package directory; use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --spec specs/agentSpec.yaml --name \"My Authoring Bundle\" --output-dir other-package-dir/main/default --target-org my-dev-org"
445
544
  ],
446
545
  "flags": {
447
546
  "json": {
@@ -469,49 +568,101 @@
469
568
  "multiple": false,
470
569
  "type": "option"
471
570
  },
571
+ "api-name": {
572
+ "name": "api-name",
573
+ "summary": "API name of the new authoring bundle; if not specified, the API name is derived from the authoring bundle name (label); the API name can't exist in the org.",
574
+ "hasDynamicHelp": false,
575
+ "multiple": false,
576
+ "type": "option"
577
+ },
472
578
  "api-version": {
473
579
  "description": "Override the api version used for api requests made by this command",
474
580
  "name": "api-version",
475
581
  "hasDynamicHelp": false,
476
582
  "multiple": false,
477
583
  "type": "option"
584
+ },
585
+ "spec": {
586
+ "char": "f",
587
+ "name": "spec",
588
+ "summary": "Path to the agent spec YAML file. If you don't specify the flag, the command provides a list that you can choose from. Use the --no-spec flag to skip using an agent spec entirely.",
589
+ "hasDynamicHelp": false,
590
+ "multiple": false,
591
+ "type": "option"
592
+ },
593
+ "no-spec": {
594
+ "name": "no-spec",
595
+ "summary": "Skip prompting for an agent spec and use the default Agent Script boilerplate in the generated authoring bundle.",
596
+ "allowNo": false,
597
+ "type": "boolean"
598
+ },
599
+ "output-dir": {
600
+ "char": "d",
601
+ "name": "output-dir",
602
+ "summary": "Directory where the authoring bundle files are generated.",
603
+ "hasDynamicHelp": false,
604
+ "multiple": false,
605
+ "type": "option"
606
+ },
607
+ "name": {
608
+ "char": "n",
609
+ "name": "name",
610
+ "summary": "Name (label) of the authoring bundle; if not specified, you're prompted for the name.",
611
+ "hasDynamicHelp": false,
612
+ "multiple": false,
613
+ "type": "option"
478
614
  }
479
615
  },
480
616
  "hasDynamicHelp": true,
481
617
  "hiddenAliases": [],
482
- "id": "agent:test:list",
618
+ "id": "agent:generate:authoring-bundle",
483
619
  "pluginAlias": "@salesforce/plugin-agent",
484
620
  "pluginName": "@salesforce/plugin-agent",
485
621
  "pluginType": "core",
486
622
  "strict": true,
487
- "summary": "List the available agent tests in your org.",
623
+ "summary": "Generate an authoring bundle from an existing agent spec YAML file.",
488
624
  "enableJsonFlag": true,
625
+ "requiresProject": true,
626
+ "FLAGGABLE_PROMPTS": {
627
+ "name": {
628
+ "message": "Name (label) of the authoring bundle; if not specified, you're prompted for the name.",
629
+ "promptMessage": "Name (label) of the authoring bundle",
630
+ "required": true
631
+ },
632
+ "api-name": {
633
+ "message": "API name of the new authoring bundle; if not specified, the API name is derived from the authoring bundle name (label); the API name can't exist in the org.",
634
+ "promptMessage": "API name of the new authoring bundle"
635
+ },
636
+ "spec": {
637
+ "message": "Path to the agent spec YAML file. If you don't specify the flag, the command provides a list that you can choose from. Use the --no-spec flag to skip using an agent spec entirely.",
638
+ "promptMessage": "Path to the agent spec YAML file",
639
+ "required": true
640
+ }
641
+ },
489
642
  "isESM": true,
490
643
  "relativePath": [
491
644
  "lib",
492
645
  "commands",
493
646
  "agent",
494
- "test",
495
- "list.js"
647
+ "generate",
648
+ "authoring-bundle.js"
496
649
  ],
497
650
  "aliasPermutations": [],
498
651
  "permutations": [
499
- "agent:test:list",
500
- "test:agent:list",
501
- "test:list:agent",
502
- "agent:list:test",
503
- "list:agent:test",
504
- "list:test:agent"
652
+ "agent:generate:authoring-bundle",
653
+ "generate:agent:authoring-bundle",
654
+ "generate:authoring-bundle:agent",
655
+ "agent:authoring-bundle:generate",
656
+ "authoring-bundle:agent:generate",
657
+ "authoring-bundle:generate:agent"
505
658
  ]
506
659
  },
507
- "agent:test:results": {
660
+ "agent:generate:template": {
508
661
  "aliases": [],
509
662
  "args": {},
510
- "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
663
+ "description": "At a high-level, agents are defined by the Bot, BotVersion, and GenAiPlannerBundle metadata types. The GenAiPlannerBundle type in turn defines the agent's topics and actions. This command uses the metadata files for these three types, located in your local DX project, to generate a BotTemplate file for a specific agent (Bot). You then use the BotTemplate file, along with the GenAiPlannerBundle file that references the BotTemplate, to package the template in a managed package that you can share between orgs or on AppExchange.\n\nUse the --agent-file flag to specify the relative or full pathname of the Bot metadata file, such as force-app/main/default/bots/My_Awesome_Agent/My_Awesome_Agent.bot-meta.xml. A single Bot can have multiple BotVersions, so use the --agent-version flag to specify the version. The corresponding BotVersion file must exist locally. For example, if you specify \"--agent-version 4\", then the file force-app/main/default/bots/My_Awesome_Agent/v4.botVersion-meta.xml must exist.\n\nThe new BotTemplate file is generated in the \"botTemplates\" directory in your local package directory, and has the name <Agent_API_name>_v<Version>_Template.botTemplate-meta.xml, such as force-app/main/default/botTemplates/My_Awesome_Agent_v4_Template.botTemplate-meta.xml. The command displays the full pathname of the generated files when it completes.",
511
664
  "examples": [
512
- "Get the results of an agent test run in your default org using its job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
513
- "Get the results of the most recently run agent test in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --use-most-recent --target-org my-org",
514
- "Get the results of the most recently run agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
665
+ "Generate an agent template from a Bot metadata file in your DX project that corresponds to the My_Awesome_Agent agent; use version 1 of the agent.\n<%= config.bin %> <%= command.id %> --agent-file force-app/main/default/bots/My_Awesome_Agent/My_Awesome_Agent.bot-meta.xml --agent-version 1"
515
666
  ],
516
667
  "flags": {
517
668
  "json": {
@@ -529,16 +680,6 @@
529
680
  "multiple": false,
530
681
  "type": "option"
531
682
  },
532
- "target-org": {
533
- "char": "o",
534
- "name": "target-org",
535
- "noCacheDefault": true,
536
- "required": true,
537
- "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
538
- "hasDynamicHelp": true,
539
- "multiple": false,
540
- "type": "option"
541
- },
542
683
  "api-version": {
543
684
  "description": "Override the api version used for api requests made by this command",
544
685
  "name": "api-version",
@@ -546,90 +687,62 @@
546
687
  "multiple": false,
547
688
  "type": "option"
548
689
  },
549
- "job-id": {
550
- "char": "i",
551
- "name": "job-id",
690
+ "agent-version": {
691
+ "name": "agent-version",
552
692
  "required": true,
553
- "summary": "Job ID of the completed agent test run.",
554
- "hasDynamicHelp": false,
555
- "multiple": false,
556
- "type": "option"
557
- },
558
- "result-format": {
559
- "name": "result-format",
560
- "summary": "Format of the agent test run results.",
561
- "default": "human",
693
+ "summary": "Version of the agent (BotVersion).",
562
694
  "hasDynamicHelp": false,
563
695
  "multiple": false,
564
- "options": [
565
- "json",
566
- "human",
567
- "junit",
568
- "tap"
569
- ],
570
696
  "type": "option"
571
697
  },
572
- "output-dir": {
573
- "char": "d",
574
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
575
- "name": "output-dir",
576
- "summary": "Directory to write the agent test results into.",
698
+ "agent-file": {
699
+ "char": "f",
700
+ "name": "agent-file",
701
+ "required": true,
702
+ "summary": "Path to an agent (Bot) metadata file.",
577
703
  "hasDynamicHelp": false,
578
704
  "multiple": false,
579
705
  "type": "option"
580
- },
581
- "verbose": {
582
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
583
- "name": "verbose",
584
- "summary": "Show generated data in the test results output.",
585
- "allowNo": false,
586
- "type": "boolean"
587
706
  }
588
707
  },
589
- "hasDynamicHelp": true,
708
+ "hasDynamicHelp": false,
590
709
  "hiddenAliases": [],
591
- "id": "agent:test:results",
710
+ "id": "agent:generate:template",
592
711
  "pluginAlias": "@salesforce/plugin-agent",
593
712
  "pluginName": "@salesforce/plugin-agent",
594
713
  "pluginType": "core",
595
714
  "strict": true,
596
- "summary": "Get the results of a completed agent test run.",
715
+ "summary": "Generate an agent template from an existing agent in your DX project so you can then package the template in a managed package.",
597
716
  "enableJsonFlag": true,
717
+ "requiresProject": true,
598
718
  "isESM": true,
599
719
  "relativePath": [
600
720
  "lib",
601
721
  "commands",
602
722
  "agent",
603
- "test",
604
- "results.js"
723
+ "generate",
724
+ "template.js"
605
725
  ],
606
726
  "aliasPermutations": [],
607
727
  "permutations": [
608
- "agent:test:results",
609
- "test:agent:results",
610
- "test:results:agent",
611
- "agent:results:test",
612
- "results:agent:test",
613
- "results:test:agent"
728
+ "agent:generate:template",
729
+ "generate:agent:template",
730
+ "generate:template:agent",
731
+ "agent:template:generate",
732
+ "template:agent:generate",
733
+ "template:generate:agent"
614
734
  ]
615
735
  },
616
- "agent:test:resume": {
736
+ "agent:generate:test-spec": {
617
737
  "aliases": [],
618
738
  "args": {},
619
- "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nUse the --wait flag to specify the number of minutes for this command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, the CLI returns control of the terminal to you, and you must run \"agent test resume\" again.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
739
+ "description": "The first step when using Salesforce CLI to create an agent test in your org is to use this interactive command to generate a local YAML-formatted test spec file. The test spec YAML file contains information about the agent being tested, such as its API name, and then one or more test cases. This command uses the metadata components in your DX project when prompting for information, such as the agent API name; it doesn't look in your org.\n\nTo generate a specific agent test case, this command prompts you for this information; when possible, the command provides a list of options for you to choose from:\n\n- Utterance: Natural language statement, question, or command used to test the agent.\n- Expected topic: API name of the topic you expect the agent to use when responding to the utterance.\n- Expected actions: One or more API names of the expection actions the agent takes.\n- Expected outcome: Natural language description of the outcome you expect.\n- (Optional) Custom evaluation: Test an agent's response for specific strings or numbers.\n- (Optional) Conversation history: Boilerplate for additional context you can add to the test in the form of a conversation history.\n\nWhen your test spec is ready, you then run the \"agent test create\" command to actually create the test in your org and synchronize the metadata with your DX project. The metadata type for an agent test is AiEvaluationDefinition.\n\nIf you have an existing AiEvaluationDefinition metadata XML file in your DX project, you can generate its equivalent YAML test spec file with the --from-definition flag.",
620
740
  "examples": [
621
- "Resume an agent test in your default org using a job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
622
- "Resume the most recently-run agent test in an org with alias \"my-org\" org; wait 10 minutes for the tests to finish:\n<%= config.bin %> <%= command.id %> --use-most-recent --wait 10 --target-org my-org",
623
- "Resume the most recent agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
741
+ "Generate an agent test spec YAML file interactively:\n<%= config.bin %> <%= command.id %>",
742
+ "Generate an agent test spec YAML file and specify a name for the new file; if the file exists, overwrite it without confirmation:\n<%= config.bin %> <%= command.id %> --output-file specs/Resort_Manager-new-version-testSpec.yaml --force-overwrite",
743
+ "Generate an agent test spec YAML file from an existing AiEvaluationDefinition metadata XML file in your DX project:\n<%= config.bin %> <%= command.id %> --from-definition force-app//main/default/aiEvaluationDefinitions/Resort_Manager_Tests.aiEvaluationDefinition-meta.xml"
624
744
  ],
625
745
  "flags": {
626
- "json": {
627
- "description": "Format output as json.",
628
- "helpGroup": "GLOBAL",
629
- "name": "json",
630
- "allowNo": false,
631
- "type": "boolean"
632
- },
633
746
  "flags-dir": {
634
747
  "helpGroup": "GLOBAL",
635
748
  "name": "flags-dir",
@@ -638,113 +751,63 @@
638
751
  "multiple": false,
639
752
  "type": "option"
640
753
  },
641
- "target-org": {
642
- "char": "o",
643
- "name": "target-org",
644
- "noCacheDefault": true,
645
- "required": true,
646
- "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
647
- "hasDynamicHelp": true,
648
- "multiple": false,
649
- "type": "option"
650
- },
651
- "api-version": {
652
- "description": "Override the api version used for api requests made by this command",
653
- "name": "api-version",
654
- "hasDynamicHelp": false,
655
- "multiple": false,
656
- "type": "option"
657
- },
658
- "job-id": {
659
- "char": "i",
660
- "name": "job-id",
661
- "summary": "Job ID of the original agent test run.",
754
+ "from-definition": {
755
+ "char": "d",
756
+ "name": "from-definition",
757
+ "summary": "Filepath to the AIEvaluationDefinition metadata XML file in your DX project that you want to convert to a test spec YAML file.",
662
758
  "hasDynamicHelp": false,
663
759
  "multiple": false,
664
760
  "type": "option"
665
761
  },
666
- "use-most-recent": {
667
- "char": "r",
668
- "name": "use-most-recent",
669
- "summary": "Use the job ID of the most recent agent test run.",
762
+ "force-overwrite": {
763
+ "name": "force-overwrite",
764
+ "summary": "Don't prompt for confirmation when overwriting an existing test spec YAML file.",
670
765
  "allowNo": false,
671
766
  "type": "boolean"
672
767
  },
673
- "wait": {
674
- "char": "w",
675
- "name": "wait",
676
- "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
677
- "default": "5 minutes",
678
- "hasDynamicHelp": true,
679
- "multiple": false,
680
- "type": "option"
681
- },
682
- "result-format": {
683
- "name": "result-format",
684
- "summary": "Format of the agent test run results.",
685
- "default": "human",
686
- "hasDynamicHelp": false,
687
- "multiple": false,
688
- "options": [
689
- "json",
690
- "human",
691
- "junit",
692
- "tap"
693
- ],
694
- "type": "option"
695
- },
696
- "output-dir": {
697
- "char": "d",
698
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
699
- "name": "output-dir",
700
- "summary": "Directory to write the agent test results into.",
768
+ "output-file": {
769
+ "char": "f",
770
+ "name": "output-file",
771
+ "summary": "Name of the generated test spec YAML file. Default value is \"specs/<AGENT_API_NAME>-testSpec.yaml\".",
701
772
  "hasDynamicHelp": false,
702
773
  "multiple": false,
703
774
  "type": "option"
704
- },
705
- "verbose": {
706
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
707
- "name": "verbose",
708
- "summary": "Show generated data in the test results output.",
709
- "allowNo": false,
710
- "type": "boolean"
711
775
  }
712
776
  },
713
- "hasDynamicHelp": true,
777
+ "hasDynamicHelp": false,
714
778
  "hiddenAliases": [],
715
- "id": "agent:test:resume",
779
+ "id": "agent:generate:test-spec",
716
780
  "pluginAlias": "@salesforce/plugin-agent",
717
781
  "pluginName": "@salesforce/plugin-agent",
718
782
  "pluginType": "core",
719
783
  "strict": true,
720
- "summary": "Resume an agent test that you previously started in your org so you can view the test results.",
721
- "enableJsonFlag": true,
784
+ "summary": "Generate an agent test spec, which is a YAML file that lists the test cases for testing a specific agent.",
785
+ "enableJsonFlag": false,
722
786
  "isESM": true,
723
787
  "relativePath": [
724
788
  "lib",
725
789
  "commands",
726
790
  "agent",
727
- "test",
728
- "resume.js"
791
+ "generate",
792
+ "test-spec.js"
729
793
  ],
730
794
  "aliasPermutations": [],
731
795
  "permutations": [
732
- "agent:test:resume",
733
- "test:agent:resume",
734
- "test:resume:agent",
735
- "agent:resume:test",
736
- "resume:agent:test",
737
- "resume:test:agent"
796
+ "agent:generate:test-spec",
797
+ "generate:agent:test-spec",
798
+ "generate:test-spec:agent",
799
+ "agent:test-spec:generate",
800
+ "test-spec:agent:generate",
801
+ "test-spec:generate:agent"
738
802
  ]
739
803
  },
740
- "agent:test:run": {
804
+ "agent:publish:authoring-bundle": {
741
805
  "aliases": [],
742
806
  "args": {},
743
- "description": "Use the --api-name flag to specify the name of the agent test you want to run. Use the output of the \"agent test list\" command to get the names of all the available agent tests in your org.\n\nBy default, this command starts the agent test in your org, but it doesn't wait for the test to finish. Instead, it displays the \"agent test resume\" command, with a job ID, that you execute to see the results of the test run, and then returns control of the terminal window to you. Use the --wait flag to specify the number of minutes for the command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, run \"agent test resume\".\n\nBy default, this command outputs test results in human-readable tables for each test case, if the test completes in time. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
807
+ "description": "An authoring bundle is a metadata type (named aiAuthoringBundle) that provides the blueprint for an agent. The metadata type contains two files: the standard metatada XML file and an Agent Script file (extension \".agent\") that fully describes the agent using the Agent Script language.\n\nWhen you publish an authoring bundle to your org, a number of things happen. First, this command validates that the Agent Script file successfully compiles. If there are compilation errors, the command exits and you must fix the Agent Script file to continue. Once the Agent Script file compiles, then it's published to the org, which in turn creates new associated metadata (Bot, BotVersion, GenAiX), or new versions of the metadata if the agent already exists. The new or updated metadata is retrieved back to your DX project; specify the --skip-retrieve flag to skip this step. Finally, the authoring bundle metadata (AiAuthoringBundle) is deployed to your org.\n\nThis command uses the API name of the authoring bundle.",
744
808
  "examples": [
745
- "Start an agent test called Resort_Manager_Test for an agent in your default org, don't wait for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test",
746
- "Start an agent test for an agent in an org with alias \"my-org\" and wait for 10 minutes for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --target-org my-org",
747
- "Start an agent test and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --output-dir ./test-results --result-format json"
809
+ "Publish an authoring bundle by being prompted for its API name; use your default org:\n<%= config.bin %> <%= command.id %>",
810
+ "Publish an authoring bundle with API name MyAuthoringBundle to the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name MyAuthoringbundle --target-org my-dev-org"
748
811
  ],
749
812
  "flags": {
750
813
  "json": {
@@ -782,84 +845,60 @@
782
845
  "api-name": {
783
846
  "char": "n",
784
847
  "name": "api-name",
785
- "summary": "API name of the agent test to run; corresponds to the name of the AiEvaluationDefinition metadata component that implements the agent test.",
786
- "hasDynamicHelp": false,
787
- "multiple": false,
788
- "type": "option"
789
- },
790
- "wait": {
791
- "char": "w",
792
- "name": "wait",
793
- "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
794
- "hasDynamicHelp": true,
795
- "multiple": false,
796
- "type": "option"
797
- },
798
- "result-format": {
799
- "name": "result-format",
800
- "summary": "Format of the agent test run results.",
801
- "default": "human",
802
- "hasDynamicHelp": false,
803
- "multiple": false,
804
- "options": [
805
- "json",
806
- "human",
807
- "junit",
808
- "tap"
809
- ],
810
- "type": "option"
811
- },
812
- "output-dir": {
813
- "char": "d",
814
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
815
- "name": "output-dir",
816
- "summary": "Directory to write the agent test results into.",
848
+ "summary": "API name of the authoring bundle you want to publish; if not specified, the command provides a list that you can choose from.",
817
849
  "hasDynamicHelp": false,
818
850
  "multiple": false,
819
851
  "type": "option"
820
852
  },
821
- "verbose": {
822
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
823
- "name": "verbose",
824
- "summary": "Show generated data in the test results output.",
853
+ "skip-retrieve": {
854
+ "name": "skip-retrieve",
855
+ "summary": "Don't retrieve the metadata associated with the agent to your DX project.",
825
856
  "allowNo": false,
826
857
  "type": "boolean"
827
858
  }
828
859
  },
829
860
  "hasDynamicHelp": true,
830
861
  "hiddenAliases": [],
831
- "id": "agent:test:run",
862
+ "id": "agent:publish:authoring-bundle",
832
863
  "pluginAlias": "@salesforce/plugin-agent",
833
864
  "pluginName": "@salesforce/plugin-agent",
834
865
  "pluginType": "core",
835
866
  "strict": true,
836
- "summary": "Start an agent test in your org.",
867
+ "summary": "Publish an authoring bundle to your org, which results in a new agent or a new version of an existing agent.",
837
868
  "enableJsonFlag": true,
869
+ "requiresProject": true,
870
+ "FLAGGABLE_PROMPTS": {
871
+ "api-name": {
872
+ "message": "API name of the authoring bundle you want to publish; if not specified, the command provides a list that you can choose from.",
873
+ "promptMessage": "API name of the authoring bundle to publish"
874
+ }
875
+ },
838
876
  "isESM": true,
839
877
  "relativePath": [
840
878
  "lib",
841
879
  "commands",
842
880
  "agent",
843
- "test",
844
- "run.js"
881
+ "publish",
882
+ "authoring-bundle.js"
845
883
  ],
846
884
  "aliasPermutations": [],
847
885
  "permutations": [
848
- "agent:test:run",
849
- "test:agent:run",
850
- "test:run:agent",
851
- "agent:run:test",
852
- "run:agent:test",
853
- "run:test:agent"
886
+ "agent:publish:authoring-bundle",
887
+ "publish:agent:authoring-bundle",
888
+ "publish:authoring-bundle:agent",
889
+ "agent:authoring-bundle:publish",
890
+ "authoring-bundle:agent:publish",
891
+ "authoring-bundle:publish:agent"
854
892
  ]
855
893
  },
856
- "agent:publish:authoring-bundle": {
894
+ "agent:test:create": {
857
895
  "aliases": [],
858
896
  "args": {},
859
- "description": "An authoring bundle is a metadata type (named aiAuthoringBundle) that provides the blueprint for an agent. The metadata type contains two files: the standard metatada XML file and an Agent Script file (extension \".agent\") that fully describes the agent using the Agent Script language.\n\nWhen you publish an authoring bundle to your org, a number of things happen. First, this command validates that the Agent Script file successfully compiles. If there are compilation errors, the command exits and you must fix the Agent Script file to continue. Once the Agent Script file compiles, then it's published to the org, which in turn creates new associated metadata (Bot, BotVersion, GenAiX), or new versions of the metadata if the agent already exists. The new or updated metadata is retrieved back to your DX project; specify the --skip-retrieve flag to skip this step. Finally, the authoring bundle metadata (AiAuthoringBundle) is deployed to your org.\n\nThis command uses the API name of the authoring bundle.",
897
+ "description": "To run this command, you must have an agent test spec file, which is a YAML file that lists the test cases for testing a specific agent. Use the \"agent generate test-spec\" CLI command to generate a test spec file. Then specify the file to this command with the --spec flag, or run this command with no flags to be prompted.\n\nWhen this command completes, your org contains the new agent test, which you can view and edit using the Testing Center UI. This command also retrieves the metadata component (AiEvaluationDefinition) associated with the new test to your local Salesforce DX project and displays its filename.\n\nAfter you've created the test in the org, use the \"agent test run\" command to run it.",
860
898
  "examples": [
861
- "Publish an authoring bundle by being prompted for its API name; use your default org:\n<%= config.bin %> <%= command.id %>",
862
- "Publish an authoring bundle with API name MyAuthoringBundle to the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name MyAuthoringbundle --target-org my-dev-org"
899
+ "Create an agent test interactively and be prompted for the test spec and API name of the test in the org; use the default org:\n<%= config.bin %> <%= command.id %>",
900
+ "Create an agent test and use flags to specify all required information; if a test with same API name already exists in the org, overwrite it without confirmation. Use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --force-overwrite --target-org my-org",
901
+ "Preview what the agent test metadata (AiEvaluationDefinition) looks like without deploying it to your default org:\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --preview"
863
902
  ],
864
903
  "flags": {
865
904
  "json": {
@@ -877,6 +916,20 @@
877
916
  "multiple": false,
878
917
  "type": "option"
879
918
  },
919
+ "api-name": {
920
+ "name": "api-name",
921
+ "summary": "API name of the new test; the API name must not exist in the org.",
922
+ "hasDynamicHelp": false,
923
+ "multiple": false,
924
+ "type": "option"
925
+ },
926
+ "spec": {
927
+ "name": "spec",
928
+ "summary": "Path to the test spec YAML file.",
929
+ "hasDynamicHelp": false,
930
+ "multiple": false,
931
+ "type": "option"
932
+ },
880
933
  "target-org": {
881
934
  "char": "o",
882
935
  "name": "target-org",
@@ -894,65 +947,53 @@
894
947
  "multiple": false,
895
948
  "type": "option"
896
949
  },
897
- "api-name": {
898
- "char": "n",
899
- "name": "api-name",
900
- "summary": "API name of the authoring bundle you want to publish; if not specified, the command provides a list that you can choose from.",
901
- "hasDynamicHelp": false,
902
- "multiple": false,
903
- "type": "option"
950
+ "preview": {
951
+ "name": "preview",
952
+ "summary": "Preview the test metadata file (AiEvaluationDefinition) without deploying to your org.",
953
+ "allowNo": false,
954
+ "type": "boolean"
904
955
  },
905
- "skip-retrieve": {
906
- "name": "skip-retrieve",
907
- "summary": "Don't retrieve the metadata associated with the agent to your DX project.",
956
+ "force-overwrite": {
957
+ "name": "force-overwrite",
958
+ "summary": "Don't prompt for confirmation when overwriting an existing test (based on API name) in your org.",
908
959
  "allowNo": false,
909
960
  "type": "boolean"
910
961
  }
911
962
  },
912
963
  "hasDynamicHelp": true,
913
964
  "hiddenAliases": [],
914
- "id": "agent:publish:authoring-bundle",
965
+ "id": "agent:test:create",
915
966
  "pluginAlias": "@salesforce/plugin-agent",
916
967
  "pluginName": "@salesforce/plugin-agent",
917
968
  "pluginType": "core",
918
969
  "strict": true,
919
- "summary": "Publish an authoring bundle to your org, which results in a new agent or a new version of an existing agent.",
970
+ "summary": "Create an agent test in your org using a local test spec YAML file.",
920
971
  "enableJsonFlag": true,
921
- "requiresProject": true,
922
- "FLAGGABLE_PROMPTS": {
923
- "api-name": {
924
- "message": "API name of the authoring bundle you want to publish; if not specified, the command provides a list that you can choose from.",
925
- "promptMessage": "API name of the authoring bundle to publish"
926
- }
927
- },
928
972
  "isESM": true,
929
973
  "relativePath": [
930
974
  "lib",
931
975
  "commands",
932
976
  "agent",
933
- "publish",
934
- "authoring-bundle.js"
977
+ "test",
978
+ "create.js"
935
979
  ],
936
980
  "aliasPermutations": [],
937
981
  "permutations": [
938
- "agent:publish:authoring-bundle",
939
- "publish:agent:authoring-bundle",
940
- "publish:authoring-bundle:agent",
941
- "agent:authoring-bundle:publish",
942
- "authoring-bundle:agent:publish",
943
- "authoring-bundle:publish:agent"
982
+ "agent:test:create",
983
+ "test:agent:create",
984
+ "test:create:agent",
985
+ "agent:create:test",
986
+ "create:agent:test",
987
+ "create:test:agent"
944
988
  ]
945
989
  },
946
- "agent:generate:agent-spec": {
990
+ "agent:test:list": {
947
991
  "aliases": [],
948
992
  "args": {},
949
- "description": "An agent spec is a YAML-formatted file that contains basic information about the agent, such as its role, company description, and an AI-generated list of topics based on this information. Topics define the range of jobs your agent can handle.\n\nUse flags, such as --role and --company-description, to provide details about your company and the role that the agent plays in your company. If you prefer, you can also be prompted for the basic information; use --full-interview to be prompted for all required and optional properties. Upon command execution, the large language model (LLM) associated with your org uses the provided information to generate a list of topics for the agent. Because the LLM uses the company and role information to generate the topics, we recommend that you provide accurate, complete, and specific details so the LLM generates the best and most relevant topics. Once generated, you can edit the spec file; for example, you can remove topics that don't apply or change a topic's description.\n\nYou can also iterate the spec generation process by using the --spec flag to pass an existing agent spec file to this command, and then using the --role, --company-description, etc, flags to refine your agent properties. Iteratively improving the description of your agent allows the LLM to generate progressively better topics.\n\nYou can also specify other agent properties, such as a custom prompt template, how to ground the prompt template to add context to the agent's prompts, the tone of the prompts, and the username of a user in the org to assign to the agent.\n\nWhen your agent spec is ready, generate an authoring bundle from it by passing the spec file to the --spec flag of the \"agent generate authoring-bundle\" CLI command. An authoring bundle is a metadata type that contains an Agent Script file, which is the blueprint for an agent. (While not recommended, you can also use the agent spec file to immediately create an agent with the \"agent create\" command. We don't recommend this workflow because these types of agents don't use Agent Script, and are thus less flexible and more difficult to maintain.)",
993
+ "description": "The command outputs a table with the name (API name) of each test along with its unique ID and the date it was created in the org.",
950
994
  "examples": [
951
- "Generate an agent spec in the default location and use flags to specify the agent properties, such as its role and your company details; use your default org:\n<%= config.bin %> <%= command.id %> --type customer --role \"Field customer complaints and manage employee schedules.\" --company-name \"Coral Cloud Resorts\" --company-description \"Provide customers with exceptional destination activities, unforgettable experiences, and reservation services.\"",
952
- "Generate an agent spec by being prompted for the required agent properties and generate a maxiumum of 5 topics; write the generated file to the \"specs/resortManagerSpec.yaml\" file and use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --max-topics 5 --output-file specs/resortManagerAgent.yaml --target-org my-org",
953
- "Be prompted for all required and optional agent properties; use your default org:\n<%= config.bin %> <%= command.id %> --full-interview",
954
- "Specify an existing agent spec file called \"specs/resortManagerAgent.yaml\", and then overwrite it with a new version that contains newly AI-generated topics based on the updated role information passed in with the --role flag:\n<%= config.bin %> <%= command.id %> --spec specs/resortManagerAgent.yaml --output-file specs/resortManagerAgent.yaml --role \"Field customer complaints, manage employee schedules, and ensure all resort operations are running smoothly\"",
955
- "Specify that the conversational tone of the agent is formal and to attach the \"resortmanager@myorg.com\" username to it; be prompted for the required properties and use your default org:\n<%= config.bin %> <%= command.id %> --tone formal --agent-user resortmanager@myorg.com"
995
+ "List the agent tests in your default org:\n<%= config.bin %> <%= command.id %>",
996
+ "List the agent tests in an org with alias \"my-org\"\"\n<%= config.bin %> <%= command.id %> --target-org my-org"
956
997
  ],
957
998
  "flags": {
958
999
  "json": {
@@ -986,164 +1027,43 @@
986
1027
  "hasDynamicHelp": false,
987
1028
  "multiple": false,
988
1029
  "type": "option"
989
- },
990
- "type": {
991
- "name": "type",
992
- "summary": "Type of agent to create. Internal types are copilots used internally by your company and customer types are the agents you create for your customers.",
993
- "hasDynamicHelp": false,
994
- "multiple": false,
995
- "options": [
996
- "customer",
997
- "internal"
998
- ],
999
- "type": "option"
1000
- },
1001
- "role": {
1002
- "name": "role",
1003
- "summary": "Role of the agent.",
1004
- "hasDynamicHelp": false,
1005
- "multiple": false,
1006
- "type": "option"
1007
- },
1008
- "company-name": {
1009
- "name": "company-name",
1010
- "summary": "Name of your company.",
1011
- "hasDynamicHelp": false,
1012
- "multiple": false,
1013
- "type": "option"
1014
- },
1015
- "company-description": {
1016
- "name": "company-description",
1017
- "summary": "Description of your company.",
1018
- "hasDynamicHelp": false,
1019
- "multiple": false,
1020
- "type": "option"
1021
- },
1022
- "company-website": {
1023
- "name": "company-website",
1024
- "summary": "Website URL of your company.",
1025
- "hasDynamicHelp": false,
1026
- "multiple": false,
1027
- "type": "option"
1028
- },
1029
- "max-topics": {
1030
- "name": "max-topics",
1031
- "summary": "Maximum number of topics to generate in the agent spec; default is 5.",
1032
- "hasDynamicHelp": false,
1033
- "multiple": false,
1034
- "type": "option"
1035
- },
1036
- "agent-user": {
1037
- "name": "agent-user",
1038
- "summary": "Username of a user in your org to assign to your agent; determines what your agent can access and do.",
1039
- "hasDynamicHelp": false,
1040
- "multiple": false,
1041
- "type": "option"
1042
- },
1043
- "enrich-logs": {
1044
- "name": "enrich-logs",
1045
- "summary": "Adds agent conversation data to event logs so you can view all agent session activity in one place.",
1046
- "hasDynamicHelp": false,
1047
- "multiple": false,
1048
- "options": [
1049
- "true",
1050
- "false"
1051
- ],
1052
- "type": "option"
1053
- },
1054
- "tone": {
1055
- "name": "tone",
1056
- "summary": "Conversational style of the agent, such as how it expresses your brand personality in its messages through word choice, punctuation, and sentence structure.",
1057
- "hasDynamicHelp": false,
1058
- "multiple": false,
1059
- "options": [
1060
- "formal",
1061
- "casual",
1062
- "neutral"
1063
- ],
1064
- "type": "option"
1065
- },
1066
- "spec": {
1067
- "name": "spec",
1068
- "summary": "Agent spec file, in YAML format, to use as input to the command.",
1069
- "hasDynamicHelp": false,
1070
- "multiple": false,
1071
- "type": "option"
1072
- },
1073
- "output-file": {
1074
- "name": "output-file",
1075
- "summary": "Path for the generated YAML agent spec file; can be an absolute or relative path.",
1076
- "default": "specs/agentSpec.yaml",
1077
- "hasDynamicHelp": false,
1078
- "multiple": false,
1079
- "type": "option"
1080
- },
1081
- "full-interview": {
1082
- "name": "full-interview",
1083
- "summary": "Prompt for both required and optional flags.",
1084
- "allowNo": false,
1085
- "type": "boolean"
1086
- },
1087
- "prompt-template": {
1088
- "name": "prompt-template",
1089
- "summary": "API name of a customized prompt template to use instead of the default prompt template.",
1090
- "hasDynamicHelp": false,
1091
- "multiple": false,
1092
- "type": "option"
1093
- },
1094
- "grounding-context": {
1095
- "dependsOn": [
1096
- "prompt-template"
1097
- ],
1098
- "name": "grounding-context",
1099
- "summary": "Context information and personalization that's added to your prompts when using a custom prompt template.",
1100
- "hasDynamicHelp": false,
1101
- "multiple": false,
1102
- "type": "option"
1103
- },
1104
- "force-overwrite": {
1105
- "name": "force-overwrite",
1106
- "summary": "Don't prompt the user to confirm that an existing spec file will be overwritten.",
1107
- "allowNo": false,
1108
- "type": "boolean"
1109
1030
  }
1110
1031
  },
1111
1032
  "hasDynamicHelp": true,
1112
1033
  "hiddenAliases": [],
1113
- "id": "agent:generate:agent-spec",
1034
+ "id": "agent:test:list",
1114
1035
  "pluginAlias": "@salesforce/plugin-agent",
1115
1036
  "pluginName": "@salesforce/plugin-agent",
1116
1037
  "pluginType": "core",
1117
1038
  "strict": true,
1118
- "summary": "Generate an agent spec, which is a YAML file that captures what an agent can do.",
1039
+ "summary": "List the available agent tests in your org.",
1119
1040
  "enableJsonFlag": true,
1120
- "requiresProject": true,
1121
1041
  "isESM": true,
1122
1042
  "relativePath": [
1123
1043
  "lib",
1124
1044
  "commands",
1125
1045
  "agent",
1126
- "generate",
1127
- "agent-spec.js"
1046
+ "test",
1047
+ "list.js"
1128
1048
  ],
1129
1049
  "aliasPermutations": [],
1130
1050
  "permutations": [
1131
- "agent:generate:agent-spec",
1132
- "generate:agent:agent-spec",
1133
- "generate:agent-spec:agent",
1134
- "agent:agent-spec:generate",
1135
- "agent-spec:agent:generate",
1136
- "agent-spec:generate:agent"
1051
+ "agent:test:list",
1052
+ "test:agent:list",
1053
+ "test:list:agent",
1054
+ "agent:list:test",
1055
+ "list:agent:test",
1056
+ "list:test:agent"
1137
1057
  ]
1138
1058
  },
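Note: a minimal usage sketch for the "agent test list" entry above, assuming the CLI binary is "sf" and that the command accepts the plugin's standard --target-org flag (the flag list isn't shown in this hunk):

    # List the agent tests (AiEvaluationDefinition components) available in the org with alias "my-org"
    sf agent test list --target-org my-org

The API names it prints are the values you pass to "agent test run" with --api-name.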
1139
- "agent:generate:authoring-bundle": {
1059
+ "agent:test:results": {
1140
1060
  "aliases": [],
1141
1061
  "args": {},
1142
- "description": "Authoring bundles are metadata components that contain an agent's Agent Script file. The Agent Script file is the agent's blueprint; it fully describes what the agent can do using the Agent Script language.\n\nUse this command to generate a new authoring bundle based on an agent spec YAML file, which you create with the \"agent generate agent-spec\" command. The agent spec YAML file is a high-level description of the agent; it describes its essence rather than exactly what it can do.\n\nThe metadata type for authoring bundles is aiAuthoringBundle, which consist of a standard \"<bundle-api-name>.bundle-meta.xml\" metadata file and the Agent Script file (with extension \".agent\"). When you run this command, the new authoring bundle is generated in the force-app/main/default/aiAuthoringBundles/<bundle-api-name> directory. Use the --output-dir flag to generate them elsewhere.\n\nAfter you generate the initial authoring bundle, code the Agent Script file so your agent behaves exactly as you want. The Agent Script file generated by this command is just a first draft of your agent! Interactively test the agent by conversing with it using the \"agent preview\" command. Then publish the agent to your org with the \"agent publish authoring-bundle\" command.\n\nThis command requires an org because it uses it to access an LLM for generating the Agent Script file.",
1062
+ "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1143
1063
  "examples": [
1144
- "Generate an authoring bundle by being prompted for all required values, such as the agent spec YAML file, the bundle name, and the API name; use your default org:\n<%= config.bin %> <%= command.id %>",
1145
- "Generate an authoring bundle from the \"specs/agentSpec.yaml\" agent spec YAML file and give it the label \"My Authoring Bundle\"; use your default org:\n<%= config.bin %> <%= command.id %> --spec specs/agentSpec.yaml --name \"My Authoring Bundle\"",
1146
- "Similar to previous example, but generate the authoring bundle files in the \"other-package-dir/main/default\" package directory; use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --spec specs/agentSpec.yaml --name \"My Authoring Bundle\" --output-dir other-package-dir/main/default --target-org my-dev-org"
1064
+ "Get the results of an agent test run in your default org using its job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1065
+ "Get the results of the most recently run agent test in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --use-most-recent --target-org my-org",
1066
+ "Get the results of the most recently run agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1147
1067
  ],
1148
1068
  "flags": {
1149
1069
  "json": {
@@ -1171,95 +1091,88 @@
1171
1091
  "multiple": false,
1172
1092
  "type": "option"
1173
1093
  },
1174
- "api-name": {
1175
- "name": "api-name",
1176
- "summary": "API name of the new authoring bundle; if not specified, the API name is derived from the authoring bundle name (label); the API name can't exist in the org.",
1094
+ "api-version": {
1095
+ "description": "Override the api version used for api requests made by this command",
1096
+ "name": "api-version",
1177
1097
  "hasDynamicHelp": false,
1178
1098
  "multiple": false,
1179
1099
  "type": "option"
1180
1100
  },
1181
- "api-version": {
1182
- "description": "Override the api version used for api requests made by this command",
1183
- "name": "api-version",
1101
+ "job-id": {
1102
+ "char": "i",
1103
+ "name": "job-id",
1104
+ "required": true,
1105
+ "summary": "Job ID of the completed agent test run.",
1184
1106
  "hasDynamicHelp": false,
1185
1107
  "multiple": false,
1186
1108
  "type": "option"
1187
1109
  },
1188
- "spec": {
1189
- "char": "f",
1190
- "name": "spec",
1191
- "summary": "Path to the agent spec YAML file; if not specified, the command provides a list that you can choose from.",
1110
+ "result-format": {
1111
+ "name": "result-format",
1112
+ "summary": "Format of the agent test run results.",
1113
+ "default": "human",
1192
1114
  "hasDynamicHelp": false,
1193
1115
  "multiple": false,
1116
+ "options": [
1117
+ "json",
1118
+ "human",
1119
+ "junit",
1120
+ "tap"
1121
+ ],
1194
1122
  "type": "option"
1195
1123
  },
1196
1124
  "output-dir": {
1197
1125
  "char": "d",
1126
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1198
1127
  "name": "output-dir",
1199
- "summary": "Directory where the authoring bundle files are generated.",
1128
+ "summary": "Directory to write the agent test results into.",
1200
1129
  "hasDynamicHelp": false,
1201
1130
  "multiple": false,
1202
1131
  "type": "option"
1203
1132
  },
1204
- "name": {
1205
- "char": "n",
1206
- "name": "name",
1207
- "summary": "Name (label) of the authoring bundle; if not specified, you're prompted for the name.",
1208
- "hasDynamicHelp": false,
1209
- "multiple": false,
1210
- "type": "option"
1133
+ "verbose": {
1134
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1135
+ "name": "verbose",
1136
+ "summary": "Show generated data in the test results output.",
1137
+ "allowNo": false,
1138
+ "type": "boolean"
1211
1139
  }
1212
1140
  },
1213
1141
  "hasDynamicHelp": true,
1214
1142
  "hiddenAliases": [],
1215
- "id": "agent:generate:authoring-bundle",
1143
+ "id": "agent:test:results",
1216
1144
  "pluginAlias": "@salesforce/plugin-agent",
1217
1145
  "pluginName": "@salesforce/plugin-agent",
1218
1146
  "pluginType": "core",
1219
1147
  "strict": true,
1220
- "summary": "Generate an authoring bundle from an existing agent spec YAML file.",
1148
+ "summary": "Get the results of a completed agent test run.",
1221
1149
  "enableJsonFlag": true,
1222
- "requiresProject": true,
1223
- "FLAGGABLE_PROMPTS": {
1224
- "name": {
1225
- "message": "Name (label) of the authoring bundle; if not specified, you're prompted for the name.",
1226
- "promptMessage": "Name (label) of the authoring bundle",
1227
- "required": true
1228
- },
1229
- "api-name": {
1230
- "message": "API name of the new authoring bundle; if not specified, the API name is derived from the authoring bundle name (label); the API name can't exist in the org.",
1231
- "promptMessage": "API name of the new authoring bundle"
1232
- },
1233
- "spec": {
1234
- "message": "Path to the agent spec YAML file; if not specified, the command provides a list that you can choose from.",
1235
- "promptMessage": "Path to the agent spec YAML file",
1236
- "required": true
1237
- }
1238
- },
1239
1150
  "isESM": true,
1240
1151
  "relativePath": [
1241
1152
  "lib",
1242
1153
  "commands",
1243
1154
  "agent",
1244
- "generate",
1245
- "authoring-bundle.js"
1155
+ "test",
1156
+ "results.js"
1246
1157
  ],
1247
1158
  "aliasPermutations": [],
1248
1159
  "permutations": [
1249
- "agent:generate:authoring-bundle",
1250
- "generate:agent:authoring-bundle",
1251
- "generate:authoring-bundle:agent",
1252
- "agent:authoring-bundle:generate",
1253
- "authoring-bundle:agent:generate",
1254
- "authoring-bundle:generate:agent"
1160
+ "agent:test:results",
1161
+ "test:agent:results",
1162
+ "test:results:agent",
1163
+ "agent:results:test",
1164
+ "results:agent:test",
1165
+ "results:test:agent"
1255
1166
  ]
1256
1167
  },
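Note: a hedged sketch of the "agent test results" usage described in the entry above, assuming the CLI binary is "sf"; the flags and the sample job ID come from the manifest itself:

    # Show human-readable results for a completed run, using the job ID printed by "agent test run"
    sf agent test results --job-id 4KBfake0000003F4AQ

    # Write JUnit-formatted results for the most recent run into ./test-results, including the
    # generated data (invoked actions, touched objects) that's useful for debugging failures
    sf agent test results --use-most-recent --result-format junit --output-dir ./test-results --verbose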
1257
- "agent:generate:template": {
1168
+ "agent:test:resume": {
1258
1169
  "aliases": [],
1259
1170
  "args": {},
1260
- "description": "At a high-level, agents are defined by the Bot, BotVersion, and GenAiPlannerBundle metadata types. The GenAiPlannerBundle type in turn defines the agent's topics and actions. This command uses the metadata files for these three types, located in your local DX project, to generate a BotTemplate file for a specific agent (Bot). You then use the BotTemplate file, along with the GenAiPlannerBundle file that references the BotTemplate, to package the template in a managed package that you can share between orgs or on AppExchange.\n\nUse the --agent-file flag to specify the relative or full pathname of the Bot metadata file, such as force-app/main/default/bots/My_Awesome_Agent/My_Awesome_Agent.bot-meta.xml. A single Bot can have multiple BotVersions, so use the --agent-version flag to specify the version. The corresponding BotVersion file must exist locally. For example, if you specify \"--agent-version 4\", then the file force-app/main/default/bots/My_Awesome_Agent/v4.botVersion-meta.xml must exist.\n\nThe new BotTemplate file is generated in the \"botTemplates\" directory in your local package directory, and has the name <Agent_API_name>_v<Version>_Template.botTemplate-meta.xml, such as force-app/main/default/botTemplates/My_Awesome_Agent_v4_Template.botTemplate-meta.xml. The command displays the full pathname of the generated files when it completes.",
1171
+ "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nUse the --wait flag to specify the number of minutes for this command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, the CLI returns control of the terminal to you, and you must run \"agent test resume\" again.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1261
1172
  "examples": [
1262
- "Generate an agent template from a Bot metadata file in your DX project that corresponds to the My_Awesome_Agent agent; use version 1 of the agent.\n<%= config.bin %> <%= command.id %> --agent-file force-app/main/default/bots/My_Awesome_Agent/My_Awesome_Agent.bot-meta.xml --agent-version 1"
1173
+ "Resume an agent test in your default org using a job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1174
+ "Resume the most recently-run agent test in an org with alias \"my-org\" org; wait 10 minutes for the tests to finish:\n<%= config.bin %> <%= command.id %> --use-most-recent --wait 10 --target-org my-org",
1175
+ "Resume the most recent agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1263
1176
  ],
1264
1177
  "flags": {
1265
1178
  "json": {
@@ -1277,6 +1190,16 @@
1277
1190
  "multiple": false,
1278
1191
  "type": "option"
1279
1192
  },
1193
+ "target-org": {
1194
+ "char": "o",
1195
+ "name": "target-org",
1196
+ "noCacheDefault": true,
1197
+ "required": true,
1198
+ "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
1199
+ "hasDynamicHelp": true,
1200
+ "multiple": false,
1201
+ "type": "option"
1202
+ },
1280
1203
  "api-version": {
1281
1204
  "description": "Override the api version used for api requests made by this command",
1282
1205
  "name": "api-version",
@@ -1284,62 +1207,105 @@
1284
1207
  "multiple": false,
1285
1208
  "type": "option"
1286
1209
  },
1287
- "agent-version": {
1288
- "name": "agent-version",
1289
- "required": true,
1290
- "summary": "Version of the agent (BotVersion).",
1210
+ "job-id": {
1211
+ "char": "i",
1212
+ "name": "job-id",
1213
+ "summary": "Job ID of the original agent test run.",
1291
1214
  "hasDynamicHelp": false,
1292
1215
  "multiple": false,
1293
1216
  "type": "option"
1294
1217
  },
1295
- "agent-file": {
1296
- "char": "f",
1297
- "name": "agent-file",
1298
- "required": true,
1299
- "summary": "Path to an agent (Bot) metadata file.",
1218
+ "use-most-recent": {
1219
+ "char": "r",
1220
+ "name": "use-most-recent",
1221
+ "summary": "Use the job ID of the most recent agent test run.",
1222
+ "allowNo": false,
1223
+ "type": "boolean"
1224
+ },
1225
+ "wait": {
1226
+ "char": "w",
1227
+ "name": "wait",
1228
+ "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1229
+ "default": "5 minutes",
1230
+ "hasDynamicHelp": true,
1231
+ "multiple": false,
1232
+ "type": "option"
1233
+ },
1234
+ "result-format": {
1235
+ "name": "result-format",
1236
+ "summary": "Format of the agent test run results.",
1237
+ "default": "human",
1300
1238
  "hasDynamicHelp": false,
1301
1239
  "multiple": false,
1240
+ "options": [
1241
+ "json",
1242
+ "human",
1243
+ "junit",
1244
+ "tap"
1245
+ ],
1302
1246
  "type": "option"
1247
+ },
1248
+ "output-dir": {
1249
+ "char": "d",
1250
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1251
+ "name": "output-dir",
1252
+ "summary": "Directory to write the agent test results into.",
1253
+ "hasDynamicHelp": false,
1254
+ "multiple": false,
1255
+ "type": "option"
1256
+ },
1257
+ "verbose": {
1258
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1259
+ "name": "verbose",
1260
+ "summary": "Show generated data in the test results output.",
1261
+ "allowNo": false,
1262
+ "type": "boolean"
1303
1263
  }
1304
1264
  },
1305
- "hasDynamicHelp": false,
1265
+ "hasDynamicHelp": true,
1306
1266
  "hiddenAliases": [],
1307
- "id": "agent:generate:template",
1267
+ "id": "agent:test:resume",
1308
1268
  "pluginAlias": "@salesforce/plugin-agent",
1309
1269
  "pluginName": "@salesforce/plugin-agent",
1310
1270
  "pluginType": "core",
1311
1271
  "strict": true,
1312
- "summary": "Generate an agent template from an existing agent in your DX project so you can then package the template in a managed package.",
1272
+ "summary": "Resume an agent test that you previously started in your org so you can view the test results.",
1313
1273
  "enableJsonFlag": true,
1314
- "requiresProject": true,
1315
1274
  "isESM": true,
1316
1275
  "relativePath": [
1317
1276
  "lib",
1318
1277
  "commands",
1319
1278
  "agent",
1320
- "generate",
1321
- "template.js"
1279
+ "test",
1280
+ "resume.js"
1322
1281
  ],
1323
1282
  "aliasPermutations": [],
1324
1283
  "permutations": [
1325
- "agent:generate:template",
1326
- "generate:agent:template",
1327
- "generate:template:agent",
1328
- "agent:template:generate",
1329
- "template:agent:generate",
1330
- "template:generate:agent"
1284
+ "agent:test:resume",
1285
+ "test:agent:resume",
1286
+ "test:resume:agent",
1287
+ "agent:resume:test",
1288
+ "resume:agent:test",
1289
+ "resume:test:agent"
1331
1290
  ]
1332
1291
  },
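Note: a minimal sketch of resuming a test run as the "agent test resume" entry above describes, assuming the CLI binary is "sf":

    # Wait up to 10 minutes for the original run to finish, then display its results
    sf agent test resume --job-id 4KBfake0000003F4AQ --wait 10

    # Or pick up the most recently started run without tracking the job ID yourself
    sf agent test resume --use-most-recent --result-format human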
1333
- "agent:generate:test-spec": {
1292
+ "agent:test:run": {
1334
1293
  "aliases": [],
1335
1294
  "args": {},
1336
- "description": "The first step when using Salesforce CLI to create an agent test in your org is to use this interactive command to generate a local YAML-formatted test spec file. The test spec YAML file contains information about the agent being tested, such as its API name, and then one or more test cases. This command uses the metadata components in your DX project when prompting for information, such as the agent API name; it doesn't look in your org.\n\nTo generate a specific agent test case, this command prompts you for this information; when possible, the command provides a list of options for you to choose from:\n\n- Utterance: Natural language statement, question, or command used to test the agent.\n- Expected topic: API name of the topic you expect the agent to use when responding to the utterance.\n- Expected actions: One or more API names of the expection actions the agent takes.\n- Expected outcome: Natural language description of the outcome you expect.\n- (Optional) Custom evaluation: Test an agent's response for specific strings or numbers.\n- (Optional) Conversation history: Boilerplate for additional context you can add to the test in the form of a conversation history.\n\nWhen your test spec is ready, you then run the \"agent test create\" command to actually create the test in your org and synchronize the metadata with your DX project. The metadata type for an agent test is AiEvaluationDefinition.\n\nIf you have an existing AiEvaluationDefinition metadata XML file in your DX project, you can generate its equivalent YAML test spec file with the --from-definition flag.",
1295
+ "description": "Use the --api-name flag to specify the name of the agent test you want to run. Use the output of the \"agent test list\" command to get the names of all the available agent tests in your org.\n\nBy default, this command starts the agent test in your org, but it doesn't wait for the test to finish. Instead, it displays the \"agent test resume\" command, with a job ID, that you execute to see the results of the test run, and then returns control of the terminal window to you. Use the --wait flag to specify the number of minutes for the command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, run \"agent test resume\".\n\nBy default, this command outputs test results in human-readable tables for each test case, if the test completes in time. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1337
1296
  "examples": [
1338
- "Generate an agent test spec YAML file interactively:\n<%= config.bin %> <%= command.id %>",
1339
- "Generate an agent test spec YAML file and specify a name for the new file; if the file exists, overwrite it without confirmation:\n<%= config.bin %> <%= command.id %> --output-file specs/Resort_Manager-new-version-testSpec.yaml --force-overwrite",
1340
- "Generate an agent test spec YAML file from an existing AiEvaluationDefinition metadata XML file in your DX project:\n<%= config.bin %> <%= command.id %> --from-definition force-app//main/default/aiEvaluationDefinitions/Resort_Manager_Tests.aiEvaluationDefinition-meta.xml"
1297
+ "Start an agent test called Resort_Manager_Test for an agent in your default org, don't wait for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test",
1298
+ "Start an agent test for an agent in an org with alias \"my-org\" and wait for 10 minutes for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --target-org my-org",
1299
+ "Start an agent test and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --output-dir ./test-results --result-format json"
1341
1300
  ],
1342
1301
  "flags": {
1302
+ "json": {
1303
+ "description": "Format output as json.",
1304
+ "helpGroup": "GLOBAL",
1305
+ "name": "json",
1306
+ "allowNo": false,
1307
+ "type": "boolean"
1308
+ },
1343
1309
  "flags-dir": {
1344
1310
  "helpGroup": "GLOBAL",
1345
1311
  "name": "flags-dir",
@@ -1348,54 +1314,95 @@
1348
1314
  "multiple": false,
1349
1315
  "type": "option"
1350
1316
  },
1351
- "from-definition": {
1352
- "char": "d",
1353
- "name": "from-definition",
1354
- "summary": "Filepath to the AIEvaluationDefinition metadata XML file in your DX project that you want to convert to a test spec YAML file.",
1317
+ "target-org": {
1318
+ "char": "o",
1319
+ "name": "target-org",
1320
+ "noCacheDefault": true,
1321
+ "required": true,
1322
+ "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
1323
+ "hasDynamicHelp": true,
1324
+ "multiple": false,
1325
+ "type": "option"
1326
+ },
1327
+ "api-version": {
1328
+ "description": "Override the api version used for api requests made by this command",
1329
+ "name": "api-version",
1355
1330
  "hasDynamicHelp": false,
1356
1331
  "multiple": false,
1357
1332
  "type": "option"
1358
1333
  },
1359
- "force-overwrite": {
1360
- "name": "force-overwrite",
1361
- "summary": "Don't prompt for confirmation when overwriting an existing test spec YAML file.",
1362
- "allowNo": false,
1363
- "type": "boolean"
1334
+ "api-name": {
1335
+ "char": "n",
1336
+ "name": "api-name",
1337
+ "summary": "API name of the agent test to run; corresponds to the name of the AiEvaluationDefinition metadata component that implements the agent test.",
1338
+ "hasDynamicHelp": false,
1339
+ "multiple": false,
1340
+ "type": "option"
1364
1341
  },
1365
- "output-file": {
1366
- "char": "f",
1367
- "name": "output-file",
1368
- "summary": "Name of the generated test spec YAML file. Default value is \"specs/<AGENT_API_NAME>-testSpec.yaml\".",
1342
+ "wait": {
1343
+ "char": "w",
1344
+ "name": "wait",
1345
+ "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1346
+ "hasDynamicHelp": true,
1347
+ "multiple": false,
1348
+ "type": "option"
1349
+ },
1350
+ "result-format": {
1351
+ "name": "result-format",
1352
+ "summary": "Format of the agent test run results.",
1353
+ "default": "human",
1354
+ "hasDynamicHelp": false,
1355
+ "multiple": false,
1356
+ "options": [
1357
+ "json",
1358
+ "human",
1359
+ "junit",
1360
+ "tap"
1361
+ ],
1362
+ "type": "option"
1363
+ },
1364
+ "output-dir": {
1365
+ "char": "d",
1366
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1367
+ "name": "output-dir",
1368
+ "summary": "Directory to write the agent test results into.",
1369
1369
  "hasDynamicHelp": false,
1370
1370
  "multiple": false,
1371
1371
  "type": "option"
1372
+ },
1373
+ "verbose": {
1374
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1375
+ "name": "verbose",
1376
+ "summary": "Show generated data in the test results output.",
1377
+ "allowNo": false,
1378
+ "type": "boolean"
1372
1379
  }
1373
1380
  },
1374
- "hasDynamicHelp": false,
1381
+ "hasDynamicHelp": true,
1375
1382
  "hiddenAliases": [],
1376
- "id": "agent:generate:test-spec",
1383
+ "id": "agent:test:run",
1377
1384
  "pluginAlias": "@salesforce/plugin-agent",
1378
1385
  "pluginName": "@salesforce/plugin-agent",
1379
1386
  "pluginType": "core",
1380
1387
  "strict": true,
1381
- "summary": "Generate an agent test spec, which is a YAML file that lists the test cases for testing a specific agent.",
1382
- "enableJsonFlag": false,
1388
+ "summary": "Start an agent test in your org.",
1389
+ "enableJsonFlag": true,
1383
1390
  "isESM": true,
1384
1391
  "relativePath": [
1385
1392
  "lib",
1386
1393
  "commands",
1387
1394
  "agent",
1388
- "generate",
1389
- "test-spec.js"
1395
+ "test",
1396
+ "run.js"
1390
1397
  ],
1391
1398
  "aliasPermutations": [],
1392
1399
  "permutations": [
1393
- "agent:generate:test-spec",
1394
- "generate:agent:test-spec",
1395
- "generate:test-spec:agent",
1396
- "agent:test-spec:generate",
1397
- "test-spec:agent:generate",
1398
- "test-spec:generate:agent"
1400
+ "agent:test:run",
1401
+ "test:agent:run",
1402
+ "test:run:agent",
1403
+ "agent:run:test",
1404
+ "run:agent:test",
1405
+ "run:test:agent"
1399
1406
  ]
1400
1407
  },
1401
1408
  "agent:validate:authoring-bundle": {
@@ -1483,5 +1490,5 @@
1483
1490
  ]
1484
1491
  }
1485
1492
  },
1486
- "version": "1.27.3"
1493
+ "version": "1.27.5"
1487
1494
  }