@salesforce/plugin-agent 1.28.0 → 1.29.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -623,22 +623,6 @@
623
623
  "summary": "Generate an authoring bundle from an existing agent spec YAML file.",
624
624
  "enableJsonFlag": true,
625
625
  "requiresProject": true,
626
- "FLAGGABLE_PROMPTS": {
627
- "name": {
628
- "message": "Name (label) of the authoring bundle; if not specified, you're prompted for the name.",
629
- "promptMessage": "Name (label) of the authoring bundle",
630
- "required": true
631
- },
632
- "api-name": {
633
- "message": "API name of the new authoring bundle; if not specified, the API name is derived from the authoring bundle name (label); the API name can't exist in the org.",
634
- "promptMessage": "API name of the new authoring bundle"
635
- },
636
- "spec": {
637
- "message": "Path to the agent spec YAML file. If you don't specify the flag, the command provides a list that you can choose from. Use the --no-spec flag to skip using an agent spec entirely.",
638
- "promptMessage": "Path to the agent spec YAML file",
639
- "required": true
640
- }
641
- },
642
626
  "isESM": true,
643
627
  "relativePath": [
644
628
  "lib",
@@ -891,14 +875,14 @@
891
875
  "authoring-bundle:publish:agent"
892
876
  ]
893
877
  },
894
- "agent:preview:end": {
878
+ "agent:test:create": {
895
879
  "aliases": [],
896
880
  "args": {},
897
- "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to end it. This command also displays the local directory where the session trace files are stored. \n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to end the session. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respectively. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
881
+ "description": "To run this command, you must have an agent test spec file, which is a YAML file that lists the test cases for testing a specific agent. Use the \"agent generate test-spec\" CLI command to generate a test spec file. Then specify the file to this command with the --spec flag, or run this command with no flags to be prompted.\n\nWhen this command completes, your org contains the new agent test, which you can view and edit using the Testing Center UI. This command also retrieves the metadata component (AiEvaluationDefinition) associated with the new test to your local Salesforce DX project and displays its filename.\n\nAfter you've created the test in the org, use the \"agent test run\" command to run it.",
898
882
  "examples": [
899
- "End a preview session of a published agent by specifying its session ID and API name ; use the default org:\n<%= config.bin %> <%= command.id %> --session-id <SESSION_ID> --api-name My_Published_Agent",
900
- "Similar to previous example, but don't specify a session ID; you get an error if the published agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent --target-org my-dev-org",
901
- "End a preview session of an agent using its authoring bundle API name; you get an error if the agent has more than one active session.\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Local_Agent"
883
+ "Create an agent test interactively and be prompted for the test spec and API name of the test in the org; use the default org:\n<%= config.bin %> <%= command.id %>",
884
+ "Create an agent test and use flags to specify all required information; if a test with the same API name already exists in the org, overwrite it without confirmation. Use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --force-overwrite --target-org my-org",
885
+ "Preview what the agent test metadata (AiEvaluationDefinition) looks like without deploying it to your default org:\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --preview"
902
886
  ],
903
887
  "flags": {
904
888
  "json": {
@@ -916,6 +900,20 @@
916
900
  "multiple": false,
917
901
  "type": "option"
918
902
  },
903
+ "api-name": {
904
+ "name": "api-name",
905
+ "summary": "API name of the new test; the API name must not exist in the org.",
906
+ "hasDynamicHelp": false,
907
+ "multiple": false,
908
+ "type": "option"
909
+ },
910
+ "spec": {
911
+ "name": "spec",
912
+ "summary": "Path to the test spec YAML file.",
913
+ "hasDynamicHelp": false,
914
+ "multiple": false,
915
+ "type": "option"
916
+ },
919
917
  "target-org": {
920
918
  "char": "o",
921
919
  "name": "target-org",
@@ -933,67 +931,53 @@
933
931
  "multiple": false,
934
932
  "type": "option"
935
933
  },
936
- "session-id": {
937
- "name": "session-id",
938
- "required": false,
939
- "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see the list of all sessions.",
940
- "hasDynamicHelp": false,
941
- "multiple": false,
942
- "type": "option"
943
- },
944
- "api-name": {
945
- "char": "n",
946
- "name": "api-name",
947
- "summary": "API name of the activated published agent you want to preview.",
948
- "hasDynamicHelp": false,
949
- "multiple": false,
950
- "type": "option"
934
+ "preview": {
935
+ "name": "preview",
936
+ "summary": "Preview the test metadata file (AiEvaluationDefinition) without deploying to your org.",
937
+ "allowNo": false,
938
+ "type": "boolean"
951
939
  },
952
- "authoring-bundle": {
953
- "name": "authoring-bundle",
954
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
955
- "hasDynamicHelp": false,
956
- "multiple": false,
957
- "type": "option"
940
+ "force-overwrite": {
941
+ "name": "force-overwrite",
942
+ "summary": "Don't prompt for confirmation when overwriting an existing test (based on API name) in your org.",
943
+ "allowNo": false,
944
+ "type": "boolean"
958
945
  }
959
946
  },
960
947
  "hasDynamicHelp": true,
961
948
  "hiddenAliases": [],
962
- "id": "agent:preview:end",
949
+ "id": "agent:test:create",
963
950
  "pluginAlias": "@salesforce/plugin-agent",
964
951
  "pluginName": "@salesforce/plugin-agent",
965
952
  "pluginType": "core",
966
- "state": "beta",
967
953
  "strict": true,
968
- "summary": "End an existing programmatic agent preview session and get trace location.",
954
+ "summary": "Create an agent test in your org using a local test spec YAML file.",
969
955
  "enableJsonFlag": true,
970
- "requiresProject": true,
971
956
  "isESM": true,
972
957
  "relativePath": [
973
958
  "lib",
974
959
  "commands",
975
960
  "agent",
976
- "preview",
977
- "end.js"
961
+ "test",
962
+ "create.js"
978
963
  ],
979
964
  "aliasPermutations": [],
980
965
  "permutations": [
981
- "agent:preview:end",
982
- "preview:agent:end",
983
- "preview:end:agent",
984
- "agent:end:preview",
985
- "end:agent:preview",
986
- "end:preview:agent"
966
+ "agent:test:create",
967
+ "test:agent:create",
968
+ "test:create:agent",
969
+ "agent:create:test",
970
+ "create:agent:test",
971
+ "create:test:agent"
987
972
  ]
988
973
  },
989
- "agent:preview:send": {
974
+ "agent:test:list": {
990
975
  "aliases": [],
991
976
  "args": {},
992
- "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to send the agent a message (utterance). This command then displays the agent's response.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to send a message. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respectively. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
977
+ "description": "The command outputs a table with the name (API name) of each test along with its unique ID and the date it was created in the org.",
993
978
  "examples": [
994
- "Send a message to an activated published agent using its API name and session ID; use the default org:\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --session-id <SESSION_ID>",
995
- "Similar to previous example, but don't specify a session ID; you get an error if the agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --target-org my-dev-org",
996
- "Send a message to an agent using its authoring bundle API name; you get an error if the agent has more than one active session:\n<%= config.bin %> <%= command.id %> --utterance \"what can you help me with?\" --authoring-bundle My_Local_Agent"
979
+ "List the agent tests in your default org:\n<%= config.bin %> <%= command.id %>",
980
+ "List the agent tests in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --target-org my-org"
997
981
  ],
998
982
  "flags": {
999
983
  "json": {
@@ -1027,75 +1011,43 @@
1027
1011
  "hasDynamicHelp": false,
1028
1012
  "multiple": false,
1029
1013
  "type": "option"
1030
- },
1031
- "session-id": {
1032
- "name": "session-id",
1033
- "required": false,
1034
- "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see list of all sessions.",
1035
- "hasDynamicHelp": false,
1036
- "multiple": false,
1037
- "type": "option"
1038
- },
1039
- "utterance": {
1040
- "char": "u",
1041
- "name": "utterance",
1042
- "required": true,
1043
- "summary": "Utterance to send to the agent, enclosed in double quotes.",
1044
- "hasDynamicHelp": false,
1045
- "multiple": false,
1046
- "type": "option"
1047
- },
1048
- "api-name": {
1049
- "char": "n",
1050
- "name": "api-name",
1051
- "summary": "API name of the activated published agent you want to preview.",
1052
- "hasDynamicHelp": false,
1053
- "multiple": false,
1054
- "type": "option"
1055
- },
1056
- "authoring-bundle": {
1057
- "name": "authoring-bundle",
1058
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1059
- "hasDynamicHelp": false,
1060
- "multiple": false,
1061
- "type": "option"
1062
1014
  }
1063
1015
  },
1064
1016
  "hasDynamicHelp": true,
1065
1017
  "hiddenAliases": [],
1066
- "id": "agent:preview:send",
1018
+ "id": "agent:test:list",
1067
1019
  "pluginAlias": "@salesforce/plugin-agent",
1068
1020
  "pluginName": "@salesforce/plugin-agent",
1069
1021
  "pluginType": "core",
1070
- "state": "beta",
1071
1022
  "strict": true,
1072
- "summary": "Send a message to an existing agent preview session.",
1023
+ "summary": "List the available agent tests in your org.",
1073
1024
  "enableJsonFlag": true,
1074
- "requiresProject": true,
1075
1025
  "isESM": true,
1076
1026
  "relativePath": [
1077
1027
  "lib",
1078
1028
  "commands",
1079
1029
  "agent",
1080
- "preview",
1081
- "send.js"
1030
+ "test",
1031
+ "list.js"
1082
1032
  ],
1083
1033
  "aliasPermutations": [],
1084
1034
  "permutations": [
1085
- "agent:preview:send",
1086
- "preview:agent:send",
1087
- "preview:send:agent",
1088
- "agent:send:preview",
1089
- "send:agent:preview",
1090
- "send:preview:agent"
1035
+ "agent:test:list",
1036
+ "test:agent:list",
1037
+ "test:list:agent",
1038
+ "agent:list:test",
1039
+ "list:agent:test",
1040
+ "list:test:agent"
1091
1041
  ]
1092
1042
  },
1093
- "agent:preview:sessions": {
1043
+ "agent:test:results": {
1094
1044
  "aliases": [],
1095
1045
  "args": {},
1096
- "description": "This command lists the agent preview sessions that were started with the \"agent preview start\" command and are still in the local cache. Use this command to discover specific session IDs that you can pass to the \"agent preview send\" or \"agent preview end\" commands with the --session-id flag.\n\nProgrammatic agent preview sessions can be started for both published activated agents and by using an agent's local authoring bundle, which contains its Agent Script file. In this command's output table, the Agent column contains either the API name of the authoring bundle or the published agent, whichever was used when starting the session. In the table, if the same API name has multiple rows with different session IDs, then it means that you previously started multiple preview sessions with the associated agent.",
1046
+ "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format flag to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1097
1047
  "examples": [
1098
- "List all cached agent preview sessions:\n<%= config.bin %> <%= command.id %>"
1048
+ "Get the results of an agent test run in your default org using its job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1049
+ "Get the results of the most recently run agent test in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --use-most-recent --target-org my-org",
1050
+ "Get the results of the most recently run agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1099
1051
  ],
1100
1052
  "flags": {
1101
1053
  "json": {
@@ -1112,45 +1064,99 @@
1112
1064
  "hasDynamicHelp": false,
1113
1065
  "multiple": false,
1114
1066
  "type": "option"
1067
+ },
1068
+ "target-org": {
1069
+ "char": "o",
1070
+ "name": "target-org",
1071
+ "noCacheDefault": true,
1072
+ "required": true,
1073
+ "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
1074
+ "hasDynamicHelp": true,
1075
+ "multiple": false,
1076
+ "type": "option"
1077
+ },
1078
+ "api-version": {
1079
+ "description": "Override the api version used for api requests made by this command",
1080
+ "name": "api-version",
1081
+ "hasDynamicHelp": false,
1082
+ "multiple": false,
1083
+ "type": "option"
1084
+ },
1085
+ "job-id": {
1086
+ "char": "i",
1087
+ "name": "job-id",
1088
+ "required": true,
1089
+ "summary": "Job ID of the completed agent test run.",
1090
+ "hasDynamicHelp": false,
1091
+ "multiple": false,
1092
+ "type": "option"
1093
+ },
1094
+ "result-format": {
1095
+ "name": "result-format",
1096
+ "summary": "Format of the agent test run results.",
1097
+ "default": "human",
1098
+ "hasDynamicHelp": false,
1099
+ "multiple": false,
1100
+ "options": [
1101
+ "json",
1102
+ "human",
1103
+ "junit",
1104
+ "tap"
1105
+ ],
1106
+ "type": "option"
1107
+ },
1108
+ "output-dir": {
1109
+ "char": "d",
1110
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1111
+ "name": "output-dir",
1112
+ "summary": "Directory to write the agent test results into.",
1113
+ "hasDynamicHelp": false,
1114
+ "multiple": false,
1115
+ "type": "option"
1116
+ },
1117
+ "verbose": {
1118
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1119
+ "name": "verbose",
1120
+ "summary": "Show generated data in the test results output.",
1121
+ "allowNo": false,
1122
+ "type": "boolean"
1115
1123
  }
1116
1124
  },
1117
- "hasDynamicHelp": false,
1125
+ "hasDynamicHelp": true,
1118
1126
  "hiddenAliases": [],
1119
- "id": "agent:preview:sessions",
1127
+ "id": "agent:test:results",
1120
1128
  "pluginAlias": "@salesforce/plugin-agent",
1121
1129
  "pluginName": "@salesforce/plugin-agent",
1122
1130
  "pluginType": "core",
1123
- "state": "beta",
1124
1131
  "strict": true,
1125
- "summary": "List all known programmatic agent preview sessions.",
1132
+ "summary": "Get the results of a completed agent test run.",
1126
1133
  "enableJsonFlag": true,
1127
- "requiresProject": true,
1128
1134
  "isESM": true,
1129
1135
  "relativePath": [
1130
1136
  "lib",
1131
1137
  "commands",
1132
1138
  "agent",
1133
- "preview",
1134
- "sessions.js"
1139
+ "test",
1140
+ "results.js"
1135
1141
  ],
1136
1142
  "aliasPermutations": [],
1137
1143
  "permutations": [
1138
- "agent:preview:sessions",
1139
- "preview:agent:sessions",
1140
- "preview:sessions:agent",
1141
- "agent:sessions:preview",
1142
- "sessions:agent:preview",
1143
- "sessions:preview:agent"
1144
+ "agent:test:results",
1145
+ "test:agent:results",
1146
+ "test:results:agent",
1147
+ "agent:results:test",
1148
+ "results:agent:test",
1149
+ "results:test:agent"
1144
1150
  ]
1145
1151
  },
1146
- "agent:preview:start": {
1152
+ "agent:test:resume": {
1147
1153
  "aliases": [],
1148
1154
  "args": {},
1149
- "description": "This command outputs a session ID that you then use with the \"agent preview send\" command to send an utterance to the agent. Use the \"agent preview sessions\" command to list all active sessions and the \"agent preview end\" command to end a specific session.\n\nIdentify the agent you want to start previewing with either the --authoring-bundle flag to specify a local authoring bundle's API name or --api-name to specify an activated published agent's API name. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory. \n\nWhen starting a preview session using the authoring bundle, which contains the agent's Agent Script file, the preview uses mocked actions by default. Specify --use-live-actions for live mode, which uses the real Apex classes, flows, etc, in the org for the actions.",
1155
+ "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nUse the --wait flag to specify the number of minutes for this command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, the CLI returns control of the terminal to you, and you must run \"agent test resume\" again.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format flag to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1150
1156
  "examples": [
1151
- "Start a programmatic agent preview session by specifying an authoring bundle; uses mocked actions by default. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --target-org my-dev-org",
1152
- "Similar to previous example but use live actions and the default org:\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --use-live-actions",
1153
- "Start a preview session with an activated published agent:\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent"
1157
+ "Resume an agent test in your default org using a job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1158
+ "Resume the most recently-run agent test in an org with alias \"my-org\"; wait 10 minutes for the tests to finish:\n<%= config.bin %> <%= command.id %> --use-most-recent --wait 10 --target-org my-org",
1159
+ "Resume the most recent agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1154
1160
  ],
1155
1161
  "flags": {
1156
1162
  "json": {
@@ -1185,65 +1191,96 @@
1185
1191
  "multiple": false,
1186
1192
  "type": "option"
1187
1193
  },
1188
- "api-name": {
1189
- "char": "n",
1190
- "name": "api-name",
1191
- "summary": "API name of the activated published agent you want to preview.",
1194
+ "job-id": {
1195
+ "char": "i",
1196
+ "name": "job-id",
1197
+ "summary": "Job ID of the original agent test run.",
1192
1198
  "hasDynamicHelp": false,
1193
1199
  "multiple": false,
1194
1200
  "type": "option"
1195
1201
  },
1196
- "authoring-bundle": {
1197
- "name": "authoring-bundle",
1198
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1202
+ "use-most-recent": {
1203
+ "char": "r",
1204
+ "name": "use-most-recent",
1205
+ "summary": "Use the job ID of the most recent agent test run.",
1206
+ "allowNo": false,
1207
+ "type": "boolean"
1208
+ },
1209
+ "wait": {
1210
+ "char": "w",
1211
+ "name": "wait",
1212
+ "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1213
+ "default": "5 minutes",
1214
+ "hasDynamicHelp": true,
1215
+ "multiple": false,
1216
+ "type": "option"
1217
+ },
1218
+ "result-format": {
1219
+ "name": "result-format",
1220
+ "summary": "Format of the agent test run results.",
1221
+ "default": "human",
1199
1222
  "hasDynamicHelp": false,
1200
1223
  "multiple": false,
1224
+ "options": [
1225
+ "json",
1226
+ "human",
1227
+ "junit",
1228
+ "tap"
1229
+ ],
1201
1230
  "type": "option"
1202
1231
  },
1203
- "use-live-actions": {
1204
- "name": "use-live-actions",
1205
- "summary": "Use real actions in the org; if not specified, preview uses AI to simulate (mock) actions.",
1232
+ "output-dir": {
1233
+ "char": "d",
1234
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1235
+ "name": "output-dir",
1236
+ "summary": "Directory to write the agent test results into.",
1237
+ "hasDynamicHelp": false,
1238
+ "multiple": false,
1239
+ "type": "option"
1240
+ },
1241
+ "verbose": {
1242
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1243
+ "name": "verbose",
1244
+ "summary": "Show generated data in the test results output.",
1206
1245
  "allowNo": false,
1207
1246
  "type": "boolean"
1208
1247
  }
1209
1248
  },
1210
1249
  "hasDynamicHelp": true,
1211
1250
  "hiddenAliases": [],
1212
- "id": "agent:preview:start",
1251
+ "id": "agent:test:resume",
1213
1252
  "pluginAlias": "@salesforce/plugin-agent",
1214
1253
  "pluginName": "@salesforce/plugin-agent",
1215
1254
  "pluginType": "core",
1216
- "state": "beta",
1217
1255
  "strict": true,
1218
- "summary": "Start a programmatic agent preview session.",
1256
+ "summary": "Resume an agent test that you previously started in your org so you can view the test results.",
1219
1257
  "enableJsonFlag": true,
1220
- "requiresProject": true,
1221
1258
  "isESM": true,
1222
1259
  "relativePath": [
1223
1260
  "lib",
1224
1261
  "commands",
1225
1262
  "agent",
1226
- "preview",
1227
- "start.js"
1263
+ "test",
1264
+ "resume.js"
1228
1265
  ],
1229
1266
  "aliasPermutations": [],
1230
1267
  "permutations": [
1231
- "agent:preview:start",
1232
- "preview:agent:start",
1233
- "preview:start:agent",
1234
- "agent:start:preview",
1235
- "start:agent:preview",
1236
- "start:preview:agent"
1268
+ "agent:test:resume",
1269
+ "test:agent:resume",
1270
+ "test:resume:agent",
1271
+ "agent:resume:test",
1272
+ "resume:agent:test",
1273
+ "resume:test:agent"
1237
1274
  ]
1238
1275
  },
1239
- "agent:test:create": {
1276
+ "agent:test:run": {
1240
1277
  "aliases": [],
1241
1278
  "args": {},
1242
- "description": "To run this command, you must have an agent test spec file, which is a YAML file that lists the test cases for testing a specific agent. Use the \"agent generate test-spec\" CLI command to generate a test spec file. Then specify the file to this command with the --spec flag, or run this command with no flags to be prompted.\n\nWhen this command completes, your org contains the new agent test, which you can view and edit using the Testing Center UI. This command also retrieves the metadata component (AiEvaluationDefinition) associated with the new test to your local Salesforce DX project and displays its filename.\n\nAfter you've created the test in the org, use the \"agent test run\" command to run it.",
1279
+ "description": "Use the --api-name flag to specify the name of the agent test you want to run. Use the output of the \"agent test list\" command to get the names of all the available agent tests in your org.\n\nBy default, this command starts the agent test in your org, but it doesn't wait for the test to finish. Instead, it displays the \"agent test resume\" command, with a job ID, that you execute to see the results of the test run, and then returns control of the terminal window to you. Use the --wait flag to specify the number of minutes for the command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, run \"agent test resume\".\n\nBy default, this command outputs test results in human-readable tables for each test case, if the test completes in time. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format flag to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1243
1280
  "examples": [
1244
- "Create an agent test interactively and be prompted for the test spec and API name of the test in the org; use the default org:\n<%= config.bin %> <%= command.id %>",
1245
- "Create an agent test and use flags to specify all required information; if a test with same API name already exists in the org, overwrite it without confirmation. Use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --force-overwrite --target-org my-org",
1246
- "Preview what the agent test metadata (AiEvaluationDefinition) looks like without deploying it to your default org:\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --preview"
1281
+ "Start an agent test called Resort_Manager_Test for an agent in your default org, don't wait for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test",
1282
+ "Start an agent test for an agent in an org with alias \"my-org\" and wait for 10 minutes for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --target-org my-org",
1283
+ "Start an agent test and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --output-dir ./test-results --result-format json"
1247
1284
  ],
1248
1285
  "flags": {
1249
1286
  "json": {
@@ -1261,20 +1298,6 @@
1261
1298
  "multiple": false,
1262
1299
  "type": "option"
1263
1300
  },
1264
- "api-name": {
1265
- "name": "api-name",
1266
- "summary": "API name of the new test; the API name must not exist in the org.",
1267
- "hasDynamicHelp": false,
1268
- "multiple": false,
1269
- "type": "option"
1270
- },
1271
- "spec": {
1272
- "name": "spec",
1273
- "summary": "Path to the test spec YAML file.",
1274
- "hasDynamicHelp": false,
1275
- "multiple": false,
1276
- "type": "option"
1277
- },
1278
1301
  "target-org": {
1279
1302
  "char": "o",
1280
1303
  "name": "target-org",
@@ -1292,27 +1315,61 @@
1292
1315
  "multiple": false,
1293
1316
  "type": "option"
1294
1317
  },
1295
- "preview": {
1296
- "name": "preview",
1297
- "summary": "Preview the test metadata file (AiEvaluationDefinition) without deploying to your org.",
1298
- "allowNo": false,
1299
- "type": "boolean"
1318
+ "api-name": {
1319
+ "char": "n",
1320
+ "name": "api-name",
1321
+ "summary": "API name of the agent test to run; corresponds to the name of the AiEvaluationDefinition metadata component that implements the agent test.",
1322
+ "hasDynamicHelp": false,
1323
+ "multiple": false,
1324
+ "type": "option"
1300
1325
  },
1301
- "force-overwrite": {
1302
- "name": "force-overwrite",
1303
- "summary": "Don't prompt for confirmation when overwriting an existing test (based on API name) in your org.",
1326
+ "wait": {
1327
+ "char": "w",
1328
+ "name": "wait",
1329
+ "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1330
+ "hasDynamicHelp": true,
1331
+ "multiple": false,
1332
+ "type": "option"
1333
+ },
1334
+ "result-format": {
1335
+ "name": "result-format",
1336
+ "summary": "Format of the agent test run results.",
1337
+ "default": "human",
1338
+ "hasDynamicHelp": false,
1339
+ "multiple": false,
1340
+ "options": [
1341
+ "json",
1342
+ "human",
1343
+ "junit",
1344
+ "tap"
1345
+ ],
1346
+ "type": "option"
1347
+ },
1348
+ "output-dir": {
1349
+ "char": "d",
1350
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1351
+ "name": "output-dir",
1352
+ "summary": "Directory to write the agent test results into.",
1353
+ "hasDynamicHelp": false,
1354
+ "multiple": false,
1355
+ "type": "option"
1356
+ },
1357
+ "verbose": {
1358
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1359
+ "name": "verbose",
1360
+ "summary": "Show generated data in the test results output.",
1304
1361
  "allowNo": false,
1305
1362
  "type": "boolean"
1306
1363
  }
1307
1364
  },
1308
1365
  "hasDynamicHelp": true,
1309
1366
  "hiddenAliases": [],
1310
- "id": "agent:test:create",
1367
+ "id": "agent:test:run",
1311
1368
  "pluginAlias": "@salesforce/plugin-agent",
1312
1369
  "pluginName": "@salesforce/plugin-agent",
1313
1370
  "pluginType": "core",
1314
1371
  "strict": true,
1315
- "summary": "Create an agent test in your org using a local test spec YAML file.",
1372
+ "summary": "Start an agent test in your org.",
1316
1373
  "enableJsonFlag": true,
1317
1374
  "isESM": true,
1318
1375
  "relativePath": [
@@ -1320,25 +1377,26 @@
1320
1377
  "commands",
1321
1378
  "agent",
1322
1379
  "test",
1323
- "create.js"
1380
+ "run.js"
1324
1381
  ],
1325
1382
  "aliasPermutations": [],
1326
1383
  "permutations": [
1327
- "agent:test:create",
1328
- "test:agent:create",
1329
- "test:create:agent",
1330
- "agent:create:test",
1331
- "create:agent:test",
1332
- "create:test:agent"
1384
+ "agent:test:run",
1385
+ "test:agent:run",
1386
+ "test:run:agent",
1387
+ "agent:run:test",
1388
+ "run:agent:test",
1389
+ "run:test:agent"
1333
1390
  ]
1334
1391
  },
1335
- "agent:test:list": {
1392
+ "agent:preview:end": {
1336
1393
  "aliases": [],
1337
1394
  "args": {},
1338
- "description": "The command outputs a table with the name (API name) of each test along with its unique ID and the date it was created in the org.",
1395
+ "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to end it. This command also displays the local directory where the session trace files are stored. \n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to end the session. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respecitvely. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
1339
1396
  "examples": [
1340
- "List the agent tests in your default org:\n<%= config.bin %> <%= command.id %>",
1341
- "List the agent tests in an org with alias \"my-org\"\"\n<%= config.bin %> <%= command.id %> --target-org my-org"
1397
+ "End a preview session of a published agent by specifying its session ID and API name ; use the default org:\n<%= config.bin %> <%= command.id %> --session-id <SESSION_ID> --api-name My_Published_Agent",
1398
+ "Similar to previous example, but don't specify a session ID; you get an error if the published agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent --target-org my-dev-org",
1399
+ "End a preview session of an agent using its authoring bundle API name; you get an error if the agent has more than one active session.\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Local_Agent"
1342
1400
  ],
1343
1401
  "flags": {
1344
1402
  "json": {
@@ -1372,43 +1430,68 @@
1372
1430
  "hasDynamicHelp": false,
1373
1431
  "multiple": false,
1374
1432
  "type": "option"
1433
+ },
1434
+ "session-id": {
1435
+ "name": "session-id",
1436
+ "required": false,
1437
+ "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see the list of all sessions.",
1438
+ "hasDynamicHelp": false,
1439
+ "multiple": false,
1440
+ "type": "option"
1441
+ },
1442
+ "api-name": {
1443
+ "char": "n",
1444
+ "name": "api-name",
1445
+ "summary": "API name of the activated published agent you want to preview.",
1446
+ "hasDynamicHelp": false,
1447
+ "multiple": false,
1448
+ "type": "option"
1449
+ },
1450
+ "authoring-bundle": {
1451
+ "name": "authoring-bundle",
1452
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1453
+ "hasDynamicHelp": false,
1454
+ "multiple": false,
1455
+ "type": "option"
1375
1456
  }
1376
1457
  },
1377
1458
  "hasDynamicHelp": true,
1378
1459
  "hiddenAliases": [],
1379
- "id": "agent:test:list",
1460
+ "id": "agent:preview:end",
1380
1461
  "pluginAlias": "@salesforce/plugin-agent",
1381
1462
  "pluginName": "@salesforce/plugin-agent",
1382
1463
  "pluginType": "core",
1464
+ "state": "beta",
1383
1465
  "strict": true,
1384
- "summary": "List the available agent tests in your org.",
1466
+ "summary": "End an existing programmatic agent preview session and get trace location.",
1385
1467
  "enableJsonFlag": true,
1468
+ "requiresProject": true,
1386
1469
  "isESM": true,
1387
1470
  "relativePath": [
1388
1471
  "lib",
1389
1472
  "commands",
1390
1473
  "agent",
1391
- "test",
1392
- "list.js"
1474
+ "preview",
1475
+ "end.js"
1393
1476
  ],
1394
1477
  "aliasPermutations": [],
1395
1478
  "permutations": [
1396
- "agent:test:list",
1397
- "test:agent:list",
1398
- "test:list:agent",
1399
- "agent:list:test",
1400
- "list:agent:test",
1401
- "list:test:agent"
1479
+ "agent:preview:end",
1480
+ "preview:agent:end",
1481
+ "preview:end:agent",
1482
+ "agent:end:preview",
1483
+ "end:agent:preview",
1484
+ "end:preview:agent"
1402
1485
  ]
1403
1486
  },
1404
- "agent:test:results": {
1487
+ "agent:preview:send": {
1405
1488
  "aliases": [],
1406
1489
  "args": {},
1407
- "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1490
+ "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to send the agent a message (utterance). This command then displays the agent's response.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to send a message. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respecitvely. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
1408
1491
  "examples": [
1409
- "Get the results of an agent test run in your default org using its job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1410
- "Get the results of the most recently run agent test in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --use-most-recent --target-org my-org",
1411
- "Get the results of the most recently run agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1492
+ "Send a message to an activated published agent using its API name and session ID; use the default org:\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --session-id <SESSION_ID>",
1493
+ "Similar to previous example, but don't specify a session ID; you get an error if the agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --target-org my-dev-org",
1494
+ "Send a message to an agent using its authoring bundle API name; you get an error if the agent has more than one active session:\n<%= config.bin %> <%= command.id %> --utterance \"what can you help me with?\" --authoring-bundle My_Local_Agent"
1412
1495
  ],
1413
1496
  "flags": {
1414
1497
  "json": {
@@ -1443,81 +1526,74 @@
1443
1526
  "multiple": false,
1444
1527
  "type": "option"
1445
1528
  },
1446
- "job-id": {
1447
- "char": "i",
1448
- "name": "job-id",
1449
- "required": true,
1450
- "summary": "Job ID of the completed agent test run.",
1529
+ "session-id": {
1530
+ "name": "session-id",
1531
+ "required": false,
1532
+ "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see list of all sessions.",
1451
1533
  "hasDynamicHelp": false,
1452
1534
  "multiple": false,
1453
1535
  "type": "option"
1454
1536
  },
1455
- "result-format": {
1456
- "name": "result-format",
1457
- "summary": "Format of the agent test run results.",
1458
- "default": "human",
1537
+ "utterance": {
1538
+ "char": "u",
1539
+ "name": "utterance",
1540
+ "required": true,
1541
+ "summary": "Utterance to send to the agent, enclosed in double quotes.",
1459
1542
  "hasDynamicHelp": false,
1460
1543
  "multiple": false,
1461
- "options": [
1462
- "json",
1463
- "human",
1464
- "junit",
1465
- "tap"
1466
- ],
1467
1544
  "type": "option"
1468
1545
  },
1469
- "output-dir": {
1470
- "char": "d",
1471
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1472
- "name": "output-dir",
1473
- "summary": "Directory to write the agent test results into.",
1546
+ "api-name": {
1547
+ "char": "n",
1548
+ "name": "api-name",
1549
+ "summary": "API name of the activated published agent you want to preview.",
1474
1550
  "hasDynamicHelp": false,
1475
1551
  "multiple": false,
1476
1552
  "type": "option"
1477
1553
  },
1478
- "verbose": {
1479
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1480
- "name": "verbose",
1481
- "summary": "Show generated data in the test results output.",
1482
- "allowNo": false,
1483
- "type": "boolean"
1554
+ "authoring-bundle": {
1555
+ "name": "authoring-bundle",
1556
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1557
+ "hasDynamicHelp": false,
1558
+ "multiple": false,
1559
+ "type": "option"
1484
1560
  }
1485
1561
  },
1486
1562
  "hasDynamicHelp": true,
1487
1563
  "hiddenAliases": [],
1488
- "id": "agent:test:results",
1564
+ "id": "agent:preview:send",
1489
1565
  "pluginAlias": "@salesforce/plugin-agent",
1490
1566
  "pluginName": "@salesforce/plugin-agent",
1491
1567
  "pluginType": "core",
1568
+ "state": "beta",
1492
1569
  "strict": true,
1493
- "summary": "Get the results of a completed agent test run.",
1570
+ "summary": "Send a message to an existing agent preview session.",
1494
1571
  "enableJsonFlag": true,
1572
+ "requiresProject": true,
1495
1573
  "isESM": true,
1496
1574
  "relativePath": [
1497
1575
  "lib",
1498
1576
  "commands",
1499
1577
  "agent",
1500
- "test",
1501
- "results.js"
1578
+ "preview",
1579
+ "send.js"
1502
1580
  ],
1503
1581
  "aliasPermutations": [],
1504
1582
  "permutations": [
1505
- "agent:test:results",
1506
- "test:agent:results",
1507
- "test:results:agent",
1508
- "agent:results:test",
1509
- "results:agent:test",
1510
- "results:test:agent"
1583
+ "agent:preview:send",
1584
+ "preview:agent:send",
1585
+ "preview:send:agent",
1586
+ "agent:send:preview",
1587
+ "send:agent:preview",
1588
+ "send:preview:agent"
1511
1589
  ]
1512
1590
  },
1513
- "agent:test:resume": {
1591
+ "agent:preview:sessions": {
1514
1592
  "aliases": [],
1515
1593
  "args": {},
1516
- "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nUse the --wait flag to specify the number of minutes for this command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, the CLI returns control of the terminal to you, and you must run \"agent test resume\" again.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1594
+ "description": "This command lists the agent preview sessions that were started with the \"agent preview start\" command and are still in the local cache. Use this command to discover specific session IDs that you can pass to the \"agent preview send\" or \"agent preview end\" commands with the --session-id flag.\n\nProgrammatic agent preview sessions can be started for both published activated agents and by using an agent's local authoring bundle, which contains its Agent Script file. In this command's output table, the Agent column contains either the API name of the authoring bundle or the published agent, whichever was used when starting the session. In the table, if the same API name has multiple rows with different session IDs, then it means that you previously started multiple preview sessions with the associated agent.",
1517
1595
  "examples": [
1518
- "Resume an agent test in your default org using a job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1519
- "Resume the most recently-run agent test in an org with alias \"my-org\" org; wait 10 minutes for the tests to finish:\n<%= config.bin %> <%= command.id %> --use-most-recent --wait 10 --target-org my-org",
1520
- "Resume the most recent agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1596
+ "List all cached agent preview sessions:\n<%= config.bin %> <%= command.id %>"
1521
1597
  ],
1522
1598
  "flags": {
1523
1599
  "json": {
@@ -1534,114 +1610,45 @@
1534
1610
  "hasDynamicHelp": false,
1535
1611
  "multiple": false,
1536
1612
  "type": "option"
1537
- },
1538
- "target-org": {
1539
- "char": "o",
1540
- "name": "target-org",
1541
- "noCacheDefault": true,
1542
- "required": true,
1543
- "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
1544
- "hasDynamicHelp": true,
1545
- "multiple": false,
1546
- "type": "option"
1547
- },
1548
- "api-version": {
1549
- "description": "Override the api version used for api requests made by this command",
1550
- "name": "api-version",
1551
- "hasDynamicHelp": false,
1552
- "multiple": false,
1553
- "type": "option"
1554
- },
1555
- "job-id": {
1556
- "char": "i",
1557
- "name": "job-id",
1558
- "summary": "Job ID of the original agent test run.",
1559
- "hasDynamicHelp": false,
1560
- "multiple": false,
1561
- "type": "option"
1562
- },
1563
- "use-most-recent": {
1564
- "char": "r",
1565
- "name": "use-most-recent",
1566
- "summary": "Use the job ID of the most recent agent test run.",
1567
- "allowNo": false,
1568
- "type": "boolean"
1569
- },
1570
- "wait": {
1571
- "char": "w",
1572
- "name": "wait",
1573
- "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1574
- "default": "5 minutes",
1575
- "hasDynamicHelp": true,
1576
- "multiple": false,
1577
- "type": "option"
1578
- },
1579
- "result-format": {
1580
- "name": "result-format",
1581
- "summary": "Format of the agent test run results.",
1582
- "default": "human",
1583
- "hasDynamicHelp": false,
1584
- "multiple": false,
1585
- "options": [
1586
- "json",
1587
- "human",
1588
- "junit",
1589
- "tap"
1590
- ],
1591
- "type": "option"
1592
- },
1593
- "output-dir": {
1594
- "char": "d",
1595
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1596
- "name": "output-dir",
1597
- "summary": "Directory to write the agent test results into.",
1598
- "hasDynamicHelp": false,
1599
- "multiple": false,
1600
- "type": "option"
1601
- },
1602
- "verbose": {
1603
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1604
- "name": "verbose",
1605
- "summary": "Show generated data in the test results output.",
1606
- "allowNo": false,
1607
- "type": "boolean"
1608
1613
  }
1609
1614
  },
1610
- "hasDynamicHelp": true,
1615
+ "hasDynamicHelp": false,
1611
1616
  "hiddenAliases": [],
1612
- "id": "agent:test:resume",
1617
+ "id": "agent:preview:sessions",
1613
1618
  "pluginAlias": "@salesforce/plugin-agent",
1614
1619
  "pluginName": "@salesforce/plugin-agent",
1615
1620
  "pluginType": "core",
1621
+ "state": "beta",
1616
1622
  "strict": true,
1617
- "summary": "Resume an agent test that you previously started in your org so you can view the test results.",
1623
+ "summary": "List all known programmatic agent preview sessions.",
1618
1624
  "enableJsonFlag": true,
1625
+ "requiresProject": true,
1619
1626
  "isESM": true,
1620
1627
  "relativePath": [
1621
1628
  "lib",
1622
1629
  "commands",
1623
1630
  "agent",
1624
- "test",
1625
- "resume.js"
1631
+ "preview",
1632
+ "sessions.js"
1626
1633
  ],
1627
1634
  "aliasPermutations": [],
1628
1635
  "permutations": [
1629
- "agent:test:resume",
1630
- "test:agent:resume",
1631
- "test:resume:agent",
1632
- "agent:resume:test",
1633
- "resume:agent:test",
1634
- "resume:test:agent"
1636
+ "agent:preview:sessions",
1637
+ "preview:agent:sessions",
1638
+ "preview:sessions:agent",
1639
+ "agent:sessions:preview",
1640
+ "sessions:agent:preview",
1641
+ "sessions:preview:agent"
1635
1642
  ]
1636
1643
  },
1637
- "agent:test:run": {
1644
+ "agent:preview:start": {
1638
1645
  "aliases": [],
1639
1646
  "args": {},
1640
- "description": "Use the --api-name flag to specify the name of the agent test you want to run. Use the output of the \"agent test list\" command to get the names of all the available agent tests in your org.\n\nBy default, this command starts the agent test in your org, but it doesn't wait for the test to finish. Instead, it displays the \"agent test resume\" command, with a job ID, that you execute to see the results of the test run, and then returns control of the terminal window to you. Use the --wait flag to specify the number of minutes for the command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, run \"agent test resume\".\n\nBy default, this command outputs test results in human-readable tables for each test case, if the test completes in time. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1647
+ "description": "This command outputs a session ID that you then use with the \"agent preview send\" command to send an utterance to the agent. Use the \"agent preview sessions\" command to list all active sessions and the \"agent preview end\" command to end a specific session.\n\nIdentify the agent you want to start previewing with either the --authoring-bundle flag to specify a local authoring bundle's API name or --api-name to specify an activated published agent's API name. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory. \n\nWhen starting a preview session using the authoring bundle, which contains the agent's Agent Script file, the preview uses mocked actions by default. Specify --use-live-actions for live mode, which uses the real Apex classes, flows, etc, in the org for the actions.",
1641
1648
  "examples": [
1642
- "Start an agent test called Resort_Manager_Test for an agent in your default org, don't wait for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test",
1643
- "Start an agent test for an agent in an org with alias \"my-org\" and wait for 10 minutes for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --target-org my-org",
1644
- "Start an agent test and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --output-dir ./test-results --result-format json"
1649
+ "Start a programmatic agent preview session by specifying an authoring bundle; uses mocked actions by default. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --target-org my-dev-org",
1650
+ "Similar to previous example but use live actions and the default org:\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --use-live-actions",
1651
+ "Start a preview session with an activated published agent:\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent"
1645
1652
  ],
1646
1653
  "flags": {
1647
1654
  "json": {
@@ -1679,75 +1686,52 @@
1679
1686
  "api-name": {
1680
1687
  "char": "n",
1681
1688
  "name": "api-name",
1682
- "summary": "API name of the agent test to run; corresponds to the name of the AiEvaluationDefinition metadata component that implements the agent test.",
1683
- "hasDynamicHelp": false,
1684
- "multiple": false,
1685
- "type": "option"
1686
- },
1687
- "wait": {
1688
- "char": "w",
1689
- "name": "wait",
1690
- "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1691
- "hasDynamicHelp": true,
1692
- "multiple": false,
1693
- "type": "option"
1694
- },
1695
- "result-format": {
1696
- "name": "result-format",
1697
- "summary": "Format of the agent test run results.",
1698
- "default": "human",
1689
+ "summary": "API name of the activated published agent you want to preview.",
1699
1690
  "hasDynamicHelp": false,
1700
1691
  "multiple": false,
1701
- "options": [
1702
- "json",
1703
- "human",
1704
- "junit",
1705
- "tap"
1706
- ],
1707
1692
  "type": "option"
1708
1693
  },
1709
- "output-dir": {
1710
- "char": "d",
1711
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1712
- "name": "output-dir",
1713
- "summary": "Directory to write the agent test results into.",
1694
+ "authoring-bundle": {
1695
+ "name": "authoring-bundle",
1696
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1714
1697
  "hasDynamicHelp": false,
1715
1698
  "multiple": false,
1716
1699
  "type": "option"
1717
1700
  },
1718
- "verbose": {
1719
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1720
- "name": "verbose",
1721
- "summary": "Show generated data in the test results output.",
1701
+ "use-live-actions": {
1702
+ "name": "use-live-actions",
1703
+ "summary": "Use real actions in the org; if not specified, preview uses AI to simulate (mock) actions.",
1722
1704
  "allowNo": false,
1723
1705
  "type": "boolean"
1724
1706
  }
1725
1707
  },
1726
1708
  "hasDynamicHelp": true,
1727
1709
  "hiddenAliases": [],
1728
- "id": "agent:test:run",
1710
+ "id": "agent:preview:start",
1729
1711
  "pluginAlias": "@salesforce/plugin-agent",
1730
1712
  "pluginName": "@salesforce/plugin-agent",
1731
1713
  "pluginType": "core",
1714
+ "state": "beta",
1732
1715
  "strict": true,
1733
- "summary": "Start an agent test in your org.",
1716
+ "summary": "Start a programmatic agent preview session.",
1734
1717
  "enableJsonFlag": true,
1718
+ "requiresProject": true,
1735
1719
  "isESM": true,
1736
1720
  "relativePath": [
1737
1721
  "lib",
1738
1722
  "commands",
1739
1723
  "agent",
1740
- "test",
1741
- "run.js"
1724
+ "preview",
1725
+ "start.js"
1742
1726
  ],
1743
1727
  "aliasPermutations": [],
1744
1728
  "permutations": [
1745
- "agent:test:run",
1746
- "test:agent:run",
1747
- "test:run:agent",
1748
- "agent:run:test",
1749
- "run:agent:test",
1750
- "run:test:agent"
1729
+ "agent:preview:start",
1730
+ "preview:agent:start",
1731
+ "preview:start:agent",
1732
+ "agent:start:preview",
1733
+ "start:agent:preview",
1734
+ "start:preview:agent"
1751
1735
  ]
1752
1736
  },
1753
1737
  "agent:validate:authoring-bundle": {
@@ -1835,5 +1819,5 @@
1835
1819
  ]
1836
1820
  }
1837
1821
  },
1838
- "version": "1.28.0"
1822
+ "version": "1.29.0"
1839
1823
  }