@salesforce/plugin-agent 1.29.0 → 1.29.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +19 -19
  2. package/oclif.manifest.json +356 -356
  3. package/package.json +6 -6
@@ -875,14 +875,14 @@
875
875
  "authoring-bundle:publish:agent"
876
876
  ]
877
877
  },
878
- "agent:test:create": {
878
+ "agent:preview:end": {
879
879
  "aliases": [],
880
880
  "args": {},
881
- "description": "To run this command, you must have an agent test spec file, which is a YAML file that lists the test cases for testing a specific agent. Use the \"agent generate test-spec\" CLI command to generate a test spec file. Then specify the file to this command with the --spec flag, or run this command with no flags to be prompted.\n\nWhen this command completes, your org contains the new agent test, which you can view and edit using the Testing Center UI. This command also retrieves the metadata component (AiEvaluationDefinition) associated with the new test to your local Salesforce DX project and displays its filename.\n\nAfter you've created the test in the org, use the \"agent test run\" command to run it.",
881
+ "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to end it. This command also displays the local directory where the session trace files are stored.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to end the session. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respectively. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
882
882
  "examples": [
883
- "Create an agent test interactively and be prompted for the test spec and API name of the test in the org; use the default org:\n<%= config.bin %> <%= command.id %>",
884
- "Create an agent test and use flags to specify all required information; if a test with same API name already exists in the org, overwrite it without confirmation. Use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --force-overwrite --target-org my-org",
885
- "Preview what the agent test metadata (AiEvaluationDefinition) looks like without deploying it to your default org:\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --preview"
883
+ "End a preview session of a published agent by specifying its session ID and API name; use the default org:\n<%= config.bin %> <%= command.id %> --session-id <SESSION_ID> --api-name My_Published_Agent",
884
+ "Similar to previous example, but don't specify a session ID; you get an error if the published agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent --target-org my-dev-org",
885
+ "End a preview session of an agent using its authoring bundle API name; you get an error if the agent has more than one active session.\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Local_Agent"
886
886
  ],
887
887
  "flags": {
888
888
  "json": {
@@ -900,20 +900,6 @@
900
900
  "multiple": false,
901
901
  "type": "option"
902
902
  },
903
- "api-name": {
904
- "name": "api-name",
905
- "summary": "API name of the new test; the API name must not exist in the org.",
906
- "hasDynamicHelp": false,
907
- "multiple": false,
908
- "type": "option"
909
- },
910
- "spec": {
911
- "name": "spec",
912
- "summary": "Path to the test spec YAML file.",
913
- "hasDynamicHelp": false,
914
- "multiple": false,
915
- "type": "option"
916
- },
917
903
  "target-org": {
918
904
  "char": "o",
919
905
  "name": "target-org",
@@ -931,53 +917,67 @@
931
917
  "multiple": false,
932
918
  "type": "option"
933
919
  },
934
- "preview": {
935
- "name": "preview",
936
- "summary": "Preview the test metadata file (AiEvaluationDefinition) without deploying to your org.",
937
- "allowNo": false,
938
- "type": "boolean"
920
+ "session-id": {
921
+ "name": "session-id",
922
+ "required": false,
923
+ "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see the list of all sessions.",
924
+ "hasDynamicHelp": false,
925
+ "multiple": false,
926
+ "type": "option"
939
927
  },
940
- "force-overwrite": {
941
- "name": "force-overwrite",
942
- "summary": "Don't prompt for confirmation when overwriting an existing test (based on API name) in your org.",
943
- "allowNo": false,
944
- "type": "boolean"
928
+ "api-name": {
929
+ "char": "n",
930
+ "name": "api-name",
931
+ "summary": "API name of the activated published agent you want to preview.",
932
+ "hasDynamicHelp": false,
933
+ "multiple": false,
934
+ "type": "option"
935
+ },
936
+ "authoring-bundle": {
937
+ "name": "authoring-bundle",
938
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
939
+ "hasDynamicHelp": false,
940
+ "multiple": false,
941
+ "type": "option"
945
942
  }
946
943
  },
947
944
  "hasDynamicHelp": true,
948
945
  "hiddenAliases": [],
949
- "id": "agent:test:create",
946
+ "id": "agent:preview:end",
950
947
  "pluginAlias": "@salesforce/plugin-agent",
951
948
  "pluginName": "@salesforce/plugin-agent",
952
949
  "pluginType": "core",
950
+ "state": "beta",
953
951
  "strict": true,
954
- "summary": "Create an agent test in your org using a local test spec YAML file.",
952
+ "summary": "End an existing programmatic agent preview session and get trace location.",
955
953
  "enableJsonFlag": true,
954
+ "requiresProject": true,
956
955
  "isESM": true,
957
956
  "relativePath": [
958
957
  "lib",
959
958
  "commands",
960
959
  "agent",
961
- "test",
962
- "create.js"
960
+ "preview",
961
+ "end.js"
963
962
  ],
964
963
  "aliasPermutations": [],
965
964
  "permutations": [
966
- "agent:test:create",
967
- "test:agent:create",
968
- "test:create:agent",
969
- "agent:create:test",
970
- "create:agent:test",
971
- "create:test:agent"
965
+ "agent:preview:end",
966
+ "preview:agent:end",
967
+ "preview:end:agent",
968
+ "agent:end:preview",
969
+ "end:agent:preview",
970
+ "end:preview:agent"
972
971
  ]
973
972
  },
974
- "agent:test:list": {
973
+ "agent:preview:send": {
975
974
  "aliases": [],
976
975
  "args": {},
977
- "description": "The command outputs a table with the name (API name) of each test along with its unique ID and the date it was created in the org.",
976
+ "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to send the agent a message (utterance). This command then displays the agent's response.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to send a message. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respectively. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
978
977
  "examples": [
979
- "List the agent tests in your default org:\n<%= config.bin %> <%= command.id %>",
980
- "List the agent tests in an org with alias \"my-org\"\"\n<%= config.bin %> <%= command.id %> --target-org my-org"
978
+ "Send a message to an activated published agent using its API name and session ID; use the default org:\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --session-id <SESSION_ID>",
979
+ "Similar to previous example, but don't specify a session ID; you get an error if the agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --target-org my-dev-org",
980
+ "Send a message to an agent using its authoring bundle API name; you get an error if the agent has more than one active session:\n<%= config.bin %> <%= command.id %> --utterance \"what can you help me with?\" --authoring-bundle My_Local_Agent"
981
981
  ],
982
982
  "flags": {
983
983
  "json": {
@@ -1011,43 +1011,75 @@
1011
1011
  "hasDynamicHelp": false,
1012
1012
  "multiple": false,
1013
1013
  "type": "option"
1014
+ },
1015
+ "session-id": {
1016
+ "name": "session-id",
1017
+ "required": false,
1018
+ "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see the list of all sessions.",
1019
+ "hasDynamicHelp": false,
1020
+ "multiple": false,
1021
+ "type": "option"
1022
+ },
1023
+ "utterance": {
1024
+ "char": "u",
1025
+ "name": "utterance",
1026
+ "required": true,
1027
+ "summary": "Utterance to send to the agent, enclosed in double quotes.",
1028
+ "hasDynamicHelp": false,
1029
+ "multiple": false,
1030
+ "type": "option"
1031
+ },
1032
+ "api-name": {
1033
+ "char": "n",
1034
+ "name": "api-name",
1035
+ "summary": "API name of the activated published agent you want to preview.",
1036
+ "hasDynamicHelp": false,
1037
+ "multiple": false,
1038
+ "type": "option"
1039
+ },
1040
+ "authoring-bundle": {
1041
+ "name": "authoring-bundle",
1042
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1043
+ "hasDynamicHelp": false,
1044
+ "multiple": false,
1045
+ "type": "option"
1014
1046
  }
1015
1047
  },
1016
1048
  "hasDynamicHelp": true,
1017
1049
  "hiddenAliases": [],
1018
- "id": "agent:test:list",
1050
+ "id": "agent:preview:send",
1019
1051
  "pluginAlias": "@salesforce/plugin-agent",
1020
1052
  "pluginName": "@salesforce/plugin-agent",
1021
1053
  "pluginType": "core",
1054
+ "state": "beta",
1022
1055
  "strict": true,
1023
- "summary": "List the available agent tests in your org.",
1056
+ "summary": "Send a message to an existing agent preview session.",
1024
1057
  "enableJsonFlag": true,
1058
+ "requiresProject": true,
1025
1059
  "isESM": true,
1026
1060
  "relativePath": [
1027
1061
  "lib",
1028
1062
  "commands",
1029
1063
  "agent",
1030
- "test",
1031
- "list.js"
1064
+ "preview",
1065
+ "send.js"
1032
1066
  ],
1033
1067
  "aliasPermutations": [],
1034
1068
  "permutations": [
1035
- "agent:test:list",
1036
- "test:agent:list",
1037
- "test:list:agent",
1038
- "agent:list:test",
1039
- "list:agent:test",
1040
- "list:test:agent"
1069
+ "agent:preview:send",
1070
+ "preview:agent:send",
1071
+ "preview:send:agent",
1072
+ "agent:send:preview",
1073
+ "send:agent:preview",
1074
+ "send:preview:agent"
1041
1075
  ]
1042
1076
  },
1043
- "agent:test:results": {
1077
+ "agent:preview:sessions": {
1044
1078
  "aliases": [],
1045
1079
  "args": {},
1046
- "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1080
+ "description": "This command lists the agent preview sessions that were started with the \"agent preview start\" command and are still in the local cache. Use this command to discover specific session IDs that you can pass to the \"agent preview send\" or \"agent preview end\" commands with the --session-id flag.\n\nProgrammatic agent preview sessions can be started either for published activated agents or by using an agent's local authoring bundle, which contains its Agent Script file. In this command's output table, the Agent column contains either the API name of the authoring bundle or the published agent, whichever was used when starting the session. In the table, if the same API name has multiple rows with different session IDs, then it means that you previously started multiple preview sessions with the associated agent.",
1047
1081
  "examples": [
1048
- "Get the results of an agent test run in your default org using its job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1049
- "Get the results of the most recently run agent test in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --use-most-recent --target-org my-org",
1050
- "Get the results of the most recently run agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1082
+ "List all cached agent preview sessions:\n<%= config.bin %> <%= command.id %>"
1051
1083
  ],
1052
1084
  "flags": {
1053
1085
  "json": {
@@ -1064,99 +1096,45 @@
1064
1096
  "hasDynamicHelp": false,
1065
1097
  "multiple": false,
1066
1098
  "type": "option"
1067
- },
1068
- "target-org": {
1069
- "char": "o",
1070
- "name": "target-org",
1071
- "noCacheDefault": true,
1072
- "required": true,
1073
- "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
1074
- "hasDynamicHelp": true,
1075
- "multiple": false,
1076
- "type": "option"
1077
- },
1078
- "api-version": {
1079
- "description": "Override the api version used for api requests made by this command",
1080
- "name": "api-version",
1081
- "hasDynamicHelp": false,
1082
- "multiple": false,
1083
- "type": "option"
1084
- },
1085
- "job-id": {
1086
- "char": "i",
1087
- "name": "job-id",
1088
- "required": true,
1089
- "summary": "Job ID of the completed agent test run.",
1090
- "hasDynamicHelp": false,
1091
- "multiple": false,
1092
- "type": "option"
1093
- },
1094
- "result-format": {
1095
- "name": "result-format",
1096
- "summary": "Format of the agent test run results.",
1097
- "default": "human",
1098
- "hasDynamicHelp": false,
1099
- "multiple": false,
1100
- "options": [
1101
- "json",
1102
- "human",
1103
- "junit",
1104
- "tap"
1105
- ],
1106
- "type": "option"
1107
- },
1108
- "output-dir": {
1109
- "char": "d",
1110
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1111
- "name": "output-dir",
1112
- "summary": "Directory to write the agent test results into.",
1113
- "hasDynamicHelp": false,
1114
- "multiple": false,
1115
- "type": "option"
1116
- },
1117
- "verbose": {
1118
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1119
- "name": "verbose",
1120
- "summary": "Show generated data in the test results output.",
1121
- "allowNo": false,
1122
- "type": "boolean"
1123
1099
  }
1124
1100
  },
1125
- "hasDynamicHelp": true,
1101
+ "hasDynamicHelp": false,
1126
1102
  "hiddenAliases": [],
1127
- "id": "agent:test:results",
1103
+ "id": "agent:preview:sessions",
1128
1104
  "pluginAlias": "@salesforce/plugin-agent",
1129
1105
  "pluginName": "@salesforce/plugin-agent",
1130
1106
  "pluginType": "core",
1107
+ "state": "beta",
1131
1108
  "strict": true,
1132
- "summary": "Get the results of a completed agent test run.",
1109
+ "summary": "List all known programmatic agent preview sessions.",
1133
1110
  "enableJsonFlag": true,
1111
+ "requiresProject": true,
1134
1112
  "isESM": true,
1135
1113
  "relativePath": [
1136
1114
  "lib",
1137
1115
  "commands",
1138
1116
  "agent",
1139
- "test",
1140
- "results.js"
1117
+ "preview",
1118
+ "sessions.js"
1141
1119
  ],
1142
1120
  "aliasPermutations": [],
1143
1121
  "permutations": [
1144
- "agent:test:results",
1145
- "test:agent:results",
1146
- "test:results:agent",
1147
- "agent:results:test",
1148
- "results:agent:test",
1149
- "results:test:agent"
1122
+ "agent:preview:sessions",
1123
+ "preview:agent:sessions",
1124
+ "preview:sessions:agent",
1125
+ "agent:sessions:preview",
1126
+ "sessions:agent:preview",
1127
+ "sessions:preview:agent"
1150
1128
  ]
1151
1129
  },
1152
- "agent:test:resume": {
1130
+ "agent:preview:start": {
1153
1131
  "aliases": [],
1154
1132
  "args": {},
1155
- "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nUse the --wait flag to specify the number of minutes for this command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, the CLI returns control of the terminal to you, and you must run \"agent test resume\" again.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1133
+ "description": "This command outputs a session ID that you then use with the \"agent preview send\" command to send an utterance to the agent. Use the \"agent preview sessions\" command to list all active sessions and the \"agent preview end\" command to end a specific session.\n\nIdentify the agent you want to start previewing with either the --authoring-bundle flag to specify a local authoring bundle's API name or --api-name to specify an activated published agent's API name. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.\n\nWhen starting a preview session using the authoring bundle, which contains the agent's Agent Script file, the preview uses mocked actions by default. Specify --use-live-actions for live mode, which uses the real Apex classes, flows, etc., in the org for the actions.",
1156
1134
  "examples": [
1157
- "Resume an agent test in your default org using a job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1158
- "Resume the most recently-run agent test in an org with alias \"my-org\" org; wait 10 minutes for the tests to finish:\n<%= config.bin %> <%= command.id %> --use-most-recent --wait 10 --target-org my-org",
1159
- "Resume the most recent agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1135
+ "Start a programmatic agent preview session by specifying an authoring bundle; uses mocked actions by default. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --target-org my-dev-org",
1136
+ "Similar to previous example but use live actions and the default org:\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --use-live-actions",
1137
+ "Start a preview session with an activated published agent:\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent"
1160
1138
  ],
1161
1139
  "flags": {
1162
1140
  "json": {
@@ -1191,96 +1169,65 @@
1191
1169
  "multiple": false,
1192
1170
  "type": "option"
1193
1171
  },
1194
- "job-id": {
1195
- "char": "i",
1196
- "name": "job-id",
1197
- "summary": "Job ID of the original agent test run.",
1172
+ "api-name": {
1173
+ "char": "n",
1174
+ "name": "api-name",
1175
+ "summary": "API name of the activated published agent you want to preview.",
1198
1176
  "hasDynamicHelp": false,
1199
1177
  "multiple": false,
1200
1178
  "type": "option"
1201
1179
  },
1202
- "use-most-recent": {
1203
- "char": "r",
1204
- "name": "use-most-recent",
1205
- "summary": "Use the job ID of the most recent agent test run.",
1206
- "allowNo": false,
1207
- "type": "boolean"
1208
- },
1209
- "wait": {
1210
- "char": "w",
1211
- "name": "wait",
1212
- "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1213
- "default": "5 minutes",
1214
- "hasDynamicHelp": true,
1215
- "multiple": false,
1216
- "type": "option"
1217
- },
1218
- "result-format": {
1219
- "name": "result-format",
1220
- "summary": "Format of the agent test run results.",
1221
- "default": "human",
1222
- "hasDynamicHelp": false,
1223
- "multiple": false,
1224
- "options": [
1225
- "json",
1226
- "human",
1227
- "junit",
1228
- "tap"
1229
- ],
1230
- "type": "option"
1231
- },
1232
- "output-dir": {
1233
- "char": "d",
1234
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1235
- "name": "output-dir",
1236
- "summary": "Directory to write the agent test results into.",
1180
+ "authoring-bundle": {
1181
+ "name": "authoring-bundle",
1182
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1237
1183
  "hasDynamicHelp": false,
1238
1184
  "multiple": false,
1239
1185
  "type": "option"
1240
1186
  },
1241
- "verbose": {
1242
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1243
- "name": "verbose",
1244
- "summary": "Show generated data in the test results output.",
1187
+ "use-live-actions": {
1188
+ "name": "use-live-actions",
1189
+ "summary": "Use real actions in the org; if not specified, preview uses AI to simulate (mock) actions.",
1245
1190
  "allowNo": false,
1246
1191
  "type": "boolean"
1247
1192
  }
1248
1193
  },
1249
1194
  "hasDynamicHelp": true,
1250
1195
  "hiddenAliases": [],
1251
- "id": "agent:test:resume",
1196
+ "id": "agent:preview:start",
1252
1197
  "pluginAlias": "@salesforce/plugin-agent",
1253
1198
  "pluginName": "@salesforce/plugin-agent",
1254
1199
  "pluginType": "core",
1200
+ "state": "beta",
1255
1201
  "strict": true,
1256
- "summary": "Resume an agent test that you previously started in your org so you can view the test results.",
1202
+ "summary": "Start a programmatic agent preview session.",
1257
1203
  "enableJsonFlag": true,
1204
+ "requiresProject": true,
1258
1205
  "isESM": true,
1259
1206
  "relativePath": [
1260
1207
  "lib",
1261
1208
  "commands",
1262
1209
  "agent",
1263
- "test",
1264
- "resume.js"
1210
+ "preview",
1211
+ "start.js"
1265
1212
  ],
1266
1213
  "aliasPermutations": [],
1267
1214
  "permutations": [
1268
- "agent:test:resume",
1269
- "test:agent:resume",
1270
- "test:resume:agent",
1271
- "agent:resume:test",
1272
- "resume:agent:test",
1273
- "resume:test:agent"
1215
+ "agent:preview:start",
1216
+ "preview:agent:start",
1217
+ "preview:start:agent",
1218
+ "agent:start:preview",
1219
+ "start:agent:preview",
1220
+ "start:preview:agent"
1274
1221
  ]
1275
1222
  },
1276
- "agent:test:run": {
1223
+ "agent:test:create": {
1277
1224
  "aliases": [],
1278
1225
  "args": {},
1279
- "description": "Use the --api-name flag to specify the name of the agent test you want to run. Use the output of the \"agent test list\" command to get the names of all the available agent tests in your org.\n\nBy default, this command starts the agent test in your org, but it doesn't wait for the test to finish. Instead, it displays the \"agent test resume\" command, with a job ID, that you execute to see the results of the test run, and then returns control of the terminal window to you. Use the --wait flag to specify the number of minutes for the command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, run \"agent test resume\".\n\nBy default, this command outputs test results in human-readable tables for each test case, if the test completes in time. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1226
+ "description": "To run this command, you must have an agent test spec file, which is a YAML file that lists the test cases for testing a specific agent. Use the \"agent generate test-spec\" CLI command to generate a test spec file. Then specify the file to this command with the --spec flag, or run this command with no flags to be prompted.\n\nWhen this command completes, your org contains the new agent test, which you can view and edit using the Testing Center UI. This command also retrieves the metadata component (AiEvaluationDefinition) associated with the new test to your local Salesforce DX project and displays its filename.\n\nAfter you've created the test in the org, use the \"agent test run\" command to run it.",
1280
1227
  "examples": [
1281
- "Start an agent test called Resort_Manager_Test for an agent in your default org, don't wait for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test",
1282
- "Start an agent test for an agent in an org with alias \"my-org\" and wait for 10 minutes for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --target-org my-org",
1283
- "Start an agent test and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --output-dir ./test-results --result-format json"
1228
+ "Create an agent test interactively and be prompted for the test spec and API name of the test in the org; use the default org:\n<%= config.bin %> <%= command.id %>",
1229
+ "Create an agent test and use flags to specify all required information; if a test with same API name already exists in the org, overwrite it without confirmation. Use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --force-overwrite --target-org my-org",
1230
+ "Preview what the agent test metadata (AiEvaluationDefinition) looks like without deploying it to your default org:\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --preview"
1284
1231
  ],
1285
1232
  "flags": {
1286
1233
  "json": {
@@ -1298,6 +1245,20 @@
1298
1245
  "multiple": false,
1299
1246
  "type": "option"
1300
1247
  },
1248
+ "api-name": {
1249
+ "name": "api-name",
1250
+ "summary": "API name of the new test; the API name must not exist in the org.",
1251
+ "hasDynamicHelp": false,
1252
+ "multiple": false,
1253
+ "type": "option"
1254
+ },
1255
+ "spec": {
1256
+ "name": "spec",
1257
+ "summary": "Path to the test spec YAML file.",
1258
+ "hasDynamicHelp": false,
1259
+ "multiple": false,
1260
+ "type": "option"
1261
+ },
1301
1262
  "target-org": {
1302
1263
  "char": "o",
1303
1264
  "name": "target-org",
@@ -1315,61 +1276,27 @@
1315
1276
  "multiple": false,
1316
1277
  "type": "option"
1317
1278
  },
1318
- "api-name": {
1319
- "char": "n",
1320
- "name": "api-name",
1321
- "summary": "API name of the agent test to run; corresponds to the name of the AiEvaluationDefinition metadata component that implements the agent test.",
1322
- "hasDynamicHelp": false,
1323
- "multiple": false,
1324
- "type": "option"
1325
- },
1326
- "wait": {
1327
- "char": "w",
1328
- "name": "wait",
1329
- "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1330
- "hasDynamicHelp": true,
1331
- "multiple": false,
1332
- "type": "option"
1333
- },
1334
- "result-format": {
1335
- "name": "result-format",
1336
- "summary": "Format of the agent test run results.",
1337
- "default": "human",
1338
- "hasDynamicHelp": false,
1339
- "multiple": false,
1340
- "options": [
1341
- "json",
1342
- "human",
1343
- "junit",
1344
- "tap"
1345
- ],
1346
- "type": "option"
1347
- },
1348
- "output-dir": {
1349
- "char": "d",
1350
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1351
- "name": "output-dir",
1352
- "summary": "Directory to write the agent test results into.",
1353
- "hasDynamicHelp": false,
1354
- "multiple": false,
1355
- "type": "option"
1279
+ "preview": {
1280
+ "name": "preview",
1281
+ "summary": "Preview the test metadata file (AiEvaluationDefinition) without deploying to your org.",
1282
+ "allowNo": false,
1283
+ "type": "boolean"
1356
1284
  },
1357
- "verbose": {
1358
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1359
- "name": "verbose",
1360
- "summary": "Show generated data in the test results output.",
1285
+ "force-overwrite": {
1286
+ "name": "force-overwrite",
1287
+ "summary": "Don't prompt for confirmation when overwriting an existing test (based on API name) in your org.",
1361
1288
  "allowNo": false,
1362
1289
  "type": "boolean"
1363
1290
  }
1364
1291
  },
1365
1292
  "hasDynamicHelp": true,
1366
1293
  "hiddenAliases": [],
1367
- "id": "agent:test:run",
1294
+ "id": "agent:test:create",
1368
1295
  "pluginAlias": "@salesforce/plugin-agent",
1369
1296
  "pluginName": "@salesforce/plugin-agent",
1370
1297
  "pluginType": "core",
1371
1298
  "strict": true,
1372
- "summary": "Start an agent test in your org.",
1299
+ "summary": "Create an agent test in your org using a local test spec YAML file.",
1373
1300
  "enableJsonFlag": true,
1374
1301
  "isESM": true,
1375
1302
  "relativePath": [
@@ -1377,26 +1304,25 @@
1377
1304
  "commands",
1378
1305
  "agent",
1379
1306
  "test",
1380
- "run.js"
1307
+ "create.js"
1381
1308
  ],
1382
1309
  "aliasPermutations": [],
1383
1310
  "permutations": [
1384
- "agent:test:run",
1385
- "test:agent:run",
1386
- "test:run:agent",
1387
- "agent:run:test",
1388
- "run:agent:test",
1389
- "run:test:agent"
1311
+ "agent:test:create",
1312
+ "test:agent:create",
1313
+ "test:create:agent",
1314
+ "agent:create:test",
1315
+ "create:agent:test",
1316
+ "create:test:agent"
1390
1317
  ]
1391
1318
  },
1392
- "agent:preview:end": {
1319
+ "agent:test:list": {
1393
1320
  "aliases": [],
1394
1321
  "args": {},
1395
- "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to end it. This command also displays the local directory where the session trace files are stored. \n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to end the session. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respecitvely. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
1322
+ "description": "The command outputs a table with the name (API name) of each test along with its unique ID and the date it was created in the org.",
1396
1323
  "examples": [
1397
- "End a preview session of a published agent by specifying its session ID and API name ; use the default org:\n<%= config.bin %> <%= command.id %> --session-id <SESSION_ID> --api-name My_Published_Agent",
1398
- "Similar to previous example, but don't specify a session ID; you get an error if the published agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent --target-org my-dev-org",
1399
- "End a preview session of an agent using its authoring bundle API name; you get an error if the agent has more than one active session.\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Local_Agent"
1324
+ "List the agent tests in your default org:\n<%= config.bin %> <%= command.id %>",
1325
+ "List the agent tests in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --target-org my-org"
1400
1326
  ],
1401
1327
  "flags": {
1402
1328
  "json": {
@@ -1430,68 +1356,43 @@
1430
1356
  "hasDynamicHelp": false,
1431
1357
  "multiple": false,
1432
1358
  "type": "option"
1433
- },
1434
- "session-id": {
1435
- "name": "session-id",
1436
- "required": false,
1437
- "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see the list of all sessions.",
1438
- "hasDynamicHelp": false,
1439
- "multiple": false,
1440
- "type": "option"
1441
- },
1442
- "api-name": {
1443
- "char": "n",
1444
- "name": "api-name",
1445
- "summary": "API name of the activated published agent you want to preview.",
1446
- "hasDynamicHelp": false,
1447
- "multiple": false,
1448
- "type": "option"
1449
- },
1450
- "authoring-bundle": {
1451
- "name": "authoring-bundle",
1452
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1453
- "hasDynamicHelp": false,
1454
- "multiple": false,
1455
- "type": "option"
1456
1359
  }
1457
1360
  },
1458
1361
  "hasDynamicHelp": true,
1459
1362
  "hiddenAliases": [],
1460
- "id": "agent:preview:end",
1363
+ "id": "agent:test:list",
1461
1364
  "pluginAlias": "@salesforce/plugin-agent",
1462
1365
  "pluginName": "@salesforce/plugin-agent",
1463
1366
  "pluginType": "core",
1464
- "state": "beta",
1465
1367
  "strict": true,
1466
- "summary": "End an existing programmatic agent preview session and get trace location.",
1368
+ "summary": "List the available agent tests in your org.",
1467
1369
  "enableJsonFlag": true,
1468
- "requiresProject": true,
1469
1370
  "isESM": true,
1470
1371
  "relativePath": [
1471
1372
  "lib",
1472
1373
  "commands",
1473
1374
  "agent",
1474
- "preview",
1475
- "end.js"
1375
+ "test",
1376
+ "list.js"
1476
1377
  ],
1477
1378
  "aliasPermutations": [],
1478
1379
  "permutations": [
1479
- "agent:preview:end",
1480
- "preview:agent:end",
1481
- "preview:end:agent",
1482
- "agent:end:preview",
1483
- "end:agent:preview",
1484
- "end:preview:agent"
1380
+ "agent:test:list",
1381
+ "test:agent:list",
1382
+ "test:list:agent",
1383
+ "agent:list:test",
1384
+ "list:agent:test",
1385
+ "list:test:agent"
1485
1386
  ]
1486
1387
  },
1487
- "agent:preview:send": {
1388
+ "agent:test:results": {
1488
1389
  "aliases": [],
1489
1390
  "args": {},
1490
- "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to send the agent a message (utterance). This command then displays the agent's response.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to send a message. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respecitvely. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
1391
+ "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1491
1392
  "examples": [
1492
- "Send a message to an activated published agent using its API name and session ID; use the default org:\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --session-id <SESSION_ID>",
1493
- "Similar to previous example, but don't specify a session ID; you get an error if the agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --target-org my-dev-org",
1494
- "Send a message to an agent using its authoring bundle API name; you get an error if the agent has more than one active session:\n<%= config.bin %> <%= command.id %> --utterance \"what can you help me with?\" --authoring-bundle My_Local_Agent"
1393
+ "Get the results of an agent test run in your default org using its job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1394
+ "Get the results of the most recently run agent test in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --use-most-recent --target-org my-org",
1395
+ "Get the results of the most recently run agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1495
1396
  ],
1496
1397
  "flags": {
1497
1398
  "json": {
@@ -1526,74 +1427,81 @@
1526
1427
  "multiple": false,
1527
1428
  "type": "option"
1528
1429
  },
1529
- "session-id": {
1530
- "name": "session-id",
1531
- "required": false,
1532
- "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see list of all sessions.",
1533
- "hasDynamicHelp": false,
1534
- "multiple": false,
1535
- "type": "option"
1536
- },
1537
- "utterance": {
1538
- "char": "u",
1539
- "name": "utterance",
1430
+ "job-id": {
1431
+ "char": "i",
1432
+ "name": "job-id",
1540
1433
  "required": true,
1541
- "summary": "Utterance to send to the agent, enclosed in double quotes.",
1434
+ "summary": "Job ID of the completed agent test run.",
1542
1435
  "hasDynamicHelp": false,
1543
1436
  "multiple": false,
1544
1437
  "type": "option"
1545
1438
  },
1546
- "api-name": {
1547
- "char": "n",
1548
- "name": "api-name",
1549
- "summary": "API name of the activated published agent you want to preview.",
1439
+ "result-format": {
1440
+ "name": "result-format",
1441
+ "summary": "Format of the agent test run results.",
1442
+ "default": "human",
1550
1443
  "hasDynamicHelp": false,
1551
1444
  "multiple": false,
1445
+ "options": [
1446
+ "json",
1447
+ "human",
1448
+ "junit",
1449
+ "tap"
1450
+ ],
1552
1451
  "type": "option"
1553
1452
  },
1554
- "authoring-bundle": {
1555
- "name": "authoring-bundle",
1556
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1453
+ "output-dir": {
1454
+ "char": "d",
1455
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1456
+ "name": "output-dir",
1457
+ "summary": "Directory to write the agent test results into.",
1557
1458
  "hasDynamicHelp": false,
1558
1459
  "multiple": false,
1559
1460
  "type": "option"
1461
+ },
1462
+ "verbose": {
1463
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1464
+ "name": "verbose",
1465
+ "summary": "Show generated data in the test results output.",
1466
+ "allowNo": false,
1467
+ "type": "boolean"
1560
1468
  }
1561
1469
  },
1562
1470
  "hasDynamicHelp": true,
1563
1471
  "hiddenAliases": [],
1564
- "id": "agent:preview:send",
1472
+ "id": "agent:test:results",
1565
1473
  "pluginAlias": "@salesforce/plugin-agent",
1566
1474
  "pluginName": "@salesforce/plugin-agent",
1567
1475
  "pluginType": "core",
1568
- "state": "beta",
1569
1476
  "strict": true,
1570
- "summary": "Send a message to an existing agent preview session.",
1477
+ "summary": "Get the results of a completed agent test run.",
1571
1478
  "enableJsonFlag": true,
1572
- "requiresProject": true,
1573
1479
  "isESM": true,
1574
1480
  "relativePath": [
1575
1481
  "lib",
1576
1482
  "commands",
1577
1483
  "agent",
1578
- "preview",
1579
- "send.js"
1484
+ "test",
1485
+ "results.js"
1580
1486
  ],
1581
1487
  "aliasPermutations": [],
1582
1488
  "permutations": [
1583
- "agent:preview:send",
1584
- "preview:agent:send",
1585
- "preview:send:agent",
1586
- "agent:send:preview",
1587
- "send:agent:preview",
1588
- "send:preview:agent"
1489
+ "agent:test:results",
1490
+ "test:agent:results",
1491
+ "test:results:agent",
1492
+ "agent:results:test",
1493
+ "results:agent:test",
1494
+ "results:test:agent"
1589
1495
  ]
1590
1496
  },
1591
- "agent:preview:sessions": {
1497
+ "agent:test:resume": {
1592
1498
  "aliases": [],
1593
1499
  "args": {},
1594
- "description": "This command lists the agent preview sessions that were started with the \"agent preview start\" command and are still in the local cache. Use this command to discover specific session IDs that you can pass to the \"agent preview send\" or \"agent preview end\" commands with the --session-id flag.\n\nProgrammatic agent preview sessions can be started for both published activated agents and by using an agent's local authoring bundle, which contains its Agent Script file. In this command's output table, the Agent column contains either the API name of the authoring bundle or the published agent, whichever was used when starting the session. In the table, if the same API name has multiple rows with different session IDs, then it means that you previously started multiple preview sessions with the associated agent.",
1500
+ "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nUse the --wait flag to specify the number of minutes for this command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, the CLI returns control of the terminal to you, and you must run \"agent test resume\" again.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1595
1501
  "examples": [
1596
- "List all cached agent preview sessions:\n<%= config.bin %> <%= command.id %>"
1502
+ "Resume an agent test in your default org using a job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1503
+ "Resume the most recently-run agent test in an org with alias \"my-org\"; wait 10 minutes for the tests to finish:\n<%= config.bin %> <%= command.id %> --use-most-recent --wait 10 --target-org my-org",
1504
+ "Resume the most recent agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1597
1505
  ],
1598
1506
  "flags": {
1599
1507
  "json": {
@@ -1610,45 +1518,114 @@
1610
1518
  "hasDynamicHelp": false,
1611
1519
  "multiple": false,
1612
1520
  "type": "option"
1521
+ },
1522
+ "target-org": {
1523
+ "char": "o",
1524
+ "name": "target-org",
1525
+ "noCacheDefault": true,
1526
+ "required": true,
1527
+ "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
1528
+ "hasDynamicHelp": true,
1529
+ "multiple": false,
1530
+ "type": "option"
1531
+ },
1532
+ "api-version": {
1533
+ "description": "Override the api version used for api requests made by this command",
1534
+ "name": "api-version",
1535
+ "hasDynamicHelp": false,
1536
+ "multiple": false,
1537
+ "type": "option"
1538
+ },
1539
+ "job-id": {
1540
+ "char": "i",
1541
+ "name": "job-id",
1542
+ "summary": "Job ID of the original agent test run.",
1543
+ "hasDynamicHelp": false,
1544
+ "multiple": false,
1545
+ "type": "option"
1546
+ },
1547
+ "use-most-recent": {
1548
+ "char": "r",
1549
+ "name": "use-most-recent",
1550
+ "summary": "Use the job ID of the most recent agent test run.",
1551
+ "allowNo": false,
1552
+ "type": "boolean"
1553
+ },
1554
+ "wait": {
1555
+ "char": "w",
1556
+ "name": "wait",
1557
+ "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1558
+ "default": "5 minutes",
1559
+ "hasDynamicHelp": true,
1560
+ "multiple": false,
1561
+ "type": "option"
1562
+ },
1563
+ "result-format": {
1564
+ "name": "result-format",
1565
+ "summary": "Format of the agent test run results.",
1566
+ "default": "human",
1567
+ "hasDynamicHelp": false,
1568
+ "multiple": false,
1569
+ "options": [
1570
+ "json",
1571
+ "human",
1572
+ "junit",
1573
+ "tap"
1574
+ ],
1575
+ "type": "option"
1576
+ },
1577
+ "output-dir": {
1578
+ "char": "d",
1579
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1580
+ "name": "output-dir",
1581
+ "summary": "Directory to write the agent test results into.",
1582
+ "hasDynamicHelp": false,
1583
+ "multiple": false,
1584
+ "type": "option"
1585
+ },
1586
+ "verbose": {
1587
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1588
+ "name": "verbose",
1589
+ "summary": "Show generated data in the test results output.",
1590
+ "allowNo": false,
1591
+ "type": "boolean"
1613
1592
  }
1614
1593
  },
1615
- "hasDynamicHelp": false,
1594
+ "hasDynamicHelp": true,
1616
1595
  "hiddenAliases": [],
1617
- "id": "agent:preview:sessions",
1596
+ "id": "agent:test:resume",
1618
1597
  "pluginAlias": "@salesforce/plugin-agent",
1619
1598
  "pluginName": "@salesforce/plugin-agent",
1620
1599
  "pluginType": "core",
1621
- "state": "beta",
1622
1600
  "strict": true,
1623
- "summary": "List all known programmatic agent preview sessions.",
1601
+ "summary": "Resume an agent test that you previously started in your org so you can view the test results.",
1624
1602
  "enableJsonFlag": true,
1625
- "requiresProject": true,
1626
1603
  "isESM": true,
1627
1604
  "relativePath": [
1628
1605
  "lib",
1629
1606
  "commands",
1630
1607
  "agent",
1631
- "preview",
1632
- "sessions.js"
1608
+ "test",
1609
+ "resume.js"
1633
1610
  ],
1634
1611
  "aliasPermutations": [],
1635
1612
  "permutations": [
1636
- "agent:preview:sessions",
1637
- "preview:agent:sessions",
1638
- "preview:sessions:agent",
1639
- "agent:sessions:preview",
1640
- "sessions:agent:preview",
1641
- "sessions:preview:agent"
1613
+ "agent:test:resume",
1614
+ "test:agent:resume",
1615
+ "test:resume:agent",
1616
+ "agent:resume:test",
1617
+ "resume:agent:test",
1618
+ "resume:test:agent"
1642
1619
  ]
1643
1620
  },
1644
- "agent:preview:start": {
1621
+ "agent:test:run": {
1645
1622
  "aliases": [],
1646
1623
  "args": {},
1647
- "description": "This command outputs a session ID that you then use with the \"agent preview send\" command to send an utterance to the agent. Use the \"agent preview sessions\" command to list all active sessions and the \"agent preview end\" command to end a specific session.\n\nIdentify the agent you want to start previewing with either the --authoring-bundle flag to specify a local authoring bundle's API name or --api-name to specify an activated published agent's API name. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory. \n\nWhen starting a preview session using the authoring bundle, which contains the agent's Agent Script file, the preview uses mocked actions by default. Specify --use-live-actions for live mode, which uses the real Apex classes, flows, etc, in the org for the actions.",
1624
+ "description": "Use the --api-name flag to specify the name of the agent test you want to run. Use the output of the \"agent test list\" command to get the names of all the available agent tests in your org.\n\nBy default, this command starts the agent test in your org, but it doesn't wait for the test to finish. Instead, it displays the \"agent test resume\" command, with a job ID, that you execute to see the results of the test run, and then returns control of the terminal window to you. Use the --wait flag to specify the number of minutes for the command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, run \"agent test resume\".\n\nBy default, this command outputs test results in human-readable tables for each test case, if the test completes in time. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1648
1625
  "examples": [
1649
- "Start a programmatic agent preview session by specifying an authoring bundle; uses mocked actions by default. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --target-org my-dev-org",
1650
- "Similar to previous example but use live actions and the default org:\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --use-live-actions",
1651
- "Start a preview session with an activated published agent:\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent"
1626
+ "Start an agent test called Resort_Manager_Test for an agent in your default org, don't wait for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test",
1627
+ "Start an agent test for an agent in an org with alias \"my-org\" and wait for 10 minutes for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --target-org my-org",
1628
+ "Start an agent test and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --output-dir ./test-results --result-format json"
1652
1629
  ],
1653
1630
  "flags": {
1654
1631
  "json": {
@@ -1686,52 +1663,75 @@
1686
1663
  "api-name": {
1687
1664
  "char": "n",
1688
1665
  "name": "api-name",
1689
- "summary": "API name of the activated published agent you want to preview.",
1666
+ "summary": "API name of the agent test to run; corresponds to the name of the AiEvaluationDefinition metadata component that implements the agent test.",
1690
1667
  "hasDynamicHelp": false,
1691
1668
  "multiple": false,
1692
1669
  "type": "option"
1693
1670
  },
1694
- "authoring-bundle": {
1695
- "name": "authoring-bundle",
1696
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1671
+ "wait": {
1672
+ "char": "w",
1673
+ "name": "wait",
1674
+ "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1675
+ "hasDynamicHelp": true,
1676
+ "multiple": false,
1677
+ "type": "option"
1678
+ },
1679
+ "result-format": {
1680
+ "name": "result-format",
1681
+ "summary": "Format of the agent test run results.",
1682
+ "default": "human",
1697
1683
  "hasDynamicHelp": false,
1698
1684
  "multiple": false,
1685
+ "options": [
1686
+ "json",
1687
+ "human",
1688
+ "junit",
1689
+ "tap"
1690
+ ],
1699
1691
  "type": "option"
1700
1692
  },
1701
- "use-live-actions": {
1702
- "name": "use-live-actions",
1703
- "summary": "Use real actions in the org; if not specified, preview uses AI to simulate (mock) actions.",
1693
+ "output-dir": {
1694
+ "char": "d",
1695
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1696
+ "name": "output-dir",
1697
+ "summary": "Directory to write the agent test results into.",
1698
+ "hasDynamicHelp": false,
1699
+ "multiple": false,
1700
+ "type": "option"
1701
+ },
1702
+ "verbose": {
1703
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1704
+ "name": "verbose",
1705
+ "summary": "Show generated data in the test results output.",
1704
1706
  "allowNo": false,
1705
1707
  "type": "boolean"
1706
1708
  }
1707
1709
  },
1708
1710
  "hasDynamicHelp": true,
1709
1711
  "hiddenAliases": [],
1710
- "id": "agent:preview:start",
1712
+ "id": "agent:test:run",
1711
1713
  "pluginAlias": "@salesforce/plugin-agent",
1712
1714
  "pluginName": "@salesforce/plugin-agent",
1713
1715
  "pluginType": "core",
1714
- "state": "beta",
1715
1716
  "strict": true,
1716
- "summary": "Start a programmatic agent preview session.",
1717
+ "summary": "Start an agent test in your org.",
1717
1718
  "enableJsonFlag": true,
1718
- "requiresProject": true,
1719
1719
  "isESM": true,
1720
1720
  "relativePath": [
1721
1721
  "lib",
1722
1722
  "commands",
1723
1723
  "agent",
1724
- "preview",
1725
- "start.js"
1724
+ "test",
1725
+ "run.js"
1726
1726
  ],
1727
1727
  "aliasPermutations": [],
1728
1728
  "permutations": [
1729
- "agent:preview:start",
1730
- "preview:agent:start",
1731
- "preview:start:agent",
1732
- "agent:start:preview",
1733
- "start:agent:preview",
1734
- "start:preview:agent"
1729
+ "agent:test:run",
1730
+ "test:agent:run",
1731
+ "test:run:agent",
1732
+ "agent:run:test",
1733
+ "run:agent:test",
1734
+ "run:test:agent"
1735
1735
  ]
1736
1736
  },
1737
1737
  "agent:validate:authoring-bundle": {
@@ -1819,5 +1819,5 @@
1819
1819
  ]
1820
1820
  }
1821
1821
  },
1822
- "version": "1.29.0"
1822
+ "version": "1.29.2"
1823
1823
  }