@salesforce/plugin-agent 1.32.21 → 1.33.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -868,13 +868,14 @@
868
868
  "test-spec:generate:agent"
869
869
  ]
870
870
  },
871
- "agent:publish:authoring-bundle": {
871
+ "agent:preview:end": {
872
872
  "aliases": [],
873
873
  "args": {},
874
- "description": "An authoring bundle is a metadata type (named aiAuthoringBundle) that provides the blueprint for an agent. The metadata type contains two files: the standard metatada XML file and an Agent Script file (extension \".agent\") that fully describes the agent using the Agent Script language.\n\nWhen you publish an authoring bundle to your org, a number of things happen. First, this command validates that the Agent Script file successfully compiles. If there are compilation errors, the command exits and you must fix the Agent Script file to continue. Once the Agent Script file compiles, then it's published to the org, which in turn creates new associated metadata (Bot, BotVersion, GenAiX), or new versions of the metadata if the agent already exists. The new or updated metadata is retrieved back to your DX project; specify the --skip-retrieve flag to skip this step. Finally, the authoring bundle metadata (AiAuthoringBundle) is deployed to your org.\n\nThis command uses the API name of the authoring bundle.",
874
+ "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to end it. This command also displays the local directory where the session trace files are stored.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to end the session. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respectively. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
875
875
  "examples": [
876
- "Publish an authoring bundle by being prompted for its API name; use your default org:\n<%= config.bin %> <%= command.id %>",
877
- "Publish an authoring bundle with API name MyAuthoringBundle to the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name MyAuthoringbundle --target-org my-dev-org"
876
+ "End a preview session of a published agent by specifying its session ID and API name; use the default org:\n<%= config.bin %> <%= command.id %> --session-id <SESSION_ID> --api-name My_Published_Agent",
877
+ "Similar to previous example, but don't specify a session ID; you get an error if the published agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent --target-org my-dev-org",
878
+ "End a preview session of an agent using its authoring bundle API name; you get an error if the agent has more than one active session.\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Local_Agent"
878
879
  ],
879
880
  "flags": {
880
881
  "json": {
@@ -909,63 +910,96 @@
909
910
  "multiple": false,
910
911
  "type": "option"
911
912
  },
913
+ "session-id": {
914
+ "name": "session-id",
915
+ "required": false,
916
+ "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see the list of all sessions.",
917
+ "hasDynamicHelp": false,
918
+ "multiple": false,
919
+ "type": "option"
920
+ },
912
921
  "api-name": {
913
922
  "char": "n",
914
923
  "name": "api-name",
915
- "summary": "API name of the authoring bundle you want to publish; if not specified, the command provides a list that you can choose from.",
924
+ "summary": "API name of the activated published agent you want to preview.",
916
925
  "hasDynamicHelp": false,
917
926
  "multiple": false,
918
927
  "type": "option"
919
928
  },
920
- "skip-retrieve": {
921
- "name": "skip-retrieve",
922
- "summary": "Don't retrieve the metadata associated with the agent to your DX project.",
923
- "allowNo": false,
924
- "type": "boolean"
929
+ "authoring-bundle": {
930
+ "name": "authoring-bundle",
931
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
932
+ "hasDynamicHelp": false,
933
+ "multiple": false,
934
+ "type": "option"
925
935
  }
926
936
  },
927
937
  "hasDynamicHelp": true,
928
938
  "hiddenAliases": [],
929
- "id": "agent:publish:authoring-bundle",
939
+ "id": "agent:preview:end",
930
940
  "pluginAlias": "@salesforce/plugin-agent",
931
941
  "pluginName": "@salesforce/plugin-agent",
932
942
  "pluginType": "core",
933
943
  "strict": true,
934
- "summary": "Publish an authoring bundle to your org, which results in a new agent or a new version of an existing agent.",
944
+ "summary": "End an existing programmatic agent preview session and get trace location.",
935
945
  "enableJsonFlag": true,
936
946
  "requiresProject": true,
937
- "FLAGGABLE_PROMPTS": {
938
- "api-name": {
939
- "message": "API name of the authoring bundle you want to publish; if not specified, the command provides a list that you can choose from.",
940
- "promptMessage": "API name of the authoring bundle to publish"
941
- }
947
+ "envVariablesSection": {
948
+ "header": "ENVIRONMENT VARIABLES",
949
+ "body": [
950
+ {
951
+ "name": "SF_TARGET_ORG",
952
+ "description": "Username or alias of your default org. Overrides the target-org configuration variable."
953
+ }
954
+ ]
955
+ },
956
+ "errorCodes": {
957
+ "header": "ERROR CODES",
958
+ "body": [
959
+ {
960
+ "name": "Succeeded (0)",
961
+ "description": "Preview session ended successfully and traces saved."
962
+ },
963
+ {
964
+ "name": "NotFound (2)",
965
+ "description": "Agent not found, or no preview session exists for this agent."
966
+ },
967
+ {
968
+ "name": "PreviewEndFailed (4)",
969
+ "description": "Failed to end the preview session."
970
+ },
971
+ {
972
+ "name": "SessionAmbiguous (5)",
973
+ "description": "Multiple preview sessions found; specify --session-id to choose one."
974
+ }
975
+ ]
942
976
  },
943
977
  "isESM": true,
944
978
  "relativePath": [
945
979
  "lib",
946
980
  "commands",
947
981
  "agent",
948
- "publish",
949
- "authoring-bundle.js"
982
+ "preview",
983
+ "end.js"
950
984
  ],
951
985
  "aliasPermutations": [],
952
986
  "permutations": [
953
- "agent:publish:authoring-bundle",
954
- "publish:agent:authoring-bundle",
955
- "publish:authoring-bundle:agent",
956
- "agent:authoring-bundle:publish",
957
- "authoring-bundle:agent:publish",
958
- "authoring-bundle:publish:agent"
987
+ "agent:preview:end",
988
+ "preview:agent:end",
989
+ "preview:end:agent",
990
+ "agent:end:preview",
991
+ "end:agent:preview",
992
+ "end:preview:agent"
959
993
  ]
960
994
  },
961
- "agent:test:create": {
995
+ "agent:preview:send": {
962
996
  "aliases": [],
963
997
  "args": {},
964
- "description": "To run this command, you must have an agent test spec file, which is a YAML file that lists the test cases for testing a specific agent. Use the \"agent generate test-spec\" CLI command to generate a test spec file. Then specify the file to this command with the --spec flag, or run this command with no flags to be prompted.\n\nWhen this command completes, your org contains the new agent test, which you can view and edit using the Testing Center UI. This command also retrieves the metadata component (AiEvaluationDefinition) associated with the new test to your local Salesforce DX project and displays its filename.\n\nAfter you've created the test in the org, use the \"agent test run\" command to run it.",
998
+ "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to send the agent a message (utterance). This command then displays the agent's response.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to send a message. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respectively. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
965
999
  "examples": [
966
- "Create an agent test interactively and be prompted for the test spec and API name of the test in the org; use the default org:\n<%= config.bin %> <%= command.id %>",
967
- "Create an agent test and use flags to specify all required information; if a test with same API name already exists in the org, overwrite it without confirmation. Use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --force-overwrite --target-org my-org",
968
- "Preview what the agent test metadata (AiEvaluationDefinition) looks like without deploying it to your default org:\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --preview"
1000
+ "Send a message to an activated published agent using its API name and session ID; use the default org:\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --session-id <SESSION_ID>",
1001
+ "Similar to previous example, but don't specify a session ID; you get an error if the agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --target-org my-dev-org",
1002
+ "Send a message to an agent using its authoring bundle API name; you get an error if the agent has more than one active session:\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --authoring-bundle My_Local_Agent"
969
1003
  ],
970
1004
  "flags": {
971
1005
  "json": {
@@ -983,20 +1017,6 @@
983
1017
  "multiple": false,
984
1018
  "type": "option"
985
1019
  },
986
- "api-name": {
987
- "name": "api-name",
988
- "summary": "API name of the new test; the API name must not exist in the org.",
989
- "hasDynamicHelp": false,
990
- "multiple": false,
991
- "type": "option"
992
- },
993
- "spec": {
994
- "name": "spec",
995
- "summary": "Path to the test spec YAML file.",
996
- "hasDynamicHelp": false,
997
- "multiple": false,
998
- "type": "option"
999
- },
1000
1020
  "target-org": {
1001
1021
  "char": "o",
1002
1022
  "name": "target-org",
@@ -1014,28 +1034,49 @@
1014
1034
  "multiple": false,
1015
1035
  "type": "option"
1016
1036
  },
1017
- "preview": {
1018
- "name": "preview",
1019
- "summary": "Preview the test metadata file (AiEvaluationDefinition) without deploying to your org.",
1020
- "allowNo": false,
1021
- "type": "boolean"
1037
+ "session-id": {
1038
+ "name": "session-id",
1039
+ "required": false,
1040
+ "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see the list of all sessions.",
1041
+ "hasDynamicHelp": false,
1042
+ "multiple": false,
1043
+ "type": "option"
1022
1044
  },
1023
- "force-overwrite": {
1024
- "name": "force-overwrite",
1025
- "summary": "Don't prompt for confirmation when overwriting an existing test (based on API name) in your org.",
1026
- "allowNo": false,
1027
- "type": "boolean"
1045
+ "utterance": {
1046
+ "char": "u",
1047
+ "name": "utterance",
1048
+ "required": true,
1049
+ "summary": "Utterance to send to the agent, enclosed in double quotes.",
1050
+ "hasDynamicHelp": false,
1051
+ "multiple": false,
1052
+ "type": "option"
1053
+ },
1054
+ "api-name": {
1055
+ "char": "n",
1056
+ "name": "api-name",
1057
+ "summary": "API name of the activated published agent you want to preview.",
1058
+ "hasDynamicHelp": false,
1059
+ "multiple": false,
1060
+ "type": "option"
1061
+ },
1062
+ "authoring-bundle": {
1063
+ "name": "authoring-bundle",
1064
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1065
+ "hasDynamicHelp": false,
1066
+ "multiple": false,
1067
+ "type": "option"
1028
1068
  }
1029
1069
  },
1030
1070
  "hasDynamicHelp": true,
1031
1071
  "hiddenAliases": [],
1032
- "id": "agent:test:create",
1072
+ "id": "agent:preview:send",
1033
1073
  "pluginAlias": "@salesforce/plugin-agent",
1034
1074
  "pluginName": "@salesforce/plugin-agent",
1035
1075
  "pluginType": "core",
1036
1076
  "strict": true,
1037
- "summary": "Create an agent test in your org using a local test spec YAML file.",
1077
+ "summary": "Send a message to an existing agent preview session.",
1038
1078
  "enableJsonFlag": true,
1079
+ "requiresProject": true,
1039
1080
  "envVariablesSection": {
1040
1081
  "header": "ENVIRONMENT VARIABLES",
1041
1082
  "body": [
@@ -1050,19 +1091,19 @@
1050
1091
  "body": [
1051
1092
  {
1052
1093
  "name": "Succeeded (0)",
1053
- "description": "Test created and deployed successfully."
1094
+ "description": "Message sent successfully and agent response received."
1054
1095
  },
1055
1096
  {
1056
- "name": "Failed (1)",
1057
- "description": "Test validation errors or metadata format issues."
1097
+ "name": "NotFound (2)",
1098
+ "description": "Agent not found, or no preview session exists for this agent."
1058
1099
  },
1059
1100
  {
1060
- "name": "NotFound (2)",
1061
- "description": "Test spec file not found or org connection failed."
1101
+ "name": "PreviewSendFailed (4)",
1102
+ "description": "Failed to send message or receive response from the preview session."
1062
1103
  },
1063
1104
  {
1064
- "name": "DeploymentFailed (4)",
1065
- "description": "Deployment failed due to API or network errors."
1105
+ "name": "SessionAmbiguous (5)",
1106
+ "description": "Multiple preview sessions found; specify --session-id to choose one."
1066
1107
  }
1067
1108
  ]
1068
1109
  },
@@ -1071,26 +1112,25 @@
1071
1112
  "lib",
1072
1113
  "commands",
1073
1114
  "agent",
1074
- "test",
1075
- "create.js"
1115
+ "preview",
1116
+ "send.js"
1076
1117
  ],
1077
1118
  "aliasPermutations": [],
1078
1119
  "permutations": [
1079
- "agent:test:create",
1080
- "test:agent:create",
1081
- "test:create:agent",
1082
- "agent:create:test",
1083
- "create:agent:test",
1084
- "create:test:agent"
1120
+ "agent:preview:send",
1121
+ "preview:agent:send",
1122
+ "preview:send:agent",
1123
+ "agent:send:preview",
1124
+ "send:agent:preview",
1125
+ "send:preview:agent"
1085
1126
  ]
1086
1127
  },
1087
- "agent:test:list": {
1128
+ "agent:preview:sessions": {
1088
1129
  "aliases": [],
1089
1130
  "args": {},
1090
- "description": "The command outputs a table with the name (API name) of each test along with its unique ID and the date it was created in the org.",
1131
+ "description": "This command lists the agent preview sessions that were started with the \"agent preview start\" command and are still in the local cache. Use this command to discover specific session IDs that you can pass to the \"agent preview send\" or \"agent preview end\" commands with the --session-id flag.\n\nProgrammatic agent preview sessions can be started for both published activated agents and by using an agent's local authoring bundle, which contains its Agent Script file. In this command's output table, the Agent column contains either the API name of the authoring bundle or the published agent, whichever was used when starting the session. In the table, if the same API name has multiple rows with different session IDs, then it means that you previously started multiple preview sessions with the associated agent.",
1091
1132
  "examples": [
1092
- "List the agent tests in your default org:\n<%= config.bin %> <%= command.id %>",
1093
- "List the agent tests in an org with alias \"my-org\"\"\n<%= config.bin %> <%= command.id %> --target-org my-org"
1133
+ "List all cached agent preview sessions:\n<%= config.bin %> <%= command.id %>"
1094
1134
  ],
1095
1135
  "flags": {
1096
1136
  "json": {
@@ -1107,53 +1147,24 @@
1107
1147
  "hasDynamicHelp": false,
1108
1148
  "multiple": false,
1109
1149
  "type": "option"
1110
- },
1111
- "target-org": {
1112
- "char": "o",
1113
- "name": "target-org",
1114
- "noCacheDefault": true,
1115
- "required": true,
1116
- "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
1117
- "hasDynamicHelp": true,
1118
- "multiple": false,
1119
- "type": "option"
1120
- },
1121
- "api-version": {
1122
- "description": "Override the api version used for api requests made by this command",
1123
- "name": "api-version",
1124
- "hasDynamicHelp": false,
1125
- "multiple": false,
1126
- "type": "option"
1127
1150
  }
1128
1151
  },
1129
- "hasDynamicHelp": true,
1152
+ "hasDynamicHelp": false,
1130
1153
  "hiddenAliases": [],
1131
- "id": "agent:test:list",
1154
+ "id": "agent:preview:sessions",
1132
1155
  "pluginAlias": "@salesforce/plugin-agent",
1133
1156
  "pluginName": "@salesforce/plugin-agent",
1134
1157
  "pluginType": "core",
1135
1158
  "strict": true,
1136
- "summary": "List the available agent tests in your org.",
1159
+ "summary": "List all known programmatic agent preview sessions.",
1137
1160
  "enableJsonFlag": true,
1138
- "envVariablesSection": {
1139
- "header": "ENVIRONMENT VARIABLES",
1140
- "body": [
1141
- {
1142
- "name": "SF_TARGET_ORG",
1143
- "description": "Username or alias of your default org. Overrides the target-org configuration variable."
1144
- }
1145
- ]
1146
- },
1161
+ "requiresProject": true,
1147
1162
  "errorCodes": {
1148
1163
  "header": "ERROR CODES",
1149
1164
  "body": [
1150
1165
  {
1151
1166
  "name": "Succeeded (0)",
1152
- "description": "Agent tests listed successfully."
1153
- },
1154
- {
1155
- "name": "Failed (4)",
1156
- "description": "Failed to retrieve agent tests due to API or network errors."
1167
+ "description": "Sessions listed successfully (or empty list if no active sessions)."
1157
1168
  }
1158
1169
  ]
1159
1170
  },
@@ -1162,27 +1173,27 @@
1162
1173
  "lib",
1163
1174
  "commands",
1164
1175
  "agent",
1165
- "test",
1166
- "list.js"
1176
+ "preview",
1177
+ "sessions.js"
1167
1178
  ],
1168
1179
  "aliasPermutations": [],
1169
1180
  "permutations": [
1170
- "agent:test:list",
1171
- "test:agent:list",
1172
- "test:list:agent",
1173
- "agent:list:test",
1174
- "list:agent:test",
1175
- "list:test:agent"
1181
+ "agent:preview:sessions",
1182
+ "preview:agent:sessions",
1183
+ "preview:sessions:agent",
1184
+ "agent:sessions:preview",
1185
+ "sessions:agent:preview",
1186
+ "sessions:preview:agent"
1176
1187
  ]
1177
1188
  },
1178
- "agent:test:results": {
1189
+ "agent:preview:start": {
1179
1190
  "aliases": [],
1180
1191
  "args": {},
1181
- "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1192
+ "description": "This command outputs a session ID that you then use with the \"agent preview send\" command to send an utterance to the agent. Use the \"agent preview sessions\" command to list all active sessions and the \"agent preview end\" command to end a specific session.\n\nIdentify the agent you want to start previewing with either the --authoring-bundle flag to specify a local authoring bundle's API name or --api-name to specify an activated published agent's API name. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.\n\nWhen starting a preview session with --authoring-bundle, you must explicitly specify the execution mode using one of these flags:\n\n- --use-live-actions: Executes real Apex classes, flows, and other actions in the org. This surfaces compile and validation errors during preview.\n- --simulate-actions: Uses AI to simulate action execution without calling real implementations.\n\nPublished agents (--api-name) always use live actions. The mode flags are optional and have no effect for published agents.",
1182
1193
  "examples": [
1183
- "Get the results of an agent test run in your default org using its job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1184
- "Get the results of the most recently run agent test in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --use-most-recent --target-org my-org",
1185
- "Get the results of the most recently run agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1194
+ "Start a programmatic agent preview session by specifying an authoring bundle; use simulated actions. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --target-org my-dev-org --simulate-actions",
1195
+ "Similar to previous example but use live actions and the default org:\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --use-live-actions",
1196
+ "Start a preview session with an activated published agent (always uses live actions):\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent"
1186
1197
  ],
1187
1198
  "flags": {
1188
1199
  "json": {
@@ -1217,55 +1228,50 @@
1217
1228
  "multiple": false,
1218
1229
  "type": "option"
1219
1230
  },
1220
- "job-id": {
1221
- "char": "i",
1222
- "name": "job-id",
1223
- "required": true,
1224
- "summary": "Job ID of the completed agent test run.",
1231
+ "api-name": {
1232
+ "char": "n",
1233
+ "name": "api-name",
1234
+ "summary": "API name of the activated published agent you want to preview.",
1225
1235
  "hasDynamicHelp": false,
1226
1236
  "multiple": false,
1227
1237
  "type": "option"
1228
1238
  },
1229
- "result-format": {
1230
- "name": "result-format",
1231
- "summary": "Format of the agent test run results.",
1232
- "default": "human",
1239
+ "authoring-bundle": {
1240
+ "name": "authoring-bundle",
1241
+ "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1233
1242
  "hasDynamicHelp": false,
1234
1243
  "multiple": false,
1235
- "options": [
1236
- "json",
1237
- "human",
1238
- "junit",
1239
- "tap"
1240
- ],
1241
1244
  "type": "option"
1242
1245
  },
1243
- "output-dir": {
1244
- "char": "d",
1245
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1246
- "name": "output-dir",
1247
- "summary": "Directory to write the agent test results into.",
1248
- "hasDynamicHelp": false,
1249
- "multiple": false,
1250
- "type": "option"
1246
+ "use-live-actions": {
1247
+ "exclusive": [
1248
+ "simulate-actions"
1249
+ ],
1250
+ "name": "use-live-actions",
1251
+ "summary": "Execute real actions in the org (Apex classes, flows, etc.). Required with --authoring-bundle.",
1252
+ "allowNo": false,
1253
+ "type": "boolean"
1251
1254
  },
1252
- "verbose": {
1253
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1254
- "name": "verbose",
1255
- "summary": "Show generated data in the test results output.",
1255
+ "simulate-actions": {
1256
+ "exclusive": [
1257
+ "use-live-actions"
1258
+ ],
1259
+ "name": "simulate-actions",
1260
+ "summary": "Use AI to simulate action execution instead of calling real actions. Required with --authoring-bundle.",
1256
1261
  "allowNo": false,
1257
1262
  "type": "boolean"
1258
1263
  }
1259
1264
  },
1260
1265
  "hasDynamicHelp": true,
1261
1266
  "hiddenAliases": [],
1262
- "id": "agent:test:results",
1267
+ "id": "agent:preview:start",
1263
1268
  "pluginAlias": "@salesforce/plugin-agent",
1264
1269
  "pluginName": "@salesforce/plugin-agent",
1265
1270
  "pluginType": "core",
1266
1271
  "strict": true,
1267
- "summary": "Get the results of a completed agent test run.",
1272
+ "summary": "Start a programmatic agent preview session.",
1268
1273
  "enableJsonFlag": true,
1274
+ "requiresProject": true,
1269
1275
  "envVariablesSection": {
1270
1276
  "header": "ENVIRONMENT VARIABLES",
1271
1277
  "body": [
@@ -1280,15 +1286,23 @@
1280
1286
  "body": [
1281
1287
  {
1282
1288
  "name": "Succeeded (0)",
1283
- "description": "Results retrieved successfully. Test results (passed/failed) are in the output."
1289
+ "description": "Preview session started successfully."
1290
+ },
1291
+ {
1292
+ "name": "Failed (1)",
1293
+ "description": "Agent Script compilation failed (syntax errors in the script)."
1284
1294
  },
1285
1295
  {
1286
1296
  "name": "NotFound (2)",
1287
- "description": "Job ID not found or invalid."
1297
+ "description": "Agent not found, or compilation API returned HTTP 404 (endpoint may not be available in your org or region)."
1288
1298
  },
1289
1299
  {
1290
- "name": "Failed (4)",
1291
- "description": "Failed to retrieve results due to API or network errors."
1300
+ "name": "ServerError (3)",
1301
+ "description": "Compilation API returned HTTP 500 (server error during compilation)."
1302
+ },
1303
+ {
1304
+ "name": "PreviewStartFailed (4)",
1305
+ "description": "Preview session failed to start due to API or network errors."
1292
1306
  }
1293
1307
  ]
1294
1308
  },
@@ -1297,27 +1311,26 @@
1297
1311
  "lib",
1298
1312
  "commands",
1299
1313
  "agent",
1300
- "test",
1301
- "results.js"
1314
+ "preview",
1315
+ "start.js"
1302
1316
  ],
1303
1317
  "aliasPermutations": [],
1304
1318
  "permutations": [
1305
- "agent:test:results",
1306
- "test:agent:results",
1307
- "test:results:agent",
1308
- "agent:results:test",
1309
- "results:agent:test",
1310
- "results:test:agent"
1319
+ "agent:preview:start",
1320
+ "preview:agent:start",
1321
+ "preview:start:agent",
1322
+ "agent:start:preview",
1323
+ "start:agent:preview",
1324
+ "start:preview:agent"
1311
1325
  ]
1312
1326
  },
1313
- "agent:test:resume": {
1327
+ "agent:publish:authoring-bundle": {
1314
1328
  "aliases": [],
1315
1329
  "args": {},
1316
- "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nUse the --wait flag to specify the number of minutes for this command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, the CLI returns control of the terminal to you, and you must run \"agent test resume\" again.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1330
+ "description": "An authoring bundle is a metadata type (named aiAuthoringBundle) that provides the blueprint for an agent. The metadata type contains two files: the standard metadata XML file and an Agent Script file (extension \".agent\") that fully describes the agent using the Agent Script language.\n\nWhen you publish an authoring bundle to your org, a number of things happen. First, this command validates that the Agent Script file successfully compiles. If there are compilation errors, the command exits and you must fix the Agent Script file to continue. Once the Agent Script file compiles, then it's published to the org, which in turn creates new associated metadata (Bot, BotVersion, GenAiX), or new versions of the metadata if the agent already exists. The new or updated metadata is retrieved back to your DX project; specify the --skip-retrieve flag to skip this step. Finally, the authoring bundle metadata (AiAuthoringBundle) is deployed to your org.\n\nThis command uses the API name of the authoring bundle.",
1317
1331
  "examples": [
1318
- "Resume an agent test in your default org using a job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1319
- "Resume the most recently-run agent test in an org with alias \"my-org\" org; wait 10 minutes for the tests to finish:\n<%= config.bin %> <%= command.id %> --use-most-recent --wait 10 --target-org my-org",
1320
- "Resume the most recent agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1332
+ "Publish an authoring bundle by being prompted for its API name; use your default org:\n<%= config.bin %> <%= command.id %>",
1333
+ "Publish an authoring bundle with API name MyAuthoringBundle to the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name MyAuthoringbundle --target-org my-dev-org"
1321
1334
  ],
1322
1335
  "flags": {
1323
1336
  "json": {
@@ -1352,69 +1365,132 @@
1352
1365
  "multiple": false,
1353
1366
  "type": "option"
1354
1367
  },
1355
- "job-id": {
1356
- "char": "i",
1357
- "name": "job-id",
1358
- "summary": "Job ID of the original agent test run.",
1368
+ "api-name": {
1369
+ "char": "n",
1370
+ "name": "api-name",
1371
+ "summary": "API name of the authoring bundle you want to publish; if not specified, the command provides a list that you can choose from.",
1359
1372
  "hasDynamicHelp": false,
1360
1373
  "multiple": false,
1361
1374
  "type": "option"
1362
1375
  },
1363
- "use-most-recent": {
1364
- "char": "r",
1365
- "name": "use-most-recent",
1366
- "summary": "Use the job ID of the most recent agent test run.",
1376
+ "skip-retrieve": {
1377
+ "name": "skip-retrieve",
1378
+ "summary": "Don't retrieve the metadata associated with the agent to your DX project.",
1379
+ "allowNo": false,
1380
+ "type": "boolean"
1381
+ }
1382
+ },
1383
+ "hasDynamicHelp": true,
1384
+ "hiddenAliases": [],
1385
+ "id": "agent:publish:authoring-bundle",
1386
+ "pluginAlias": "@salesforce/plugin-agent",
1387
+ "pluginName": "@salesforce/plugin-agent",
1388
+ "pluginType": "core",
1389
+ "strict": true,
1390
+ "summary": "Publish an authoring bundle to your org, which results in a new agent or a new version of an existing agent.",
1391
+ "enableJsonFlag": true,
1392
+ "requiresProject": true,
1393
+ "FLAGGABLE_PROMPTS": {
1394
+ "api-name": {
1395
+ "message": "API name of the authoring bundle you want to publish; if not specified, the command provides a list that you can choose from.",
1396
+ "promptMessage": "API name of the authoring bundle to publish"
1397
+ }
1398
+ },
1399
+ "isESM": true,
1400
+ "relativePath": [
1401
+ "lib",
1402
+ "commands",
1403
+ "agent",
1404
+ "publish",
1405
+ "authoring-bundle.js"
1406
+ ],
1407
+ "aliasPermutations": [],
1408
+ "permutations": [
1409
+ "agent:publish:authoring-bundle",
1410
+ "publish:agent:authoring-bundle",
1411
+ "publish:authoring-bundle:agent",
1412
+ "agent:authoring-bundle:publish",
1413
+ "authoring-bundle:agent:publish",
1414
+ "authoring-bundle:publish:agent"
1415
+ ]
1416
+ },
1417
+ "agent:test:create": {
1418
+ "aliases": [],
1419
+ "args": {},
1420
+ "description": "To run this command, you must have an agent test spec file, which is a YAML file that lists the test cases for testing a specific agent. Use the \"agent generate test-spec\" CLI command to generate a test spec file. Then specify the file to this command with the --spec flag, or run this command with no flags to be prompted.\n\nWhen this command completes, your org contains the new agent test, which you can view and edit using the Testing Center UI. This command also retrieves the metadata component (AiEvaluationDefinition) associated with the new test to your local Salesforce DX project and displays its filename.\n\nAfter you've created the test in the org, use the \"agent test run\" command to run it.",
1421
+ "examples": [
1422
+ "Create an agent test interactively and be prompted for the test spec and API name of the test in the org; use the default org:\n<%= config.bin %> <%= command.id %>",
1423
+ "Create an agent test and use flags to specify all required information; if a test with same API name already exists in the org, overwrite it without confirmation. Use the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --force-overwrite --target-org my-org",
1424
+ "Preview what the agent test metadata (AiEvaluationDefinition) looks like without deploying it to your default org:\n<%= config.bin %> <%= command.id %> --spec specs/Resort_Manager-testSpec.yaml --api-name Resort_Manager_Test --preview"
1425
+ ],
1426
+ "flags": {
1427
+ "json": {
1428
+ "description": "Format output as json.",
1429
+ "helpGroup": "GLOBAL",
1430
+ "name": "json",
1367
1431
  "allowNo": false,
1368
1432
  "type": "boolean"
1369
1433
  },
1370
- "wait": {
1371
- "char": "w",
1372
- "name": "wait",
1373
- "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1374
- "default": "5 minutes",
1375
- "hasDynamicHelp": true,
1434
+ "flags-dir": {
1435
+ "helpGroup": "GLOBAL",
1436
+ "name": "flags-dir",
1437
+ "summary": "Import flag values from a directory.",
1438
+ "hasDynamicHelp": false,
1376
1439
  "multiple": false,
1377
1440
  "type": "option"
1378
1441
  },
1379
- "result-format": {
1380
- "name": "result-format",
1381
- "summary": "Format of the agent test run results.",
1382
- "default": "human",
1442
+ "api-name": {
1443
+ "name": "api-name",
1444
+ "summary": "API name of the new test; the API name must not exist in the org.",
1383
1445
  "hasDynamicHelp": false,
1384
1446
  "multiple": false,
1385
- "options": [
1386
- "json",
1387
- "human",
1388
- "junit",
1389
- "tap"
1390
- ],
1391
1447
  "type": "option"
1392
1448
  },
1393
- "output-dir": {
1394
- "char": "d",
1395
- "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1396
- "name": "output-dir",
1397
- "summary": "Directory to write the agent test results into.",
1449
+ "spec": {
1450
+ "name": "spec",
1451
+ "summary": "Path to the test spec YAML file.",
1398
1452
  "hasDynamicHelp": false,
1399
1453
  "multiple": false,
1400
1454
  "type": "option"
1401
1455
  },
1402
- "verbose": {
1403
- "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1404
- "name": "verbose",
1405
- "summary": "Show generated data in the test results output.",
1456
+ "target-org": {
1457
+ "char": "o",
1458
+ "name": "target-org",
1459
+ "noCacheDefault": true,
1460
+ "required": true,
1461
+ "summary": "Username or alias of the target org. Not required if the `target-org` configuration variable is already set.",
1462
+ "hasDynamicHelp": true,
1463
+ "multiple": false,
1464
+ "type": "option"
1465
+ },
1466
+ "api-version": {
1467
+ "description": "Override the api version used for api requests made by this command",
1468
+ "name": "api-version",
1469
+ "hasDynamicHelp": false,
1470
+ "multiple": false,
1471
+ "type": "option"
1472
+ },
1473
+ "preview": {
1474
+ "name": "preview",
1475
+ "summary": "Preview the test metadata file (AiEvaluationDefinition) without deploying to your org.",
1476
+ "allowNo": false,
1477
+ "type": "boolean"
1478
+ },
1479
+ "force-overwrite": {
1480
+ "name": "force-overwrite",
1481
+ "summary": "Don't prompt for confirmation when overwriting an existing test (based on API name) in your org.",
1406
1482
  "allowNo": false,
1407
1483
  "type": "boolean"
1408
1484
  }
1409
1485
  },
1410
1486
  "hasDynamicHelp": true,
1411
1487
  "hiddenAliases": [],
1412
- "id": "agent:test:resume",
1488
+ "id": "agent:test:create",
1413
1489
  "pluginAlias": "@salesforce/plugin-agent",
1414
1490
  "pluginName": "@salesforce/plugin-agent",
1415
1491
  "pluginType": "core",
1416
1492
  "strict": true,
1417
- "summary": "Resume an agent test that you previously started in your org so you can view the test results.",
1493
+ "summary": "Create an agent test in your org using a local test spec YAML file.",
1418
1494
  "enableJsonFlag": true,
1419
1495
  "envVariablesSection": {
1420
1496
  "header": "ENVIRONMENT VARIABLES",
@@ -1430,19 +1506,19 @@
1430
1506
  "body": [
1431
1507
  {
1432
1508
  "name": "Succeeded (0)",
1433
- "description": "Test completed successfully (with test results in the output)."
1509
+ "description": "Test created and deployed successfully."
1434
1510
  },
1435
1511
  {
1436
1512
  "name": "Failed (1)",
1437
- "description": "Tests encountered execution errors (test cases with ERROR status)."
1513
+ "description": "Test validation errors or metadata format issues."
1438
1514
  },
1439
1515
  {
1440
1516
  "name": "NotFound (2)",
1441
- "description": "Job ID not found or invalid."
1517
+ "description": "Test spec file not found or org connection failed."
1442
1518
  },
1443
1519
  {
1444
- "name": "OperationFailed (4)",
1445
- "description": "Failed to poll test due to API or network errors."
1520
+ "name": "DeploymentFailed (4)",
1521
+ "description": "Deployment failed due to API or network errors."
1446
1522
  }
1447
1523
  ]
1448
1524
  },
@@ -1452,29 +1528,25 @@
1452
1528
  "commands",
1453
1529
  "agent",
1454
1530
  "test",
1455
- "resume.js"
1531
+ "create.js"
1456
1532
  ],
1457
1533
  "aliasPermutations": [],
1458
1534
  "permutations": [
1459
- "agent:test:resume",
1460
- "test:agent:resume",
1461
- "test:resume:agent",
1462
- "agent:resume:test",
1463
- "resume:agent:test",
1464
- "resume:test:agent"
1535
+ "agent:test:create",
1536
+ "test:agent:create",
1537
+ "test:create:agent",
1538
+ "agent:create:test",
1539
+ "create:agent:test",
1540
+ "create:test:agent"
1465
1541
  ]
1466
1542
  },
1467
- "agent:test:run-eval": {
1543
+ "agent:test:list": {
1468
1544
  "aliases": [],
1469
1545
  "args": {},
1470
- "description": "Execute rich evaluation tests against an Agentforce agent using the Einstein Evaluation API. Supports both YAML test specs (same format as `sf agent generate test-spec`) and JSON payloads.\n\nWhen you provide a YAML test spec, the command automatically translates test cases into Evaluation API calls and infers the agent name from the spec's `subjectName` field. This means you can use the same test spec with both `sf agent test run` and `sf agent test run-eval`. YAML test specs also support contextVariables, which allow you to inject contextual data (such as CaseId or RoutableId) into agent sessions for testing with different contexts.\n\nWhen you provide a JSON payload, it's sent directly to the API with optional normalization. The normalizer auto-corrects common field name mistakes, converts shorthand references to JSONPath, and injects defaults. Use `--no-normalize` to disable this auto-normalization. JSON payloads can also include context_variables on agent.create_session steps for the same contextual testing capabilities.\n\nSupports 8+ evaluator types, including topic routing assertions, action invocation checks, string/numeric assertions, semantic similarity scoring, and LLM-based quality ratings.",
1546
+ "description": "The command outputs a table with the name (API name) of each test along with its unique ID and the date it was created in the org.",
1471
1547
  "examples": [
1472
- "Run tests using a YAML test spec on the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec tests/my-agent-testSpec.yaml --target-org my-org",
1473
- "Run tests using a YAML spec with explicit agent name override; use your default org:\n<%= config.bin %> <%= command.id %> --spec tests/my-agent-testSpec.yaml --api-name My_Agent --target-org my-org",
1474
- "Run tests using a JSON payload:\n<%= config.bin %> <%= command.id %> --spec tests/eval-payload.json --target-org my-org",
1475
- "Run tests and output results in JUnit format; useful for continuous integration and deployment (CI/CD):\n<%= config.bin %> <%= command.id %> --spec tests/my-agent-testSpec.yaml --target-org my-org --result-format junit",
1476
- "Run tests with contextVariables to inject contextual data into agent sessions (add contextVariables to test cases in your YAML spec):\n<%= config.bin %> <%= command.id %> --spec tests/agent-with-context.yaml --target-org my-org",
1477
- "Pipe JSON payload from stdin (--spec flag is automatically populated from stdin):\n$ echo '{\"tests\":[...]}' | <%= config.bin %> <%= command.id %> --spec --target-org my-org"
1548
+ "List the agent tests in your default org:\n<%= config.bin %> <%= command.id %>",
1549
+ "List the agent tests in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --target-org my-org"
1478
1550
  ],
1479
1551
  "flags": {
1480
1552
  "json": {
@@ -1508,72 +1580,16 @@
1508
1580
  "hasDynamicHelp": false,
1509
1581
  "multiple": false,
1510
1582
  "type": "option"
1511
- },
1512
- "spec": {
1513
- "char": "s",
1514
- "name": "spec",
1515
- "required": true,
1516
- "summary": "Path to test spec file (YAML or JSON). Supports reading from stdin when piping content.",
1517
- "hasDynamicHelp": false,
1518
- "multiple": false,
1519
- "type": "option"
1520
- },
1521
- "api-name": {
1522
- "char": "n",
1523
- "name": "api-name",
1524
- "summary": "Agent DeveloperName (also called API name) to resolve agent_id and agent_version_id. Auto-inferred from the YAML spec's subjectName.",
1525
- "hasDynamicHelp": false,
1526
- "multiple": false,
1527
- "type": "option"
1528
- },
1529
- "wait": {
1530
- "char": "w",
1531
- "name": "wait",
1532
- "summary": "Number of minutes to wait for results.",
1533
- "default": 10,
1534
- "hasDynamicHelp": false,
1535
- "multiple": false,
1536
- "type": "option"
1537
- },
1538
- "result-format": {
1539
- "name": "result-format",
1540
- "summary": "Format of the agent test run results.",
1541
- "default": "human",
1542
- "hasDynamicHelp": false,
1543
- "multiple": false,
1544
- "options": [
1545
- "json",
1546
- "human",
1547
- "junit",
1548
- "tap"
1549
- ],
1550
- "type": "option"
1551
- },
1552
- "batch-size": {
1553
- "name": "batch-size",
1554
- "summary": "Number of tests per API request (max 5).",
1555
- "default": 5,
1556
- "hasDynamicHelp": false,
1557
- "multiple": false,
1558
- "type": "option"
1559
- },
1560
- "no-normalize": {
1561
- "name": "no-normalize",
1562
- "summary": "Disable auto-normalization of field names and shorthand references.",
1563
- "allowNo": false,
1564
- "type": "boolean"
1565
1583
  }
1566
1584
  },
1567
1585
  "hasDynamicHelp": true,
1568
- "hidden": true,
1569
1586
  "hiddenAliases": [],
1570
- "id": "agent:test:run-eval",
1587
+ "id": "agent:test:list",
1571
1588
  "pluginAlias": "@salesforce/plugin-agent",
1572
1589
  "pluginName": "@salesforce/plugin-agent",
1573
1590
  "pluginType": "core",
1574
- "state": "beta",
1575
1591
  "strict": true,
1576
- "summary": "Run evaluation tests against an Agentforce agent.",
1592
+ "summary": "List the available agent tests in your org.",
1577
1593
  "enableJsonFlag": true,
1578
1594
  "envVariablesSection": {
1579
1595
  "header": "ENVIRONMENT VARIABLES",
@@ -1589,19 +1605,11 @@
1589
1605
  "body": [
1590
1606
  {
1591
1607
  "name": "Succeeded (0)",
1592
- "description": "Tests completed successfully. Test results (passed/failed) are in the JSON output."
1593
- },
1594
- {
1595
- "name": "Failed (1)",
1596
- "description": "Tests encountered execution errors (tests couldn't run properly)."
1597
- },
1598
- {
1599
- "name": "NotFound (2)",
1600
- "description": "Agent not found, spec file not found, or invalid agent name."
1608
+ "description": "Agent tests listed successfully."
1601
1609
  },
1602
1610
  {
1603
- "name": "OperationFailed (4)",
1604
- "description": "Failed to execute tests due to API or network errors."
1611
+ "name": "Failed (4)",
1612
+ "description": "Failed to retrieve agent tests due to API or network errors."
1605
1613
  }
1606
1614
  ]
1607
1615
  },
@@ -1611,26 +1619,26 @@
1611
1619
  "commands",
1612
1620
  "agent",
1613
1621
  "test",
1614
- "run-eval.js"
1622
+ "list.js"
1615
1623
  ],
1616
1624
  "aliasPermutations": [],
1617
1625
  "permutations": [
1618
- "agent:test:run-eval",
1619
- "test:agent:run-eval",
1620
- "test:run-eval:agent",
1621
- "agent:run-eval:test",
1622
- "run-eval:agent:test",
1623
- "run-eval:test:agent"
1626
+ "agent:test:list",
1627
+ "test:agent:list",
1628
+ "test:list:agent",
1629
+ "agent:list:test",
1630
+ "list:agent:test",
1631
+ "list:test:agent"
1624
1632
  ]
1625
1633
  },
1626
- "agent:test:run": {
1634
+ "agent:test:results": {
1627
1635
  "aliases": [],
1628
1636
  "args": {},
1629
- "description": "Use the --api-name flag to specify the name of the agent test you want to run. Use the output of the \"agent test list\" command to get the names of all the available agent tests in your org.\n\nBy default, this command starts the agent test in your org, but it doesn't wait for the test to finish. Instead, it displays the \"agent test resume\" command, with a job ID, that you execute to see the results of the test run, and then returns control of the terminal window to you. Use the --wait flag to specify the number of minutes for the command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, run \"agent test resume\".\n\nBy default, this command outputs test results in human-readable tables for each test case, if the test completes in time. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1637
+ "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1630
1638
  "examples": [
1631
- "Start an agent test called Resort_Manager_Test for an agent in your default org, don't wait for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test",
1632
- "Start an agent test for an agent in an org with alias \"my-org\" and wait for 10 minutes for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --target-org my-org",
1633
- "Start an agent test and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --output-dir ./test-results --result-format json"
1639
+ "Get the results of an agent test run in your default org using its job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1640
+ "Get the results of the most recently run agent test in an org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --use-most-recent --target-org my-org",
1641
+ "Get the results of the most recently run agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1634
1642
  ],
1635
1643
  "flags": {
1636
1644
  "json": {
@@ -1665,22 +1673,15 @@
1665
1673
  "multiple": false,
1666
1674
  "type": "option"
1667
1675
  },
1668
- "api-name": {
1669
- "char": "n",
1670
- "name": "api-name",
1671
- "summary": "API name of the agent test to run; corresponds to the name of the AiEvaluationDefinition metadata component that implements the agent test.",
1676
+ "job-id": {
1677
+ "char": "i",
1678
+ "name": "job-id",
1679
+ "required": true,
1680
+ "summary": "Job ID of the completed agent test run.",
1672
1681
  "hasDynamicHelp": false,
1673
1682
  "multiple": false,
1674
1683
  "type": "option"
1675
1684
  },
1676
- "wait": {
1677
- "char": "w",
1678
- "name": "wait",
1679
- "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1680
- "hasDynamicHelp": true,
1681
- "multiple": false,
1682
- "type": "option"
1683
- },
1684
1685
  "result-format": {
1685
1686
  "name": "result-format",
1686
1687
  "summary": "Format of the agent test run results.",
@@ -1714,12 +1715,12 @@
1714
1715
  },
1715
1716
  "hasDynamicHelp": true,
1716
1717
  "hiddenAliases": [],
1717
- "id": "agent:test:run",
1718
+ "id": "agent:test:results",
1718
1719
  "pluginAlias": "@salesforce/plugin-agent",
1719
1720
  "pluginName": "@salesforce/plugin-agent",
1720
1721
  "pluginType": "core",
1721
1722
  "strict": true,
1722
- "summary": "Start an agent test in your org.",
1723
+ "summary": "Get the results of a completed agent test run.",
1723
1724
  "enableJsonFlag": true,
1724
1725
  "envVariablesSection": {
1725
1726
  "header": "ENVIRONMENT VARIABLES",
@@ -1735,19 +1736,15 @@
1735
1736
  "body": [
1736
1737
  {
1737
1738
  "name": "Succeeded (0)",
1738
- "description": "Test started successfully (without --wait), or test completed successfully (with --wait)."
1739
- },
1740
- {
1741
- "name": "Failed (1)",
1742
- "description": "Tests encountered execution errors (test cases with ERROR status when using --wait)."
1739
+ "description": "Results retrieved successfully. Test results (passed/failed) are in the output."
1743
1740
  },
1744
1741
  {
1745
1742
  "name": "NotFound (2)",
1746
- "description": "Test definition not found or invalid test name."
1743
+ "description": "Job ID not found or invalid."
1747
1744
  },
1748
1745
  {
1749
- "name": "OperationFailed (4)",
1750
- "description": "Failed to start or poll test due to API or network errors."
1746
+ "name": "Failed (4)",
1747
+ "description": "Failed to retrieve results due to API or network errors."
1751
1748
  }
1752
1749
  ]
1753
1750
  },
@@ -1757,26 +1754,26 @@
1757
1754
  "commands",
1758
1755
  "agent",
1759
1756
  "test",
1760
- "run.js"
1757
+ "results.js"
1761
1758
  ],
1762
1759
  "aliasPermutations": [],
1763
1760
  "permutations": [
1764
- "agent:test:run",
1765
- "test:agent:run",
1766
- "test:run:agent",
1767
- "agent:run:test",
1768
- "run:agent:test",
1769
- "run:test:agent"
1761
+ "agent:test:results",
1762
+ "test:agent:results",
1763
+ "test:results:agent",
1764
+ "agent:results:test",
1765
+ "results:agent:test",
1766
+ "results:test:agent"
1770
1767
  ]
1771
1768
  },
1772
- "agent:preview:end": {
1769
+ "agent:test:resume": {
1773
1770
  "aliases": [],
1774
1771
  "args": {},
1775
- "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to end it. This command also displays the local directory where the session trace files are stored.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to end the session. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respecitvely. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
1772
+ "description": "This command requires a job ID, which the original \"agent test run\" command displays when it completes. You can also use the --use-most-recent flag to see results for the most recently run agent test.\n\nUse the --wait flag to specify the number of minutes for this command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, the CLI returns control of the terminal to you, and you must run \"agent test resume\" again.\n\nBy default, this command outputs test results in human-readable tables for each test case. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
1776
1773
  "examples": [
1777
- "End a preview session of a published agent by specifying its session ID and API name ; use the default org:\n<%= config.bin %> <%= command.id %> --session-id <SESSION_ID> --api-name My_Published_Agent",
1778
- "Similar to previous example, but don't specify a session ID; you get an error if the published agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent --target-org my-dev-org",
1779
- "End a preview session of an agent using its authoring bundle API name; you get an error if the agent has more than one active session.\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Local_Agent"
1774
+ "Resume an agent test in your default org using a job ID:\n<%= config.bin %> <%= command.id %> --job-id 4KBfake0000003F4AQ",
1775
+ "Resume the most recently-run agent test in an org with alias \"my-org\" org; wait 10 minutes for the tests to finish:\n<%= config.bin %> <%= command.id %> --use-most-recent --wait 10 --target-org my-org",
1776
+ "Resume the most recent agent test in your default org, and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --use-most-recent --output-dir ./test-results --result-format json"
1780
1777
  ],
1781
1778
  "flags": {
1782
1779
  "json": {
@@ -1811,40 +1808,70 @@
1811
1808
  "multiple": false,
1812
1809
  "type": "option"
1813
1810
  },
1814
- "session-id": {
1815
- "name": "session-id",
1816
- "required": false,
1817
- "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see the list of all sessions.",
1811
+ "job-id": {
1812
+ "char": "i",
1813
+ "name": "job-id",
1814
+ "summary": "Job ID of the original agent test run.",
1818
1815
  "hasDynamicHelp": false,
1819
1816
  "multiple": false,
1820
1817
  "type": "option"
1821
1818
  },
1822
- "api-name": {
1823
- "char": "n",
1824
- "name": "api-name",
1825
- "summary": "API name of the activated published agent you want to preview.",
1819
+ "use-most-recent": {
1820
+ "char": "r",
1821
+ "name": "use-most-recent",
1822
+ "summary": "Use the job ID of the most recent agent test run.",
1823
+ "allowNo": false,
1824
+ "type": "boolean"
1825
+ },
1826
+ "wait": {
1827
+ "char": "w",
1828
+ "name": "wait",
1829
+ "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
1830
+ "default": "5 minutes",
1831
+ "hasDynamicHelp": true,
1832
+ "multiple": false,
1833
+ "type": "option"
1834
+ },
1835
+ "result-format": {
1836
+ "name": "result-format",
1837
+ "summary": "Format of the agent test run results.",
1838
+ "default": "human",
1826
1839
  "hasDynamicHelp": false,
1827
1840
  "multiple": false,
1841
+ "options": [
1842
+ "json",
1843
+ "human",
1844
+ "junit",
1845
+ "tap"
1846
+ ],
1828
1847
  "type": "option"
1829
1848
  },
1830
- "authoring-bundle": {
1831
- "name": "authoring-bundle",
1832
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1849
+ "output-dir": {
1850
+ "char": "d",
1851
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
1852
+ "name": "output-dir",
1853
+ "summary": "Directory to write the agent test results into.",
1833
1854
  "hasDynamicHelp": false,
1834
1855
  "multiple": false,
1835
1856
  "type": "option"
1857
+ },
1858
+ "verbose": {
1859
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
1860
+ "name": "verbose",
1861
+ "summary": "Show generated data in the test results output.",
1862
+ "allowNo": false,
1863
+ "type": "boolean"
1836
1864
  }
1837
1865
  },
1838
1866
  "hasDynamicHelp": true,
1839
1867
  "hiddenAliases": [],
1840
- "id": "agent:preview:end",
1868
+ "id": "agent:test:resume",
1841
1869
  "pluginAlias": "@salesforce/plugin-agent",
1842
1870
  "pluginName": "@salesforce/plugin-agent",
1843
1871
  "pluginType": "core",
1844
1872
  "strict": true,
1845
- "summary": "End an existing programmatic agent preview session and get trace location.",
1873
+ "summary": "Resume an agent test that you previously started in your org so you can view the test results.",
1846
1874
  "enableJsonFlag": true,
1847
- "requiresProject": true,
1848
1875
  "envVariablesSection": {
1849
1876
  "header": "ENVIRONMENT VARIABLES",
1850
1877
  "body": [
@@ -1859,19 +1886,19 @@
1859
1886
  "body": [
1860
1887
  {
1861
1888
  "name": "Succeeded (0)",
1862
- "description": "Preview session ended successfully and traces saved."
1889
+ "description": "Test completed successfully (with test results in the output)."
1863
1890
  },
1864
1891
  {
1865
- "name": "NotFound (2)",
1866
- "description": "Agent not found, or no preview session exists for this agent."
1892
+ "name": "Failed (1)",
1893
+ "description": "Tests encountered execution errors (test cases with ERROR status)."
1867
1894
  },
1868
1895
  {
1869
- "name": "PreviewEndFailed (4)",
1870
- "description": "Failed to end the preview session."
1896
+ "name": "NotFound (2)",
1897
+ "description": "Job ID not found or invalid."
1871
1898
  },
1872
1899
  {
1873
- "name": "SessionAmbiguous (5)",
1874
- "description": "Multiple preview sessions found; specify --session-id to choose one."
1900
+ "name": "OperationFailed (4)",
1901
+ "description": "Failed to poll test due to API or network errors."
1875
1902
  }
1876
1903
  ]
1877
1904
  },
@@ -1880,27 +1907,30 @@
1880
1907
  "lib",
1881
1908
  "commands",
1882
1909
  "agent",
1883
- "preview",
1884
- "end.js"
1910
+ "test",
1911
+ "resume.js"
1885
1912
  ],
1886
1913
  "aliasPermutations": [],
1887
1914
  "permutations": [
1888
- "agent:preview:end",
1889
- "preview:agent:end",
1890
- "preview:end:agent",
1891
- "agent:end:preview",
1892
- "end:agent:preview",
1893
- "end:preview:agent"
1915
+ "agent:test:resume",
1916
+ "test:agent:resume",
1917
+ "test:resume:agent",
1918
+ "agent:resume:test",
1919
+ "resume:agent:test",
1920
+ "resume:test:agent"
1894
1921
  ]
1895
1922
  },
1896
- "agent:preview:send": {
1923
+ "agent:test:run-eval": {
1897
1924
  "aliases": [],
1898
1925
  "args": {},
1899
+ "description": "You must have previously started a programmatic agent preview session with the \"agent preview start\" command to then use this command to send the agent a message (utterance). This command then displays the agent's response.\n\nThe original \"agent preview start\" command outputs a session ID which you then use with the --session-id flag of this command to send a message. You don't have to specify the --session-id flag if an agent has only one active preview session. You must also use either the --authoring-bundle or --api-name flag to specify the API name of the authoring bundle or the published agent, respectively. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.",
1926
+ "description": "Execute rich evaluation tests against an Agentforce agent using the Einstein Evaluation API. Supports both YAML test specs (same format as `sf agent generate test-spec`) and JSON payloads.\n\nWhen you provide a YAML test spec, the command automatically translates test cases into Evaluation API calls and infers the agent name from the spec's `subjectName` field. This means you can use the same test spec with both `sf agent test run` and `sf agent test run-eval`. YAML test specs also support contextVariables, which allow you to inject contextual data (such as CaseId or RoutableId) into agent sessions for testing with different contexts.\n\nWhen you provide a JSON payload, it's sent directly to the API with optional normalization. The normalizer auto-corrects common field name mistakes, converts shorthand references to JSONPath, and injects defaults. Use `--no-normalize` to disable this auto-normalization. JSON payloads can also include context_variables on agent.create_session steps for the same contextual testing capabilities.\n\nSupports 8+ evaluator types, including topic routing assertions, action invocation checks, string/numeric assertions, semantic similarity scoring, and LLM-based quality ratings.",
1900
1927
  "examples": [
1901
- "Send a message to an activated published agent using its API name and session ID; use the default org:\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --session-id <SESSION_ID>",
1902
- "Similar to previous example, but don't specify a session ID; you get an error if the agent has more than one active session. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --utterance \"What can you help me with?\" --api-name My_Published_Agent --target-org my-dev-org",
1903
- "Send a message to an agent using its authoring bundle API name; you get an error if the agent has more than one active session:\n<%= config.bin %> <%= command.id %> --utterance \"what can you help me with?\" --authoring-bundle My_Local_Agent"
1928
+ "Run tests using a YAML test spec on the org with alias \"my-org\":\n<%= config.bin %> <%= command.id %> --spec tests/my-agent-testSpec.yaml --target-org my-org",
1929
+ "Run tests using a YAML spec with explicit agent name override; use your default org:\n<%= config.bin %> <%= command.id %> --spec tests/my-agent-testSpec.yaml --api-name My_Agent --target-org my-org",
1930
+ "Run tests using a JSON payload:\n<%= config.bin %> <%= command.id %> --spec tests/eval-payload.json --target-org my-org",
1931
+ "Run tests and output results in JUnit format; useful for continuous integration and deployment (CI/CD):\n<%= config.bin %> <%= command.id %> --spec tests/my-agent-testSpec.yaml --target-org my-org --result-format junit",
1932
+ "Run tests with contextVariables to inject contextual data into agent sessions (add contextVariables to test cases in your YAML spec):\n<%= config.bin %> <%= command.id %> --spec tests/agent-with-context.yaml --target-org my-org",
1933
+ "Pipe JSON payload from stdin (--spec flag is automatically populated from stdin):\n$ echo '{\"tests\":[...]}' | <%= config.bin %> <%= command.id %> --spec --target-org my-org"
1904
1934
  ],
1905
1935
  "flags": {
1906
1936
  "json": {
@@ -1935,19 +1965,11 @@
1935
1965
  "multiple": false,
1936
1966
  "type": "option"
1937
1967
  },
1938
- "session-id": {
1939
- "name": "session-id",
1940
- "required": false,
1941
- "summary": "Session ID outputted by \"agent preview start\". Not required when the agent has exactly one active session. Run \"agent preview sessions\" to see list of all sessions.",
1942
- "hasDynamicHelp": false,
1943
- "multiple": false,
1944
- "type": "option"
1945
- },
1946
- "utterance": {
1947
- "char": "u",
1948
- "name": "utterance",
1968
+ "spec": {
1969
+ "char": "s",
1970
+ "name": "spec",
1949
1971
  "required": true,
1950
- "summary": "Utterance to send to the agent, enclosed in double quotes.",
1972
+ "summary": "Path to test spec file (YAML or JSON). Supports reading from stdin when piping content.",
1951
1973
  "hasDynamicHelp": false,
1952
1974
  "multiple": false,
1953
1975
  "type": "option"
@@ -1955,29 +1977,60 @@
1955
1977
  "api-name": {
1956
1978
  "char": "n",
1957
1979
  "name": "api-name",
1958
- "summary": "API name of the activated published agent you want to preview.",
1980
+ "summary": "Agent DeveloperName (also called API name) to resolve agent_id and agent_version_id. Auto-inferred from the YAML spec's subjectName.",
1959
1981
  "hasDynamicHelp": false,
1960
1982
  "multiple": false,
1961
1983
  "type": "option"
1962
1984
  },
1963
- "authoring-bundle": {
1964
- "name": "authoring-bundle",
1965
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
1985
+ "wait": {
1986
+ "char": "w",
1987
+ "name": "wait",
1988
+ "summary": "Number of minutes to wait for results.",
1989
+ "default": 10,
1990
+ "hasDynamicHelp": false,
1991
+ "multiple": false,
1992
+ "type": "option"
1993
+ },
1994
+ "result-format": {
1995
+ "name": "result-format",
1996
+ "summary": "Format of the agent test run results.",
1997
+ "default": "human",
1998
+ "hasDynamicHelp": false,
1999
+ "multiple": false,
2000
+ "options": [
2001
+ "json",
2002
+ "human",
2003
+ "junit",
2004
+ "tap"
2005
+ ],
2006
+ "type": "option"
2007
+ },
2008
+ "batch-size": {
2009
+ "name": "batch-size",
2010
+ "summary": "Number of tests per API request (max 5).",
2011
+ "default": 5,
1966
2012
  "hasDynamicHelp": false,
1967
2013
  "multiple": false,
1968
2014
  "type": "option"
2015
+ },
2016
+ "no-normalize": {
2017
+ "name": "no-normalize",
2018
+ "summary": "Disable auto-normalization of field names and shorthand references.",
2019
+ "allowNo": false,
2020
+ "type": "boolean"
1969
2021
  }
1970
2022
  },
1971
2023
  "hasDynamicHelp": true,
2024
+ "hidden": true,
1972
2025
  "hiddenAliases": [],
1973
- "id": "agent:preview:send",
2026
+ "id": "agent:test:run-eval",
1974
2027
  "pluginAlias": "@salesforce/plugin-agent",
1975
2028
  "pluginName": "@salesforce/plugin-agent",
1976
2029
  "pluginType": "core",
2030
+ "state": "beta",
1977
2031
  "strict": true,
1978
- "summary": "Send a message to an existing agent preview session.",
2032
+ "summary": "Run evaluation tests against an Agentforce agent.",
1979
2033
  "enableJsonFlag": true,
1980
- "requiresProject": true,
1981
2034
  "envVariablesSection": {
1982
2035
  "header": "ENVIRONMENT VARIABLES",
1983
2036
  "body": [
@@ -1992,80 +2045,19 @@
1992
2045
  "body": [
1993
2046
  {
1994
2047
  "name": "Succeeded (0)",
1995
- "description": "Message sent successfully and agent response received."
2048
+ "description": "Tests completed successfully. Test results (passed/failed) are in the JSON output."
1996
2049
  },
1997
2050
  {
1998
- "name": "NotFound (2)",
1999
- "description": "Agent not found, or no preview session exists for this agent."
2051
+ "name": "Failed (1)",
2052
+ "description": "Tests encountered execution errors (tests couldn't run properly)."
2000
2053
  },
2001
2054
  {
2002
- "name": "PreviewSendFailed (4)",
2003
- "description": "Failed to send message or receive response from the preview session."
2055
+ "name": "NotFound (2)",
2056
+ "description": "Agent not found, spec file not found, or invalid agent name."
2004
2057
  },
2005
2058
  {
2006
- "name": "SessionAmbiguous (5)",
2007
- "description": "Multiple preview sessions found; specify --session-id to choose one."
2008
- }
2009
- ]
2010
- },
2011
- "isESM": true,
2012
- "relativePath": [
2013
- "lib",
2014
- "commands",
2015
- "agent",
2016
- "preview",
2017
- "send.js"
2018
- ],
2019
- "aliasPermutations": [],
2020
- "permutations": [
2021
- "agent:preview:send",
2022
- "preview:agent:send",
2023
- "preview:send:agent",
2024
- "agent:send:preview",
2025
- "send:agent:preview",
2026
- "send:preview:agent"
2027
- ]
2028
- },
2029
- "agent:preview:sessions": {
2030
- "aliases": [],
2031
- "args": {},
2032
- "description": "This command lists the agent preview sessions that were started with the \"agent preview start\" command and are still in the local cache. Use this command to discover specific session IDs that you can pass to the \"agent preview send\" or \"agent preview end\" commands with the --session-id flag.\n\nProgrammatic agent preview sessions can be started for both published activated agents and by using an agent's local authoring bundle, which contains its Agent Script file. In this command's output table, the Agent column contains either the API name of the authoring bundle or the published agent, whichever was used when starting the session. In the table, if the same API name has multiple rows with different session IDs, then it means that you previously started multiple preview sessions with the associated agent.",
2033
- "examples": [
2034
- "List all cached agent preview sessions:\n<%= config.bin %> <%= command.id %>"
2035
- ],
2036
- "flags": {
2037
- "json": {
2038
- "description": "Format output as json.",
2039
- "helpGroup": "GLOBAL",
2040
- "name": "json",
2041
- "allowNo": false,
2042
- "type": "boolean"
2043
- },
2044
- "flags-dir": {
2045
- "helpGroup": "GLOBAL",
2046
- "name": "flags-dir",
2047
- "summary": "Import flag values from a directory.",
2048
- "hasDynamicHelp": false,
2049
- "multiple": false,
2050
- "type": "option"
2051
- }
2052
- },
2053
- "hasDynamicHelp": false,
2054
- "hiddenAliases": [],
2055
- "id": "agent:preview:sessions",
2056
- "pluginAlias": "@salesforce/plugin-agent",
2057
- "pluginName": "@salesforce/plugin-agent",
2058
- "pluginType": "core",
2059
- "strict": true,
2060
- "summary": "List all known programmatic agent preview sessions.",
2061
- "enableJsonFlag": true,
2062
- "requiresProject": true,
2063
- "errorCodes": {
2064
- "header": "ERROR CODES",
2065
- "body": [
2066
- {
2067
- "name": "Succeeded (0)",
2068
- "description": "Sessions listed successfully (or empty list if no active sessions)."
2059
+ "name": "OperationFailed (4)",
2060
+ "description": "Failed to execute tests due to API or network errors."
2069
2061
  }
2070
2062
  ]
2071
2063
  },
@@ -2074,27 +2066,27 @@
2074
2066
  "lib",
2075
2067
  "commands",
2076
2068
  "agent",
2077
- "preview",
2078
- "sessions.js"
2069
+ "test",
2070
+ "run-eval.js"
2079
2071
  ],
2080
2072
  "aliasPermutations": [],
2081
2073
  "permutations": [
2082
- "agent:preview:sessions",
2083
- "preview:agent:sessions",
2084
- "preview:sessions:agent",
2085
- "agent:sessions:preview",
2086
- "sessions:agent:preview",
2087
- "sessions:preview:agent"
2074
+ "agent:test:run-eval",
2075
+ "test:agent:run-eval",
2076
+ "test:run-eval:agent",
2077
+ "agent:run-eval:test",
2078
+ "run-eval:agent:test",
2079
+ "run-eval:test:agent"
2088
2080
  ]
2089
2081
  },
2090
- "agent:preview:start": {
2082
+ "agent:test:run": {
2091
2083
  "aliases": [],
2092
2084
  "args": {},
2093
- "description": "This command outputs a session ID that you then use with the \"agent preview send\" command to send an utterance to the agent. Use the \"agent preview sessions\" command to list all active sessions and the \"agent preview end\" command to end a specific session.\n\nIdentify the agent you want to start previewing with either the --authoring-bundle flag to specify a local authoring bundle's API name or --api-name to specify an activated published agent's API name. To find either API name, navigate to your package directory in your DX project. The API name of an authoring bundle is the same as its directory name under the \"aiAuthoringBundles\" metadata directory. Similarly, the published agent's API name is the same as its directory name under the \"Bots\" metadata directory.\n\nWhen starting a preview session with --authoring-bundle, you must explicitly specify the execution mode using one of these flags:\n\n- --use-live-actions: Executes real Apex classes, flows, and other actions in the org. This surfaces compile and validation errors during preview.\n- --simulate-actions: Uses AI to simulate action execution without calling real implementations.\n\nPublished agents (--api-name) always use live actions. The mode flags are optional and have no effect for published agents.",
2085
+ "description": "Use the --api-name flag to specify the name of the agent test you want to run. Use the output of the \"agent test list\" command to get the names of all the available agent tests in your org.\n\nBy default, this command starts the agent test in your org, but it doesn't wait for the test to finish. Instead, it displays the \"agent test resume\" command, with a job ID, that you execute to see the results of the test run, and then returns control of the terminal window to you. Use the --wait flag to specify the number of minutes for the command to wait for the agent test to complete; if the test completes by the end of the wait time, the command displays the test results. If not, run \"agent test resume\".\n\nBy default, this command outputs test results in human-readable tables for each test case, if the test completes in time. The tables show whether the test case passed, the expected and actual values, the test score, how long the test took, and more. Use the --result-format to display the test results in JSON or Junit format. Use the --output-dir flag to write the results to a file rather than to the terminal.",
2094
2086
  "examples": [
2095
- "Start a programmatic agent preview session by specifying an authoring bundle; use simulated actions. Use the org with alias \"my-dev-org\":\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --target-org my-dev-org --simulate-actions",
2096
- "Similar to previous example but use live actions and the default org:\n<%= config.bin %> <%= command.id %> --authoring-bundle My_Agent_Bundle --use-live-actions",
2097
- "Start a preview session with an activated published agent (always uses live actions):\n<%= config.bin %> <%= command.id %> --api-name My_Published_Agent"
2087
+ "Start an agent test called Resort_Manager_Test for an agent in your default org, don't wait for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test",
2088
+ "Start an agent test for an agent in an org with alias \"my-org\" and wait for 10 minutes for the test to finish:\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --target-org my-org",
2089
+ "Start an agent test and write the JSON-formatted results into a directory called \"test-results\":\n<%= config.bin %> <%= command.id %> --api-name Resort_Manager_Test --wait 10 --output-dir ./test-results --result-format json"
2098
2090
  ],
2099
2091
  "flags": {
2100
2092
  "json": {
@@ -2132,47 +2124,59 @@
2132
2124
  "api-name": {
2133
2125
  "char": "n",
2134
2126
  "name": "api-name",
2135
- "summary": "API name of the activated published agent you want to preview.",
2127
+ "summary": "API name of the agent test to run; corresponds to the name of the AiEvaluationDefinition metadata component that implements the agent test.",
2136
2128
  "hasDynamicHelp": false,
2137
2129
  "multiple": false,
2138
2130
  "type": "option"
2139
2131
  },
2140
- "authoring-bundle": {
2141
- "name": "authoring-bundle",
2142
- "summary": "API name of the authoring bundle metadata component that contains the agent's Agent Script file.",
2143
- "hasDynamicHelp": false,
2132
+ "wait": {
2133
+ "char": "w",
2134
+ "name": "wait",
2135
+ "summary": "Number of minutes to wait for the command to complete and display results to the terminal window.",
2136
+ "hasDynamicHelp": true,
2144
2137
  "multiple": false,
2145
2138
  "type": "option"
2146
2139
  },
2147
- "use-live-actions": {
2148
- "exclusive": [
2149
- "simulate-actions"
2140
+ "result-format": {
2141
+ "name": "result-format",
2142
+ "summary": "Format of the agent test run results.",
2143
+ "default": "human",
2144
+ "hasDynamicHelp": false,
2145
+ "multiple": false,
2146
+ "options": [
2147
+ "json",
2148
+ "human",
2149
+ "junit",
2150
+ "tap"
2150
2151
  ],
2151
- "name": "use-live-actions",
2152
- "summary": "Execute real actions in the org (Apex classes, flows, etc.). Required with --authoring-bundle.",
2153
- "allowNo": false,
2154
- "type": "boolean"
2152
+ "type": "option"
2155
2153
  },
2156
- "simulate-actions": {
2157
- "exclusive": [
2158
- "use-live-actions"
2159
- ],
2160
- "name": "simulate-actions",
2161
- "summary": "Use AI to simulate action execution instead of calling real actions. Required with --authoring-bundle.",
2154
+ "output-dir": {
2155
+ "char": "d",
2156
+ "description": "If the agent test run completes, write the results to the specified directory. If the test is still running, the test results aren't written.",
2157
+ "name": "output-dir",
2158
+ "summary": "Directory to write the agent test results into.",
2159
+ "hasDynamicHelp": false,
2160
+ "multiple": false,
2161
+ "type": "option"
2162
+ },
2163
+ "verbose": {
2164
+ "description": "When enabled, includes detailed generated data (such as invoked actions) in the human-readable test results output. This is useful for debugging test failures and understanding what actions were actually invoked during the test run.\n\nThe generated data is in JSON format and includes the Apex classes or Flows that were invoked, the Salesforce objects that were touched, and so on. Use the JSON structure of this information to build the test case JSONPath expression when using custom evaluations.",
2165
+ "name": "verbose",
2166
+ "summary": "Show generated data in the test results output.",
2162
2167
  "allowNo": false,
2163
2168
  "type": "boolean"
2164
2169
  }
2165
2170
  },
2166
2171
  "hasDynamicHelp": true,
2167
2172
  "hiddenAliases": [],
2168
- "id": "agent:preview:start",
2173
+ "id": "agent:test:run",
2169
2174
  "pluginAlias": "@salesforce/plugin-agent",
2170
2175
  "pluginName": "@salesforce/plugin-agent",
2171
2176
  "pluginType": "core",
2172
2177
  "strict": true,
2173
- "summary": "Start a programmatic agent preview session.",
2178
+ "summary": "Start an agent test in your org.",
2174
2179
  "enableJsonFlag": true,
2175
- "requiresProject": true,
2176
2180
  "envVariablesSection": {
2177
2181
  "header": "ENVIRONMENT VARIABLES",
2178
2182
  "body": [
@@ -2187,23 +2191,19 @@
2187
2191
  "body": [
2188
2192
  {
2189
2193
  "name": "Succeeded (0)",
2190
- "description": "Preview session started successfully."
2194
+ "description": "Test started successfully (without --wait), or test completed successfully (with --wait)."
2191
2195
  },
2192
2196
  {
2193
2197
  "name": "Failed (1)",
2194
- "description": "Agent Script compilation failed (syntax errors in the script)."
2198
+ "description": "Tests encountered execution errors (test cases with ERROR status when using --wait)."
2195
2199
  },
2196
2200
  {
2197
2201
  "name": "NotFound (2)",
2198
- "description": "Agent not found, or compilation API returned HTTP 404 (endpoint may not be available in your org or region)."
2199
- },
2200
- {
2201
- "name": "ServerError (3)",
2202
- "description": "Compilation API returned HTTP 500 (server error during compilation)."
2202
+ "description": "Test definition not found or invalid test name."
2203
2203
  },
2204
2204
  {
2205
- "name": "PreviewStartFailed (4)",
2206
- "description": "Preview session failed to start due to API or network errors."
2205
+ "name": "OperationFailed (4)",
2206
+ "description": "Failed to start or poll test due to API or network errors."
2207
2207
  }
2208
2208
  ]
2209
2209
  },
@@ -2212,17 +2212,17 @@
2212
2212
  "lib",
2213
2213
  "commands",
2214
2214
  "agent",
2215
- "preview",
2216
- "start.js"
2215
+ "test",
2216
+ "run.js"
2217
2217
  ],
2218
2218
  "aliasPermutations": [],
2219
2219
  "permutations": [
2220
- "agent:preview:start",
2221
- "preview:agent:start",
2222
- "preview:start:agent",
2223
- "agent:start:preview",
2224
- "start:agent:preview",
2225
- "start:preview:agent"
2220
+ "agent:test:run",
2221
+ "test:agent:run",
2222
+ "test:run:agent",
2223
+ "agent:run:test",
2224
+ "run:agent:test",
2225
+ "run:test:agent"
2226
2226
  ]
2227
2227
  },
2228
2228
  "agent:validate:authoring-bundle": {
@@ -2340,5 +2340,5 @@
2340
2340
  ]
2341
2341
  }
2342
2342
  },
2343
- "version": "1.32.21"
2343
+ "version": "1.33.0"
2344
2344
  }