@arizeai/phoenix-evals 0.8.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/dist/esm/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.d.ts +3 -0
  2. package/dist/esm/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.d.ts.map +1 -0
  3. package/dist/esm/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.js +86 -0
  4. package/dist/esm/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.js.map +1 -0
  5. package/dist/esm/__generated__/default_templates/index.d.ts +1 -0
  6. package/dist/esm/__generated__/default_templates/index.d.ts.map +1 -1
  7. package/dist/esm/__generated__/default_templates/index.js +1 -0
  8. package/dist/esm/__generated__/default_templates/index.js.map +1 -1
  9. package/dist/esm/llm/createToolResponseHandlingEvaluator.d.ts +78 -0
  10. package/dist/esm/llm/createToolResponseHandlingEvaluator.d.ts.map +1 -0
  11. package/dist/esm/llm/createToolResponseHandlingEvaluator.js +59 -0
  12. package/dist/esm/llm/createToolResponseHandlingEvaluator.js.map +1 -0
  13. package/dist/esm/llm/createToolSelectionEvaluator.d.ts +64 -0
  14. package/dist/esm/llm/createToolSelectionEvaluator.d.ts.map +1 -0
  15. package/dist/esm/llm/createToolSelectionEvaluator.js +50 -0
  16. package/dist/esm/llm/createToolSelectionEvaluator.js.map +1 -0
  17. package/dist/esm/llm/index.d.ts +2 -0
  18. package/dist/esm/llm/index.d.ts.map +1 -1
  19. package/dist/esm/llm/index.js +2 -0
  20. package/dist/esm/llm/index.js.map +1 -1
  21. package/dist/esm/tsconfig.esm.tsbuildinfo +1 -1
  22. package/dist/src/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.d.ts +3 -0
  23. package/dist/src/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.d.ts.map +1 -0
  24. package/dist/src/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.js +89 -0
  25. package/dist/src/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.js.map +1 -0
  26. package/dist/src/__generated__/default_templates/index.d.ts +1 -0
  27. package/dist/src/__generated__/default_templates/index.d.ts.map +1 -1
  28. package/dist/src/__generated__/default_templates/index.js +3 -1
  29. package/dist/src/__generated__/default_templates/index.js.map +1 -1
  30. package/dist/src/llm/createToolResponseHandlingEvaluator.d.ts +78 -0
  31. package/dist/src/llm/createToolResponseHandlingEvaluator.d.ts.map +1 -0
  32. package/dist/src/llm/createToolResponseHandlingEvaluator.js +70 -0
  33. package/dist/src/llm/createToolResponseHandlingEvaluator.js.map +1 -0
  34. package/dist/src/llm/createToolSelectionEvaluator.d.ts +64 -0
  35. package/dist/src/llm/createToolSelectionEvaluator.d.ts.map +1 -0
  36. package/dist/src/llm/createToolSelectionEvaluator.js +61 -0
  37. package/dist/src/llm/createToolSelectionEvaluator.js.map +1 -0
  38. package/dist/src/llm/index.d.ts +2 -0
  39. package/dist/src/llm/index.d.ts.map +1 -1
  40. package/dist/src/llm/index.js +2 -0
  41. package/dist/src/llm/index.js.map +1 -1
  42. package/dist/tsconfig.tsbuildinfo +1 -1
  43. package/package.json +1 -1
  44. package/src/__generated__/default_templates/TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.ts +88 -0
  45. package/src/__generated__/default_templates/index.ts +1 -0
  46. package/src/llm/createToolResponseHandlingEvaluator.ts +109 -0
  47. package/src/llm/createToolSelectionEvaluator.ts +93 -0
  48. package/src/llm/index.ts +2 -0
@@ -0,0 +1,88 @@
1
+ // This file is generated. Do not edit by hand.
2
+
3
+ import type { ClassificationEvaluatorConfig } from "../types";
4
+
5
+ export const TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG: ClassificationEvaluatorConfig = {
6
+ name: "tool_response_handling",
7
+ description: "For determining if an AI agent properly handled a tool's response, including error handling, data extraction, transformation, and safe information disclosure. Requires conversation context, the tool call(s), the tool result(s), and the agent's output.",
8
+ optimizationDirection: "MAXIMIZE",
9
+ template: [
10
+ {
11
+ role: "user",
12
+ content: `
13
+ You are an impartial judge evaluating an AI agent's handling of a tool's response. Your task is to determine whether the agent correctly processed the tool result to produce an appropriate output.
14
+
15
+ IMPORTANT - Scope of Evaluation:
16
+ - You are ONLY evaluating how the agent handled the tool response, NOT whether the right tool was selected or whether the tool was invoked correctly.
17
+ - This evaluation focuses on what happens AFTER the tool returns a result.
18
+
19
+ IMPORTANT - Multi-Tool Handling:
20
+ - The agent may make MULTIPLE tool calls in a single interaction. This is valid and expected.
21
+ - When multiple tools are called, evaluate how the agent handled ALL tool results together.
22
+ - Return "correct" only if the agent properly handled ALL tool results.
23
+ - Return "incorrect" if the agent mishandled ANY tool result.
24
+
25
+ IMPORTANT - Error Response Handling:
26
+ - Tool results may contain errors (rate limits, timeouts, not found, invalid arguments, etc.).
27
+ - The agent's output may include retries, follow-up tool calls, or a final response to the user.
28
+ - Evaluate the ENTIRE handling sequence, not just the final message.
29
+ - Appropriate error handling includes:
30
+ - Retrying on transient errors (rate limits, timeouts)
31
+ - Correcting arguments after invalid argument errors
32
+ - Informing the user appropriately when errors are not recoverable
33
+ - NOT making repeated identical calls that continue to fail
34
+
35
+ Criteria for CORRECT handling:
36
+ - Data is extracted accurately from the tool result (no hallucination of data that wasn't returned)
37
+ - Dates, numbers, and structured fields are properly transformed and formatted
38
+ - Results are accurately summarized to address the user's original query
39
+ - Error responses are handled appropriately (retries for transient errors, corrections for invalid arguments)
40
+ - No repeated identical calls after non-retryable errors
41
+ - No disclosure of sensitive/internal information (database credentials, internal URLs, PII, API keys, etc.)
42
+ - The agent's response actually uses the tool result rather than ignoring it
43
+
44
+ Criteria for INCORRECT handling:
45
+ - Hallucinated data: The output includes information not present in the tool result
46
+ - Misinterpretation: The meaning of the tool result is misrepresented or reversed
47
+ - Improper transformation: Dates, numbers, or structured data are incorrectly converted
48
+ - Missing retry: Failed to retry on retryable errors (rate limits, timeouts)
49
+ - Missing correction: Failed to correct arguments after invalid argument errors
50
+ - Futile retries: Repeated identical calls that continue to fail
51
+ - Information disclosure: Leaked sensitive information (credentials, internal URLs, PII)
52
+ - Ignored results: The agent's response doesn't incorporate the tool result
53
+ - Incomplete handling: Only some tool results are used when multiple tools were called
54
+
55
+ Before providing your final judgment, explain your reasoning and consider:
56
+ - Does the output accurately reflect what the tool returned?
57
+ - Are there any fabricated details not in the tool result?
58
+ - Were errors handled appropriately?
59
+ - Is sensitive information properly protected?
60
+ - Does the output actually address the user's query using the tool data?
61
+
62
+ <data>
63
+ <input>
64
+ {{input}}
65
+ </input>
66
+
67
+ <tool_call>
68
+ {{toolCall}}
69
+ </tool_call>
70
+
71
+ <tool_result>
72
+ {{toolResult}}
73
+ </tool_result>
74
+
75
+ <output>
76
+ {{output}}
77
+ </output>
78
+ </data>
79
+
80
+ Given the above data, did the agent handle the tool response correctly or incorrectly?
81
+ `,
82
+ },
83
+ ],
84
+ choices: {
85
+ "correct": 1,
86
+ "incorrect": 0
87
+ },
88
+ };
@@ -5,4 +5,5 @@ export { DOCUMENT_RELEVANCE_CLASSIFICATION_EVALUATOR_CONFIG } from "./DOCUMENT_R
5
5
  export { FAITHFULNESS_CLASSIFICATION_EVALUATOR_CONFIG } from "./FAITHFULNESS_CLASSIFICATION_EVALUATOR_CONFIG";
6
6
  export { HALLUCINATION_CLASSIFICATION_EVALUATOR_CONFIG } from "./HALLUCINATION_CLASSIFICATION_EVALUATOR_CONFIG";
7
7
  export { TOOL_INVOCATION_CLASSIFICATION_EVALUATOR_CONFIG } from "./TOOL_INVOCATION_CLASSIFICATION_EVALUATOR_CONFIG";
8
+ export { TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG } from "./TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG";
8
9
  export { TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG } from "./TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG";
@@ -0,0 +1,109 @@
1
+ import { TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG } from "../__generated__/default_templates";
2
+ import { CreateClassificationEvaluatorArgs } from "../types/evals";
3
+
4
+ import { ClassificationEvaluator } from "./ClassificationEvaluator";
5
+ import { createClassificationEvaluator } from "./createClassificationEvaluator";
6
+
7
+ export interface ToolResponseHandlingEvaluatorArgs<
8
+ RecordType extends Record<string, unknown> =
9
+ ToolResponseHandlingEvaluationRecord,
10
+ > extends Omit<
11
+ CreateClassificationEvaluatorArgs<RecordType>,
12
+ "promptTemplate" | "choices" | "optimizationDirection" | "name"
13
+ > {
14
+ optimizationDirection?: CreateClassificationEvaluatorArgs<RecordType>["optimizationDirection"];
15
+ name?: CreateClassificationEvaluatorArgs<RecordType>["name"];
16
+ choices?: CreateClassificationEvaluatorArgs<RecordType>["choices"];
17
+ promptTemplate?: CreateClassificationEvaluatorArgs<RecordType>["promptTemplate"];
18
+ }
19
+
20
+ /**
21
+ * A record to be evaluated by the tool response handling evaluator.
22
+ */
23
+ export type ToolResponseHandlingEvaluationRecord = {
24
+ /**
25
+ * The user query or conversation context.
26
+ */
27
+ input: string;
28
+ /**
29
+ * The tool invocation(s) made by the agent, including arguments.
30
+ */
31
+ toolCall: string;
32
+ /**
33
+ * The tool's response (data, errors, or partial results).
34
+ */
35
+ toolResult: string;
36
+ /**
37
+ * The agent's handling after receiving the tool result
38
+ * (may include retries, follow-ups, or final response).
39
+ */
40
+ output: string;
41
+ };
42
+
43
+ /**
44
+ * Creates a tool response handling evaluator function.
45
+ *
46
+ * This function returns an evaluator that determines whether an AI agent properly
47
+ * handled a tool's response, including error handling, data extraction,
48
+ * transformation, and safe information disclosure.
49
+ *
50
+ * @param args - The arguments for creating the tool response handling evaluator.
51
+ * @param args.model - The model to use for classification.
52
+ * @param args.choices - The possible classification choices (defaults to correct/incorrect).
53
+ * @param args.promptTemplate - The prompt template to use.
54
+ * @param args.telemetry - The telemetry to use for the evaluator.
55
+ *
56
+ * @returns An evaluator function that takes a {@link ToolResponseHandlingEvaluationRecord}
57
+ * and returns a classification result indicating whether the tool response handling
58
+ * is correct or incorrect.
59
+ *
60
+ * @example
61
+ * ```ts
62
+ * const evaluator = createToolResponseHandlingEvaluator({ model: openai("gpt-4o-mini") });
63
+ *
64
+ * // Example: Correct extraction from tool result
65
+ * const result = await evaluator.evaluate({
66
+ * input: "What's the weather in Seattle?",
67
+ * toolCall: 'get_weather(location="Seattle")',
68
+ * toolResult: JSON.stringify({
69
+ * temperature: 58,
70
+ * unit: "fahrenheit",
71
+ * conditions: "partly cloudy"
72
+ * }),
73
+ * output: "The weather in Seattle is 58°F and partly cloudy."
74
+ * });
75
+ * console.log(result.label); // "correct"
76
+ *
77
+ * // Example: Hallucinated data (incorrect)
78
+ * const resultHallucinated = await evaluator.evaluate({
79
+ * input: "What restaurants are nearby?",
80
+ * toolCall: 'search_restaurants(location="downtown")',
81
+ * toolResult: JSON.stringify({
82
+ * results: [{ name: "Cafe Luna", rating: 4.2 }]
83
+ * }),
84
+ * output: "I found Cafe Luna (4.2 stars) and Mario's Italian (4.8 stars) nearby."
85
+ * });
86
+ * console.log(resultHallucinated.label); // "incorrect" - Mario's was hallucinated
87
+ * ```
88
+ */
89
+ export function createToolResponseHandlingEvaluator<
90
+ RecordType extends Record<string, unknown> =
91
+ ToolResponseHandlingEvaluationRecord,
92
+ >(
93
+ args: ToolResponseHandlingEvaluatorArgs<RecordType>
94
+ ): ClassificationEvaluator<RecordType> {
95
+ const {
96
+ choices = TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.choices,
97
+ promptTemplate = TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.template,
98
+ optimizationDirection = TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.optimizationDirection,
99
+ name = TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG.name,
100
+ ...rest
101
+ } = args;
102
+ return createClassificationEvaluator<RecordType>({
103
+ ...rest,
104
+ promptTemplate,
105
+ choices,
106
+ optimizationDirection,
107
+ name,
108
+ });
109
+ }
@@ -0,0 +1,93 @@
1
+ import { TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG } from "../__generated__/default_templates";
2
+ import { CreateClassificationEvaluatorArgs } from "../types/evals";
3
+
4
+ import { ClassificationEvaluator } from "./ClassificationEvaluator";
5
+ import { createClassificationEvaluator } from "./createClassificationEvaluator";
6
+
7
+ export interface ToolSelectionEvaluatorArgs<
8
+ RecordType extends Record<string, unknown> = ToolSelectionEvaluationRecord,
9
+ > extends Omit<
10
+ CreateClassificationEvaluatorArgs<RecordType>,
11
+ "promptTemplate" | "choices" | "optimizationDirection" | "name"
12
+ > {
13
+ optimizationDirection?: CreateClassificationEvaluatorArgs<RecordType>["optimizationDirection"];
14
+ name?: CreateClassificationEvaluatorArgs<RecordType>["name"];
15
+ choices?: CreateClassificationEvaluatorArgs<RecordType>["choices"];
16
+ promptTemplate?: CreateClassificationEvaluatorArgs<RecordType>["promptTemplate"];
17
+ }
18
+
19
+ /**
20
+ * A record to be evaluated by the tool selection evaluator.
21
+ */
22
+ export type ToolSelectionEvaluationRecord = {
23
+ /**
24
+ * The input query or conversation context.
25
+ */
26
+ input: string;
27
+ /**
28
+ * The available tools that the LLM could use.
29
+ */
30
+ availableTools: string;
31
+ /**
32
+ * The tool or tools selected by the LLM.
33
+ */
34
+ toolSelection: string;
35
+ };
36
+
37
+ /**
38
+ * Creates a tool selection evaluator function.
39
+ *
40
+ * This function returns an evaluator that determines whether the correct tool
41
+ * was selected for a given context. Unlike the tool invocation evaluator which
42
+ * checks if the tool was called correctly with proper arguments, this evaluator
43
+ * focuses on whether the right tool was chosen in the first place.
44
+ *
45
+ * The evaluator checks for:
46
+ * - Whether the LLM chose the best available tool for the user query
47
+ * - Whether the tool name exists in the available tools list
48
+ * - Whether the correct number of tools were selected for the task
49
+ * - Whether the tool selection is safe and appropriate
50
+ *
51
+ * @param args - The arguments for creating the tool selection evaluator.
52
+ * @param args.model - The model to use for classification.
53
+ * @param args.choices - The possible classification choices (defaults to correct/incorrect).
54
+ * @param args.promptTemplate - The prompt template to use (defaults to TOOL_SELECTION_TEMPLATE).
55
+ * @param args.telemetry - The telemetry to use for the evaluator.
56
+ *
57
+ * @returns An evaluator function that takes a {@link ToolSelectionEvaluationRecord} and returns
58
+ * a classification result indicating whether the tool selection is correct or incorrect.
59
+ *
60
+ * @example
61
+ * ```ts
62
+ * const evaluator = createToolSelectionEvaluator({ model: openai("gpt-4o-mini") });
63
+ *
64
+ * const result = await evaluator.evaluate({
65
+ * input: "User: What is the weather in San Francisco?",
66
+ * availableTools: `WeatherTool: Get the current weather for a location.
67
+ * NewsTool: Stay connected to global events with our up-to-date news around the world.
68
+ * MusicTool: Create playlists, search for music, and check the latest music trends.`,
69
+ * toolSelection: "WeatherTool"
70
+ * });
71
+ * console.log(result.label); // "correct" or "incorrect"
72
+ * ```
73
+ */
74
+ export function createToolSelectionEvaluator<
75
+ RecordType extends Record<string, unknown> = ToolSelectionEvaluationRecord,
76
+ >(
77
+ args: ToolSelectionEvaluatorArgs<RecordType>
78
+ ): ClassificationEvaluator<RecordType> {
79
+ const {
80
+ choices = TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG.choices,
81
+ promptTemplate = TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG.template,
82
+ optimizationDirection = TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG.optimizationDirection,
83
+ name = TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG.name,
84
+ ...rest
85
+ } = args;
86
+ return createClassificationEvaluator<RecordType>({
87
+ ...rest,
88
+ promptTemplate,
89
+ choices,
90
+ optimizationDirection,
91
+ name,
92
+ });
93
+ }
package/src/llm/index.ts CHANGED
@@ -6,5 +6,7 @@ export * from "./createDocumentRelevanceEvaluator";
6
6
  export * from "./createFaithfulnessEvaluator";
7
7
  export * from "./createHallucinationEvaluator"; // Deprecated: use createFaithfulnessEvaluator
8
8
  export * from "./createToolInvocationEvaluator";
9
+ export * from "./createToolResponseHandlingEvaluator";
10
+ export * from "./createToolSelectionEvaluator";
9
11
  export * from "./generateClassification";
10
12
  export * from "./LLMEvaluator";