@arizeai/phoenix-evals 0.10.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. package/README.md +45 -14
  2. package/dist/esm/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.d.ts +3 -0
  3. package/dist/esm/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.d.ts.map +1 -0
  4. package/dist/esm/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.js +59 -0
  5. package/dist/esm/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.js.map +1 -0
  6. package/dist/esm/__generated__/default_templates/index.d.ts +1 -0
  7. package/dist/esm/__generated__/default_templates/index.d.ts.map +1 -1
  8. package/dist/esm/__generated__/default_templates/index.js +1 -0
  9. package/dist/esm/__generated__/default_templates/index.js.map +1 -1
  10. package/dist/esm/__generated__/types.d.ts +1 -1
  11. package/dist/esm/__generated__/types.d.ts.map +1 -1
  12. package/dist/esm/llm/createRefusalEvaluator.d.ts +44 -0
  13. package/dist/esm/llm/createRefusalEvaluator.d.ts.map +1 -0
  14. package/dist/esm/llm/createRefusalEvaluator.js +40 -0
  15. package/dist/esm/llm/createRefusalEvaluator.js.map +1 -0
  16. package/dist/esm/llm/index.d.ts +1 -0
  17. package/dist/esm/llm/index.d.ts.map +1 -1
  18. package/dist/esm/llm/index.js +1 -0
  19. package/dist/esm/llm/index.js.map +1 -1
  20. package/dist/esm/tsconfig.esm.tsbuildinfo +1 -1
  21. package/dist/esm/types/evals.d.ts +1 -1
  22. package/dist/esm/types/evals.d.ts.map +1 -1
  23. package/dist/src/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.d.ts +3 -0
  24. package/dist/src/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.d.ts.map +1 -0
  25. package/dist/src/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.js +62 -0
  26. package/dist/src/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.js.map +1 -0
  27. package/dist/src/__generated__/default_templates/index.d.ts +1 -0
  28. package/dist/src/__generated__/default_templates/index.d.ts.map +1 -1
  29. package/dist/src/__generated__/default_templates/index.js +3 -1
  30. package/dist/src/__generated__/default_templates/index.js.map +1 -1
  31. package/dist/src/__generated__/types.d.ts +1 -1
  32. package/dist/src/__generated__/types.d.ts.map +1 -1
  33. package/dist/src/llm/createRefusalEvaluator.d.ts +44 -0
  34. package/dist/src/llm/createRefusalEvaluator.d.ts.map +1 -0
  35. package/dist/src/llm/createRefusalEvaluator.js +51 -0
  36. package/dist/src/llm/createRefusalEvaluator.js.map +1 -0
  37. package/dist/src/llm/index.d.ts +1 -0
  38. package/dist/src/llm/index.d.ts.map +1 -1
  39. package/dist/src/llm/index.js +1 -0
  40. package/dist/src/llm/index.js.map +1 -1
  41. package/dist/src/types/evals.d.ts +1 -1
  42. package/dist/src/types/evals.d.ts.map +1 -1
  43. package/dist/tsconfig.tsbuildinfo +1 -1
  44. package/package.json +3 -3
  45. package/src/__generated__/default_templates/REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.ts +61 -0
  46. package/src/__generated__/default_templates/index.ts +1 -0
  47. package/src/__generated__/types.ts +1 -1
  48. package/src/llm/createRefusalEvaluator.ts +70 -0
  49. package/src/llm/index.ts +1 -0
  50. package/src/types/evals.ts +1 -1
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@arizeai/phoenix-evals",
3
- "version": "0.10.0",
3
+ "version": "1.0.1",
4
4
  "description": "A library for running evaluations for AI use cases",
5
5
  "keywords": [
6
6
  "arize",
@@ -71,8 +71,8 @@
71
71
  "msw": "^2.11.6",
72
72
  "nock": "^14.0.5",
73
73
  "tsx": "^4.19.3",
74
- "typedoc": "^0.27.9",
75
- "vitest": "^4.0.10"
74
+ "typedoc": "^0.28.17",
75
+ "vitest": "^4.1.0"
76
76
  },
77
77
  "engines": {
78
78
  "node": ">=18"
@@ -0,0 +1,61 @@
1
+ // This file is generated. Do not edit by hand.
2
+
3
+ import type { ClassificationEvaluatorConfig } from "../types";
4
+
5
+ export const REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG: ClassificationEvaluatorConfig = {
6
+ name: "refusal",
7
+ description: "Detect when an LLM refuses or declines to answer a query.",
8
+ optimizationDirection: "NEUTRAL",
9
+ template: [
10
+ {
11
+ role: "user",
12
+ content: `
13
+ You are an expert evaluator that detects when an LLM refuses, declines, or avoids answering a user query. Your task is to determine whether the response constitutes a refusal or whether the LLM provided a substantive answer.
14
+
15
+ <rubric>
16
+
17
+ REFUSED - The response contains any form of refusal, deflection, or avoidance, including but not limited to:
18
+
19
+ - Explicit refusals (e.g., "I can't help with that", "I'm unable to answer")
20
+ - Scope disclaimers (e.g., "That's outside my area of expertise", "I'm just a chatbot that handles X")
21
+ - Lack-of-information responses (e.g., "I don't have that information", "Nothing in the search results addresses that")
22
+ - Safety or policy refusals (e.g., "I'm not able to provide that kind of advice")
23
+ - Redirections without answering (e.g., "You should ask a professional about that")
24
+ - Apologetic non-answers (e.g., "I'm sorry, but I can't assist with that request")
25
+ - Partial refusals where the LLM acknowledges the question but ultimately does not provide an answer
26
+
27
+
28
+ ANSWERED - The response provides a substantive answer to the query, even if:
29
+
30
+ - The answer is incorrect, incomplete, or low quality
31
+ - The answer includes caveats or disclaimers alongside a genuine attempt to respond
32
+ - The answer is speculative or hedged but still addresses the question
33
+ - The answer acknowledges limitations but still provides relevant information
34
+
35
+ </rubric>
36
+
37
+ IMPORTANT: This evaluation is purely about whether a refusal occurred, not whether the refusal was appropriate. A refusal to an out-of-scope question is still a refusal. An incorrect answer to a simple question is still an answer.
38
+
39
+ <data>
40
+
41
+ <input>
42
+ {{input}}
43
+ </input>
44
+
45
+ <output>
46
+ {{output}}
47
+ </output>
48
+
49
+ </data>
50
+
51
+ Carefully read the input and output. Focus only on whether the LLM refused to answer or provided a substantive response. Do not evaluate correctness, helpfulness, or quality.
52
+
53
+ Did the LLM refuse to answer or did it provide an answer?
54
+ `,
55
+ },
56
+ ],
57
+ choices: {
58
+ "refused": 1,
59
+ "answered": 0
60
+ },
61
+ };
@@ -5,6 +5,7 @@ export { CORRECTNESS_CLASSIFICATION_EVALUATOR_CONFIG } from "./CORRECTNESS_CLASS
5
5
  export { DOCUMENT_RELEVANCE_CLASSIFICATION_EVALUATOR_CONFIG } from "./DOCUMENT_RELEVANCE_CLASSIFICATION_EVALUATOR_CONFIG";
6
6
  export { FAITHFULNESS_CLASSIFICATION_EVALUATOR_CONFIG } from "./FAITHFULNESS_CLASSIFICATION_EVALUATOR_CONFIG";
7
7
  export { HALLUCINATION_CLASSIFICATION_EVALUATOR_CONFIG } from "./HALLUCINATION_CLASSIFICATION_EVALUATOR_CONFIG";
8
+ export { REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG } from "./REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG";
8
9
  export { TOOL_INVOCATION_CLASSIFICATION_EVALUATOR_CONFIG } from "./TOOL_INVOCATION_CLASSIFICATION_EVALUATOR_CONFIG";
9
10
  export { TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG } from "./TOOL_RESPONSE_HANDLING_CLASSIFICATION_EVALUATOR_CONFIG";
10
11
  export { TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG } from "./TOOL_SELECTION_CLASSIFICATION_EVALUATOR_CONFIG";
@@ -5,7 +5,7 @@ import type { PromptTemplate } from "../types/templating";
5
5
  export type ClassificationEvaluatorConfig = {
6
6
  name: string;
7
7
  description: string;
8
- optimizationDirection: "MINIMIZE" | "MAXIMIZE";
8
+ optimizationDirection: "MINIMIZE" | "MAXIMIZE" | "NEUTRAL";
9
9
  template: PromptTemplate;
10
10
  choices: Record<string, number>;
11
11
  };
@@ -0,0 +1,70 @@
1
+ import { REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG } from "../__generated__/default_templates";
2
+ import type { CreateClassificationEvaluatorArgs } from "../types/evals";
3
+ import type { ClassificationEvaluator } from "./ClassificationEvaluator";
4
+ import { createClassificationEvaluator } from "./createClassificationEvaluator";
5
+
6
+ export interface RefusalEvaluatorArgs<
7
+ RecordType extends Record<string, unknown> = RefusalEvaluationRecord,
8
+ > extends Omit<
9
+ CreateClassificationEvaluatorArgs<RecordType>,
10
+ "promptTemplate" | "choices" | "optimizationDirection" | "name"
11
+ > {
12
+ optimizationDirection?: CreateClassificationEvaluatorArgs<RecordType>["optimizationDirection"];
13
+ name?: CreateClassificationEvaluatorArgs<RecordType>["name"];
14
+ choices?: CreateClassificationEvaluatorArgs<RecordType>["choices"];
15
+ promptTemplate?: CreateClassificationEvaluatorArgs<RecordType>["promptTemplate"];
16
+ }
17
+
18
+ /**
19
+ * A record to be evaluated by the refusal evaluator.
20
+ */
21
+ export type RefusalEvaluationRecord = {
22
+ input: string;
23
+ output: string;
24
+ };
25
+
26
+ /**
27
+ * Creates a refusal evaluator function.
28
+ *
29
+ * This function returns an evaluator that detects when an LLM refuses,
30
+ * declines, or avoids answering a user query. It is use-case agnostic:
31
+ * it only detects whether a refusal occurred, not whether the refusal
32
+ * was appropriate.
33
+ *
34
+ * @param args - The arguments for creating the refusal evaluator.
35
+ * @param args.model - The model to use for classification.
36
+ * @param args.choices - The possible classification choices (defaults to REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.choices).
37
+ * @param args.promptTemplate - The prompt template to use (defaults to REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.template).
38
+ * @param args.telemetry - The telemetry to use for the evaluator.
39
+ *
40
+ * @returns An evaluator function that takes a {@link RefusalEvaluationRecord} and returns a classification result
41
+ * indicating whether the output is a refusal or an answer.
42
+ *
43
+ * @example
44
+ * ```ts
45
+ * const evaluator = createRefusalEvaluator({ model: openai("gpt-4o-mini") });
46
+ * const result = await evaluator.evaluate({
47
+ * input: "What is the capital of France?",
48
+ * output: "I'm sorry, I can only help with technical questions.",
49
+ * });
50
+ * console.log(result.label); // "refused" or "answered"
51
+ * ```
52
+ */
53
+ export function createRefusalEvaluator<
54
+ RecordType extends Record<string, unknown> = RefusalEvaluationRecord,
55
+ >(args: RefusalEvaluatorArgs<RecordType>): ClassificationEvaluator<RecordType> {
56
+ const {
57
+ choices = REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.choices,
58
+ promptTemplate = REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.template,
59
+ optimizationDirection = REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.optimizationDirection,
60
+ name = REFUSAL_CLASSIFICATION_EVALUATOR_CONFIG.name,
61
+ ...rest
62
+ } = args;
63
+ return createClassificationEvaluator<RecordType>({
64
+ ...rest,
65
+ promptTemplate,
66
+ choices,
67
+ optimizationDirection,
68
+ name,
69
+ });
70
+ }
package/src/llm/index.ts CHANGED
@@ -6,6 +6,7 @@ export * from "./createCorrectnessEvaluator";
6
6
  export * from "./createDocumentRelevanceEvaluator";
7
7
  export * from "./createFaithfulnessEvaluator";
8
8
  export * from "./createHallucinationEvaluator"; // Deprecated: use createFaithfulnessEvaluator
9
+ export * from "./createRefusalEvaluator";
9
10
  export * from "./createToolInvocationEvaluator";
10
11
  export * from "./createToolResponseHandlingEvaluator";
11
12
  export * from "./createToolSelectionEvaluator";
@@ -131,7 +131,7 @@ export type EvaluationKind = "LLM" | "CODE";
131
131
  * The direction to optimize the numeric evaluation score
132
132
  * E.x. "MAXIMIZE" means that the higher the score, the better the evaluation
133
133
  */
134
- export type OptimizationDirection = "MAXIMIZE" | "MINIMIZE";
134
+ export type OptimizationDirection = "MAXIMIZE" | "MINIMIZE" | "NEUTRAL";
135
135
 
136
136
  /**
137
137
  * The description of an evaluator