@arizeai/phoenix-evals 1.0.0 → 1.0.1

This diff shows the changes between publicly released versions of this package as they appear in a supported public registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -91,25 +91,58 @@ See the complete example in [`examples/classifier_example.ts`](examples/classifi
91
91
 
92
92
  The library includes several pre-built evaluators for common evaluation tasks. These evaluators come with optimized prompts and can be used directly with any AI SDK model.
93
93
 
94
+ All pre-built evaluators are available from the `@arizeai/phoenix-evals/llm` module:
95
+
96
+ | Evaluator | Function | Description |
97
+ | ---------------------- | ------------------------------------- | --------------------------------------------------------------------------------- |
98
+ | Faithfulness | `createFaithfulnessEvaluator` | Detects hallucinations — checks if the output is grounded in the provided context |
99
+ | Conciseness | `createConcisenessEvaluator` | Evaluates whether the response is appropriately concise |
100
+ | Correctness | `createCorrectnessEvaluator` | Checks if the output is factually correct given the input |
101
+ | Document Relevance | `createDocumentRelevanceEvaluator` | Measures how relevant a retrieved document is to the query |
102
+ | Refusal | `createRefusalEvaluator` | Detects whether the model refused to answer |
103
+ | Tool Invocation | `createToolInvocationEvaluator` | Evaluates whether the correct tool was invoked with the right arguments |
104
+ | Tool Selection | `createToolSelectionEvaluator` | Checks whether the right tool was selected for the task |
105
+ | Tool Response Handling | `createToolResponseHandlingEvaluator` | Evaluates how well the model uses a tool's response |
106
+
94
107
  ```typescript
95
- import { createFaithfulnessEvaluator } from "@arizeai/phoenix-evals/llm";
108
+ import {
109
+ createFaithfulnessEvaluator,
110
+ createConcisenessEvaluator,
111
+ createCorrectnessEvaluator,
112
+ createDocumentRelevanceEvaluator,
113
+ createRefusalEvaluator,
114
+ } from "@arizeai/phoenix-evals/llm";
96
115
  import { openai } from "@ai-sdk/openai";
97
- const model = openai("gpt-4o-mini");
98
116
 
99
- // Faithfulness Detection
100
- const faithfulnessEvaluator = createFaithfulnessEvaluator({
101
- model,
102
- });
117
+ const model = openai("gpt-4o-mini");
103
118
 
104
- // Use the evaluators
105
- const result = await faithfulnessEvaluator({
119
+ // Faithfulness: checks if the output is grounded in the context
120
+ const faithfulnessEvaluator = createFaithfulnessEvaluator({ model });
121
+ const faithfulnessResult = await faithfulnessEvaluator.evaluate({
106
122
  input: "What is the capital of France?",
107
123
  context: "France is a country in Europe. Paris is its capital city.",
108
124
  output: "The capital of France is London.",
109
125
  });
110
-
111
- console.log(result);
126
+ console.log(faithfulnessResult);
112
127
  // Output: { label: "unfaithful", score: 0, explanation: "..." }
128
+
129
+ // Correctness: checks if the output is factually correct
130
+ const correctnessEvaluator = createCorrectnessEvaluator({ model });
131
+ const correctnessResult = await correctnessEvaluator.evaluate({
132
+ input: "What is the capital of France?",
133
+ output: "Paris is the capital of France.",
134
+ });
135
+ console.log(correctnessResult);
136
+ // Output: { label: "correct", score: 1, explanation: "..." }
137
+
138
+ // Document Relevance: checks if a retrieved document is relevant to the query
139
+ const relevanceEvaluator = createDocumentRelevanceEvaluator({ model });
140
+ const relevanceResult = await relevanceEvaluator.evaluate({
141
+ input: "What is the capital of France?",
142
+ documentText: "Paris is the capital of France and a major European city.",
143
+ });
144
+ console.log(relevanceResult);
145
+ // Output: { label: "relevant", score: 1, explanation: "..." }
113
146
  ```
114
147
 
115
148
  ### Data Mapping
@@ -117,10 +150,8 @@ console.log(result);
117
150
  When your data structure doesn't match what an evaluator expects, use `bindEvaluator` to map your fields to the evaluator's expected input format:
118
151
 
119
152
  ```typescript
120
- import {
121
- bindEvaluator,
122
- createFaithfulnessEvaluator,
123
- } from "@arizeai/phoenix-evals";
153
+ import { bindEvaluator } from "@arizeai/phoenix-evals";
154
+ import { createFaithfulnessEvaluator } from "@arizeai/phoenix-evals/llm";
124
155
  import { openai } from "@ai-sdk/openai";
125
156
 
126
157
  const model = openai("gpt-4o-mini");