@arizeai/phoenix-evals 0.0.7 → 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/esm/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.d.ts +6 -0
- package/dist/esm/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.d.ts.map +1 -0
- package/dist/esm/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.js +25 -0
- package/dist/esm/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.js.map +1 -0
- package/dist/esm/default_templates/index.d.ts +1 -0
- package/dist/esm/default_templates/index.d.ts.map +1 -1
- package/dist/esm/default_templates/index.js +1 -0
- package/dist/esm/default_templates/index.js.map +1 -1
- package/dist/esm/llm/createClassifier.d.ts +1 -1
- package/dist/esm/llm/createClassifier.d.ts.map +1 -1
- package/dist/esm/llm/createClassifier.js.map +1 -1
- package/dist/esm/llm/createDocumentRelevancyEvaluator.d.ts +40 -0
- package/dist/esm/llm/createDocumentRelevancyEvaluator.d.ts.map +1 -0
- package/dist/esm/llm/createDocumentRelevancyEvaluator.js +39 -0
- package/dist/esm/llm/createDocumentRelevancyEvaluator.js.map +1 -0
- package/dist/esm/llm/createHallucinationEvaluator.d.ts +10 -1
- package/dist/esm/llm/createHallucinationEvaluator.d.ts.map +1 -1
- package/dist/esm/llm/createHallucinationEvaluator.js.map +1 -1
- package/dist/esm/llm/index.d.ts +1 -0
- package/dist/esm/llm/index.d.ts.map +1 -1
- package/dist/esm/llm/index.js +1 -0
- package/dist/esm/llm/index.js.map +1 -1
- package/dist/esm/tsconfig.esm.tsbuildinfo +1 -1
- package/dist/esm/types/evals.d.ts +4 -4
- package/dist/esm/types/evals.d.ts.map +1 -1
- package/dist/esm/types/prompts.d.ts +7 -6
- package/dist/esm/types/prompts.d.ts.map +1 -1
- package/dist/src/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.d.ts +6 -0
- package/dist/src/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.d.ts.map +1 -0
- package/dist/src/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.js +28 -0
- package/dist/src/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.js.map +1 -0
- package/dist/src/default_templates/index.d.ts +1 -0
- package/dist/src/default_templates/index.d.ts.map +1 -1
- package/dist/src/default_templates/index.js +1 -0
- package/dist/src/default_templates/index.js.map +1 -1
- package/dist/src/llm/createClassifier.d.ts +1 -1
- package/dist/src/llm/createClassifier.d.ts.map +1 -1
- package/dist/src/llm/createClassifier.js.map +1 -1
- package/dist/src/llm/createDocumentRelevancyEvaluator.d.ts +40 -0
- package/dist/src/llm/createDocumentRelevancyEvaluator.d.ts.map +1 -0
- package/dist/src/llm/createDocumentRelevancyEvaluator.js +49 -0
- package/dist/src/llm/createDocumentRelevancyEvaluator.js.map +1 -0
- package/dist/src/llm/createHallucinationEvaluator.d.ts +10 -1
- package/dist/src/llm/createHallucinationEvaluator.d.ts.map +1 -1
- package/dist/src/llm/createHallucinationEvaluator.js.map +1 -1
- package/dist/src/llm/index.d.ts +1 -0
- package/dist/src/llm/index.d.ts.map +1 -1
- package/dist/src/llm/index.js +1 -0
- package/dist/src/llm/index.js.map +1 -1
- package/dist/src/types/evals.d.ts +4 -4
- package/dist/src/types/evals.d.ts.map +1 -1
- package/dist/src/types/prompts.d.ts +7 -6
- package/dist/src/types/prompts.d.ts.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +5 -5
- package/src/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.ts +25 -0
- package/src/default_templates/index.ts +1 -0
- package/src/llm/createClassifier.ts +3 -6
- package/src/llm/createDocumentRelevancyEvaluator.ts +64 -0
- package/src/llm/createHallucinationEvaluator.ts +12 -2
- package/src/llm/index.ts +1 -0
- package/src/types/evals.ts +5 -7
- package/src/types/prompts.ts +7 -6
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@arizeai/phoenix-evals",
-  "version": "0.0.7",
+  "version": "0.0.8",
   "description": "A library for running evaluations for AI use cases",
   "main": "dist/src/index.js",
   "module": "dist/esm/index.js",
@@ -41,7 +41,7 @@
   "author": "oss@arize.com",
   "license": "ELv2",
   "devDependencies": {
-    "@ai-sdk/openai": "^
+    "@ai-sdk/openai": "^2.0.0",
     "@arizeai/openinference-instrumentation-openai": "^2.3.0",
     "@types/mustache": "^4.2.6",
     "@types/node": "^24.0.10",
@@ -50,16 +50,16 @@
     "typedoc": "^0.27.9",
     "typescript": "^5.8.2",
     "vitest": "^2.1.9",
-    "@arizeai/phoenix-client": "2.
+    "@arizeai/phoenix-client": "2.4.0"
   },
   "engines": {
     "node": ">=18"
   },
   "dependencies": {
     "@opentelemetry/api": "^1.9.0",
-    "ai": "^
+    "ai": "^5.0.0",
     "mustache": "^4.2.0",
-    "zod": "^
+    "zod": "^4.0.14"
   },
   "scripts": {
     "clean": "rimraf dist",
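The headline change in this release is the move to AI SDK v5: `ai@^5.0.0` paired with `@ai-sdk/openai@^2.0.0`, plus `zod@^4.0.14`. A minimal sketch of constructing a v5-compatible model to hand to the evaluators below; the model name is illustrative, not mandated by the package:

```ts
// ai ^5.0.0 pairs with @ai-sdk/openai ^2.0.0; any AI SDK v5 LanguageModel works here.
import { openai } from "@ai-sdk/openai";

// "gpt-4o-mini" is just an example model id.
const model = openai("gpt-4o-mini");
```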
package/src/default_templates/DOCUMENT_RELEVANCY_TEMPLATE.ts
ADDED
@@ -0,0 +1,25 @@
+export const DOCUMENT_RELEVANCY_TEMPLATE = `
+You are comparing a document to a question and trying to determine if the document text
+contains information relevant to answering the question. Here is the data:
+
+[BEGIN DATA]
+************
+[Question]: {{input}}
+************
+[Document text]: {{documentText}}
+************
+[END DATA]
+
+Compare the Question above to the Document text. You must determine whether the Document text
+contains information that can answer the Question. Please focus on whether the very specific
+question can be answered by the information in the Document text.
+Your response must be single word, either "relevant" or "unrelated",
+and should not contain any text or characters aside from that word.
+"unrelated" means that the document text does not contain an answer to the Question.
+"relevant" means the document text contains an answer to the Question.
+`;
+
+export const DOCUMENT_RELEVANCY_CHOICES = {
+  relevant: 1,
+  unrelated: 0,
+};
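The new template uses mustache placeholders that line up with the `DocumentRelevancyExample` fields, and the choices map turns the model's one-word label into a score. A sketch of rendering it by hand, assuming the template and choices are re-exported from the package root (the index diffs above suggest they are):

```ts
import Mustache from "mustache";
// Assumed re-export path; adjust if the package exposes these elsewhere.
import {
  DOCUMENT_RELEVANCY_TEMPLATE,
  DOCUMENT_RELEVANCY_CHOICES,
} from "@arizeai/phoenix-evals";

// {{input}} and {{documentText}} match the DocumentRelevancyExample keys.
const prompt = Mustache.render(DOCUMENT_RELEVANCY_TEMPLATE, {
  input: "What is the capital of France?",
  documentText: "Paris is the capital and most populous city of France.",
});

// DOCUMENT_RELEVANCY_CHOICES maps the label to a score: relevant -> 1, unrelated -> 0.
```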
package/src/llm/createClassifier.ts
CHANGED
@@ -1,6 +1,5 @@
 import {
   ClassificationChoicesMap,
-  EvaluationArgs,
   EvaluationResult,
   CreateClassifierArgs,
   EvaluatorFn,
@@ -25,14 +24,12 @@ function choicesToLabels(
 /**
  * A function that serves as a factory that will output a classification evaluator
  */
-export function createClassifier<
+export function createClassifier<ExampleType extends Record<string, unknown>>(
   args: CreateClassifierArgs
-): EvaluatorFn<
+): EvaluatorFn<ExampleType> {
   const { model, choices, promptTemplate, ...rest } = args;
 
-  return async (
-    args: EvaluationArgs<OutputType, InputType>
-  ): Promise<EvaluationResult> => {
+  return async (args: ExampleType): Promise<EvaluationResult> => {
     const templateVariables = {
       ...args,
     };
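`createClassifier` is now generic over a single example record rather than separate output/input type parameters, and the example's keys double as the template variables. A sketch of a custom classifier against the new signature; the prompt template and choices here are illustrative, not shipped with the package:

```ts
import { openai } from "@ai-sdk/openai";
// Assumed re-export from the package root.
import { createClassifier } from "@arizeai/phoenix-evals";

// The example record's keys become the mustache template variables.
type ToneExample = { text: string };

const evaluateTone = createClassifier<ToneExample>({
  model: openai("gpt-4o-mini"),
  // Hypothetical rubric: the {{text}} placeholder matches ToneExample.text.
  promptTemplate:
    'Classify the tone of the following text as "positive" or "negative": {{text}}',
  choices: { positive: 1, negative: 0 },
});

const result = await evaluateTone({ text: "I love this library!" });
```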
package/src/llm/createDocumentRelevancyEvaluator.ts
ADDED
@@ -0,0 +1,64 @@
+import { createClassifier } from "./createClassifier";
+import { CreateClassifierArgs, EvaluatorFn } from "../types/evals";
+import {
+  DOCUMENT_RELEVANCY_TEMPLATE,
+  DOCUMENT_RELEVANCY_CHOICES,
+} from "../default_templates/DOCUMENT_RELEVANCY_TEMPLATE";
+
+export interface DocumentRelevancyEvaluatorArgs
+  extends Omit<CreateClassifierArgs, "promptTemplate" | "choices"> {
+  choices?: CreateClassifierArgs["choices"];
+  promptTemplate?: CreateClassifierArgs["promptTemplate"];
+}
+
+/**
+ * An example to be evaluated by the document relevancy evaluator.
+ */
+export type DocumentRelevancyExample = {
+  input: string;
+  documentText: string;
+};
+
+/**
+ * Creates a document relevancy evaluator function.
+ *
+ * This function returns an evaluator that determines whether a given document text
+ * is relevant to a provided input question. The evaluator uses a classification model
+ * and a prompt template to make its determination.
+ *
+ * @param args - The arguments for creating the document relevancy evaluator.
+ * @param args.model - The model to use for classification.
+ * @param args.choices - The possible classification choices (defaults to DOCUMENT_RELEVANCY_CHOICES).
+ * @param args.promptTemplate - The prompt template to use (defaults to DOCUMENT_RELEVANCY_TEMPLATE).
+ * @param args.telemetry - The telemetry to use for the evaluator.
+ *
+ * @returns An evaluator function that takes a {@link DocumentRelevancyExample} and returns a classification result
+ * indicating whether the document is relevant to the input question.
+ *
+ * @example
+ * ```ts
+ * const evaluator = createDocumentRelevancyEvaluator({ model: openai("gpt-4o-mini") });
+ * const result = await evaluator({
+ *   input: "What is the capital of France?",
+ *   documentText: "Paris is the capital and most populous city of France.",
+ * });
+ * console.log(result.label); // "relevant" or "unrelated"
+ * ```
+ */
+export function createDocumentRelevancyEvaluator(
+  args: DocumentRelevancyEvaluatorArgs
+): EvaluatorFn<DocumentRelevancyExample> {
+  const {
+    choices = DOCUMENT_RELEVANCY_CHOICES,
+    promptTemplate = DOCUMENT_RELEVANCY_TEMPLATE,
+    ...rest
+  } = args;
+  const documentRelevancyEvaluatorFn =
+    createClassifier<DocumentRelevancyExample>({
+      ...args,
+      promptTemplate,
+      choices,
+      ...rest,
+    });
+  return documentRelevancyEvaluatorFn;
+}
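Because `choices` and `promptTemplate` are optional overrides, callers can keep the defaults (as in the JSDoc example above) or swap in their own rubric. A sketch with a hypothetical prompt override, assuming the factory is exported from the package root:

```ts
import { openai } from "@ai-sdk/openai";
import { createDocumentRelevancyEvaluator } from "@arizeai/phoenix-evals";

const evaluator = createDocumentRelevancyEvaluator({
  model: openai("gpt-4o-mini"),
  // Hypothetical override: same labels, custom prompt wording.
  promptTemplate:
    'Question: {{input}}\nDocument: {{documentText}}\nAnswer "relevant" or "unrelated".',
});

const result = await evaluator({
  input: "What is the capital of France?",
  documentText: "Paris is the capital and most populous city of France.",
});
```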
|
@@ -10,6 +10,16 @@ export interface HallucinationEvaluatorArgs
|
|
|
10
10
|
choices?: CreateClassifierArgs["choices"];
|
|
11
11
|
promptTemplate?: CreateClassifierArgs["promptTemplate"];
|
|
12
12
|
}
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* An example to be evaluated by the hallucination evaluator.
|
|
16
|
+
*/
|
|
17
|
+
export type HallucinationExample = {
|
|
18
|
+
input: string;
|
|
19
|
+
output: string;
|
|
20
|
+
reference?: string;
|
|
21
|
+
context?: string;
|
|
22
|
+
};
|
|
13
23
|
/**
|
|
14
24
|
* Creates a function that evaluates whether an answer is factual or hallucinated based on a query and reference text.
|
|
15
25
|
*
|
|
@@ -18,13 +28,13 @@ export interface HallucinationEvaluatorArgs
|
|
|
18
28
|
*/
|
|
19
29
|
export function createHallucinationEvaluator(
|
|
20
30
|
args: HallucinationEvaluatorArgs
|
|
21
|
-
): EvaluatorFn<
|
|
31
|
+
): EvaluatorFn<HallucinationExample> {
|
|
22
32
|
const {
|
|
23
33
|
choices = HALLUCINATION_CHOICES,
|
|
24
34
|
promptTemplate = HALLUCINATION_TEMPLATE,
|
|
25
35
|
...rest
|
|
26
36
|
} = args;
|
|
27
|
-
const hallucinationEvaluatorFn = createClassifier<
|
|
37
|
+
const hallucinationEvaluatorFn = createClassifier<HallucinationExample>({
|
|
28
38
|
...args,
|
|
29
39
|
promptTemplate,
|
|
30
40
|
choices,
|
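The hallucination evaluator's argument is now the concretely typed `HallucinationExample`. A usage sketch, assuming the factory is exported from the package root:

```ts
import { openai } from "@ai-sdk/openai";
import { createHallucinationEvaluator } from "@arizeai/phoenix-evals";

const hallucinationEvaluator = createHallucinationEvaluator({
  model: openai("gpt-4o-mini"),
});

// input and output are required; reference and context stay optional.
const result = await hallucinationEvaluator({
  input: "Who wrote Hamlet?",
  output: "Hamlet was written by Charles Dickens.",
  reference: "Hamlet is a tragedy written by William Shakespeare.",
});
```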
package/src/llm/index.ts
CHANGED
package/src/types/evals.ts
CHANGED
@@ -2,9 +2,9 @@ import { LanguageModel } from "ai";
 import { WithTelemetry } from "./otel";
 
 /**
- *
+ * A specific AI example that is under evaluation
  */
-export interface
+export interface ExampleRecord<OutputType, InputType> {
   output: OutputType;
   expected?: OutputType;
   input?: InputType;
@@ -15,9 +15,7 @@ export interface WithLLM {
   model: LanguageModel;
 }
 
-export interface LLMEvaluationArgs
-  extends EvaluationArgs<OutputType, InputType>,
-    WithLLM {}
+export interface LLMEvaluationArgs extends WithLLM {}
 
 /**
  * The result of an evaluation
@@ -80,6 +78,6 @@ export interface CreateClassifierArgs extends WithTelemetry {
   promptTemplate: string;
 }
 
-export type EvaluatorFn<
-  args:
+export type EvaluatorFn<ExampleType extends Record<string, unknown>> = (
+  args: ExampleType
 ) => Promise<EvaluationResult>;
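`EvaluatorFn` is now parameterized by an arbitrary example record, which makes hand-rolled evaluators straightforward to type. A sketch; the exact shape of `EvaluationResult` is not shown in this diff, so the label/score result below is an assumption based on the choices maps above:

```ts
import type { EvaluatorFn, EvaluationResult } from "@arizeai/phoenix-evals";

type ExactMatchExample = { output: string; expected: string };

// A hand-rolled evaluator satisfying the new EvaluatorFn signature.
const exactMatch: EvaluatorFn<ExactMatchExample> = async ({
  output,
  expected,
}) => {
  const pass = output.trim() === expected.trim();
  // Assumed result shape; cast keeps the sketch honest about that.
  return { label: pass ? "match" : "mismatch", score: pass ? 1 : 0 } as EvaluationResult;
};
```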
package/src/types/prompts.ts
CHANGED
@@ -1,9 +1,9 @@
-import {
+import { ModelMessage } from "ai";
 
 /**
- * Prompt part of the AI function options.
- * It contains a system message, a simple text prompt, or a list of messages.
- *
+ * Prompt part of the AI function options for model generation.
+ * It contains a system message, a simple text prompt, or a list of model messages.
+ * Uses ModelMessage format compatible with AI SDK v5 generateObject function.
  */
 export interface WithPrompt {
   /**
@@ -15,7 +15,8 @@ export interface WithPrompt {
    */
   prompt?: string;
   /**
-   * A list of messages. You can either use `prompt` or `messages` but not both.
+   * A list of model messages. You can either use `prompt` or `messages` but not both.
+   * Uses ModelMessage format for compatibility with AI SDK v5.
    */
-  messages?: Array<
+  messages?: Array<ModelMessage>;
 }