braintrust 0.0.60 → 0.0.62

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,5 @@
-import { Experiment, Span } from "./logger";
+import chalk from "chalk";
+import { Experiment, ExperimentSummary, InitOptions, Span } from "./logger";
 import { Score } from "autoevals";
 import { ProgressReporter } from "./progress";
 export type Metadata = Record<string, unknown>;
@@ -17,28 +18,39 @@ export type EvalScorerArgs<Input, Output> = EvalCase<Input, Output> & {
     output: Output;
 };
 export type EvalScorer<Input, Output> = ((args: EvalScorerArgs<Input, Output>) => Score) | ((args: EvalScorerArgs<Input, Output>) => Promise<Score>);
+/**
+ * Additional metadata for the eval definition, such as experiment name.
+ */
+export interface EvalMetadata {
+    experimentName?: string;
+}
+export declare function evalMetadataToInitOptions(metadata: EvalMetadata | undefined): InitOptions;
 /**
  * An evaluator is a collection of functions that can be used to evaluate a model.
  * It consists of:
  * - `data`, a function that returns a list of inputs, expected outputs, and metadata
  * - `task`, a function that takes an input and returns an output
  * - `scores`, a set of functions that take an input, output, and expected value and return a score
+ * - `metadata`, optional additional metadata for the eval definition, such as experiment name.
  */
 export interface Evaluator<Input, Output> {
     data: EvalData<Input, Output>;
     task: EvalTask<Input, Output>;
     scores: EvalScorer<Input, Output>[];
+    metadata?: EvalMetadata;
 }
 export type EvaluatorDef<Input, Output> = {
-    name: string;
+    projectName: string;
+    evalName: string;
 } & Evaluator<Input, Output>;
 export type EvaluatorFile = {
-    [evaluator: string]: EvaluatorDef<any, any>;
+    [evalName: string]: EvaluatorDef<any, any>;
 };
 declare global {
     var _evals: EvaluatorFile;
+    var _lazy_load: boolean;
 }
-export declare function Eval<Input, Output>(name: string, evaluator: Evaluator<Input, Output>): void;
+export declare function Eval<Input, Output>(name: string, evaluator: Evaluator<Input, Output>): Promise<void | ExperimentSummary>;
 export declare function getLoadedEvals(): EvaluatorFile;
 export interface Filter {
     path: string[];
@@ -60,5 +72,15 @@ export declare function runEvaluator(experiment: Experiment | null, evaluator: E
         scores: Record<string, number>;
         error: unknown;
     }[];
-    summary: import("./logger").ExperimentSummary | null;
+    summary: ExperimentSummary | null;
 }>;
+export declare const error: chalk.Chalk;
+export declare const warning: chalk.Chalk;
+export declare function logError(e: unknown, verbose: boolean): void;
+export declare function reportEvaluatorResult(evaluatorName: string | number, evaluatorResult: {
+    results: {
+        scores: Record<string, number>;
+        error: unknown;
+    }[];
+    summary: unknown;
+}, verbose: boolean): void;
package/dist/index.d.ts CHANGED
@@ -33,4 +33,4 @@
  * @module braintrust
  */
 export * from "./logger";
-export { Evaluator, EvalTask, Eval, EvalScorerArgs } from "./framework";
+export { Evaluator, EvalTask, Eval, EvalMetadata, EvalScorerArgs } from "./framework";