braintrust 0.0.21 → 0.0.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -32,205 +32,5 @@
32
32
  *
33
33
  * @module braintrust
34
34
  */
35
- export declare class Project {
36
- name: string;
37
- id: string;
38
- org_id: string;
39
- constructor(name: string, id: string, org_id: string);
40
- }
41
- /**
42
- * Log in, and then initialize a new experiment in a specified project. If the project does not exist, it will be created.
43
- *
44
- * @param project The name of the project to create the experiment in.
45
- * @param options Additional options for configuring init().
46
- * @param options.experiment The name of the experiment to create. If not specified, a name will be generated automatically.
47
- * @param options.description An optional description of the experiment.
48
- * @param options.update If the experiment already exists, continue logging to it.
49
- * @param options.baseExperiment An optional experiment name to use as a base. If specified, the new experiment will be summarized and compared to this
50
- * experiment. Otherwise, it will pick an experiment by finding the closest ancestor on the default (e.g. main) branch.
51
- * @param options.apiUrl The URL of the BrainTrust API. Defaults to https://www.braintrustdata.com.
52
- * @param options.apiKey The API key to use. If the parameter is not specified, will try to use the `BRAINTRUST_API_KEY` environment variable. If no API
53
- * key is specified, will prompt the user to login.
54
- * @param options.orgName (Optional) The name of a specific organization to connect to. This is useful if you belong to multiple.
55
- * @param options.disableCache Do not use cached login information.
56
- * @returns The newly created Experiment.
57
- */
58
- export declare function init(project: string, options?: {
59
- readonly experiment?: string;
60
- readonly description?: string;
61
- readonly update?: boolean;
62
- readonly baseExperiment?: string;
63
- readonly apiUrl?: string;
64
- readonly apiKey?: string;
65
- readonly orgName?: string;
66
- readonly disableCache?: boolean;
67
- }): Promise<Experiment>;
68
- /**
69
- * Log into BrainTrust. This will prompt you for your API token, which you can find at
70
- * https://www.braintrustdata.com/app/token. This method is called automatically by `init()`.
71
- *
72
- * @param options Options for configuring login().
73
- * @param options.apiUrl The URL of the BrainTrust API. Defaults to https://www.braintrustdata.com.
74
- * @param options.apiKey The API key to use. If the parameter is not specified, will try to use the `BRAINTRUST_API_KEY` environment variable. If no API
75
- * key is specified, will prompt the user to login.
76
- * @param options.orgName (Optional) The name of a specific organization to connect to. This is useful if you belong to multiple.
77
- * @param options.disableCache Do not use cached login information.
78
- * @param options.forceLogin Login again, even if you have already logged in (by default, this function will exit quickly if you have already logged in)
79
- */
80
- export declare function login(options?: {
81
- apiUrl?: string;
82
- apiKey?: string;
83
- orgName?: string;
84
- disableCache?: boolean;
85
- forceLogin?: boolean;
86
- }): Promise<void>;
87
- /**
88
- * Log a single event to the current experiment. The event will be batched and uploaded behind the scenes.
89
- *
90
- * @param event The event to log.
91
- * @param event.inputs The arguments that uniquely define a test case (an arbitrary, JSON serializable object). Later on,
92
- * BrainTrust will use the `inputs` to know whether two test cases are the same between experiments, so they should
93
- * not contain experiment-specific state. A simple rule of thumb is that if you run the same experiment twice, the
94
- * `inputs` should be identical.
95
- * @param event.output The output of your application, including post-processing (an arbitrary, JSON serializable object),
96
- * that allows you to determine whether the result is correct or not. For example, in an app that generates SQL queries,
97
- * the `output` should be the _result_ of the SQL query generated by the model, not the query itself, because there may
98
- * be multiple valid queries that answer a single question.
99
- * @param event.expected The ground truth value (an arbitrary, JSON serializable object) that you'd compare to `output` to
100
- * determine if your `output` value is correct or not. BrainTrust currently does not compare `output` to `expected` for
101
- * you, since there are so many different ways to do that correctly. Instead, these values are just used to help you
102
- * navigate your experiments while digging into analyses. However, we may later use these values to re-score outputs or
103
- * fine-tune your models.
104
- * @param event.scores A dictionary of numeric values (between 0 and 1) to log. The scores should give you a variety of signals
105
- * that help you determine how accurate the outputs are compared to what you expect and diagnose failures. For example, a
106
- * summarization app might have one score that tells you how accurate the summary is, and another that measures the word similarity
107
- * between the generated and ground truth summary. The word similarity score could help you determine whether the summarization was
108
- * covering similar concepts or not. You can use these scores to help you sort, filter, and compare experiments.
109
- * @param event.metadata (Optional) a dictionary with additional data about the test example, model outputs, or just
110
- * about anything else that's relevant, that you can use to help find and analyze examples later. For example, you could log the
111
- * `prompt`, example's `id`, or anything else that would be useful to slice/dice later. The values in `metadata` can be any
112
- * JSON-serializable type, but its keys must be strings.
113
- * @param event.id (Optional) a unique identifier for the event. If you don't provide one, BrainTrust will generate one for you.
114
- * @returns The `id` of the logged event.
115
- */
116
- export declare function log(options: {
117
- readonly inputs: unknown;
118
- readonly output: unknown;
119
- readonly expected: unknown;
120
- readonly scores: Record<string, number>;
121
- readonly metadata?: Record<string, unknown>;
122
- readonly id?: string;
123
- }): string;
124
- /**
125
- * Summarize the current experiment, including the scores (compared to the closest reference experiment) and metadata.
126
- *
127
- * @param options Options for summarizing the experiment.
128
- * @param options.summarizeScores Whether to summarize the scores. If False, only the metadata will be returned.
129
- * @param options.comparisonExperimentId The experiment to compare against. If None, the most recent experiment on the origin's main branch will be used.
130
- * @returns A summary of the experiment, including the scores (compared to the closest reference experiment) and metadata.
131
- */
132
- export declare function summarize(options?: {
133
- readonly summarizeScores?: boolean;
134
- readonly comparisonExperimentId?: string;
135
- }): Promise<ExperimentSummary>;
136
- /**
137
- * An experiment is a collection of logged events, such as model inputs and outputs, which represent
138
- * a snapshot of your application at a particular point in time. An experiment is meant to capture more
139
- * than just the model you use, and includes the data you use to test, pre- and post- processing code,
140
- * comparison metrics (scores), and any other metadata you want to include.
141
- *
142
- * Experiments are associated with a project, and two experiments are meant to be easily comparable via
143
- * their `inputs`. You can change the attributes of the experiments in a project (e.g. scoring functions)
144
- * over time, simply by changing what you log.
145
- *
146
- * You should not create `Experiment` objects directly. Instead, use the `braintrust.init()` method.
147
- */
148
- export declare class Experiment {
149
- readonly project: Project;
150
- readonly id: string;
151
- readonly name: string;
152
- readonly user_id: string;
153
- private logger;
154
- constructor(project: Project, id: string, name: string, user_id: string);
155
- /**
156
- * Log a single event to the experiment. The event will be batched and uploaded behind the scenes.
157
- *
158
- * @param event The event to log.
159
- * @param event.inputs The arguments that uniquely define a test case (an arbitrary, JSON serializable object). Later on,
160
- * BrainTrust will use the `inputs` to know whether two test cases are the same between experiments, so they should
161
- * not contain experiment-specific state. A simple rule of thumb is that if you run the same experiment twice, the
162
- * `inputs` should be identical.
163
- * @param event.output The output of your application, including post-processing (an arbitrary, JSON serializable object),
164
- * that allows you to determine whether the result is correct or not. For example, in an app that generates SQL queries,
165
- * the `output` should be the _result_ of the SQL query generated by the model, not the query itself, because there may
166
- * be multiple valid queries that answer a single question.
167
- * @param event.expected The ground truth value (an arbitrary, JSON serializable object) that you'd compare to `output` to
168
- * determine if your `output` value is correct or not. BrainTrust currently does not compare `output` to `expected` for
169
- * you, since there are so many different ways to do that correctly. Instead, these values are just used to help you
170
- * navigate your experiments while digging into analyses. However, we may later use these values to re-score outputs or
171
- * fine-tune your models.
172
- * @param event.scores A dictionary of numeric values (between 0 and 1) to log. The scores should give you a variety of signals
173
- * that help you determine how accurate the outputs are compared to what you expect and diagnose failures. For example, a
174
- * summarization app might have one score that tells you how accurate the summary is, and another that measures the word similarity
175
- * between the generated and ground truth summary. The word similarity score could help you determine whether the summarization was
176
- * covering similar concepts or not. You can use these scores to help you sort, filter, and compare experiments.
177
- * @param event.metadata (Optional) a dictionary with additional data about the test example, model outputs, or just
178
- * about anything else that's relevant, that you can use to help find and analyze examples later. For example, you could log the
179
- * `prompt`, example's `id`, or anything else that would be useful to slice/dice later. The values in `metadata` can be any
180
- * JSON-serializable type, but its keys must be strings.
181
- * @param event.id (Optional) a unique identifier for the event. If you don't provide one, BrainTrust will generate one for you.
182
- * @returns The `id` of the logged event.
183
- */
184
- log({ inputs, output, expected, scores, metadata, id, }: {
185
- readonly inputs: unknown;
186
- readonly output: unknown;
187
- readonly expected: unknown;
188
- readonly scores: Record<string, number>;
189
- readonly metadata?: Record<string, unknown>;
190
- readonly id?: string;
191
- }): string;
192
- /**
193
- * Summarize the experiment, including the scores (compared to the closest reference experiment) and metadata.
194
- *
195
- * @param options Options for summarizing the experiment.
196
- * @param options.summarizeScores Whether to summarize the scores. If False, only the metadata will be returned.
197
- * @param options.comparisonExperimentId The experiment to compare against. If None, the most recent experiment on the origin's main branch will be used.
198
- * @returns A summary of the experiment, including the scores (compared to the closest reference experiment) and metadata.
199
- */
200
- summarize(options?: {
201
- readonly summarizeScores?: boolean;
202
- readonly comparisonExperimentId?: string;
203
- }): Promise<ExperimentSummary>;
204
- }
205
- /**
206
- * Summary of a score's performance.
207
- * @property name Name of the score.
208
- * @property score Average score across all examples.
209
- * @property diff Difference in score between the current and reference experiment.
210
- * @property improvements Number of improvements in the score.
211
- * @property regressions Number of regressions in the score.
212
- */
213
- export interface ScoreSummary {
214
- name: string;
215
- score: number;
216
- diff: number;
217
- improvements: number;
218
- regressions: number;
219
- }
220
- /**
221
- * Summary of an experiment's scores and metadata.
222
- * @property projectName Name of the project that the experiment belongs to.
223
- * @property experimentName Name of the experiment.
224
- * @property projectUrl URL to the project's page in the BrainTrust app.
225
- * @property experimentUrl URL to the experiment's page in the BrainTrust app.
226
- * @property comparisonExperimentName The experiment scores are baselined against.
227
- * @property scores Summary of the experiment's scores.
228
- */
229
- export interface ExperimentSummary {
230
- projectName: string;
231
- experimentName: string;
232
- projectUrl: string;
233
- experimentUrl: string;
234
- comparisonExperimentName: string | undefined;
235
- scores: Record<string, ScoreSummary> | undefined;
236
- }
35
+ export * from "./logger";
36
+ export { Evaluator, EvalTask, Eval, EvalScorerArgs } from "./framework";