braintrust 0.0.20 → 0.0.22

This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -32,199 +32,5 @@
  *
  * @module braintrust
  */
- export declare class Project {
-     name: string;
-     id: string;
-     org_id: string;
-     constructor(name: string, id: string, org_id: string);
- }
- /**
-  * Log in, and then initialize a new experiment in a specified project. If the project does not exist, it will be created.
-  *
-  * @param project The name of the project to create the experiment in.
-  * @param options Additional options for configuring init().
-  * @param options.experiment The name of the experiment to create. If not specified, a name will be generated automatically.
-  * @param options.description An optional description of the experiment.
-  * @param options.baseExperiment An optional experiment name to use as a base. If specified, the new experiment will be summarized and compared to this
-  * experiment. Otherwise, it will pick an experiment by finding the closest ancestor on the default (e.g. main) branch.
-  * @param options.apiUrl The URL of the BrainTrust API. Defaults to https://www.braintrustdata.com.
-  * @param options.apiKey The API key to use. If the parameter is not specified, will try to use the `BRAINTRUST_API_KEY` environment variable. If no API
-  * key is specified, will prompt the user to login.
-  * @param options.orgName (Optional) The name of a specific organization to connect to. This is useful if you belong to multiple.
-  * @param options.disableCache Do not use cached login information.
-  * @returns The newly created Experiment.
-  */
- export declare function init(project: string, options?: {
-     readonly experiment?: string;
-     readonly description?: string;
-     readonly baseExperiment?: string;
-     readonly apiUrl?: string;
-     readonly apiKey?: string;
-     readonly orgName?: string;
-     readonly disableCache?: boolean;
- }): Promise<Experiment>;
- /**
-  * Log into BrainTrust. This will prompt you for your API token, which you can find at
-  * https://www.braintrustdata.com/app/token. This method is called automatically by `init()`.
-  *
-  * @param options Options for configuring login().
-  * @param options.apiUrl The URL of the BrainTrust API. Defaults to https://www.braintrustdata.com.
-  * @param options.apiKey The API key to use. If the parameter is not specified, will try to use the `BRAINTRUST_API_KEY` environment variable. If no API
-  * key is specified, will prompt the user to login.
-  * @param options.orgName (Optional) The name of a specific organization to connect to. This is useful if you belong to multiple.
-  * @param options.disableCache Do not use cached login information.
-  * @param options.forceLogin Login again, even if you have already logged in (by default, this function will exit quickly if you have already logged in)
-  */
- export declare function login(options?: {
-     apiUrl?: string;
-     apiKey?: string;
-     orgName?: string;
-     disableCache?: boolean;
-     forceLogin?: boolean;
- }): Promise<void>;
- /**
-  * Log a single event to the current experiment. The event will be batched and uploaded behind the scenes.
-  *
-  * @param event The event to log.
-  * @param event.inputs The arguments that uniquely define a test case (an arbitrary, JSON serializable object). Later on,
-  * BrainTrust will use the `inputs` to know whether two test casess are the same between experiments, so they should
-  * not contain experiment-specific state. A simple rule of thumb is that if you run the same experiment twice, the
-  * `inputs` should be identical.
-  * @param event.output The output of your application, including post-processing (an arbitrary, JSON serializable object),
-  * that allows you to determine whether the result is correct or not. For example, in an app that generates SQL queries,
-  * the `output` should be the _result_ of the SQL query generated by the model, not the query itself, because there may
-  * be multiple valid queries that answer a single question.
-  * @param event.expected The ground truth value (an arbitrary, JSON serializable object) that you'd compare to `output` to
-  * determine if your `output` value is correct or not. BrainTrust currently does not compare `output` to `expected` for
-  * you, since there are so many different ways to do that correctly. Instead, these values are just used to help you
-  * navigate your experiments while digging into analyses. However, we may later use these values to re-score outputs or
-  * fine-tune your models.
-  * @param event.scores A dictionary of numeric values (between 0 and 1) to log. The scores should give you a variety of signals
-  * that help you determine how accurate the outputs are compared to what you expect and diagnose failures. For example, a
-  * summarization app might have one score that tells you how accurate the summary is, and another that measures the word similarity
-  * between the generated and grouth truth summary. The word similarity score could help you determine whether the summarization was
-  * covering similar concepts or not. You can use these scores to help you sort, filter, and compare experiments.
-  * @param event.metadata (Optional) a dictionary with additional data about the test example, model outputs, or just
-  * about anything else that's relevant, that you can use to help find and analyze examples later. For example, you could log the
-  * `prompt`, example's `id`, or anything else that would be useful to slice/dice later. The values in `metadata` can be any
-  * JSON-serializable type, but its keys must be strings.
-  * @returns The `id` of the logged event.
-  */
- export declare function log(options: {
-     readonly inputs: unknown;
-     readonly output: unknown;
-     readonly expected: unknown;
-     readonly scores: Record<string, number>;
-     readonly metadata?: Record<string, unknown>;
- }): string;
- /**
-  * Summarize the current experiment, including the scores (compared to the closest reference experiment) and metadata.
-  *
-  * @param options Options for summarizing the experiment.
-  * @param options.summarizeScores Whether to summarize the scores. If False, only the metadata will be returned.
-  * @param options.comparisonExperimentId The experiment to compare against. If None, the most recent experiment on the origin's main branch will be used.
-  * @returns A summary of the experiment, including the scores (compared to the closest reference experiment) and metadata.
-  */
- export declare function summarize(options?: {
-     readonly summarizeScores?: boolean;
-     readonly comparisonExperimentId?: string;
- }): Promise<ExperimentSummary>;
- /**
-  * An experiment is a collection of logged events, such as model inputs and outputs, which represent
-  * a snapshot of your application at a particular point in time. An experiment is meant to capture more
-  * than just the model you use, and includes the data you use to test, pre- and post- processing code,
-  * comparison metrics (scores), and any other metadata you want to include.
-  *
-  * Experiments are associated with a project, and two experiments are meant to be easily comparable via
-  * their `inputs`. You can change the attributes of the experiments in a project (e.g. scoring functions)
-  * over time, simply by changing what you log.
-  *
-  * You should not create `Experiment` objects directly. Instead, use the `braintrust.init()` method.
-  */
- export declare class Experiment {
-     readonly project: Project;
-     readonly id: string;
-     readonly name: string;
-     readonly user_id: string;
-     private logger;
-     constructor(project: Project, id: string, name: string, user_id: string);
-     /**
-      * Log a single event to the experiment. The event will be batched and uploaded behind the scenes.
-      *
-      * @param event The event to log.
-      * @param event.inputs The arguments that uniquely define a test case (an arbitrary, JSON serializable object). Later on,
-      * BrainTrust will use the `inputs` to know whether two test casess are the same between experiments, so they should
-      * not contain experiment-specific state. A simple rule of thumb is that if you run the same experiment twice, the
-      * `inputs` should be identical.
-      * @param event.output The output of your application, including post-processing (an arbitrary, JSON serializable object),
-      * that allows you to determine whether the result is correct or not. For example, in an app that generates SQL queries,
-      * the `output` should be the _result_ of the SQL query generated by the model, not the query itself, because there may
-      * be multiple valid queries that answer a single question.
-      * @param event.expected The ground truth value (an arbitrary, JSON serializable object) that you'd compare to `output` to
-      * determine if your `output` value is correct or not. BrainTrust currently does not compare `output` to `expected` for
-      * you, since there are so many different ways to do that correctly. Instead, these values are just used to help you
-      * navigate your experiments while digging into analyses. However, we may later use these values to re-score outputs or
-      * fine-tune your models.
-      * @param event.scores A dictionary of numeric values (between 0 and 1) to log. The scores should give you a variety of signals
-      * that help you determine how accurate the outputs are compared to what you expect and diagnose failures. For example, a
-      * summarization app might have one score that tells you how accurate the summary is, and another that measures the word similarity
-      * between the generated and grouth truth summary. The word similarity score could help you determine whether the summarization was
-      * covering similar concepts or not. You can use these scores to help you sort, filter, and compare experiments.
-      * @param event.metadata (Optional) a dictionary with additional data about the test example, model outputs, or just
-      * about anything else that's relevant, that you can use to help find and analyze examples later. For example, you could log the
-      * `prompt`, example's `id`, or anything else that would be useful to slice/dice later. The values in `metadata` can be any
-      * JSON-serializable type, but its keys must be strings.
-      * @returns The `id` of the logged event.
-      */
-     log({ inputs, output, expected, scores, metadata, }: {
-         readonly inputs: unknown;
-         readonly output: unknown;
-         readonly expected: unknown;
-         readonly scores: Record<string, number>;
-         readonly metadata?: Record<string, unknown>;
-     }): string;
-     /**
-      * Summarize the experiment, including the scores (compared to the closest reference experiment) and metadata.
-      *
-      * @param options Options for summarizing the experiment.
-      * @param options.summarizeScores Whether to summarize the scores. If False, only the metadata will be returned.
-      * @param options.comparisonExperimentId The experiment to compare against. If None, the most recent experiment on the origin's main branch will be used.
-      * @returns A summary of the experiment, including the scores (compared to the closest reference experiment) and metadata.
-      */
-     summarize(options?: {
-         readonly summarizeScores?: boolean;
-         readonly comparisonExperimentId?: string;
-     }): Promise<ExperimentSummary>;
- }
- /**
-  * Summary of a score's performance.
-  * @property name Name of the score.
-  * @property score Average score across all examples.
-  * @property diff Difference in score between the current and reference experiment.
-  * @property improvements Number of improvements in the score.
-  * @property regressions Number of regressions in the score.
-  */
- export interface ScoreSummary {
-     name: string;
-     score: number;
-     diff: number;
-     improvements: number;
-     regressions: number;
- }
- /**
-  * Summary of an experiment's scores and metadata.
-  * @property projectName Name of the project that the experiment belongs to.
-  * @property experimentName Name of the experiment.
-  * @property projectUrl URL to the project's page in the BrainTrust app.
-  * @property experimentUrl URL to the experiment's page in the BrainTrust app.
-  * @property comparisonExperimentName The experiment scores are baselined against.
-  * @property scores Summary of the experiment's scores.
-  */
- export interface ExperimentSummary {
-     projectName: string;
-     experimentName: string;
-     projectUrl: string;
-     experimentUrl: string;
-     comparisonExperimentName: string | undefined;
-     scores: Record<string, ScoreSummary> | undefined;
- }
+ export * from "./logger";
+ export { Evaluator, EvalTask, Eval, EvalScorerArgs } from "./framework";
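
For readers tracking the API change: everything removed in this hunk is the top-level experiment API that braintrust 0.0.20 declared directly in dist/index.d.ts. The sketch below is a minimal, hypothetical usage of that removed surface, based only on the declarations above; the project name, test case, and score names are invented for illustration, and this diff does not show whether 0.0.22 still re-exports the same functions through "./logger".

// Hypothetical usage of the braintrust@0.0.20 surface removed above (illustrative values only).
import { init } from "braintrust";

async function runExperiment(): Promise<void> {
  // init() logs in (prompting for an API key if BRAINTRUST_API_KEY is unset),
  // creates the project if it does not exist, and returns a new Experiment.
  const experiment = await init("sql-generator", {
    experiment: "prompt-v2", // optional; a name is generated when omitted
  });

  // Experiment.log() batches one test case: `inputs` identify the case,
  // `scores` are numbers between 0 and 1, and the returned string is the event id.
  const eventId = experiment.log({
    inputs: { question: "How many users signed up last week?" },
    output: { rows: [[42]] },
    expected: { rows: [[42]] },
    scores: { exact_match: 1 },
    metadata: { prompt: "v2" },
  });
  console.log("logged event", eventId);

  // summarize() compares scores against the closest reference experiment.
  const summary = await experiment.summarize();
  console.log(summary.experimentUrl, summary.scores);
}

runExperiment().catch(console.error);

The 0.0.22 side of the hunk replaces these hand-written declarations with re-exports: everything from "./logger" (which presumably now carries the logging API) plus the new Eval framework names (Evaluator, EvalTask, Eval, EvalScorerArgs) from "./framework".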