braintrust 0.0.2 → 0.0.3
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- package/dist/api.d.ts +0 -0
- package/dist/api.js +1 -0
- package/dist/dep.d.ts +52 -0
- package/dist/dep.js +56 -0
- package/dist/index.d.ts +167 -7
- package/dist/index.js +587 -14
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/docs/.nojekyll +1 -0
- package/docs/assets/highlight.css +57 -0
- package/docs/assets/main.js +58 -0
- package/docs/assets/search.js +1 -0
- package/docs/assets/style.css +1367 -0
- package/docs/classes/Experiment.html +250 -0
- package/docs/classes/HTTPConnection.html +191 -0
- package/docs/functions/init.html +67 -0
- package/docs/functions/initProject.html +58 -0
- package/docs/functions/log.html +73 -0
- package/docs/functions/login.html +63 -0
- package/docs/functions/summarize.html +63 -0
- package/docs/index.html +111 -0
- package/docs/interfaces/Project.html +87 -0
- package/docs/modules.html +63 -0
- package/package.json +12 -1
package/dist/api.d.ts
ADDED
File without changes
package/dist/api.js
ADDED
@@ -0,0 +1 @@
+"use strict";
package/dist/dep.d.ts
ADDED
@@ -0,0 +1,52 @@
+export declare class Experiment {
+    private _project;
+    constructor(project: string);
+    get project(): string;
+    /**
+     * Log a single event to the experiment. The event will be batched and uploaded behind the scenes.
+     *
+     * @param values
+     * @param values.inputs The arguments that uniquely define a test case (an arbitrary, JSON serializable object). Later on,
+     * BrainTrust will use the `inputs` to know whether two test cases are the same between experiments, so they should
+     * not contain experiment-specific state. A simple rule of thumb is that if you run the same experiment twice, the
+     * `inputs` should be identical.
+     * @param values.output The output of your application, including post-processing (an arbitrary, JSON serializable object),
+     * that allows you to determine whether the result is correct or not. For example, in an app that generates SQL queries,
+     * the `output` should be the _result_ of the SQL query generated by the model, not the query itself, because there may
+     * be multiple valid queries that answer a single question.
+     * @param values.expected The ground truth value (an arbitrary, JSON serializable object) that you'd compare to `output` to
+     * determine if your `output` value is correct or not. BrainTrust currently does not compare `output` to `expected` for
+     * you, since there are so many different ways to do that correctly. Instead, these values are just used to help you
+     * navigate your experiments while digging into analyses. However, we may later use these values to re-score outputs or
+     * fine-tune your models.
+     * @param values.scores A dictionary of numeric values (between 0 and 1) to log. The scores should give you a variety of signals
+     * that help you determine how accurate the outputs are compared to what you expect and diagnose failures. For example, a
+     * summarization app might have one score that tells you how accurate the summary is, and another that measures the word similarity
+     * between the generated and ground truth summary. The word similarity score could help you determine whether the summarization was
+     * covering similar concepts or not. You can use these scores to help you sort, filter, and compare experiments.
+     * @param values.metadata (Optional) a dictionary with additional data about the test example, model outputs, or just
+     * about anything else that's relevant, that you can use to help find and analyze examples later. For example, you could log the
+     * `prompt`, example's `id`, or anything else that would be useful to slice/dice later. The values in `metadata` can be any
+     * JSON-serializable type, but its keys must be strings.
+     * @returns The `id` of the logged event.
+     */
+    log({ inputs, output, expected, scores, metadata, }: {
+        readonly inputs: unknown;
+        readonly output: unknown;
+        readonly expected: unknown;
+        readonly scores: Record<string, number>;
+        readonly metadata?: Record<string, unknown>;
+    }): string;
+    /**
+     * Summarize the experiment, including the scores (compared to the closest reference experiment) and metadata.
+     *
+     * @param options
+     * @param options.summarizeScores Whether to summarize the scores. If false, only the metadata will be returned.
+     * @param options.comparisonExperimentId The experiment to compare against. If unspecified, the most recent experiment on the origin's main branch will be used.
+     * @returns `ExperimentSummary`
+     */
+    summarize(options?: {
+        readonly summarizeScores?: boolean;
+        readonly comparisonExperimentId?: string;
+    } | undefined): string;
+}
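The declarations above are self-contained enough to sketch against. Here is a minimal, hypothetical usage example based only on these typings; the import path and project name are illustrative (`dep` looks like an internal module), and, as the implementation below shows, both methods are still stubs in 0.0.3:

```typescript
// Illustrative sketch against the declared API; not from the package's docs.
import { Experiment } from "braintrust/dist/dep"; // hypothetical import path

const experiment = new Experiment("my-project"); // hypothetical project name

// log() takes a single destructured object; `scores` values are numbers
// between 0 and 1, and `metadata` keys must be strings.
const eventId: string = experiment.log({
  inputs: { question: "What is 2 + 2?" },
  output: "4",
  expected: "4",
  scores: { exact_match: 1.0 },
  metadata: { prompt: "Answer the question." },
});

// In this file summarize() is declared to return a plain string; the
// top-level summarize() in index.d.ts returns Promise<ExperimentSummary>.
const summary: string = experiment.summarize({ summarizeScores: true });
```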
package/dist/dep.js
ADDED
@@ -0,0 +1,56 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.Experiment = void 0;
+class Experiment {
+    constructor(project) {
+        this._project = project;
+    }
+    get project() {
+        return this._project;
+    }
+    /**
+     * Log a single event to the experiment. The event will be batched and uploaded behind the scenes.
+     *
+     * @param values
+     * @param values.inputs The arguments that uniquely define a test case (an arbitrary, JSON serializable object). Later on,
+     * BrainTrust will use the `inputs` to know whether two test cases are the same between experiments, so they should
+     * not contain experiment-specific state. A simple rule of thumb is that if you run the same experiment twice, the
+     * `inputs` should be identical.
+     * @param values.output The output of your application, including post-processing (an arbitrary, JSON serializable object),
+     * that allows you to determine whether the result is correct or not. For example, in an app that generates SQL queries,
+     * the `output` should be the _result_ of the SQL query generated by the model, not the query itself, because there may
+     * be multiple valid queries that answer a single question.
+     * @param values.expected The ground truth value (an arbitrary, JSON serializable object) that you'd compare to `output` to
+     * determine if your `output` value is correct or not. BrainTrust currently does not compare `output` to `expected` for
+     * you, since there are so many different ways to do that correctly. Instead, these values are just used to help you
+     * navigate your experiments while digging into analyses. However, we may later use these values to re-score outputs or
+     * fine-tune your models.
+     * @param values.scores A dictionary of numeric values (between 0 and 1) to log. The scores should give you a variety of signals
+     * that help you determine how accurate the outputs are compared to what you expect and diagnose failures. For example, a
+     * summarization app might have one score that tells you how accurate the summary is, and another that measures the word similarity
+     * between the generated and ground truth summary. The word similarity score could help you determine whether the summarization was
+     * covering similar concepts or not. You can use these scores to help you sort, filter, and compare experiments.
+     * @param values.metadata (Optional) a dictionary with additional data about the test example, model outputs, or just
+     * about anything else that's relevant, that you can use to help find and analyze examples later. For example, you could log the
+     * `prompt`, example's `id`, or anything else that would be useful to slice/dice later. The values in `metadata` can be any
+     * JSON-serializable type, but its keys must be strings.
+     * @returns The `id` of the logged event.
+     */
+    log({ inputs, output, expected, scores, metadata, }) {
+        // TODO
+        (() => ({ inputs, output, expected, scores, metadata }))();
+        return "foo";
+    }
+    /**
+     * Summarize the experiment, including the scores (compared to the closest reference experiment) and metadata.
+     *
+     * @param options
+     * @param options.summarizeScores Whether to summarize the scores. If false, only the metadata will be returned.
+     * @param options.comparisonExperimentId The experiment to compare against. If unspecified, the most recent experiment on the origin's main branch will be used.
+     * @returns `ExperimentSummary`
+     */
+    summarize(options = undefined) {
+        return "Summary!";
+    }
+}
+exports.Experiment = Experiment;
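Note that both methods are placeholders in this release: `log()` only references its arguments inside an immediately-invoked arrow function (presumably to satisfy unused-variable checks) and returns the literal `"foo"`, while `summarize()` ignores its options entirely. A quick check against the code above:

```typescript
// What the stubs above actually return in 0.0.3:
const e = new Experiment("demo"); // hypothetical project name
console.log(e.log({ inputs: {}, output: null, expected: null, scores: {} })); // "foo"
console.log(e.summarize()); // "Summary!"
```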
package/dist/index.d.ts
CHANGED
@@ -1,8 +1,48 @@
+/**
+ * A Node.js library for logging data to BrainTrust.
+ *
+ * ### Quickstart
+ *
+ * Install the library with npm (or yarn).
+ *
+ * ```bash
+ * npm install braintrust
+ * ```
+ *
+ * Then, run a simple experiment with the following code (replace `YOUR_API_KEY` with
+ * your BrainTrust API key):
+ *
+ * ```javascript
+ * const braintrust = require("braintrust");
+ *
+ * const experiment = await braintrust.init("NodeTest", { api_key: "YOUR_API_KEY" });
+ * experiment.log({
+ *   inputs: { test: 1 },
+ *   output: "foo",
+ *   expected: "bar",
+ *   scores: {
+ *     n: 0.5,
+ *   },
+ *   metadata: {
+ *     id: 1,
+ *   },
+ * });
+ * console.log(await experiment.summarize());
+ * ```
+ *
+ * @module braintrust
+ */
+export declare class Project {
+    name: string;
+    id: string;
+    org_id: string;
+    constructor(name: string, id: string, org_id: string);
+}
 /**
  * Log in, and then initialize a new experiment in a specified project. If the project does not exist, it will be created.
  *
  * @param project The name of the project to create the experiment in.
- * @param options
+ * @param options Additional options for configuring init().
  * @param options.experiment The name of the experiment to create. If not specified, a name will be generated automatically.
  * @param options.description An optional description of the experiment.
  * @param options.base_experiment An optional experiment name to use as a base. If specified, the new experiment will be summarized and compared to this
@@ -14,7 +54,7 @@
  * @param options.disable_cache Do not use cached login information.
  * @returns The experiment object.
  */
-export declare function init(project: string,
+export declare function init(project: string, options?: {
     readonly experiment?: string;
     readonly description?: string;
     readonly base_experiment?: string;
@@ -22,11 +62,98 @@ export declare function init(project: string, { experiment, }: {
     readonly api_key?: string;
     readonly org_name?: string;
     readonly disable_cache?: boolean;
-}): Experiment
+}): Promise<Experiment>;
+/**
+ * Log into BrainTrust. This will prompt you for your API token, which you can find at
+ * https://www.braintrustdata.com/app/token. This method is called automatically by `init()`.
+ *
+ * @param options
+ * @param options.api_url The URL of the BrainTrust API. Defaults to https://www.braintrustdata.com.
+ * @param options.api_key The API key to use. If the parameter is not specified, will try to use the `BRAINTRUST_API_KEY` environment variable. If no API
+ * key is specified, will prompt the user to login.
+ * @param options.org_name (Optional) The name of a specific organization to connect to. This is useful if you belong to multiple.
+ * @param options.disable_cache Do not use cached login information.
+ * @param options.force_login Login again, even if you have already logged in (by default, this function will exit quickly if you have already logged in).
+ * @returns
+ */
+export declare function login(options?: {
+    api_url?: string;
+    api_key?: string;
+    org_name?: string;
+    disable_cache?: boolean;
+    force_login?: boolean;
+} | undefined): Promise<void>;
+/**
+ * Log a single event to the current experiment. The event will be batched and uploaded behind the scenes.
+ *
+ * @param values
+ * @param values.inputs The arguments that uniquely define a test case (an arbitrary, JSON serializable object). Later on,
+ * BrainTrust will use the `inputs` to know whether two test cases are the same between experiments, so they should
+ * not contain experiment-specific state. A simple rule of thumb is that if you run the same experiment twice, the
+ * `inputs` should be identical.
+ * @param values.output The output of your application, including post-processing (an arbitrary, JSON serializable object),
+ * that allows you to determine whether the result is correct or not. For example, in an app that generates SQL queries,
+ * the `output` should be the _result_ of the SQL query generated by the model, not the query itself, because there may
+ * be multiple valid queries that answer a single question.
+ * @param values.expected The ground truth value (an arbitrary, JSON serializable object) that you'd compare to `output` to
+ * determine if your `output` value is correct or not. BrainTrust currently does not compare `output` to `expected` for
+ * you, since there are so many different ways to do that correctly. Instead, these values are just used to help you
+ * navigate your experiments while digging into analyses. However, we may later use these values to re-score outputs or
+ * fine-tune your models.
+ * @param values.scores A dictionary of numeric values (between 0 and 1) to log. The scores should give you a variety of signals
+ * that help you determine how accurate the outputs are compared to what you expect and diagnose failures. For example, a
+ * summarization app might have one score that tells you how accurate the summary is, and another that measures the word similarity
+ * between the generated and ground truth summary. The word similarity score could help you determine whether the summarization was
+ * covering similar concepts or not. You can use these scores to help you sort, filter, and compare experiments.
+ * @param values.metadata (Optional) a dictionary with additional data about the test example, model outputs, or just
+ * about anything else that's relevant, that you can use to help find and analyze examples later. For example, you could log the
+ * `prompt`, example's `id`, or anything else that would be useful to slice/dice later. The values in `metadata` can be any
+ * JSON-serializable type, but its keys must be strings.
+ * @returns The `id` of the logged event.
+ */
+export declare function log(options: {
+    readonly inputs: unknown;
+    readonly output: unknown;
+    readonly expected: unknown;
+    readonly scores: Record<string, number>;
+    readonly metadata?: Record<string, unknown>;
+}): string;
+/**
+ * Summarize the current experiment, including the scores (compared to the closest reference experiment) and metadata.
+ *
+ * @param options
+ * @param options.summarizeScores Whether to summarize the scores. If false, only the metadata will be returned.
+ * @param options.comparisonExperimentId The experiment to compare against. If unspecified, the most recent experiment on the origin's main branch will be used.
+ * @returns `ExperimentSummary`
+ */
+export declare function summarize(options?: {
+    readonly summarizeScores?: boolean;
+    readonly comparisonExperimentId?: string;
+} | undefined): Promise<ExperimentSummary>;
+/**
+ * An experiment is a collection of logged events, such as model inputs and outputs, which represent
+ * a snapshot of your application at a particular point in time. An experiment is meant to capture more
+ * than just the model you use, and includes the data you use to test, pre- and post-processing code,
+ * comparison metrics (scores), and any other metadata you want to include.
+ *
+ * Experiments are associated with a project, and two experiments are meant to be easily comparable via
+ * their `inputs`. You can change the attributes of the experiments in a project (e.g. scoring functions)
+ * over time, simply by changing what you log.
+ *
+ * You should not create `Experiment` objects directly. Instead, use the `braintrust.init()` method.
+ */
 export declare class Experiment {
-
-
-
+    readonly project: Project;
+    readonly id: string;
+    readonly name: string;
+    readonly user_id: string;
+    private logger;
+    constructor(project: Project, id: string, name: string, user_id: string);
+    static init(project: Project, { name, description, base_experiment, }?: {
+        name?: string;
+        description?: string;
+        base_experiment?: string;
+    }): Promise<Experiment>;
     /**
      * Log a single event to the experiment. The event will be batched and uploaded behind the scenes.
      *
@@ -73,5 +200,38 @@ export declare class Experiment {
     summarize(options?: {
         readonly summarizeScores?: boolean;
         readonly comparisonExperimentId?: string;
-    } | undefined):
+    } | undefined): Promise<ExperimentSummary>;
+}
+/**
+ * Summary of a score's performance.
+ * @property name Name of the score.
+ * @property score Average score across all examples.
+ * @property diff Difference in score between the current and reference experiment.
+ * @property improvements Number of improvements in the score.
+ * @property regressions Number of regressions in the score.
+ */
+interface ScoreSummary {
+    name: string;
+    score: number;
+    diff: number;
+    improvements: number;
+    regressions: number;
+}
+/**
+ * Summary of an experiment's scores and metadata.
+ * @property projectName Name of the project that the experiment belongs to.
+ * @property experimentName Name of the experiment.
+ * @property projectUrl URL to the project's page in the BrainTrust app.
+ * @property experimentUrl URL to the experiment's page in the BrainTrust app.
+ * @property comparisonExperimentName The experiment the scores are baselined against.
+ * @property scores Summary of the experiment's scores.
+ */
+interface ExperimentSummary {
+    projectName: string;
+    experimentName: string;
+    projectUrl: string;
+    experimentUrl: string;
+    comparisonExperimentName: string | undefined;
+    scores: Record<string, ScoreSummary> | undefined;
 }
+export {};
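Taken together, the index.d.ts changes make the top-level API asynchronous: `init()` now resolves to an `Experiment`, and both `summarize()` signatures resolve to a structured `ExperimentSummary` instead of a bare string. A minimal sketch against the new typings; the experiment name is a hypothetical placeholder:

```typescript
import * as braintrust from "braintrust";

async function main(): Promise<void> {
  // init() now returns Promise<Experiment> and handles login() for you.
  const experiment = await braintrust.init("NodeTest", {
    api_key: "YOUR_API_KEY", // or rely on the BRAINTRUST_API_KEY env var
    experiment: "run-1",     // hypothetical; auto-generated if omitted
  });

  experiment.log({
    inputs: { test: 1 },
    output: "foo",
    expected: "bar",
    scores: { n: 0.5 },
    metadata: { id: 1 },
  });

  // summarize() now resolves to an ExperimentSummary rather than a string.
  const summary = await experiment.summarize();
  console.log(summary.experimentUrl);
  for (const [name, s] of Object.entries(summary.scores ?? {})) {
    console.log(`${name}: ${s.score} (diff ${s.diff})`);
  }
}

main().catch(console.error);
```

One design note: `ScoreSummary` and `ExperimentSummary` are declared without `export` (and the file ends with `export {}`), so callers can consume the returned object but cannot reference its type by name.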