langchain 0.1.5 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,69 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ProgressBar = void 0;
+ class ProgressBar {
+     constructor(props) {
+         Object.defineProperty(this, "total", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "current", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "barLength", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "format", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         const { total, format, barLength } = props;
+         this.total = total;
+         this.current = 0;
+         this.barLength = barLength ?? 40;
+         this.format = format || "{bar} {percentage}% | {value}/{total}";
+     }
+     initialize() {
+         this.update({ current: 0 });
+     }
+     update({ current, formatArgs, }) {
+         this.current = current;
+         const ratio = this.current / this.total;
+         const filledBarLength = Math.round(ratio * this.barLength);
+         const emptyBarLength = this.barLength - filledBarLength;
+         const filledBar = "▓".repeat(filledBarLength);
+         const emptyBar = "░".repeat(emptyBarLength);
+         const percentage = (ratio * 100).toFixed(2);
+         let formattedString = this.format
+             .replace("{bar}", `${filledBar}${emptyBar}`)
+             .replace("{percentage}", percentage)
+             .replace("{value}", this.current.toString())
+             .replace("{total}", this.total.toString());
+         if (formatArgs) {
+             for (const key in formatArgs) {
+                 if (Object.prototype.hasOwnProperty.call(formatArgs, key)) {
+                     formattedString = formattedString.replace(`{${key}}`, formatArgs[key].toString());
+                 }
+             }
+         }
+         console.log(formattedString);
+     }
+     increment({ formatArgs, } = {}) {
+         this.update({ current: this.current + 1, formatArgs });
+     }
+     complete({ formatArgs } = {}) {
+         this.update({ current: this.total, formatArgs });
+         console.log("\nCompleted");
+     }
+ }
+ exports.ProgressBar = ProgressBar;
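The hunk above adds a small console progress bar (this is the CommonJS build). As a quick orientation, the new API can be driven as in the following hedged sketch; the import path is an assumption, since the diff does not show where the file sits inside the package:

```typescript
// Usage sketch for the new ProgressBar; the import path is an assumption.
import { ProgressBar } from "./progress.js";

const bar = new ProgressBar({ total: 4 }); // barLength defaults to 40 cells
bar.initialize();           // logs "░░…░ 0.00% | 0/4"
bar.increment();            // logs "▓▓…░ 25.00% | 1/4"
bar.update({ current: 3 }); // jumps straight to 75.00%
bar.complete();             // logs the full bar, then "\nCompleted"
```

Note that each call logs a fresh line rather than redrawing in place, so output is append-only.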
@@ -0,0 +1,22 @@
+ export declare class ProgressBar {
+     total: number;
+     current: number;
+     barLength: number;
+     format: string;
+     constructor(props: {
+         total: number;
+         format?: string;
+         barLength?: number;
+     });
+     initialize(): void;
+     update({ current, formatArgs, }: {
+         current: number;
+         formatArgs?: Record<string, string>;
+     }): void;
+     increment({ formatArgs, }?: {
+         formatArgs?: Record<string, string>;
+     }): void;
+     complete({ formatArgs }?: {
+         formatArgs?: Record<string, string>;
+     }): void;
+ }
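The declarations make the formatting contract explicit: the built-in tokens `{bar}`, `{percentage}`, `{value}`, and `{total}` are substituted first, and any remaining `{key}` tokens are then filled from `formatArgs`. A small sketch under that reading, where the `{stage}` token and its values are hypothetical:

```typescript
// "{stage}" is a hypothetical custom token resolved through formatArgs.
const bar = new ProgressBar({
  total: 3,
  format: "{bar} {percentage}% | {value}/{total} | {stage}",
});
bar.update({ current: 1, formatArgs: { stage: "loading examples" } });
bar.increment({ formatArgs: { stage: "predicting" } }); // current becomes 2
bar.complete({ formatArgs: { stage: "done" } });        // current becomes total
```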
@@ -0,0 +1,65 @@
+ export class ProgressBar {
+     constructor(props) {
+         Object.defineProperty(this, "total", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "current", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "barLength", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "format", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         const { total, format, barLength } = props;
+         this.total = total;
+         this.current = 0;
+         this.barLength = barLength ?? 40;
+         this.format = format || "{bar} {percentage}% | {value}/{total}";
+     }
+     initialize() {
+         this.update({ current: 0 });
+     }
+     update({ current, formatArgs, }) {
+         this.current = current;
+         const ratio = this.current / this.total;
+         const filledBarLength = Math.round(ratio * this.barLength);
+         const emptyBarLength = this.barLength - filledBarLength;
+         const filledBar = "▓".repeat(filledBarLength);
+         const emptyBar = "░".repeat(emptyBarLength);
+         const percentage = (ratio * 100).toFixed(2);
+         let formattedString = this.format
+             .replace("{bar}", `${filledBar}${emptyBar}`)
+             .replace("{percentage}", percentage)
+             .replace("{value}", this.current.toString())
+             .replace("{total}", this.total.toString());
+         if (formatArgs) {
+             for (const key in formatArgs) {
+                 if (Object.prototype.hasOwnProperty.call(formatArgs, key)) {
+                     formattedString = formattedString.replace(`{${key}}`, formatArgs[key].toString());
+                 }
+             }
+         }
+         console.log(formattedString);
+     }
+     increment({ formatArgs, } = {}) {
+         this.update({ current: this.current + 1, formatArgs });
+     }
+     complete({ formatArgs } = {}) {
+         this.update({ current: this.total, formatArgs });
+         console.log("\nCompleted");
+     }
+ }
@@ -0,0 +1,353 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.runOnDataset = void 0;
+ const messages_1 = require("@langchain/core/messages");
+ const runnables_1 = require("@langchain/core/runnables");
+ const run_collector_1 = require("@langchain/core/tracers/run_collector");
+ const tracer_langchain_1 = require("@langchain/core/tracers/tracer_langchain");
+ const langsmith_1 = require("langsmith");
+ const loader_js_1 = require("../evaluation/loader.cjs");
+ const name_generation_js_1 = require("./name_generation.cjs");
+ const progress_js_1 = require("./progress.cjs");
+ /**
+  * Wraps an evaluator function and implements the RunEvaluator interface.
+  */
+ class DynamicRunEvaluator {
+     constructor(evaluator) {
+         Object.defineProperty(this, "evaluator", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.evaluator = new runnables_1.RunnableLambda({ func: evaluator });
+     }
+     /**
+      * Evaluates a run with an optional example and returns the evaluation result.
+      * @param run The run to evaluate.
+      * @param example The optional example to use for evaluation.
+      * @returns A promise that resolves to the evaluation result.
+      */
+     async evaluateRun(run, example) {
+         return await this.evaluator.invoke({ run, example });
+     }
+ }
+ function isLLMStringEvaluator(evaluator) {
+     return evaluator && typeof evaluator.evaluateStrings === "function";
+ }
+ /**
+  * Wraps an off-the-shelf evaluator (loaded via loadEvaluator, of EvaluatorType[T])
+  * and composes it with a prepareData function so the user can prepare the trace and
+  * dataset data for the evaluator.
+  */
+ class PreparedRunEvaluator {
+     constructor(evaluator, evaluationName, formatEvaluatorInputs) {
+         Object.defineProperty(this, "evaluator", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "formatEvaluatorInputs", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "isStringEvaluator", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "evaluationName", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.evaluator = evaluator;
+         this.isStringEvaluator = typeof evaluator?.evaluateStrings === "function";
+         this.evaluationName = evaluationName;
+         this.formatEvaluatorInputs = formatEvaluatorInputs;
+     }
+     static async fromEvalConfig(config) {
+         const evaluatorType = typeof config === "string" ? config : config.evaluatorType;
+         const evalConfig = typeof config === "string" ? {} : config;
+         const evaluator = await (0, loader_js_1.loadEvaluator)(evaluatorType, evalConfig);
+         const feedbackKey = evalConfig?.feedbackKey ?? evaluator?.evaluationName;
+         if (!feedbackKey) {
+             throw new Error(`Evaluator of type ${evaluatorType} must have an evaluationName` +
+                 ` or feedbackKey. Please manually provide a feedbackKey in the EvalConfig.`);
+         }
+         if (!isLLMStringEvaluator(evaluator)) {
+             throw new Error(`Evaluator of type ${evaluatorType} not yet supported. ` +
+                 "Please use a string evaluator, or implement your " +
+                 "evaluation logic as a customEvaluator.");
+         }
+         return new PreparedRunEvaluator(evaluator, feedbackKey, evalConfig?.formatEvaluatorInputs);
+     }
+     /**
+      * Evaluates a run with an optional example and returns the evaluation result.
+      * @param run The run to evaluate.
+      * @param example The optional example to use for evaluation.
+      * @returns A promise that resolves to the evaluation result.
+      */
+     async evaluateRun(run, example) {
+         const { prediction, input, reference } = this.formatEvaluatorInputs({
+             rawInput: run.inputs,
+             rawPrediction: run.outputs,
+             rawReferenceOutput: example?.outputs,
+             run,
+         });
+         if (this.isStringEvaluator) {
+             const evalResult = await this.evaluator.evaluateStrings({
+                 prediction: prediction,
+                 reference: reference,
+                 input: input,
+             });
+             return {
+                 key: this.evaluationName,
+                 comment: evalResult?.reasoning,
+                 ...evalResult,
+             };
+         }
+         throw new Error("Evaluator not yet supported. " +
+             "Please use a string evaluator, or implement your " +
+             "evaluation logic as a customEvaluator.");
+     }
+ }
+ class LoadedEvalConfig {
+     constructor(evaluators) {
+         Object.defineProperty(this, "evaluators", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: evaluators
+         });
+     }
+     static async fromRunEvalConfig(config) {
+         // Custom evaluators are applied "as-is"
+         const customEvaluators = config?.customEvaluators?.map((evaluator) => {
+             if (typeof evaluator === "function") {
+                 return new DynamicRunEvaluator(evaluator);
+             }
+             else {
+                 return evaluator;
+             }
+         });
+         const offTheShelfEvaluators = await Promise.all(config?.evaluators?.map(async (evaluator) => await PreparedRunEvaluator.fromEvalConfig(evaluator)) ?? []);
+         return new LoadedEvalConfig((customEvaluators ?? []).concat(offTheShelfEvaluators ?? []));
+     }
+ }
+ /**
+  * Internals expect a constructor () -> Runnable. This function wraps/coerces
+  * the provided LangChain object, custom function, or factory function into
+  * a constructor of a runnable.
+  * @param modelOrFactory The model or factory to create a wrapped model from.
+  * @returns A function that returns the wrapped model.
+  * @throws Error if the modelOrFactory is invalid.
+  */
+ const createWrappedModel = async (modelOrFactory) => {
+     if (runnables_1.Runnable.isRunnable(modelOrFactory)) {
+         return () => modelOrFactory;
+     }
+     if (typeof modelOrFactory === "function") {
+         try {
+             // If it works with no arguments, assume it's a factory
+             let res = modelOrFactory();
+             if (res &&
+                 typeof res.then === "function") {
+                 res = await res;
+             }
+             return modelOrFactory;
+         }
+         catch (err) {
+             // Otherwise, it's a custom UDF, and we'll wrap
+             // it in a lambda
+             const wrappedModel = new runnables_1.RunnableLambda({ func: modelOrFactory });
+             return () => wrappedModel;
+         }
+     }
+     throw new Error("Invalid modelOrFactory");
+ };
+ const loadExamples = async ({ datasetName, client, projectName, }) => {
+     const exampleIterator = client.listExamples({ datasetName });
+     const configs = [];
+     const runCollectors = [];
+     const examples = [];
+     for await (const example of exampleIterator) {
+         const runCollector = new run_collector_1.RunCollectorCallbackHandler({
+             exampleId: example.id,
+         });
+         configs.push({
+             callbacks: [
+                 new tracer_langchain_1.LangChainTracer({ exampleId: example.id, projectName }),
+                 runCollector,
+             ],
+         });
+         examples.push(example);
+         runCollectors.push(runCollector);
+     }
+     return {
+         configs,
+         examples,
+         runCollectors,
+     };
+ };
+ const applyEvaluators = async ({ evaluation, runs, examples, client, }) => {
+     // TODO: Parallelize and/or put in callbacks to speed up evals.
+     const { evaluators } = evaluation;
+     const progress = new progress_js_1.ProgressBar({
+         total: examples.length,
+         format: "Running Evaluators: {bar} {percentage}% | {value}/{total}\n",
+     });
+     const results = {};
+     for (let i = 0; i < runs.length; i += 1) {
+         const run = runs[i];
+         const example = examples[i];
+         const evaluatorResults = await Promise.all(evaluators.map((evaluator) => client.evaluateRun(run, evaluator, {
+             referenceExample: example,
+             loadChildRuns: false,
+         })));
+         progress.increment();
+         results[example.id] = {
+             execution_time: run?.end_time && run.start_time
+                 ? run.end_time - run.start_time
+                 : undefined,
+             feedback: evaluatorResults,
+             run_id: run.id,
+         };
+     }
+     return results;
+ };
+ const getExamplesInputs = (examples, chainOrFactory, dataType) => {
+     if (dataType === "chat") {
+         // For some batty reason, we store the chat dataset differently.
+         // { type: "system", data: { content: inputs.input } },
+         // But we need to create AIMessage, SystemMessage, etc.
+         return examples.map(({ inputs }) => (0, messages_1.mapStoredMessagesToChatMessages)(inputs.input));
+     }
+     // If it's a language model and ALL example inputs have a single value,
+     // then we can be friendly and flatten the inputs to a list of strings.
+     const isLanguageModel = typeof chainOrFactory === "object" &&
+         typeof chainOrFactory._llmType === "function";
+     if (isLanguageModel &&
+         examples.every(({ inputs }) => Object.keys(inputs).length === 1)) {
+         return examples.map(({ inputs }) => Object.values(inputs)[0]);
+     }
+     return examples.map(({ inputs }) => inputs);
+ };
+ /**
+  * Evaluates a given model or chain against a specified LangSmith dataset.
+  *
+  * This function fetches example records from the specified dataset,
+  * runs the model or chain against each example, and returns the evaluation
+  * results.
+  *
+  * @param chainOrFactory - A model or factory/constructor function to be evaluated. It can be a
+  * Runnable instance, a factory function that returns a Runnable, or a user-defined
+  * function or factory.
+  *
+  * @param datasetName - The name of the dataset against which the evaluation will be
+  * performed. This dataset should already be defined and contain the relevant data
+  * for evaluation.
+  *
+  * @param options - (Optional) Additional parameters for the evaluation process:
+  *   - `evaluationConfig` (RunEvalConfig): Configuration for the evaluation, including
+  *     standard and custom evaluators.
+  *   - `projectName` (string): Name of the project for logging and tracking.
+  *   - `projectMetadata` (Record<string, unknown>): Additional metadata for the project.
+  *   - `client` (Client): Client instance for LangChain service interaction.
+  *   - `maxConcurrency` (number): Maximum concurrency level for dataset processing.
+  *
+  * @returns A promise that resolves to an `EvalResults` object. This object includes
+  * detailed results of the evaluation, such as execution time, run IDs, and feedback
+  * for each entry in the dataset.
+  *
+  * @example
+  * ```typescript
+  * // Example usage for evaluating a model on a dataset
+  * async function evaluateModel() {
+  *   const chain = /* ...create your model or chain... *\/
+  *   const datasetName = 'example-dataset';
+  *   const client = new Client(/* ...config... *\/);
+  *
+  *   const evaluationConfig = new RunEvalConfig({
+  *     evaluators: [/* ...evaluators... *\/],
+  *     customEvaluators: [/* ...custom evaluators... *\/],
+  *   });
+  *
+  *   const results = await runOnDataset(chain, datasetName, {
+  *     evaluationConfig,
+  *     client,
+  *   });
+  *
+  *   console.log('Evaluation Results:', results);
+  * }
+  *
+  * evaluateModel();
+  * ```
+  * In this example, `runOnDataset` is used to evaluate a language model (or a chain of models) against
+  * a dataset named 'example-dataset'. The evaluation process is configured using `RunEvalConfig`, which can
+  * include both standard and custom evaluators. The `Client` instance is used to interact with LangChain services.
+  * The function returns the evaluation results, which can be logged or further processed as needed.
+  */
+ const runOnDataset = async (chainOrFactory, datasetName, { evaluationConfig, projectName, projectMetadata, client, maxConcurrency, }) => {
+     const wrappedModel = await createWrappedModel(chainOrFactory);
+     const testClient = client ?? new langsmith_1.Client();
+     const testProjectName = projectName ?? (0, name_generation_js_1.randomName)();
+     const dataset = await testClient.readDataset({ datasetName });
+     const datasetId = dataset.id;
+     const testConcurrency = maxConcurrency ?? 5;
+     const { configs, examples, runCollectors } = await loadExamples({
+         datasetName,
+         client: testClient,
+         projectName: testProjectName,
+         maxConcurrency: testConcurrency,
+     });
+     await testClient.createProject({
+         projectName: testProjectName,
+         referenceDatasetId: datasetId,
+         projectExtra: { metadata: { ...projectMetadata } },
+     });
+     const wrappedRunnable = new runnables_1.RunnableLambda({
+         func: wrappedModel,
+     }).withConfig({ runName: "evaluationRun" });
+     const runInputs = getExamplesInputs(examples, chainOrFactory, dataset.data_type);
+     const progress = new progress_js_1.ProgressBar({
+         total: runInputs.length,
+         format: "Predicting: {bar} {percentage}% | {value}/{total}",
+     });
+     // TODO: Collect the runs as well.
+     await wrappedRunnable
+         .withListeners({
+             onEnd: () => progress.increment(),
+         })
+         // TODO: Insert evaluation inline for immediate feedback.
+         .batch(runInputs, configs, {
+             maxConcurrency,
+             returnExceptions: true,
+         });
+     progress.complete();
+     const runs = [];
+     for (let i = 0; i < examples.length; i += 1) {
+         runs.push(runCollectors[i].tracedRuns[0]);
+     }
+     let evalResults = {};
+     if (evaluationConfig) {
+         const loadedEvalConfig = await LoadedEvalConfig.fromRunEvalConfig(evaluationConfig);
+         evalResults = await applyEvaluators({
+             evaluation: loadedEvalConfig,
+             runs,
+             examples,
+             client: testClient,
+         });
+     }
+     const results = {
+         projectName: testProjectName,
+         results: evalResults ?? {},
+     };
+     return results;
+ };
+ exports.runOnDataset = runOnDataset;
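Two details in this hunk are worth calling out: `LoadedEvalConfig.fromRunEvalConfig` wraps any plain function found in `customEvaluators` in a `DynamicRunEvaluator`, whose `RunnableLambda` is invoked with `{ run, example }`, and `createWrappedModel` accepts a bare async function as the evaluation target (probing it with zero arguments and falling back to a `RunnableLambda` wrapper when that throws). A hedged sketch combining both; the import path, the `"output"` key, and the returned `{ key, score }` shape (mirroring the string-evaluator branch above) are assumptions:

```typescript
// Hedged sketch; import path and field names are assumptions, not shown in this diff.
import { runOnDataset } from "langchain/smith";

// A plain async function target: createWrappedModel wraps it in a RunnableLambda.
const myChain = async (inputs: Record<string, any>) => ({
  output: String(inputs.question ?? "").trim(),
});

// A custom evaluator: DynamicRunEvaluator invokes it with { run, example }.
const exactMatch = async ({ run, example }: { run: any; example?: any }) => ({
  key: "exact_match",
  score: run.outputs?.output === example?.outputs?.output ? 1 : 0,
});

const results = await runOnDataset(myChain, "example-dataset", {
  evaluationConfig: { customEvaluators: [exactMatch] },
});
```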
@@ -0,0 +1,77 @@
+ import { Runnable } from "@langchain/core/runnables";
+ import { Client, Feedback } from "langsmith";
+ import { RunEvalConfig } from "./config.js";
+ export type ChainOrFactory = Runnable | (() => Runnable) | ((obj: any) => any) | ((obj: any) => Promise<any>) | (() => (obj: unknown) => unknown) | (() => (obj: unknown) => Promise<unknown>);
+ export type RunOnDatasetParams = {
+     evaluationConfig?: RunEvalConfig;
+     projectMetadata?: Record<string, unknown>;
+     projectName?: string;
+     client?: Client;
+     maxConcurrency?: number;
+ };
+ export type EvalResults = {
+     projectName: string;
+     results: {
+         [key: string]: {
+             execution_time?: number;
+             run_id: string;
+             feedback: Feedback[];
+         };
+     };
+ };
+ /**
+  * Evaluates a given model or chain against a specified LangSmith dataset.
+  *
+  * This function fetches example records from the specified dataset,
+  * runs the model or chain against each example, and returns the evaluation
+  * results.
+  *
+  * @param chainOrFactory - A model or factory/constructor function to be evaluated. It can be a
+  * Runnable instance, a factory function that returns a Runnable, or a user-defined
+  * function or factory.
+  *
+  * @param datasetName - The name of the dataset against which the evaluation will be
+  * performed. This dataset should already be defined and contain the relevant data
+  * for evaluation.
+  *
+  * @param options - (Optional) Additional parameters for the evaluation process:
+  *   - `evaluationConfig` (RunEvalConfig): Configuration for the evaluation, including
+  *     standard and custom evaluators.
+  *   - `projectName` (string): Name of the project for logging and tracking.
+  *   - `projectMetadata` (Record<string, unknown>): Additional metadata for the project.
+  *   - `client` (Client): Client instance for LangChain service interaction.
+  *   - `maxConcurrency` (number): Maximum concurrency level for dataset processing.
+  *
+  * @returns A promise that resolves to an `EvalResults` object. This object includes
+  * detailed results of the evaluation, such as execution time, run IDs, and feedback
+  * for each entry in the dataset.
+  *
+  * @example
+  * ```typescript
+  * // Example usage for evaluating a model on a dataset
+  * async function evaluateModel() {
+  *   const chain = /* ...create your model or chain... *\/
+  *   const datasetName = 'example-dataset';
+  *   const client = new Client(/* ...config... *\/);
+  *
+  *   const evaluationConfig = new RunEvalConfig({
+  *     evaluators: [/* ...evaluators... *\/],
+  *     customEvaluators: [/* ...custom evaluators... *\/],
+  *   });
+  *
+  *   const results = await runOnDataset(chain, datasetName, {
+  *     evaluationConfig,
+  *     client,
+  *   });
+  *
+  *   console.log('Evaluation Results:', results);
+  * }
+  *
+  * evaluateModel();
+  * ```
+  * In this example, `runOnDataset` is used to evaluate a language model (or a chain of models) against
+  * a dataset named 'example-dataset'. The evaluation process is configured using `RunEvalConfig`, which can
+  * include both standard and custom evaluators. The `Client` instance is used to interact with LangChain services.
+  * The function returns the evaluation results, which can be logged or further processed as needed.
+  */
+ export declare const runOnDataset: (chainOrFactory: ChainOrFactory, datasetName: string, { evaluationConfig, projectName, projectMetadata, client, maxConcurrency, }: RunOnDatasetParams) => Promise<EvalResults>;
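For completeness: per the `EvalResults` type above, the returned map is keyed by example id, so the result of the previous sketch can be consumed as follows (`results` continues that sketch; `key` and `score` are fields on langsmith's `Feedback`):

```typescript
// Iterate the EvalResults map, which is keyed by example id.
for (const [exampleId, entry] of Object.entries(results.results)) {
  console.log(exampleId, entry.run_id, entry.execution_time);
  for (const feedback of entry.feedback) {
    console.log(" -", feedback.key, feedback.score);
  }
}
```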