peerbench 0.0.6 → 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/aggregators/abstract.d.ts +10 -0
- package/dist/aggregators/index.d.ts +2 -67
- package/dist/aggregators/llm/avg.d.ts +26 -0
- package/dist/benchmarks/examples/echo-basic/index.d.ts +4 -0
- package/dist/benchmarks/examples/echo-basic/runner.d.ts +273 -0
- package/dist/benchmarks/examples/echo-basic/schema-sets/echo.v1.d.ts +241 -0
- package/dist/benchmarks/examples/echo-basic/storages/json.d.ts +14 -0
- package/dist/benchmarks/examples/echo-basic/storages/text.d.ts +24 -0
- package/dist/benchmarks/examples/exact-match-scorer/index.d.ts +4 -0
- package/dist/benchmarks/examples/exact-match-scorer/runner.d.ts +428 -0
- package/dist/benchmarks/examples/exact-match-scorer/schema-sets/exact-match.v1.d.ts +287 -0
- package/dist/benchmarks/examples/exact-match-scorer/scorer.d.ts +30 -0
- package/dist/benchmarks/examples/exact-match-scorer/storages/json.d.ts +8 -0
- package/dist/benchmarks/examples/text-transform/index.d.ts +4 -0
- package/dist/benchmarks/examples/text-transform/runner.d.ts +524 -0
- package/dist/benchmarks/examples/text-transform/schema-sets/echo.v1.d.ts +211 -0
- package/dist/benchmarks/examples/text-transform/schema-sets/namespace.d.ts +1 -0
- package/dist/benchmarks/examples/text-transform/schema-sets/reverse.v1.d.ts +216 -0
- package/dist/benchmarks/examples/text-transform/storages/json.d.ts +9 -0
- package/dist/benchmarks/index.d.ts +1 -1667
- package/dist/benchmarks/index.js +4 -4
- package/dist/benchmarks/peerbench/index.d.ts +5 -0
- package/dist/benchmarks/peerbench/runner.d.ts +754 -0
- package/dist/benchmarks/peerbench/schema-sets/mcq.v1.d.ts +261 -0
- package/dist/benchmarks/peerbench/schema-sets/multi-turn.v1.d.ts +351 -0
- package/dist/benchmarks/peerbench/schema-sets/qa.v1.d.ts +256 -0
- package/dist/benchmarks/peerbench/storages/json.d.ts +10 -0
- package/dist/{chunk-DNGT4SJC.js → chunk-3JHDJEY3.js} +16 -7
- package/dist/chunk-3JHDJEY3.js.map +1 -0
- package/dist/{chunk-3JF7SHLC.js → chunk-SMLNDQFX.js} +16 -7
- package/dist/chunk-SMLNDQFX.js.map +1 -0
- package/dist/constants.d.ts +4 -0
- package/dist/errors/index.d.ts +2 -0
- package/dist/errors/peerbench.d.ts +6 -0
- package/dist/errors/polyfill.d.ts +1 -0
- package/dist/examples/basic.d.ts +1 -0
- package/dist/helpers/define-runner.d.ts +45 -0
- package/dist/helpers/index.d.ts +1 -0
- package/dist/index.d.ts +6 -101
- package/dist/providers/abstract/llm.d.ts +20 -0
- package/dist/providers/abstract/provider.d.ts +14 -0
- package/dist/providers/example/echo.d.ts +12 -0
- package/dist/providers/example/restapi.d.ts +37 -0
- package/dist/providers/index.d.ts +5 -84
- package/dist/providers/index.js +1 -1
- package/dist/providers/mastra.d.ts +40 -0
- package/dist/providers/openai.d.ts +29 -0
- package/dist/providers/openrouter.d.ts +27 -0
- package/dist/schemas/extensions/index.d.ts +18 -22
- package/dist/schemas/extensions/response/llm.d.ts +14 -0
- package/dist/schemas/extensions/score/llm-as-a-judge-scorer.d.ts +15 -0
- package/dist/schemas/id.d.ts +2 -0
- package/dist/schemas/index.d.ts +4 -200
- package/dist/schemas/llm/index.d.ts +2 -116
- package/dist/schemas/llm/simple-system-prompt.d.ts +51 -0
- package/dist/schemas/llm/system-prompt.d.ts +59 -0
- package/dist/schemas/response.d.ts +63 -0
- package/dist/schemas/schema-definer.d.ts +47 -0
- package/dist/schemas/score.d.ts +73 -0
- package/dist/schemas/test-case.d.ts +57 -0
- package/dist/scorers/abstract.d.ts +16 -0
- package/dist/scorers/index.d.ts +4 -64
- package/dist/scorers/index.js +1 -1
- package/dist/scorers/llm-judge.d.ts +55 -0
- package/dist/scorers/mcq.d.ts +19 -0
- package/dist/scorers/mcq.test.d.ts +1 -0
- package/dist/scorers/regex.d.ts +58 -0
- package/dist/scorers/regex.test.d.ts +1 -0
- package/dist/storages/abstract.d.ts +7 -0
- package/dist/storages/examples/http.d.ts +1 -0
- package/dist/storages/examples/sqlite.d.ts +1 -0
- package/dist/storages/file.d.ts +43 -0
- package/dist/storages/http.d.ts +22 -0
- package/dist/storages/index.d.ts +5 -69
- package/dist/storages/json-file.d.ts +21 -0
- package/dist/storages/sqlite.d.ts +41 -0
- package/dist/types/index.d.ts +17 -0
- package/dist/types/runner.d.ts +18 -0
- package/dist/utilities.d.ts +9 -0
- package/dist/utils/id-generator.d.ts +2 -0
- package/dist/utils/index.d.ts +5 -0
- package/dist/utils/json.d.ts +17 -0
- package/dist/utils/llm.d.ts +7 -0
- package/dist/{rate-limiter-CSmVIRsM.d.ts → utils/rate-limiter.d.ts} +3 -5
- package/dist/utils/sleep.d.ts +1 -0
- package/dist/utils/string.d.ts +8 -0
- package/package.json +3 -3
- package/dist/abstract-Dec9Sc5O.d.ts +0 -12
- package/dist/chunk-3JF7SHLC.js.map +0 -1
- package/dist/chunk-DNGT4SJC.js.map +0 -1
- package/dist/index-BAioQhp2.d.ts +0 -27
- package/dist/json-file-Bgv9TLcX.d.ts +0 -74
- package/dist/llm-BND163ns.d.ts +0 -23
- package/dist/llm-judge-BS_oNYUK.d.ts +0 -67
- package/dist/provider-BDjGp2y-.d.ts +0 -10
package/dist/index.d.ts
CHANGED
@@ -1,101 +1,6 @@
-
-export
-export
-
-
-
-
-declare function sleep(ms: number, signal?: AbortSignal): Promise<void>;
-
-/**
- * Tries to repair and parse LLM response as a JSON object. LLM must
- * be configured to return a JSON object. This function only helps to
- * get rid out of some additional formatting (e.g. ```json) and repair
- * the JSON syntax (e.g missing comma, single quotes instead double).
- */
-declare function parseResponseAsJSON<T>(response: string): T | undefined;
-
-/**
- * Converts the given byte array to a string
- */
-declare function bufferToString(buffer: Uint8Array, encoding?: BufferEncoding): string;
-/**
- * Converts the given string to a byte array
- */
-declare function stringToBuffer(str: string): Uint8Array;
-
-declare const idGeneratorUUIDv7: IdGenerator;
-
-type Runner<TTestCase extends z__default.ZodObject, TResponse extends z__default.ZodObject, TScore extends z__default.ZodObject, TProvider extends AbstractProvider, TScorer extends AbstractScorer, TRunConfig extends Record<string, unknown>> = (params: {
-    testCase: z__default.infer<TTestCase>;
-    provider: TProvider;
-    scorer?: TScorer;
-    runConfig: TRunConfig;
-    idGenerators?: {
-        response?: IdGenerator;
-        score?: IdGenerator;
-    };
-}) => Promise<{
-    response: z__default.infer<TResponse>;
-    score?: z__default.infer<TScore>;
-}>;
-type InferRunConfig<TRunConfigSchema extends z__default.ZodRawShape> = z__default.infer<z__default.ZodObject<TRunConfigSchema>>;
-
-declare class PeerbenchError extends Error {
-    code: number;
-    constructor(message?: string, options?: ErrorOptions & {
-        code: number;
-    });
-}
-
-declare function captureStackTrace(error: Error, constructor: Function): void;
-
-declare const PEERBENCH_NAMESPACE: "peerbench.ai";
-declare const CATEGORIES: {
-    LLM: string;
-};
-
-declare function defineRunner<const TProviders extends ProviderCtor[], const TScorers extends ScorerCtor[], const TSchemaSets extends SchemaSetDefinition[], const TRunConfigSchema extends z__default.ZodRawShape = {}>(config: {
-    schemaSets: TSchemaSets;
-    providers: TProviders;
-    scorers: TScorers;
-    runConfigSchema?: TRunConfigSchema;
-    /**
-     * @default true
-     */
-    parseRunConfig?: boolean;
-    defaults?: {
-        scorer?: InstanceType<TScorers[number]>;
-        responseIdGenerator?: IdGenerator;
-        scoreIdGenerator?: IdGenerator;
-    };
-}, fn: Runner<TSchemaSets[number]["testCase"], TSchemaSets[number]["response"], TSchemaSets[number]["score"], InstanceType<TProviders[number]>, InstanceType<TScorers[number]>, InferRunConfig<TRunConfigSchema>>): ((params: Parameters<typeof fn>[0]) => Promise<{
-    response: z__default.core.output<TSchemaSets[number]["response"]>;
-    score?: z__default.core.output<TSchemaSets[number]["score"]> | undefined;
-}>) & {
-    /**
-     * The configuration that was used to define the runner.
-     */
-    config: {
-        runConfigSchema: z__default.ZodObject<{ -readonly [P in keyof TRunConfigSchema]: TRunConfigSchema[P]; }, z__default.core.$strip>;
-        schemaSets: TSchemaSets;
-        providers: TProviders;
-        scorers: TScorers;
-        /**
-         * @default true
-         */
-        parseRunConfig?: boolean;
-        defaults?: {
-            scorer?: InstanceType<TScorers[number]>;
-            responseIdGenerator?: IdGenerator;
-            scoreIdGenerator?: IdGenerator;
-        };
-    };
-};
-type SchemaSetDefinition<TTestCase extends z__default.ZodObject = z__default.ZodObject, TResponse extends z__default.ZodObject = z__default.ZodObject, TScore extends z__default.ZodObject = z__default.ZodObject> = {
-    testCase: TTestCase;
-    response: TResponse;
-    score: TScore;
-};
-
-export { CATEGORIES, IdGenerator, type InferRunConfig, PEERBENCH_NAMESPACE, PeerbenchError, ProviderCtor, type Runner, ScorerCtor, bufferToString, captureStackTrace, defineRunner, idGeneratorUUIDv7, parseResponseAsJSON, sleep, stringToBuffer };
+export * from "./types";
+export * from "./errors";
+export * from "./utils";
+export * from "./utilities";
+export * from "./constants";
+export * from "./helpers";

package/dist/providers/abstract/llm.d.ts
ADDED
@@ -0,0 +1,20 @@
+import { AbstractProvider, ProviderResponse } from "./provider";
+import { ResponseFormatJSONObject, ResponseFormatJSONSchema, ResponseFormatText } from "openai/resources/shared";
+import { ChatCompletionMessageParam } from "openai/resources/chat/completions";
+export declare abstract class AbstractLLMProvider extends AbstractProvider {
+    abstract forward(args: LLMProviderForwardArgs): Promise<ChatResponse>;
+}
+export type LLMProviderForwardArgs = {
+    messages: ChatCompletionMessageParam[];
+    model: string;
+    abortSignal?: AbortSignal;
+    temperature?: number;
+    responseFormat?: ResponseFormatText | ResponseFormatJSONSchema | ResponseFormatJSONObject;
+};
+export type ChatResponse = ProviderResponse<string> & {
+    inputTokensUsed?: number;
+    outputTokensUsed?: number;
+    inputCost?: string;
+    outputCost?: string;
+    metadata?: Record<string, unknown>;
+};
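
The `forward` contract above is the entire provider surface: accept `LLMProviderForwardArgs`, return a `ChatResponse`. A minimal concrete sketch; the import path and the `example.fixed-reply` kind are assumptions, since the diff shows only the dist layout, not the package's export map:

```ts
// Hypothetical import path based on the dist layout shown in this diff.
import { AbstractLLMProvider, ChatResponse, LLMProviderForwardArgs } from "peerbench/dist/providers";

class FixedReplyProvider extends AbstractLLMProvider {
  readonly kind = "example.fixed-reply"; // made-up kind string

  async forward(args: LLMProviderForwardArgs): Promise<ChatResponse> {
    const startedAt = Date.now();
    // A real implementation would send args.messages / args.model to a backend,
    // honoring args.abortSignal, args.temperature, and args.responseFormat.
    return {
      startedAt,
      completedAt: Date.now(),
      data: "ok", // ChatResponse is ProviderResponse<string>: data is the final text
      outputTokensUsed: 1, // the token and cost fields are optional metadata
    };
  }
}
```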

package/dist/providers/abstract/provider.d.ts
ADDED
@@ -0,0 +1,14 @@
+export declare abstract class AbstractProvider {
+    readonly kind: string;
+    constructor();
+    static withKind<TKind extends string, TThis extends abstract new (...args: any[]) => AbstractProvider>(this: TThis, kind: TKind): (new () => InstanceType<TThis> & {
+        readonly kind: TKind;
+    }) & {
+        readonly kind: TKind;
+    };
+}
+export type ProviderResponse<TData = unknown> = {
+    startedAt: number;
+    completedAt: number;
+    data: TData;
+};
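
`withKind` appears to be how the concrete providers get their literal `kind` types: the `*_base` declarations further down (e.g. `MastraProvider_base`) have exactly its return shape. A type-level sketch of the effect; the `example.branded` kind and the import path are made up:

```ts
import { AbstractLLMProvider } from "peerbench/dist/providers"; // path assumed

// The package's own kinds look like "peerbench.ai/llm/openai".
const Branded = AbstractLLMProvider.withKind("example.branded");

// The literal kind is visible on both the instance side and the constructor side.
type InstanceKind = InstanceType<typeof Branded>["kind"]; // "example.branded"
const staticKind = Branded.kind;                          // "example.branded"
```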

package/dist/providers/example/echo.d.ts
ADDED
@@ -0,0 +1,12 @@
+import { AbstractLLMProvider, ChatResponse, LLMProviderForwardArgs } from "../abstract/llm";
+/**
+ * Example provider implementation for local testing and as a reference.
+ *
+ * - Extends `AbstractLLMProvider`
+ * - Implements `forward({ messages, model, ... })`
+ * - Does not perform any network calls
+ */
+export declare class ExampleEchoLLMProvider extends AbstractLLMProvider {
+    readonly kind = "example.echo";
+    forward(args: LLMProviderForwardArgs): Promise<ChatResponse>;
+}
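
A usage sketch for the echo provider; the declarations guarantee only the types, not what the echo reply contains, and the path and model value are placeholders:

```ts
import { ExampleEchoLLMProvider } from "peerbench/dist/providers/example/echo"; // path assumed

const echo = new ExampleEchoLLMProvider();
const res = await echo.forward({
  model: "unused-locally", // required by the type; an offline echo presumably ignores it
  messages: [{ role: "user", content: "ping" }],
});
console.log(res.data); // a string, since ChatResponse = ProviderResponse<string> & ...
```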

package/dist/providers/example/restapi.d.ts
ADDED
@@ -0,0 +1,37 @@
+import { AbstractLLMProvider, ChatResponse, LLMProviderForwardArgs } from "../abstract/llm";
+/**
+ * Example "custom REST API LLM agent provider".
+ *
+ * Sometimes you don't call a model API directly. You call your own REST API, and *it* talks to the model.
+ * That REST API can hide secrets, run tools, do retrieval, apply guardrails, and whatever else your
+ * product needs.
+ *
+ * In the SDK we still want a clean abstraction, so we model that REST API as an `AbstractLLMProvider`.
+ * The runner (or host app) still passes `messages + model`, and the provider still returns one final string.
+ *
+ * If you’re implementing your own provider, this is the only part that matters: translate
+ * `LLMProviderForwardArgs` into your HTTP request, then translate your HTTP response back into
+ * `ChatResponse`.
+ */
+export declare class ExampleRestApiLLMAgentProvider extends AbstractLLMProvider {
+    readonly kind = "example.restapi.agent";
+    private readonly baseUrl;
+    private readonly apiKey?;
+    private readonly headers?;
+    constructor(config: ExampleRestApiAgentProviderConfig);
+    forward(args: LLMProviderForwardArgs): Promise<ChatResponse>;
+}
+type ExampleRestApiAgentProviderConfig = {
+    /**
+     * Base URL of your REST service.
+     * Example: `https://my-company.internal/api`
+     */
+    baseUrl: string;
+    /**
+     * Optional authentication.
+     * Many internal services accept `Authorization: Bearer <token>`, but you can also pass custom headers.
+     */
+    apiKey?: string;
+    headers?: Record<string, string>;
+};
+export {};
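
The docblock's "only part that matters" reduces to a small translation layer. A sketch of what such a `forward` might do internally; the `/chat` route, request payload, and response shape are assumptions about your own service, not part of this package:

```ts
import type { ChatResponse, LLMProviderForwardArgs } from "peerbench/dist/providers"; // path assumed

const baseUrl = "https://my-company.internal/api"; // taken from the docblock's example
const apiKey = process.env.MY_SERVICE_TOKEN;       // hypothetical credential

async function forwardViaRestApi(args: LLMProviderForwardArgs): Promise<ChatResponse> {
  const startedAt = Date.now();
  const res = await fetch(`${baseUrl}/chat`, {     // hypothetical endpoint on your service
    method: "POST",
    headers: {
      "content-type": "application/json",
      ...(apiKey ? { authorization: `Bearer ${apiKey}` } : {}),
    },
    body: JSON.stringify({ model: args.model, messages: args.messages }),
    signal: args.abortSignal,
  });
  const body = (await res.json()) as { text: string }; // assumed response shape
  return { startedAt, completedAt: Date.now(), data: body.text };
}
```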

package/dist/providers/index.d.ts
CHANGED
@@ -1,84 +1,5 @@
-
-export
-
-
-
-import { ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject } from 'openai/resources/shared';
-
-declare class MastraProvider extends AbstractLLMProvider {
-    readonly kind = "mastra";
-    private readonly endpoint;
-    private readonly authToken?;
-    private client;
-    private memory?;
-    constructor(params: {
-        endpoint: string;
-        authToken?: string;
-        memory?: AgentMemoryOption;
-    });
-    forward(args: LLMProviderForwardArgs & {
-        memory?: AgentMemoryOption;
-        /**
-         * The model that will be used as the brain for the agent.
-         */
-        modelName?: string;
-    }): Promise<ChatResponse>;
-    getAgentInfo(args: {
-        agentId: string;
-        runtimeContext?: MastraRuntimeContext;
-    }): Promise<GetAgentResponse>;
-    getAgents(args?: {
-        runtimeContext?: MastraRuntimeContext;
-        partial?: boolean;
-    }): Promise<Record<string, GetAgentResponse>>;
-}
-type AgentMemoryOption = Parameters<Parameters<MastraClient["getAgent"]>["0"] extends string ? ReturnType<MastraClient["getAgent"]>["generate"] : never>[0] extends {
-    memory?: infer M;
-} ? M : never;
-type MastraRuntimeContext = Parameters<Parameters<MastraClient["getAgent"]>["0"] extends string ? ReturnType<MastraClient["getAgent"]>["generate"] : never>[0] extends {
-    runtimeContext?: infer R;
-} ? R : never;
-
-declare class OpenAIProvider extends AbstractLLMProvider {
-    readonly kind: "peerbench.ai/llm/openai";
-    private client;
-    private rateLimiter;
-    private maxRetries;
-    constructor(config: {
-        apiKey: string;
-        baseURL: string;
-        maxRetries?: number;
-        timeout?: number;
-        rateLimiter?: RateLimiter;
-    });
-    forward(args: {
-        messages: ChatCompletionMessageParam[];
-        model: string;
-        abortSignal?: AbortSignal;
-        temperature?: number;
-        responseFormat?: ResponseFormatText | ResponseFormatJSONSchema | ResponseFormatJSONObject;
-    }): Promise<ChatResponse>;
-}
-
-declare class OpenRouterProvider extends AbstractLLMProvider {
-    readonly kind: "peerbench.ai/llm/openrouter.ai";
-    private models;
-    private modelsCachePromise;
-    private modelsUpdatedAt;
-    private openAIProvider;
-    constructor(config: {
-        apiKey: string;
-        maxRetries?: number;
-        timeout?: number;
-        rateLimiter?: RateLimiter;
-    });
-    forward(args: LLMProviderForwardArgs): Promise<ChatResponse>;
-    /**
-     * Updates the cache that holds information about OpenRouter models
-     * including pricing information. It will be valid for 24 hours as
-     * long as the instance of this Provider object is alive.
-     */
-    private updateModelsCache;
-}
-
-export { AbstractLLMProvider, type AgentMemoryOption, ChatResponse, LLMProviderForwardArgs, MastraProvider, OpenAIProvider, OpenRouterProvider };
+export * from "./abstract/llm";
+export * from "./abstract/provider";
+export * from "./mastra";
+export * from "./openai";
+export * from "./openrouter";

package/dist/providers/index.js
CHANGED

package/dist/providers/mastra.d.ts
ADDED
@@ -0,0 +1,40 @@
+import { AbstractLLMProvider, type ChatResponse, type LLMProviderForwardArgs } from "./abstract/llm";
+import { MastraClient, type GetAgentResponse } from "@mastra/client-js";
+declare const MastraProvider_base: (new () => AbstractLLMProvider & {
+    readonly kind: "peerbench.ai/llm/mastra";
+}) & {
+    readonly kind: "peerbench.ai/llm/mastra";
+};
+export declare class MastraProvider extends MastraProvider_base {
+    private readonly endpoint;
+    private readonly authToken?;
+    private client;
+    private memory?;
+    constructor(params: {
+        endpoint: string;
+        authToken?: string;
+        memory?: AgentMemoryOption;
+    });
+    forward(args: LLMProviderForwardArgs & {
+        memory?: AgentMemoryOption;
+        /**
+         * The model that will be used as the brain for the agent.
+         */
+        modelName?: string;
+    }): Promise<ChatResponse>;
+    getAgentInfo(args: {
+        agentId: string;
+        runtimeContext?: MastraRuntimeContext;
+    }): Promise<GetAgentResponse>;
+    getAgents(args?: {
+        runtimeContext?: MastraRuntimeContext;
+        partial?: boolean;
+    }): Promise<Record<string, GetAgentResponse>>;
+}
+export type AgentMemoryOption = Parameters<Parameters<MastraClient["getAgent"]>["0"] extends string ? ReturnType<MastraClient["getAgent"]>["generate"] : never>[0] extends {
+    memory?: infer M;
+} ? M : never;
+type MastraRuntimeContext = Parameters<Parameters<MastraClient["getAgent"]>["0"] extends string ? ReturnType<MastraClient["getAgent"]>["generate"] : never>[0] extends {
+    runtimeContext?: infer R;
+} ? R : never;
+export {};
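
Note the kind is now the namespaced literal `"peerbench.ai/llm/mastra"` rather than the old `"mastra"`. Beyond `forward`, the provider exposes agent discovery; a construction sketch, where the endpoint is a placeholder for wherever your Mastra server runs:

```ts
import { MastraProvider } from "peerbench/dist/providers/mastra"; // path assumed

const mastra = new MastraProvider({ endpoint: "http://localhost:4111" }); // placeholder endpoint
const agents = await mastra.getAgents();          // Record<string, GetAgentResponse>
console.log(Object.keys(agents));                 // agent ids known to the server
const info = await mastra.getAgentInfo({ agentId: Object.keys(agents)[0] });
```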

package/dist/providers/openai.d.ts
ADDED
@@ -0,0 +1,29 @@
+import { RateLimiter } from "../utils";
+import { ChatCompletionMessageParam } from "openai/resources/chat/completions";
+import { ResponseFormatJSONObject, ResponseFormatJSONSchema, ResponseFormatText } from "openai/resources/shared";
+import { AbstractLLMProvider, ChatResponse } from "./abstract/llm";
+declare const OpenAIProvider_base: (new () => AbstractLLMProvider & {
+    readonly kind: "peerbench.ai/llm/openai";
+}) & {
+    readonly kind: "peerbench.ai/llm/openai";
+};
+export declare class OpenAIProvider extends OpenAIProvider_base {
+    private client;
+    private rateLimiter;
+    private maxRetries;
+    constructor(config: {
+        apiKey: string;
+        baseURL: string;
+        maxRetries?: number;
+        timeout?: number;
+        rateLimiter?: RateLimiter;
+    });
+    forward(args: {
+        messages: ChatCompletionMessageParam[];
+        model: string;
+        abortSignal?: AbortSignal;
+        temperature?: number;
+        responseFormat?: ResponseFormatText | ResponseFormatJSONSchema | ResponseFormatJSONObject;
+    }): Promise<ChatResponse>;
+}
+export {};
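
A usage sketch against the declared constructor and `forward` signature. `baseURL` is required by the config type, so pointing the class at any OpenAI-compatible endpoint is explicit; the key and model values below are placeholders:

```ts
import { OpenAIProvider } from "peerbench/dist/providers/openai"; // path assumed

const openai = new OpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY!,  // placeholder credential
  baseURL: "https://api.openai.com/v1", // required by the declared config
});

const reply = await openai.forward({
  model: "gpt-4o-mini", // placeholder model slug
  messages: [{ role: "user", content: "Say hi" }],
  temperature: 0,
});
console.log(reply.data, reply.inputTokensUsed, reply.outputTokensUsed);
```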

package/dist/providers/openrouter.d.ts
ADDED
@@ -0,0 +1,27 @@
+import { AbstractLLMProvider, ChatResponse, LLMProviderForwardArgs } from "./abstract/llm";
+import { RateLimiter } from "../utils";
+declare const OpenRouterProvider_base: (new () => AbstractLLMProvider & {
+    readonly kind: "peerbench.ai/llm/openrouter.ai";
+}) & {
+    readonly kind: "peerbench.ai/llm/openrouter.ai";
+};
+export declare class OpenRouterProvider extends OpenRouterProvider_base {
+    private models;
+    private modelsCachePromise;
+    private modelsUpdatedAt;
+    private openAIProvider;
+    constructor(config: {
+        apiKey: string;
+        maxRetries?: number;
+        timeout?: number;
+        rateLimiter?: RateLimiter;
+    });
+    forward(args: LLMProviderForwardArgs): Promise<ChatResponse>;
+    /**
+     * Updates the cache that holds information about OpenRouter models
+     * including pricing information. It will be valid for 24 hours as
+     * long as the instance of this Provider object is alive.
+     */
+    private updateModelsCache;
+}
+export {};
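
Same shape as `OpenAIProvider` but without `baseURL`. Per the `updateModelsCache` docblock, pricing info is cached for 24 hours per instance, which is presumably how the optional `inputCost`/`outputCost` fields on `ChatResponse` get populated; the declarations don't guarantee it:

```ts
import { OpenRouterProvider } from "peerbench/dist/providers/openrouter"; // path assumed

const openrouter = new OpenRouterProvider({ apiKey: process.env.OPENROUTER_API_KEY! });
const res = await openrouter.forward({
  model: "openai/gpt-4o-mini", // placeholder OpenRouter model slug
  messages: [{ role: "user", content: "hello" }],
});
console.log(res.data, res.inputCost, res.outputCost);
```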

package/dist/schemas/extensions/index.d.ts
CHANGED
@@ -1,28 +1,24 @@
-
-
-declare const ScoreExtensions: {
+export declare const ScoreExtensions: {
     ExtensionLLMAsAJudgeScoreFieldsV1: {
-        scorerAISystemPrompt:
-        scorerAISystemPromptId:
-        scorerAIProvider:
-        scorerAIModelSlug:
-        scorerAIInputTokensUsed:
-        scorerAIOutputTokensUsed:
-        scorerAIInputCost:
-        scorerAIOutputCost:
+        scorerAISystemPrompt: import("zod").ZodOptional<import("zod").ZodString>;
+        scorerAISystemPromptId: import("zod").ZodOptional<import("zod").ZodString>;
+        scorerAIProvider: import("zod").ZodOptional<import("zod").ZodString>;
+        scorerAIModelSlug: import("zod").ZodOptional<import("zod").ZodString>;
+        scorerAIInputTokensUsed: import("zod").ZodOptional<import("zod").ZodNumber>;
+        scorerAIOutputTokensUsed: import("zod").ZodOptional<import("zod").ZodNumber>;
+        scorerAIInputCost: import("zod").ZodOptional<import("zod").ZodString>;
+        scorerAIOutputCost: import("zod").ZodOptional<import("zod").ZodString>;
     };
 };
-declare const ResponseExtensions: {
+export declare const ResponseExtensions: {
     ExtensionLLMResponseFieldsV1: {
-        data:
-        modelSlug:
-        provider:
-        systemPromptId:
-        inputTokensUsed:
-        outputTokensUsed:
-        inputCost:
-        outputCost:
+        data: import("zod").ZodString;
+        modelSlug: import("zod").ZodString;
+        provider: import("zod").ZodString;
+        systemPromptId: import("zod").ZodOptional<import("zod").ZodString>;
+        inputTokensUsed: import("zod").ZodOptional<import("zod").ZodNumber>;
+        outputTokensUsed: import("zod").ZodOptional<import("zod").ZodNumber>;
+        inputCost: import("zod").ZodOptional<import("zod").ZodString>;
+        outputCost: import("zod").ZodOptional<import("zod").ZodString>;
     };
 };
-
-export { ResponseExtensions, ScoreExtensions };

package/dist/schemas/extensions/response/llm.d.ts
ADDED
@@ -0,0 +1,14 @@
+import z from "zod";
+/**
+ * Provides a set of fields that holds information about the LLM and its response.
+ */
+export declare const ExtensionLLMResponseFieldsV1: {
+    data: z.ZodString;
+    modelSlug: z.ZodString;
+    provider: z.ZodString;
+    systemPromptId: z.ZodOptional<z.ZodString>;
+    inputTokensUsed: z.ZodOptional<z.ZodNumber>;
+    outputTokensUsed: z.ZodOptional<z.ZodNumber>;
+    inputCost: z.ZodOptional<z.ZodString>;
+    outputCost: z.ZodOptional<z.ZodString>;
+};

package/dist/schemas/extensions/score/llm-as-a-judge-scorer.d.ts
ADDED
@@ -0,0 +1,15 @@
+import z from "zod";
+/**
+ * Provides a set of fields that holds information about the LLM model
+ * that was used to judge the response.
+ */
+export declare const ExtensionLLMAsAJudgeScoreFieldsV1: {
+    scorerAISystemPrompt: z.ZodOptional<z.ZodString>;
+    scorerAISystemPromptId: z.ZodOptional<z.ZodString>;
+    scorerAIProvider: z.ZodOptional<z.ZodString>;
+    scorerAIModelSlug: z.ZodOptional<z.ZodString>;
+    scorerAIInputTokensUsed: z.ZodOptional<z.ZodNumber>;
+    scorerAIOutputTokensUsed: z.ZodOptional<z.ZodNumber>;
+    scorerAIInputCost: z.ZodOptional<z.ZodString>;
+    scorerAIOutputCost: z.ZodOptional<z.ZodString>;
+};
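
Both extension exports are raw Zod shapes (maps of field name to Zod type), not `z.object()` schemas, so the natural consumption pattern is spreading them into your own schema. A sketch with hypothetical surrounding fields and an assumed import path:

```ts
import z from "zod";
import { ResponseExtensions, ScoreExtensions } from "peerbench/dist/schemas/extensions"; // path assumed

// Spread the V1 LLM response fields into a project-specific response schema.
const LLMResponse = z.object({
  id: z.string(), // hypothetical field of your own
  ...ResponseExtensions.ExtensionLLMResponseFieldsV1,
});

// Same pattern for judge metadata on scores.
const JudgedScore = z.object({
  value: z.number(), // hypothetical
  ...ScoreExtensions.ExtensionLLMAsAJudgeScoreFieldsV1,
});

type LLMResponse = z.infer<typeof LLMResponse>; // { data, modelSlug, provider, ... }
```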