peerbench 0.0.1 → 0.0.2-alpha.0

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (58)
  1. package/README.md +308 -2
  2. package/dist/abstract-Dec9Sc5O.d.ts +12 -0
  3. package/dist/benchmarks/index.d.ts +1698 -0
  4. package/dist/benchmarks/index.js +915 -0
  5. package/dist/benchmarks/index.js.map +1 -0
  6. package/dist/catalogs/index.d.ts +75 -0
  7. package/dist/catalogs/index.js +88 -0
  8. package/dist/catalogs/index.js.map +1 -0
  9. package/dist/chunk-22HU24QF.js +8 -0
  10. package/dist/chunk-22HU24QF.js.map +1 -0
  11. package/dist/chunk-232PY7K3.js +50 -0
  12. package/dist/chunk-232PY7K3.js.map +1 -0
  13. package/dist/chunk-7TREBPSJ.js +26 -0
  14. package/dist/chunk-7TREBPSJ.js.map +1 -0
  15. package/dist/chunk-DUBKY73H.js +128 -0
  16. package/dist/chunk-DUBKY73H.js.map +1 -0
  17. package/dist/chunk-GVF4YZF3.js +15 -0
  18. package/dist/chunk-GVF4YZF3.js.map +1 -0
  19. package/dist/chunk-HJH3SW3L.js +103 -0
  20. package/dist/chunk-HJH3SW3L.js.map +1 -0
  21. package/dist/chunk-IUN2IUCS.js +58 -0
  22. package/dist/chunk-IUN2IUCS.js.map +1 -0
  23. package/dist/chunk-PZ5AY32C.js +10 -0
  24. package/dist/chunk-PZ5AY32C.js.map +1 -0
  25. package/dist/chunk-VBOM2YEG.js +47 -0
  26. package/dist/chunk-VBOM2YEG.js.map +1 -0
  27. package/dist/chunk-ZJWSK4VO.js +11 -0
  28. package/dist/chunk-ZJWSK4VO.js.map +1 -0
  29. package/dist/data-BmN5WjZ4.d.ts +57 -0
  30. package/dist/generic-array-DLHWSvf1.d.ts +22 -0
  31. package/dist/index-WiPjF2AL.d.ts +15 -0
  32. package/dist/index.d.ts +38 -3845
  33. package/dist/index.js +40 -3557
  34. package/dist/index.js.map +1 -1
  35. package/dist/llm-DNj_tp2T.d.ts +22 -0
  36. package/dist/llm-judge-DIG1f1Az.d.ts +67 -0
  37. package/dist/provider-BDjGp2y-.d.ts +10 -0
  38. package/dist/providers/index.d.ts +72 -0
  39. package/dist/providers/index.js +263 -0
  40. package/dist/providers/index.js.map +1 -0
  41. package/dist/rate-limiter-CSmVIRsM.d.ts +60 -0
  42. package/dist/schemas/extensions/index.d.ts +14 -0
  43. package/dist/schemas/extensions/index.js +13 -0
  44. package/dist/schemas/extensions/index.js.map +1 -0
  45. package/dist/schemas/index.d.ts +233 -0
  46. package/dist/schemas/index.js +27 -0
  47. package/dist/schemas/index.js.map +1 -0
  48. package/dist/schemas/llm/index.d.ts +98 -0
  49. package/dist/schemas/llm/index.js +37 -0
  50. package/dist/schemas/llm/index.js.map +1 -0
  51. package/dist/scorers/index.d.ts +63 -0
  52. package/dist/scorers/index.js +494 -0
  53. package/dist/scorers/index.js.map +1 -0
  54. package/dist/simple-system-prompt-CzPYuvo0.d.ts +49 -0
  55. package/dist/system-prompt--0FdPWqK.d.ts +58 -0
  56. package/dist/utilities-BrRH32rD.d.ts +30 -0
  57. package/package.json +39 -21
  58. package/LICENSE +0 -21
@@ -0,0 +1,22 @@
+ import { A as AbstractProvider, P as ProviderResponse } from './provider-BDjGp2y-.js';
+ import { ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject } from 'openai/resources/shared';
+ import { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
+
+ declare abstract class AbstractLLMProvider extends AbstractProvider {
+ abstract forward(args: LLMProviderForwardArgs): Promise<ChatResponse>;
+ }
+ type LLMProviderForwardArgs = {
+ messages: ChatCompletionMessageParam[];
+ model: string;
+ abortSignal?: AbortSignal;
+ temperature?: number;
+ responseFormat?: ResponseFormatText | ResponseFormatJSONSchema | ResponseFormatJSONObject;
+ };
+ type ChatResponse = ProviderResponse<string> & {
+ inputTokensUsed?: number;
+ outputTokensUsed?: number;
+ inputCost?: string;
+ outputCost?: string;
+ };
+
+ export { AbstractLLMProvider as A, type ChatResponse as C, type LLMProviderForwardArgs as L };
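The contract above is small: one forward call that maps chat messages to a ChatResponse carrying the output text, timing, and optional token/cost metadata. A minimal sketch of a custom provider against this contract, assuming the names are importable from a "peerbench/providers" subpath export (the import path is an assumption, not confirmed by this diff):

// Sketch only: the "peerbench/providers" import path is assumed.
import {
  AbstractLLMProvider,
  type LLMProviderForwardArgs,
  type ChatResponse,
} from "peerbench/providers";

class EchoProvider extends AbstractLLMProvider {
  readonly kind = "echo";

  async forward(args: LLMProviderForwardArgs): Promise<ChatResponse> {
    const startedAt = Date.now();
    // Echo the last message back; a real provider would call an API here.
    const last = args.messages[args.messages.length - 1];
    return {
      data: String(last?.content ?? ""),
      startedAt,
      completedAt: Date.now(),
    };
  }
}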
@@ -0,0 +1,67 @@
+ import { A as AbstractScorer, B as BaseScorerResult } from './abstract-Dec9Sc5O.js';
+ import { A as AbstractLLMProvider } from './llm-DNj_tp2T.js';
+ import { R as RateLimiter } from './rate-limiter-CSmVIRsM.js';
+
+ type MCQScorerParams = {
+ response: string;
+ choices: Record<string, string>;
+ correctAnswers: string[];
+ };
+ declare class MCQScorer extends AbstractScorer {
+ readonly kind = "mcq";
+ private regexScorer;
+ score(params: MCQScorerParams): Promise<BaseScorerResult & {
+ extractedAnswers: string[];
+ }>;
+ private buildPatternsForAnswer;
+ }
+
+ declare class LLMJudgeScorer extends AbstractScorer {
+ readonly kind = "llmJudge";
+ private provider;
+ constructor(provider: AbstractLLMProvider);
+ score(params: LLMJudgeScorerParams): Promise<LLMJudgeScorerResult | null>;
+ }
+ type LLMJudgeCriterion = {
+ id: string;
+ description: string;
+ weight?: number;
+ scale?: {
+ min: number;
+ max: number;
+ };
+ };
+ type LLMJudgeScorerResult = BaseScorerResult & {
+ provider: string;
+ inputTokensUsed?: number;
+ outputTokensUsed?: number;
+ inputCost?: string;
+ outputCost?: string;
+ verdict?: "pass" | "borderline" | "fail";
+ };
+ type LLMJudgeScorerParams = {
+ task: string;
+ candidateAnswer: string;
+ referenceAnswer?: string;
+ model: string;
+ /**
+ * The rubric used for judging (defaults to a generic set).
+ */
+ criteria?: LLMJudgeCriterion[];
+ /**
+ * Optional extra context that the judge can use (constraints, references, etc.).
+ */
+ meta?: Record<string, unknown>;
+ /**
+ * Optional rate limiter wrapper for provider calls.
+ */
+ rateLimiter?: RateLimiter;
+ /**
+ * Optional prompt tweaks.
+ */
+ systemPrompt?: string;
+ promptPrefix?: string;
+ promptSuffix?: string;
+ };
+
+ export { LLMJudgeScorer as L, type MCQScorerParams as M, MCQScorer as a, type LLMJudgeCriterion as b, type LLMJudgeScorerResult as c, type LLMJudgeScorerParams as d };
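Two scorers are declared here: MCQScorer extracts answer keys from a free-form response and checks them against correctAnswers, while LLMJudgeScorer delegates grading to an LLM provider with an optional weighted rubric. A usage sketch, assuming a "peerbench/scorers" subpath export and an already-constructed provider (both assumptions):

// Sketch only: the "peerbench/scorers" import path is assumed.
import { MCQScorer, LLMJudgeScorer } from "peerbench/scorers";

const mcq = new MCQScorer();
const mcqResult = await mcq.score({
  response: "I believe the answer is (B).",
  choices: { A: "4", B: "6", C: "8" },
  correctAnswers: ["B"],
});
console.log(mcqResult.extractedAnswers); // e.g. ["B"]

const judge = new LLMJudgeScorer(provider); // provider: AbstractLLMProvider
const verdict = await judge.score({
  task: "Summarize the article in one sentence.",
  candidateAnswer: "...",
  model: "openai/gpt-4o",
  criteria: [{ id: "faithfulness", description: "No invented facts", weight: 2 }],
});
console.log(verdict?.verdict); // "pass" | "borderline" | "fail" | undefined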
@@ -0,0 +1,10 @@
+ declare abstract class AbstractProvider {
+ abstract readonly kind: string;
+ }
+ type ProviderResponse<TData = unknown> = {
+ startedAt: number;
+ completedAt: number;
+ data: TData;
+ };
+
+ export { AbstractProvider as A, type ProviderResponse as P };
@@ -0,0 +1,72 @@
+ import { A as AbstractLLMProvider, L as LLMProviderForwardArgs, C as ChatResponse } from '../llm-DNj_tp2T.js';
+ export { A as AbstractProvider, P as ProviderResponse } from '../provider-BDjGp2y-.js';
+ import { R as RateLimiter } from '../rate-limiter-CSmVIRsM.js';
+ import { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
+ import { ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject } from 'openai/resources/shared';
+ import { ChatCompletionMessageParam as ChatCompletionMessageParam$1 } from 'openai/resources/index';
+
+ declare class MastraProvider extends AbstractLLMProvider {
+ readonly kind = "mastra";
+ private readonly endpoint;
+ private readonly authToken;
+ private readonly agentName;
+ private client?;
+ private clientInitPromise?;
+ private warnedAboutSystemMessages;
+ private warnedAboutResponseFormat;
+ constructor(params: {
+ endpoint: string;
+ authToken: string;
+ agentName: string;
+ });
+ private getClient;
+ forward(args: LLMProviderForwardArgs): Promise<ChatResponse>;
+ }
+
+ declare class OpenAIProvider extends AbstractLLMProvider {
+ readonly kind = "openai";
+ private client;
+ private rateLimiter;
+ private maxRetries;
+ constructor(config: {
+ apiKey: string;
+ baseURL: string;
+ maxRetries?: number;
+ timeout?: number;
+ rateLimiter?: RateLimiter;
+ });
+ forward(args: {
+ messages: ChatCompletionMessageParam[];
+ model: string;
+ abortSignal?: AbortSignal;
+ temperature?: number;
+ responseFormat?: ResponseFormatText | ResponseFormatJSONSchema | ResponseFormatJSONObject;
+ }): Promise<ChatResponse>;
+ }
+
+ declare class OpenRouterProvider extends OpenAIProvider {
+ private models;
+ private modelsCachePromise;
+ private modelsUpdatedAt;
+ constructor(config: {
+ apiKey: string;
+ maxRetries?: number;
+ timeout?: number;
+ rateLimiter?: RateLimiter;
+ });
+ forward(args: {
+ messages: ChatCompletionMessageParam$1[];
+ model: string;
+ abortSignal?: AbortSignal;
+ temperature?: number;
+ responseFormat?: ResponseFormatText | ResponseFormatJSONSchema | ResponseFormatJSONObject;
+ }): Promise<ChatResponse>;
+ /**
+ * Updates the cache that holds information about OpenRouter models
+ * including pricing information. It will be valid for 24 hours as
+ * long as the instance of this Provider object is alive.
+ */
+ private updateModelsCache;
+ }
+
+ export { AbstractLLMProvider, ChatResponse, LLMProviderForwardArgs, MastraProvider, OpenAIProvider, OpenRouterProvider };
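Three concrete providers ship in this release: MastraProvider (remote Mastra agents), OpenAIProvider (any OpenAI-compatible endpoint), and OpenRouterProvider, which pins the base URL to OpenRouter and annotates responses with per-request cost. A construction/usage sketch, again assuming the "peerbench/providers" subpath export; the environment variable name is illustrative:

// Sketch only: import path and env var name are assumptions.
import { OpenRouterProvider } from "peerbench/providers";

const provider = new OpenRouterProvider({
  apiKey: process.env.OPENROUTER_API_KEY!,
  timeout: 60_000,
  maxRetries: 3,
});

const res = await provider.forward({
  model: "openai/gpt-4o-mini",
  messages: [{ role: "user", content: "Say hello." }],
  temperature: 0,
});

console.log(res.data);                      // model output text
console.log(res.inputCost, res.outputCost); // USD strings, if pricing was cached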
@@ -0,0 +1,263 @@
+ import {
+ RateLimiter
+ } from "../chunk-DUBKY73H.js";
+ import "../chunk-PZ5AY32C.js";
+
+ // src/providers/abstract/provider.ts
+ var AbstractProvider = class {
+ };
+
+ // src/providers/abstract/llm.ts
+ var AbstractLLMProvider = class extends AbstractProvider {
+ };
+
+ // src/providers/mastra.ts
+ var MastraProvider = class extends AbstractLLMProvider {
+ kind = "mastra";
+ endpoint;
+ authToken;
+ agentName;
+ client;
+ clientInitPromise;
+ warnedAboutSystemMessages = false;
+ warnedAboutResponseFormat = false;
+ constructor(params) {
+ super();
+ this.endpoint = params.endpoint;
+ this.authToken = params.authToken;
+ this.agentName = params.agentName;
+ }
+ async getClient() {
+ if (this.client) return this.client;
+ if (this.clientInitPromise) return this.clientInitPromise;
+ this.clientInitPromise = import("@mastra/client-js").then((mod) => {
+ const MastraClient = mod?.MastraClient;
+ if (!MastraClient) {
+ throw new Error(`@mastra/client-js did not export MastraClient`);
+ }
+ this.client = new MastraClient({
+ baseUrl: this.endpoint,
+ headers: {
+ Authorization: `Bearer ${this.authToken}`
+ }
+ });
+ return this.client;
+ }).catch((err) => {
+ this.clientInitPromise = void 0;
+ throw new Error(
+ `MastraProvider requires optional peer dependency "@mastra/client-js" to be installed. Install it in your host application: npm i @mastra/client-js. Underlying error: ${err instanceof Error ? err.message : String(err)}`
+ );
+ });
+ return this.clientInitPromise;
+ }
+ async forward(args) {
+ const startedAt = Date.now();
+ if (!this.warnedAboutSystemMessages && args.messages.some((m) => m.role === "system")) {
+ this.warnedAboutSystemMessages = true;
+ console.warn(
+ `Mastra provider: system messages are ignored (agent "${this.agentName}" has baked-in prompts).`
+ );
+ }
+ if (!this.warnedAboutResponseFormat && args.responseFormat) {
+ this.warnedAboutResponseFormat = true;
+ console.warn(
+ `Mastra provider: responseFormat is ignored (configure structured output in the Mastra agent).`
+ );
+ }
+ const apiMessages = args.messages.filter((m) => m.role === "user" || m.role === "assistant").map((m) => ({
+ role: m.role,
+ content: String(m.content ?? "")
+ }));
+ const generateOptions = { messages: apiMessages };
+ const parts = String(args.model ?? "").split("/");
+ if (parts.length === 2) {
+ const [providerId, modelId] = parts;
+ generateOptions.runtimeContext = {
+ "provider-id": providerId,
+ "model-id": modelId
+ };
+ } else if (args.model && args.model !== this.agentName) {
+ generateOptions.runtimeContext = {
+ "model-id": args.model
+ };
+ }
+ const client = await this.getClient();
+ const agent = client.getAgent(this.agentName);
+ const requestOptions = args.abortSignal ? { signal: args.abortSignal } : void 0;
+ const response = await agent.generate(generateOptions, requestOptions);
+ const completedAt = Date.now();
+ const text = response?.text ?? "";
+ return {
+ data: String(text),
+ startedAt,
+ completedAt
+ };
+ }
+ };
+
+ // src/providers/openai.ts
+ import OpenAI, { APIError } from "openai";
+ var OpenAIProvider = class extends AbstractLLMProvider {
+ kind = "openai";
+ client;
+ rateLimiter;
+ maxRetries;
+ constructor(config) {
+ super();
+ this.maxRetries = config.maxRetries ?? 3;
+ this.rateLimiter = config.rateLimiter ?? new RateLimiter({
+ maxWeight: 20,
+ timeWindow: 3e3
+ });
+ this.client = new OpenAI({
+ baseURL: config.baseURL,
+ apiKey: config.apiKey,
+ timeout: config.timeout,
+ dangerouslyAllowBrowser: true
+ });
+ }
+ async forward(args) {
+ let retryCount = this.maxRetries;
+ while (retryCount > 0) {
+ let startedAt = /* @__PURE__ */ new Date();
+ try {
+ const response = await this.rateLimiter.execute(
+ async () => {
+ startedAt = /* @__PURE__ */ new Date();
+ return await this.client.chat.completions.create(
+ {
+ model: args.model,
+ messages: args.messages,
+ temperature: args.temperature,
+ response_format: args.responseFormat
+ },
+ // Signal for request
+ { signal: args.abortSignal }
+ );
+ },
+ // Signal for rate limiting
+ { signal: args.abortSignal }
+ );
+ if ("error" in response) {
+ const err = response.error;
+ throw new Error(
+ `${err.message} - Code ${err.code} - ${JSON.stringify(err)}`
+ );
+ }
+ if (!response?.choices?.[0]?.message?.content) {
+ throw new Error("No content returned from the model");
+ }
+ return {
+ data: response.choices[0].message.content,
+ inputTokensUsed: response?.usage?.prompt_tokens,
+ outputTokensUsed: response?.usage?.completion_tokens,
+ startedAt: startedAt.getTime(),
+ completedAt: Date.now()
+ };
+ } catch (err) {
+ if (err instanceof APIError && err.status === 401) {
+ throw new Error(`Invalid credentials provided`, { cause: err });
+ }
+ retryCount--;
+ if (err instanceof SyntaxError) {
+ console.debug(err);
+ continue;
+ }
+ if (retryCount !== 0) {
+ continue;
+ }
+ throw new Error(
+ `Failed to forward prompt to the model: ${err instanceof Error ? err.message : err}`,
+ { cause: err }
+ );
+ }
+ }
+ throw new Error(
+ `Failed to forward prompt to the model: Max retries reached`,
+ { cause: new Error("Max retries reached") }
+ );
+ }
+ };
+
+ // src/providers/openrouter.ts
+ import axios from "axios";
+ import Decimal from "decimal.js";
+ var baseURL = "https://openrouter.ai/api/v1";
+ var MODELS_CACHE_TTL = 1e3 * 60 * 60 * 24;
+ var OpenRouterProvider = class extends OpenAIProvider {
+ models = void 0;
+ modelsCachePromise = Promise.resolve(void 0);
+ modelsUpdatedAt = 0;
+ constructor(config) {
+ super({
+ baseURL,
+ apiKey: config.apiKey,
+ maxRetries: config.maxRetries,
+ timeout: config.timeout,
+ rateLimiter: config.rateLimiter
+ });
+ }
+ async forward(args) {
+ const [response] = await Promise.all([
+ super.forward(args),
+ this.updateModelsCache().catch(() => {
+ })
+ ]);
+ const modelInfo = this.models?.data.find((m) => m.id === args.model);
+ let inputCost = void 0;
+ let outputCost = void 0;
+ if (modelInfo !== void 0) {
+ if (response.inputTokensUsed !== void 0) {
+ inputCost = new Decimal(modelInfo.pricing.prompt).mul(response.inputTokensUsed).toFixed(10);
+ }
+ if (response.outputTokensUsed !== void 0) {
+ outputCost = new Decimal(modelInfo.pricing.completion).mul(response.outputTokensUsed).toFixed(10);
+ }
+ }
+ return {
+ ...response,
+ inputCost,
+ outputCost
+ };
+ }
+ /**
+ * Updates the cache that holds information about OpenRouter models
+ * including pricing information. It will be valid for 24 hours as
+ * long as the instance of this Provider object is alive.
+ */
+ async updateModelsCache() {
+ this.modelsCachePromise = this.modelsCachePromise.then(async () => {
+ if (
+ // The data presented in the cache
+ this.models !== void 0 && // The cache is still valid
+ Date.now() - this.modelsUpdatedAt < MODELS_CACHE_TTL
+ ) {
+ return this.models;
+ }
+ return axios.get(`${baseURL}/models`).then((res) => res.data).then((data) => {
+ data = {
+ data: data.data.filter(
+ (m) => m.architecture.input_modalities.includes("text") && m.architecture.output_modalities.includes("text") && // These models are "fast apply model" and don't support multi turn conversations so don't include them
+ ![
+ "morph/morph-v3-large",
+ "morph/morph-v3-fast",
+ "relace/relace-apply-3"
+ ].includes(m.id)
+ )
+ };
+ this.models = data;
+ this.modelsUpdatedAt = Date.now();
+ return data;
+ });
+ }).catch(() => void 0);
+ await this.modelsCachePromise;
+ }
+ };
+ export {
+ AbstractLLMProvider,
+ AbstractProvider,
+ MastraProvider,
+ OpenAIProvider,
+ OpenRouterProvider
+ };
+ //# sourceMappingURL=index.js.map
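The cost annotation in OpenRouterProvider.forward multiplies OpenRouter's per-token USD price strings by the token counts using decimal.js, avoiding binary floating-point drift and fixing the result to 10 decimal places. A worked sketch of the same arithmetic in isolation (pricing values illustrative):

// Sketch of the cost math; the pricing values are made up for illustration.
import Decimal from "decimal.js";

const pricing = { prompt: "0.0000006", completion: "0.0000024" };
const inputTokensUsed = 1200;
const outputTokensUsed = 350;

const inputCost = new Decimal(pricing.prompt).mul(inputTokensUsed).toFixed(10);
const outputCost = new Decimal(pricing.completion).mul(outputTokensUsed).toFixed(10);

console.log(inputCost);  // "0.0007200000"
console.log(outputCost); // "0.0008400000"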
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../src/providers/abstract/provider.ts","../../src/providers/abstract/llm.ts","../../src/providers/mastra.ts","../../src/providers/openai.ts","../../src/providers/openrouter.ts"],"sourcesContent":[…],"mappings":"…"} [minified source map; inlined TypeScript sources and mappings omitted]
@@ -0,0 +1,60 @@
+ interface RateLimiterOptions {
+ /**
+ * Maximum weight of calls allowed within a time window. Each call has a weight of 1 by default.
+ * Rate limiting is disabled when this is 0 or negative.
+ * @default 25
+ */
+ maxWeight?: number;
+ /**
+ * Time window in milliseconds for rate limiting
+ * @default 1000
+ */
+ timeWindow?: number;
+ }
+ interface RateLimiterCallOptions {
+ /**
+ * Weight of the call. Rate limiting is applied based on the total weight of the calls.
+ * @default 1
+ */
+ weight?: number;
+ /**
+ * Abort signal for cancellation
+ */
+ signal?: AbortSignal;
+ }
+ /**
+ * Generic rate limiter. It can be used to limit async function calls
+ * by a given number of calls within a time window.
+ */
+ declare class RateLimiter {
+ maxWeight: number;
+ timeWindow: number;
+ private timestamps;
+ constructor(options?: RateLimiterOptions);
+ /**
+ * Checks if rate limiting is disabled
+ */
+ isDisabled(): boolean;
+ /**
+ * Disables rate limiting. Set `maxWeight` to re-enable it.
+ */
+ disable(): void;
+ /**
+ * Returns how many weight of calls are there in the current time window
+ */
+ getCurrentCalls(): number;
+ /**
+ * Executes the given function with rate limiting applied
+ */
+ execute<T = unknown>(func: () => Promise<T>, { weight, signal }?: RateLimiterCallOptions): Promise<T>;
+ /**
+ * Waits until rate limit allows the specified weight of calls
+ */
+ private waitForRateLimit;
+ /**
+ * Resets the rate limited weight of calls
+ */
+ reset(): void;
+ }
+
+ export { RateLimiter as R, type RateLimiterOptions as a, type RateLimiterCallOptions as b };
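RateLimiter is a weighted sliding-window limiter: each execute call consumes weight units of the maxWeight budget per timeWindow milliseconds, waiting when the window is full. A usage sketch (import path and values illustrative; the OpenAIProvider above defaults to maxWeight 20 over 3,000 ms):

// Sketch only: the import path is assumed.
import { RateLimiter } from "peerbench";

const limiter = new RateLimiter({ maxWeight: 20, timeWindow: 3_000 });
const controller = new AbortController();

// A heavy call can reserve more of the window than a light one.
const result = await limiter.execute(
  async () => fetch("https://example.com/api").then((r) => r.json()),
  { weight: 5, signal: controller.signal }
);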
@@ -0,0 +1,14 @@
+ import * as z from 'zod';
+
+ declare const ScoreExtensions: {
+ ExtensionAIScorerFieldsV1: {
+ scorerAIProvider: z.ZodOptional<z.ZodString>;
+ scorerAIModelSlug: z.ZodOptional<z.ZodString>;
+ scorerAIInputTokensUsed: z.ZodOptional<z.ZodNumber>;
+ scorerAIOutputTokensUsed: z.ZodOptional<z.ZodNumber>;
+ scorerAIInputCost: z.ZodOptional<z.ZodString>;
+ scorerAIOutputCost: z.ZodOptional<z.ZodString>;
+ };
+ };
+
+ export { ScoreExtensions };
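ExtensionAIScorerFieldsV1 is a bag of optional zod validators (provider, model slug, token counts, cost strings) shaped so it can be spread into a score record schema. A composition sketch, assuming a "peerbench/schemas/extensions" subpath export and that spreading into z.object is the intended use (both assumptions):

// Sketch only: import path and composition pattern are assumptions.
import * as z from "zod";
import { ScoreExtensions } from "peerbench/schemas/extensions";

const ScoreRecord = z.object({
  score: z.number(),
  ...ScoreExtensions.ExtensionAIScorerFieldsV1,
});

ScoreRecord.parse({
  score: 0.9,
  scorerAIProvider: "openrouter",
  scorerAIInputTokensUsed: 812,
  scorerAIInputCost: "0.0004872000",
});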
@@ -0,0 +1,13 @@
+ import {
+ ExtensionLLMAsAJudgeScorerFieldsV1
+ } from "../../chunk-GVF4YZF3.js";
+ import "../../chunk-PZ5AY32C.js";
+
+ // src/schemas/extensions/index.ts
+ var ScoreExtensions = {
+ ExtensionAIScorerFieldsV1: ExtensionLLMAsAJudgeScorerFieldsV1
+ };
+ export {
+ ScoreExtensions
+ };
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../../src/schemas/extensions/index.ts"],"sourcesContent":["import { ExtensionLLMAsAJudgeScorerFieldsV1 } from \"./score/llm-as-a-judge-scorer\";\n\nexport const ScoreExtensions = {\n ExtensionAIScorerFieldsV1: ExtensionLLMAsAJudgeScorerFieldsV1,\n};\n"],"mappings":";;;;;;AAEO,IAAM,kBAAkB;AAAA,EAC7B,2BAA2B;AAC7B;","names":[]}