@providerprotocol/ai 0.0.10 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/dist/index.d.ts +7 -1
  2. package/dist/index.js +37 -9
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -10
  5. package/src/anthropic/index.ts +0 -3
  6. package/src/core/image.ts +0 -188
  7. package/src/core/llm.ts +0 -624
  8. package/src/core/provider.ts +0 -92
  9. package/src/google/index.ts +0 -3
  10. package/src/http/errors.ts +0 -112
  11. package/src/http/fetch.ts +0 -210
  12. package/src/http/index.ts +0 -31
  13. package/src/http/keys.ts +0 -136
  14. package/src/http/retry.ts +0 -205
  15. package/src/http/sse.ts +0 -136
  16. package/src/index.ts +0 -32
  17. package/src/ollama/index.ts +0 -3
  18. package/src/openai/index.ts +0 -39
  19. package/src/openrouter/index.ts +0 -11
  20. package/src/providers/anthropic/index.ts +0 -17
  21. package/src/providers/anthropic/llm.ts +0 -196
  22. package/src/providers/anthropic/transform.ts +0 -434
  23. package/src/providers/anthropic/types.ts +0 -213
  24. package/src/providers/google/index.ts +0 -17
  25. package/src/providers/google/llm.ts +0 -203
  26. package/src/providers/google/transform.ts +0 -447
  27. package/src/providers/google/types.ts +0 -214
  28. package/src/providers/ollama/index.ts +0 -43
  29. package/src/providers/ollama/llm.ts +0 -272
  30. package/src/providers/ollama/transform.ts +0 -434
  31. package/src/providers/ollama/types.ts +0 -260
  32. package/src/providers/openai/index.ts +0 -186
  33. package/src/providers/openai/llm.completions.ts +0 -201
  34. package/src/providers/openai/llm.responses.ts +0 -211
  35. package/src/providers/openai/transform.completions.ts +0 -561
  36. package/src/providers/openai/transform.responses.ts +0 -708
  37. package/src/providers/openai/types.ts +0 -1249
  38. package/src/providers/openrouter/index.ts +0 -177
  39. package/src/providers/openrouter/llm.completions.ts +0 -201
  40. package/src/providers/openrouter/llm.responses.ts +0 -211
  41. package/src/providers/openrouter/transform.completions.ts +0 -538
  42. package/src/providers/openrouter/transform.responses.ts +0 -742
  43. package/src/providers/openrouter/types.ts +0 -717
  44. package/src/providers/xai/index.ts +0 -223
  45. package/src/providers/xai/llm.completions.ts +0 -201
  46. package/src/providers/xai/llm.messages.ts +0 -195
  47. package/src/providers/xai/llm.responses.ts +0 -211
  48. package/src/providers/xai/transform.completions.ts +0 -565
  49. package/src/providers/xai/transform.messages.ts +0 -448
  50. package/src/providers/xai/transform.responses.ts +0 -678
  51. package/src/providers/xai/types.ts +0 -938
  52. package/src/types/content.ts +0 -133
  53. package/src/types/errors.ts +0 -85
  54. package/src/types/index.ts +0 -105
  55. package/src/types/llm.ts +0 -211
  56. package/src/types/messages.ts +0 -205
  57. package/src/types/provider.ts +0 -195
  58. package/src/types/schema.ts +0 -58
  59. package/src/types/stream.ts +0 -146
  60. package/src/types/thread.ts +0 -226
  61. package/src/types/tool.ts +0 -88
  62. package/src/types/turn.ts +0 -118
  63. package/src/utils/id.ts +0 -28
  64. package/src/xai/index.ts +0 -41
package/src/core/llm.ts DELETED
@@ -1,624 +0,0 @@
1
- import type {
2
- LLMOptions,
3
- LLMInstance,
4
- LLMRequest,
5
- LLMResponse,
6
- InferenceInput,
7
- BoundLLMModel,
8
- LLMCapabilities,
9
- } from '../types/llm.ts';
10
- import type { UserMessage, AssistantMessage } from '../types/messages.ts';
11
- import type { ContentBlock, TextBlock } from '../types/content.ts';
12
- import type { Tool, ToolExecution, ToolResult } from '../types/tool.ts';
13
- import type { Turn, TokenUsage } from '../types/turn.ts';
14
- import type { StreamResult, StreamEvent } from '../types/stream.ts';
15
- import type { Thread } from '../types/thread.ts';
16
- import type { ProviderConfig } from '../types/provider.ts';
17
- import { UPPError } from '../types/errors.ts';
18
- import {
19
- Message,
20
- UserMessage as UserMessageClass,
21
- ToolResultMessage,
22
- isUserMessage,
23
- isAssistantMessage,
24
- } from '../types/messages.ts';
25
- import { createTurn, aggregateUsage, emptyUsage } from '../types/turn.ts';
26
- import { createStreamResult } from '../types/stream.ts';
27
- import { generateShortId } from '../utils/id.ts';
28
-
29
/**
 * Default maximum iterations for tool execution.
 * Used as the fallback when `toolStrategy.maxIterations` is not provided
 * (see executeGenerate / executeStream).
 */
const DEFAULT_MAX_ITERATIONS = 10;
33
-
34
/**
 * Create an LLM instance.
 *
 * Binds the referenced model on its provider and validates supported
 * features up front (LLM modality, structured output, tools), so callers
 * fail fast at construction time rather than at the first request. The
 * returned instance exposes `generate` (non-streaming) and `stream`
 * entry points that share the same captured options.
 *
 * @param options - Model reference, provider config, params, system prompt,
 *   tools, tool strategy, and optional structured-output request.
 * @returns An LLMInstance bound to the resolved model.
 * @throws UPPError ('INVALID_REQUEST') when the provider lacks the LLM
 *   modality, structured output, tools, or — at stream() time — streaming.
 */
export function llm<TParams = unknown>(
  options: LLMOptions<TParams>
): LLMInstance<TParams> {
  const { model: modelRef, config = {}, params, system, tools, toolStrategy, structure } = options;

  // Validate that the provider supports LLM
  const provider = modelRef.provider;
  if (!provider.modalities.llm) {
    throw new UPPError(
      `Provider '${provider.name}' does not support LLM modality`,
      'INVALID_REQUEST',
      provider.name,
      'llm'
    );
  }

  // Bind the model
  const boundModel = provider.modalities.llm.bind(modelRef.modelId) as BoundLLMModel<TParams>;

  // Validate capabilities at bind time (fail fast, before any request is made)
  const capabilities = boundModel.capabilities;

  // Check for structured output capability
  if (structure && !capabilities.structuredOutput) {
    throw new UPPError(
      `Provider '${provider.name}' does not support structured output`,
      'INVALID_REQUEST',
      provider.name,
      'llm'
    );
  }

  // Check for tools capability
  if (tools && tools.length > 0 && !capabilities.tools) {
    throw new UPPError(
      `Provider '${provider.name}' does not support tools`,
      'INVALID_REQUEST',
      provider.name,
      'llm'
    );
  }

  // Build the instance; generate/stream close over the validated options above.
  const instance: LLMInstance<TParams> = {
    model: boundModel,
    system,
    params,
    capabilities,

    async generate(
      historyOrInput: Message[] | Thread | InferenceInput,
      ...inputs: InferenceInput[]
    ): Promise<Turn> {
      const { history, messages } = parseInputs(historyOrInput, inputs);
      return executeGenerate(
        boundModel,
        config,
        system,
        params,
        tools,
        toolStrategy,
        structure,
        history,
        messages
      );
    },

    stream(
      historyOrInput: Message[] | Thread | InferenceInput,
      ...inputs: InferenceInput[]
    ): StreamResult {
      // Check streaming capability only when stream() is actually used,
      // so non-streaming use of this instance never requires it.
      if (!capabilities.streaming) {
        throw new UPPError(
          `Provider '${provider.name}' does not support streaming`,
          'INVALID_REQUEST',
          provider.name,
          'llm'
        );
      }
      const { history, messages } = parseInputs(historyOrInput, inputs);
      return executeStream(
        boundModel,
        config,
        system,
        params,
        tools,
        toolStrategy,
        structure,
        history,
        messages
      );
    },
  };

  return instance;
}
134
-
135
- /**
136
- * Type guard to check if a value is a Message instance.
137
- * Uses instanceof for class instances, with fallback to timestamp check
138
- * for deserialized/reconstructed Message objects.
139
- */
140
- function isMessageInstance(value: unknown): value is Message {
141
- if (value instanceof Message) {
142
- return true;
143
- }
144
- // Fallback for deserialized Messages that aren't class instances:
145
- // Messages have 'timestamp' (Date), ContentBlocks don't
146
- if (
147
- typeof value === 'object' &&
148
- value !== null &&
149
- 'timestamp' in value &&
150
- 'type' in value &&
151
- 'id' in value
152
- ) {
153
- const obj = value as Record<string, unknown>;
154
- // Message types are 'user', 'assistant', 'tool_result'
155
- // ContentBlock types are 'text', 'image', 'audio', 'video', 'binary'
156
- const messageTypes = ['user', 'assistant', 'tool_result'];
157
- return messageTypes.includes(obj.type as string);
158
- }
159
- return false;
160
- }
161
-
162
- /**
163
- * Parse inputs to determine history and new messages
164
- */
165
- function parseInputs(
166
- historyOrInput: Message[] | Thread | InferenceInput,
167
- inputs: InferenceInput[]
168
- ): { history: Message[]; messages: Message[] } {
169
- // Check if it's a Thread first (has 'messages' array property)
170
- if (
171
- typeof historyOrInput === 'object' &&
172
- historyOrInput !== null &&
173
- 'messages' in historyOrInput &&
174
- Array.isArray((historyOrInput as Thread).messages)
175
- ) {
176
- const thread = historyOrInput as Thread;
177
- const newMessages = inputs.map(inputToMessage);
178
- return { history: [...thread.messages], messages: newMessages };
179
- }
180
-
181
- // Check if first arg is Message[] (history)
182
- if (Array.isArray(historyOrInput)) {
183
- // Empty array is empty history
184
- if (historyOrInput.length === 0) {
185
- const newMessages = inputs.map(inputToMessage);
186
- return { history: [], messages: newMessages };
187
- }
188
- const first = historyOrInput[0];
189
- if (isMessageInstance(first)) {
190
- // It's history (Message[])
191
- const newMessages = inputs.map(inputToMessage);
192
- return { history: historyOrInput as Message[], messages: newMessages };
193
- }
194
- }
195
-
196
- // It's input (no history) - could be string, single Message, or ContentBlock
197
- const allInputs = [historyOrInput as InferenceInput, ...inputs];
198
- const newMessages = allInputs.map(inputToMessage);
199
- return { history: [], messages: newMessages };
200
- }
201
-
202
- /**
203
- * Convert an InferenceInput to a Message
204
- */
205
- function inputToMessage(input: InferenceInput): Message {
206
- if (typeof input === 'string') {
207
- return new UserMessageClass(input);
208
- }
209
-
210
- // It's already a Message
211
- if ('type' in input && 'id' in input && 'timestamp' in input) {
212
- return input as Message;
213
- }
214
-
215
- // It's a ContentBlock - wrap in UserMessage
216
- const block = input as ContentBlock;
217
- if (block.type === 'text') {
218
- return new UserMessageClass((block as TextBlock).text);
219
- }
220
-
221
- return new UserMessageClass([block as any]);
222
- }
223
-
224
/**
 * Execute a non-streaming generate call with tool loop.
 *
 * Repeatedly calls `model.complete`, executing any requested tools and
 * feeding their results back, until the model stops calling tools, the
 * provider returns structured data, or the iteration budget is exhausted.
 *
 * @param model - Bound model to call.
 * @param config - Provider configuration forwarded on every request.
 * @param system - Optional system prompt.
 * @param params - Provider-specific inference parameters.
 * @param tools - Tools the model may call (capability-checked by llm()).
 * @param toolStrategy - Hooks and limits for the tool loop.
 * @param structure - Structured-output request; gates whether `data` is kept.
 * @param history - Prior conversation; excluded from the returned Turn.
 * @param newMessages - Messages added by this call.
 * @returns Turn containing only this call's messages, tool executions,
 *   aggregated usage, cycle count, and structured data (when requested).
 * @throws UPPError when input media is unsupported or the tool loop exceeds
 *   `maxIterations`.
 */
async function executeGenerate<TParams>(
  model: BoundLLMModel<TParams>,
  config: ProviderConfig,
  system: string | undefined,
  params: TParams | undefined,
  tools: Tool[] | undefined,
  toolStrategy: LLMOptions<TParams>['toolStrategy'],
  structure: LLMOptions<TParams>['structure'],
  history: Message[],
  newMessages: Message[]
): Promise<Turn> {
  // Validate media capabilities for all input messages (throws on mismatch)
  validateMediaCapabilities(
    [...history, ...newMessages],
    model.capabilities,
    model.provider.name
  );
  const maxIterations = toolStrategy?.maxIterations ?? DEFAULT_MAX_ITERATIONS;
  const allMessages: Message[] = [...history, ...newMessages];
  const toolExecutions: ToolExecution[] = [];
  const usages: TokenUsage[] = [];
  let cycles = 0;

  // Track structured data from responses (providers handle extraction)
  let structuredData: unknown;

  // Tool loop: the +1 allows one final model call after the last tool round
  while (cycles < maxIterations + 1) {
    cycles++;

    const request: LLMRequest<TParams> = {
      messages: allMessages,
      system,
      params,
      tools,
      structure,
      config,
    };

    const response = await model.complete(request);
    usages.push(response.usage);
    allMessages.push(response.message);

    // Track structured data from provider (if present)
    if (response.data !== undefined) {
      structuredData = response.data;
    }

    // Check for tool calls
    if (response.message.hasToolCalls && tools && tools.length > 0) {
      // If provider already extracted structured data, don't try to execute tool calls
      // (some providers use tool calls internally for structured output)
      if (response.data !== undefined) {
        break;
      }

      // Check if we've hit max iterations (subtract 1 because we already incremented)
      if (cycles >= maxIterations) {
        await toolStrategy?.onMaxIterations?.(maxIterations);
        throw new UPPError(
          `Tool execution exceeded maximum iterations (${maxIterations})`,
          'INVALID_REQUEST',
          model.provider.name,
          'llm'
        );
      }

      // Execute tools (in parallel; executions are recorded into toolExecutions)
      const results = await executeTools(
        response.message,
        tools,
        toolStrategy,
        toolExecutions
      );

      // Add tool results so the next model call can see them
      allMessages.push(new ToolResultMessage(results));

      continue;
    }

    // No tool calls - we're done
    break;
  }

  // Use structured data from provider if structure was requested
  const data = structure ? structuredData : undefined;

  return createTurn(
    allMessages.slice(history.length), // Only messages from this turn
    toolExecutions,
    aggregateUsage(usages),
    cycles,
    data
  );
}
323
-
324
/**
 * Execute a streaming generate call with tool loop.
 *
 * Mirrors executeGenerate but drives `model.stream`: events are forwarded
 * to the consumer as they arrive, and the Turn is assembled once the
 * generator finishes. The generator, the turn promise, and this function
 * share mutable state (allMessages, usages, toolExecutions, cycles); the
 * generator is the ONLY code path that calls the provider API.
 *
 * @returns StreamResult wrapping the event generator, a promise for the
 *   final Turn, and an AbortController whose signal is passed to the model.
 * @throws UPPError when input media is unsupported; the tool-iteration
 *   limit surfaces through the generator / turn promise instead.
 */
function executeStream<TParams>(
  model: BoundLLMModel<TParams>,
  config: ProviderConfig,
  system: string | undefined,
  params: TParams | undefined,
  tools: Tool[] | undefined,
  toolStrategy: LLMOptions<TParams>['toolStrategy'],
  structure: LLMOptions<TParams>['structure'],
  history: Message[],
  newMessages: Message[]
): StreamResult {
  // Validate media capabilities for all input messages (throws synchronously)
  validateMediaCapabilities(
    [...history, ...newMessages],
    model.capabilities,
    model.provider.name
  );

  const abortController = new AbortController();

  // Shared state between generator and turn promise
  const allMessages: Message[] = [...history, ...newMessages];
  const toolExecutions: ToolExecution[] = [];
  const usages: TokenUsage[] = [];
  let cycles = 0;
  let generatorError: Error | null = null;
  let structuredData: unknown; // Providers extract this

  // Deferred to signal when generator completes
  let resolveGenerator: () => void;
  let rejectGenerator: (error: Error) => void;
  const generatorDone = new Promise<void>((resolve, reject) => {
    resolveGenerator = resolve;
    rejectGenerator = reject;
  });

  const maxIterations = toolStrategy?.maxIterations ?? DEFAULT_MAX_ITERATIONS;

  // Create the async generator - this is the ONLY place that calls the API
  async function* generateStream(): AsyncGenerator<StreamEvent, void, unknown> {
    try {
      // Tool loop: the +1 allows one final model call after the last tool round
      while (cycles < maxIterations + 1) {
        cycles++;

        const request: LLMRequest<TParams> = {
          messages: allMessages,
          system,
          params,
          tools,
          structure,
          config,
          signal: abortController.signal,
        };

        const streamResult = model.stream(request);

        // Forward stream events to the consumer as they arrive
        for await (const event of streamResult) {
          yield event;
        }

        // Get the completed response once the provider stream ends
        const response = await streamResult.response;
        usages.push(response.usage);
        allMessages.push(response.message);

        // Track structured data from provider (if present)
        if (response.data !== undefined) {
          structuredData = response.data;
        }

        // Check for tool calls
        if (response.message.hasToolCalls && tools && tools.length > 0) {
          // If provider already extracted structured data, don't try to execute tool calls
          // (some providers use tool calls internally for structured output)
          if (response.data !== undefined) {
            break;
          }

          if (cycles >= maxIterations) {
            await toolStrategy?.onMaxIterations?.(maxIterations);
            throw new UPPError(
              `Tool execution exceeded maximum iterations (${maxIterations})`,
              'INVALID_REQUEST',
              model.provider.name,
              'llm'
            );
          }

          // Execute tools (in parallel; executions recorded into toolExecutions)
          const results = await executeTools(
            response.message,
            tools,
            toolStrategy,
            toolExecutions
          );

          // Add tool results so the next streamed call can see them
          allMessages.push(new ToolResultMessage(results));

          continue;
        }

        break;
      }
      resolveGenerator();
    } catch (error) {
      // Record the error for the turn promise, settle the deferred, and
      // rethrow so the stream consumer sees the failure too.
      generatorError = error as Error;
      rejectGenerator(error as Error);
      throw error;
    }
  }

  // Turn promise waits for the generator to complete, then builds the Turn
  const turnPromise = (async (): Promise<Turn> => {
    await generatorDone;

    if (generatorError) {
      throw generatorError;
    }

    // Use structured data from provider if structure was requested
    const data = structure ? structuredData : undefined;

    return createTurn(
      allMessages.slice(history.length),
      toolExecutions,
      aggregateUsage(usages),
      cycles,
      data
    );
  })();

  return createStreamResult(generateStream(), turnPromise, abortController);
}
462
-
463
- /**
464
- * Execute tools from an assistant message
465
- */
466
- async function executeTools(
467
- message: AssistantMessage,
468
- tools: Tool[],
469
- toolStrategy: LLMOptions<unknown>['toolStrategy'],
470
- executions: ToolExecution[]
471
- ): Promise<ToolResult[]> {
472
- const toolCalls = message.toolCalls ?? [];
473
- const results: ToolResult[] = [];
474
-
475
- // Build tool map
476
- const toolMap = new Map(tools.map((t) => [t.name, t]));
477
-
478
- // Execute tools (in parallel)
479
- const promises = toolCalls.map(async (call) => {
480
- const tool = toolMap.get(call.toolName);
481
- if (!tool) {
482
- return {
483
- toolCallId: call.toolCallId,
484
- result: `Tool '${call.toolName}' not found`,
485
- isError: true,
486
- };
487
- }
488
-
489
- const startTime = Date.now();
490
-
491
- // Notify strategy
492
- await toolStrategy?.onToolCall?.(tool, call.arguments);
493
-
494
- // Check before call
495
- if (toolStrategy?.onBeforeCall) {
496
- const shouldRun = await toolStrategy.onBeforeCall(tool, call.arguments);
497
- if (!shouldRun) {
498
- return {
499
- toolCallId: call.toolCallId,
500
- result: 'Tool execution skipped',
501
- isError: true,
502
- };
503
- }
504
- }
505
-
506
- // Check approval
507
- let approved = true;
508
- if (tool.approval) {
509
- try {
510
- approved = await tool.approval(call.arguments);
511
- } catch (error) {
512
- // Approval threw - propagate
513
- throw error;
514
- }
515
- }
516
-
517
- if (!approved) {
518
- const execution: ToolExecution = {
519
- toolName: tool.name,
520
- toolCallId: call.toolCallId,
521
- arguments: call.arguments,
522
- result: 'Tool execution denied',
523
- isError: true,
524
- duration: Date.now() - startTime,
525
- approved: false,
526
- };
527
- executions.push(execution);
528
-
529
- return {
530
- toolCallId: call.toolCallId,
531
- result: 'Tool execution denied by approval handler',
532
- isError: true,
533
- };
534
- }
535
-
536
- // Execute tool
537
- try {
538
- const result = await tool.run(call.arguments);
539
-
540
- await toolStrategy?.onAfterCall?.(tool, call.arguments, result);
541
-
542
- const execution: ToolExecution = {
543
- toolName: tool.name,
544
- toolCallId: call.toolCallId,
545
- arguments: call.arguments,
546
- result,
547
- isError: false,
548
- duration: Date.now() - startTime,
549
- approved,
550
- };
551
- executions.push(execution);
552
-
553
- return {
554
- toolCallId: call.toolCallId,
555
- result,
556
- isError: false,
557
- };
558
- } catch (error) {
559
- await toolStrategy?.onError?.(tool, call.arguments, error as Error);
560
-
561
- const errorMessage = error instanceof Error ? error.message : String(error);
562
-
563
- const execution: ToolExecution = {
564
- toolName: tool.name,
565
- toolCallId: call.toolCallId,
566
- arguments: call.arguments,
567
- result: errorMessage,
568
- isError: true,
569
- duration: Date.now() - startTime,
570
- approved,
571
- };
572
- executions.push(execution);
573
-
574
- return {
575
- toolCallId: call.toolCallId,
576
- result: errorMessage,
577
- isError: true,
578
- };
579
- }
580
- });
581
-
582
- results.push(...(await Promise.all(promises)));
583
- return results;
584
- }
585
-
586
- /**
587
- * Check if messages contain media that requires specific capabilities
588
- */
589
- function validateMediaCapabilities(
590
- messages: Message[],
591
- capabilities: LLMCapabilities,
592
- providerName: string
593
- ): void {
594
- for (const msg of messages) {
595
- if (!isUserMessage(msg)) continue;
596
-
597
- for (const block of msg.content) {
598
- if (block.type === 'image' && !capabilities.imageInput) {
599
- throw new UPPError(
600
- `Provider '${providerName}' does not support image input`,
601
- 'INVALID_REQUEST',
602
- providerName,
603
- 'llm'
604
- );
605
- }
606
- if (block.type === 'video' && !capabilities.videoInput) {
607
- throw new UPPError(
608
- `Provider '${providerName}' does not support video input`,
609
- 'INVALID_REQUEST',
610
- providerName,
611
- 'llm'
612
- );
613
- }
614
- if (block.type === 'audio' && !capabilities.audioInput) {
615
- throw new UPPError(
616
- `Provider '${providerName}' does not support audio input`,
617
- 'INVALID_REQUEST',
618
- providerName,
619
- 'llm'
620
- );
621
- }
622
- }
623
- }
624
- }
@@ -1,92 +0,0 @@
1
- import type {
2
- Provider,
3
- ModelReference,
4
- LLMHandler,
5
- EmbeddingHandler,
6
- ImageHandler,
7
- LLMProvider,
8
- EmbeddingProvider,
9
- ImageProvider,
10
- } from '../types/provider.ts';
11
-
12
/**
 * Options for creating a provider via createProvider().
 */
export interface CreateProviderOptions {
  /** Provider name; exposed as the read-only `name` of the returned factory. */
  name: string;
  /** Provider version string; exposed as the read-only `version` property. */
  version: string;
  /** Modality handlers — set only the modalities this provider supports. */
  modalities: {
    llm?: LLMHandler;
    embedding?: EmbeddingHandler;
    image?: ImageHandler;
  };
}
24
-
25
- /**
26
- * Create a provider factory function
27
- *
28
- * @typeParam TOptions - Provider-specific options type (defaults to unknown)
29
- * @param options - Provider configuration
30
- * @returns Provider function with modalities attached
31
- *
32
- * @example
33
- * ```ts
34
- * // Basic provider without options
35
- * const anthropic = createProvider({
36
- * name: 'anthropic',
37
- * version: '1.0.0',
38
- * modalities: { llm: createLLMHandler() },
39
- * });
40
- *
41
- * // Provider with custom options (typically needs custom factory)
42
- * interface MyProviderOptions { api?: 'v1' | 'v2' }
43
- * const myProvider = createProvider<MyProviderOptions>({
44
- * name: 'my-provider',
45
- * version: '1.0.0',
46
- * modalities: { llm: createLLMHandler() },
47
- * });
48
- * ```
49
- */
50
- export function createProvider<TOptions = unknown>(
51
- options: CreateProviderOptions
52
- ): Provider<TOptions> {
53
- // Create the base function that accepts optional provider-specific options
54
- const fn = function (modelId: string, _options?: TOptions): ModelReference<TOptions> {
55
- return { modelId, provider };
56
- };
57
-
58
- // Define properties, including overriding the read-only 'name' property
59
- Object.defineProperties(fn, {
60
- name: {
61
- value: options.name,
62
- writable: false,
63
- configurable: true,
64
- },
65
- version: {
66
- value: options.version,
67
- writable: false,
68
- configurable: true,
69
- },
70
- modalities: {
71
- value: options.modalities,
72
- writable: false,
73
- configurable: true,
74
- },
75
- });
76
-
77
- const provider = fn as Provider<TOptions>;
78
-
79
- // Inject provider reference into handlers so bind() can return
80
- // models with the correct provider reference (spec compliance)
81
- if (options.modalities.llm?._setProvider) {
82
- options.modalities.llm._setProvider(provider as unknown as LLMProvider);
83
- }
84
- if (options.modalities.embedding?._setProvider) {
85
- options.modalities.embedding._setProvider(provider as unknown as EmbeddingProvider);
86
- }
87
- if (options.modalities.image?._setProvider) {
88
- options.modalities.image._setProvider(provider as unknown as ImageProvider);
89
- }
90
-
91
- return provider;
92
- }
@@ -1,3 +0,0 @@
1
// Re-export from providers/google: this module is a thin alias entry point
// exposing the provider factory and its params type (type-only export).
export { google } from '../providers/google/index.ts';
export type { GoogleLLMParams } from '../providers/google/index.ts';