@providerprotocol/ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/LICENSE +21 -0
  2. package/README.md +84 -0
  3. package/dist/anthropic/index.d.ts +41 -0
  4. package/dist/anthropic/index.js +500 -0
  5. package/dist/anthropic/index.js.map +1 -0
  6. package/dist/chunk-CUCRF5W6.js +136 -0
  7. package/dist/chunk-CUCRF5W6.js.map +1 -0
  8. package/dist/chunk-FTFX2VET.js +424 -0
  9. package/dist/chunk-FTFX2VET.js.map +1 -0
  10. package/dist/chunk-QUUX4G7U.js +117 -0
  11. package/dist/chunk-QUUX4G7U.js.map +1 -0
  12. package/dist/chunk-Y6Q7JCNP.js +39 -0
  13. package/dist/chunk-Y6Q7JCNP.js.map +1 -0
  14. package/dist/google/index.d.ts +69 -0
  15. package/dist/google/index.js +517 -0
  16. package/dist/google/index.js.map +1 -0
  17. package/dist/http/index.d.ts +61 -0
  18. package/dist/http/index.js +43 -0
  19. package/dist/http/index.js.map +1 -0
  20. package/dist/index.d.ts +792 -0
  21. package/dist/index.js +898 -0
  22. package/dist/index.js.map +1 -0
  23. package/dist/openai/index.d.ts +204 -0
  24. package/dist/openai/index.js +1340 -0
  25. package/dist/openai/index.js.map +1 -0
  26. package/dist/provider-CUJWjgNl.d.ts +192 -0
  27. package/dist/retry-I2661_rv.d.ts +118 -0
  28. package/package.json +88 -0
  29. package/src/anthropic/index.ts +3 -0
  30. package/src/core/image.ts +188 -0
  31. package/src/core/llm.ts +619 -0
  32. package/src/core/provider.ts +92 -0
  33. package/src/google/index.ts +3 -0
  34. package/src/http/errors.ts +112 -0
  35. package/src/http/fetch.ts +210 -0
  36. package/src/http/index.ts +31 -0
  37. package/src/http/keys.ts +136 -0
  38. package/src/http/retry.ts +205 -0
  39. package/src/http/sse.ts +136 -0
  40. package/src/index.ts +32 -0
  41. package/src/openai/index.ts +9 -0
  42. package/src/providers/anthropic/index.ts +17 -0
  43. package/src/providers/anthropic/llm.ts +196 -0
  44. package/src/providers/anthropic/transform.ts +452 -0
  45. package/src/providers/anthropic/types.ts +213 -0
  46. package/src/providers/google/index.ts +17 -0
  47. package/src/providers/google/llm.ts +203 -0
  48. package/src/providers/google/transform.ts +487 -0
  49. package/src/providers/google/types.ts +214 -0
  50. package/src/providers/openai/index.ts +151 -0
  51. package/src/providers/openai/llm.completions.ts +201 -0
  52. package/src/providers/openai/llm.responses.ts +211 -0
  53. package/src/providers/openai/transform.completions.ts +628 -0
  54. package/src/providers/openai/transform.responses.ts +718 -0
  55. package/src/providers/openai/types.ts +711 -0
  56. package/src/types/content.ts +133 -0
  57. package/src/types/errors.ts +85 -0
  58. package/src/types/index.ts +105 -0
  59. package/src/types/llm.ts +211 -0
  60. package/src/types/messages.ts +182 -0
  61. package/src/types/provider.ts +195 -0
  62. package/src/types/schema.ts +58 -0
  63. package/src/types/stream.ts +146 -0
  64. package/src/types/thread.ts +226 -0
  65. package/src/types/tool.ts +88 -0
  66. package/src/types/turn.ts +118 -0
  67. package/src/utils/id.ts +28 -0
@@ -0,0 +1,619 @@
1
+ import type {
2
+ LLMOptions,
3
+ LLMInstance,
4
+ LLMRequest,
5
+ LLMResponse,
6
+ InferenceInput,
7
+ BoundLLMModel,
8
+ LLMCapabilities,
9
+ } from '../types/llm.ts';
10
+ import type { UserMessage, AssistantMessage } from '../types/messages.ts';
11
+ import type { ContentBlock, TextBlock } from '../types/content.ts';
12
+ import type { Tool, ToolExecution, ToolResult } from '../types/tool.ts';
13
+ import type { Turn, TokenUsage } from '../types/turn.ts';
14
+ import type { StreamResult, StreamEvent } from '../types/stream.ts';
15
+ import type { Thread } from '../types/thread.ts';
16
+ import type { ProviderConfig } from '../types/provider.ts';
17
+ import { UPPError } from '../types/errors.ts';
18
+ import {
19
+ Message,
20
+ UserMessage as UserMessageClass,
21
+ ToolResultMessage,
22
+ isUserMessage,
23
+ isAssistantMessage,
24
+ } from '../types/messages.ts';
25
+ import { createTurn, aggregateUsage, emptyUsage } from '../types/turn.ts';
26
+ import { createStreamResult } from '../types/stream.ts';
27
+ import { generateShortId } from '../utils/id.ts';
28
+
29
/**
 * Default maximum number of tool-execution iterations per turn.
 * Used when `toolStrategy.maxIterations` is not provided.
 */
const DEFAULT_MAX_ITERATIONS = 10;
33
+
34
/**
 * Create an LLM instance bound to a specific provider model.
 *
 * Validates at construction time that the referenced provider supports
 * the LLM modality, and that the bound model's capabilities cover the
 * requested features (structured output, tools). Streaming capability is
 * checked lazily, on the first `stream()` call.
 *
 * @param options - Model reference, provider config, params, system
 *   prompt, tools, tool strategy, and optional structured-output schema
 * @returns An `LLMInstance` exposing `generate()` and `stream()`
 * @throws UPPError with code 'INVALID_REQUEST' if the provider lacks the
 *   LLM modality, structured output, or tools when those are requested
 */
export function llm<TParams = unknown>(
  options: LLMOptions<TParams>
): LLMInstance<TParams> {
  const { model: modelRef, config = {}, params, system, tools, toolStrategy, structure } = options;

  // Validate that the provider supports LLM
  const provider = modelRef.provider;
  if (!provider.modalities.llm) {
    throw new UPPError(
      `Provider '${provider.name}' does not support LLM modality`,
      'INVALID_REQUEST',
      provider.name,
      'llm'
    );
  }

  // Bind the model. The cast narrows the handler's generic params type
  // to this instance's TParams.
  const boundModel = provider.modalities.llm.bind(modelRef.modelId) as BoundLLMModel<TParams>;

  // Validate capabilities at bind time (fail fast, not on first call)
  const capabilities = boundModel.capabilities;

  // Check for structured output capability
  if (structure && !capabilities.structuredOutput) {
    throw new UPPError(
      `Provider '${provider.name}' does not support structured output`,
      'INVALID_REQUEST',
      provider.name,
      'llm'
    );
  }

  // Check for tools capability
  if (tools && tools.length > 0 && !capabilities.tools) {
    throw new UPPError(
      `Provider '${provider.name}' does not support tools`,
      'INVALID_REQUEST',
      provider.name,
      'llm'
    );
  }

  // Build the instance. generate/stream close over the validated model
  // and the instance-level options.
  const instance: LLMInstance<TParams> = {
    model: boundModel,
    system,
    params,
    capabilities,

    async generate(
      historyOrInput: Message[] | Thread | InferenceInput,
      ...inputs: InferenceInput[]
    ): Promise<Turn> {
      const { history, messages } = parseInputs(historyOrInput, inputs);
      return executeGenerate(
        boundModel,
        config,
        system,
        params,
        tools,
        toolStrategy,
        structure,
        history,
        messages
      );
    },

    stream(
      historyOrInput: Message[] | Thread | InferenceInput,
      ...inputs: InferenceInput[]
    ): StreamResult {
      // Streaming capability is checked here rather than at bind time,
      // so non-streaming models can still use generate().
      if (!capabilities.streaming) {
        throw new UPPError(
          `Provider '${provider.name}' does not support streaming`,
          'INVALID_REQUEST',
          provider.name,
          'llm'
        );
      }
      const { history, messages } = parseInputs(historyOrInput, inputs);
      return executeStream(
        boundModel,
        config,
        system,
        params,
        tools,
        toolStrategy,
        structure,
        history,
        messages
      );
    },
  };

  return instance;
}
134
+
135
+ /**
136
+ * Type guard to check if a value is a Message instance.
137
+ * Uses instanceof for class instances, with fallback to timestamp check
138
+ * for deserialized/reconstructed Message objects.
139
+ */
140
+ function isMessageInstance(value: unknown): value is Message {
141
+ if (value instanceof Message) {
142
+ return true;
143
+ }
144
+ // Fallback for deserialized Messages that aren't class instances:
145
+ // Messages have 'timestamp' (Date), ContentBlocks don't
146
+ if (
147
+ typeof value === 'object' &&
148
+ value !== null &&
149
+ 'timestamp' in value &&
150
+ 'type' in value &&
151
+ 'id' in value
152
+ ) {
153
+ const obj = value as Record<string, unknown>;
154
+ // Message types are 'user', 'assistant', 'tool_result'
155
+ // ContentBlock types are 'text', 'image', 'audio', 'video', 'binary'
156
+ const messageTypes = ['user', 'assistant', 'tool_result'];
157
+ return messageTypes.includes(obj.type as string);
158
+ }
159
+ return false;
160
+ }
161
+
162
+ /**
163
+ * Parse inputs to determine history and new messages
164
+ */
165
+ function parseInputs(
166
+ historyOrInput: Message[] | Thread | InferenceInput,
167
+ inputs: InferenceInput[]
168
+ ): { history: Message[]; messages: Message[] } {
169
+ // Check if it's a Thread first (has 'messages' array property)
170
+ if (
171
+ typeof historyOrInput === 'object' &&
172
+ historyOrInput !== null &&
173
+ 'messages' in historyOrInput &&
174
+ Array.isArray((historyOrInput as Thread).messages)
175
+ ) {
176
+ const thread = historyOrInput as Thread;
177
+ const newMessages = inputs.map(inputToMessage);
178
+ return { history: [...thread.messages], messages: newMessages };
179
+ }
180
+
181
+ // Check if first arg is Message[] (history)
182
+ if (Array.isArray(historyOrInput) && historyOrInput.length > 0) {
183
+ const first = historyOrInput[0];
184
+ if (isMessageInstance(first)) {
185
+ // It's history (Message[])
186
+ const newMessages = inputs.map(inputToMessage);
187
+ return { history: historyOrInput as Message[], messages: newMessages };
188
+ }
189
+ }
190
+
191
+ // It's input (no history) - could be string, single Message, or ContentBlock
192
+ const allInputs = [historyOrInput as InferenceInput, ...inputs];
193
+ const newMessages = allInputs.map(inputToMessage);
194
+ return { history: [], messages: newMessages };
195
+ }
196
+
197
+ /**
198
+ * Convert an InferenceInput to a Message
199
+ */
200
+ function inputToMessage(input: InferenceInput): Message {
201
+ if (typeof input === 'string') {
202
+ return new UserMessageClass(input);
203
+ }
204
+
205
+ // It's already a Message
206
+ if ('type' in input && 'id' in input && 'timestamp' in input) {
207
+ return input as Message;
208
+ }
209
+
210
+ // It's a ContentBlock - wrap in UserMessage
211
+ const block = input as ContentBlock;
212
+ if (block.type === 'text') {
213
+ return new UserMessageClass((block as TextBlock).text);
214
+ }
215
+
216
+ return new UserMessageClass([block as any]);
217
+ }
218
+
219
/**
 * Execute a non-streaming generate call, running the tool loop to
 * completion.
 *
 * Each cycle issues one `model.complete()` call. If the response
 * contains tool calls (and tools are configured), the tools are executed
 * and their results appended, then the loop repeats. The loop ends when
 * a response has no tool calls, when the provider returned structured
 * data alongside tool calls (some providers implement structured output
 * via internal tool calls), or when the iteration cap is exceeded.
 *
 * @param model - Bound provider model
 * @param config - Provider configuration forwarded on every request
 * @param system - Optional system prompt
 * @param params - Provider-specific parameters
 * @param tools - Tools available to the model (may be undefined)
 * @param toolStrategy - Hooks + maxIterations override
 * @param structure - Structured-output schema, if requested
 * @param history - Prior messages (not included in the returned Turn)
 * @param newMessages - Messages added by this call
 * @returns Turn covering only this call's messages, executions, usage
 * @throws UPPError 'INVALID_REQUEST' when tool calls are still being
 *   produced at the iteration cap
 */
async function executeGenerate<TParams>(
  model: BoundLLMModel<TParams>,
  config: ProviderConfig,
  system: string | undefined,
  params: TParams | undefined,
  tools: Tool[] | undefined,
  toolStrategy: LLMOptions<TParams>['toolStrategy'],
  structure: LLMOptions<TParams>['structure'],
  history: Message[],
  newMessages: Message[]
): Promise<Turn> {
  // Validate media capabilities for all input messages (throws early,
  // before any API call is made)
  validateMediaCapabilities(
    [...history, ...newMessages],
    model.capabilities,
    model.provider.name
  );
  const maxIterations = toolStrategy?.maxIterations ?? DEFAULT_MAX_ITERATIONS;
  const allMessages: Message[] = [...history, ...newMessages];
  const toolExecutions: ToolExecution[] = [];
  const usages: TokenUsage[] = [];
  let cycles = 0;

  // Track structured data from responses (providers handle extraction)
  let structuredData: unknown;

  // Tool loop. The `+ 1` allows one final, tool-free completion after
  // maxIterations tool cycles.
  while (cycles < maxIterations + 1) {
    cycles++;

    const request: LLMRequest<TParams> = {
      messages: allMessages,
      system,
      params,
      tools,
      structure,
      config,
    };

    const response = await model.complete(request);
    usages.push(response.usage);
    allMessages.push(response.message);

    // Track structured data from provider (if present); later cycles
    // overwrite earlier ones
    if (response.data !== undefined) {
      structuredData = response.data;
    }

    // Check for tool calls
    if (response.message.hasToolCalls && tools && tools.length > 0) {
      // If provider already extracted structured data, don't try to execute
      // tool calls (some providers use tool calls internally for structured
      // output)
      if (response.data !== undefined) {
        break;
      }

      // Check if we've hit max iterations (subtract 1 because we already
      // incremented). onMaxIterations is awaited before throwing so the
      // strategy can observe/log the condition.
      if (cycles >= maxIterations) {
        await toolStrategy?.onMaxIterations?.(maxIterations);
        throw new UPPError(
          `Tool execution exceeded maximum iterations (${maxIterations})`,
          'INVALID_REQUEST',
          model.provider.name,
          'llm'
        );
      }

      // Execute tools (appends to toolExecutions as a side effect)
      const results = await executeTools(
        response.message,
        tools,
        toolStrategy,
        toolExecutions
      );

      // Add tool results so the next cycle's request includes them
      allMessages.push(new ToolResultMessage(results));

      continue;
    }

    // No tool calls - we're done
    break;
  }

  // Use structured data from provider only if structure was requested
  const data = structure ? structuredData : undefined;

  return createTurn(
    allMessages.slice(history.length), // Only messages from this turn
    toolExecutions,
    aggregateUsage(usages),
    cycles,
    data
  );
}
318
+
319
/**
 * Execute a streaming generate call with the same tool loop as
 * executeGenerate, but exposed as a StreamResult.
 *
 * The async generator is the ONLY code path that calls the provider
 * API; the returned Turn promise merely waits for the generator to
 * finish (via the `generatorDone` deferred) and then assembles the Turn
 * from the shared mutable state (allMessages/toolExecutions/usages).
 *
 * NOTE(review): `generatorDone` settles only inside the generator body,
 * so if a consumer never iterates the stream, the Turn promise appears
 * to never settle — confirm callers always drain the stream.
 *
 * @returns StreamResult wrapping the event generator, the Turn promise,
 *   and an AbortController whose signal is forwarded on every request
 */
function executeStream<TParams>(
  model: BoundLLMModel<TParams>,
  config: ProviderConfig,
  system: string | undefined,
  params: TParams | undefined,
  tools: Tool[] | undefined,
  toolStrategy: LLMOptions<TParams>['toolStrategy'],
  structure: LLMOptions<TParams>['structure'],
  history: Message[],
  newMessages: Message[]
): StreamResult {
  // Validate media capabilities for all input messages (throws before
  // any stream is opened)
  validateMediaCapabilities(
    [...history, ...newMessages],
    model.capabilities,
    model.provider.name
  );

  const abortController = new AbortController();

  // Shared state between generator and turn promise
  const allMessages: Message[] = [...history, ...newMessages];
  const toolExecutions: ToolExecution[] = [];
  const usages: TokenUsage[] = [];
  let cycles = 0;
  let generatorError: Error | null = null;
  let structuredData: unknown; // Providers extract this

  // Deferred to signal when generator completes (resolved/rejected only
  // from inside generateStream)
  let resolveGenerator: () => void;
  let rejectGenerator: (error: Error) => void;
  const generatorDone = new Promise<void>((resolve, reject) => {
    resolveGenerator = resolve;
    rejectGenerator = reject;
  });

  const maxIterations = toolStrategy?.maxIterations ?? DEFAULT_MAX_ITERATIONS;

  // Create the async generator - this is the ONLY place that calls the API
  async function* generateStream(): AsyncGenerator<StreamEvent, void, unknown> {
    try {
      // Same loop shape as executeGenerate; `+ 1` allows one final
      // tool-free completion after maxIterations tool cycles
      while (cycles < maxIterations + 1) {
        cycles++;

        const request: LLMRequest<TParams> = {
          messages: allMessages,
          system,
          params,
          tools,
          structure,
          config,
          signal: abortController.signal,
        };

        const streamResult = model.stream(request);

        // Forward stream events to the consumer as they arrive
        for await (const event of streamResult) {
          yield event;
        }

        // Get the aggregated response once the provider stream ends
        const response = await streamResult.response;
        usages.push(response.usage);
        allMessages.push(response.message);

        // Track structured data from provider (if present)
        if (response.data !== undefined) {
          structuredData = response.data;
        }

        // Check for tool calls
        if (response.message.hasToolCalls && tools && tools.length > 0) {
          // If provider already extracted structured data, don't try to
          // execute tool calls (some providers use tool calls internally
          // for structured output)
          if (response.data !== undefined) {
            break;
          }

          if (cycles >= maxIterations) {
            await toolStrategy?.onMaxIterations?.(maxIterations);
            throw new UPPError(
              `Tool execution exceeded maximum iterations (${maxIterations})`,
              'INVALID_REQUEST',
              model.provider.name,
              'llm'
            );
          }

          // Execute tools (appends to toolExecutions as a side effect)
          const results = await executeTools(
            response.message,
            tools,
            toolStrategy,
            toolExecutions
          );

          // Add tool results so the next cycle's request includes them
          allMessages.push(new ToolResultMessage(results));

          continue;
        }

        break;
      }
      resolveGenerator();
    } catch (error) {
      // Record the error for the turn promise, reject the deferred, and
      // rethrow so the stream consumer also sees the failure
      generatorError = error as Error;
      rejectGenerator(error as Error);
      throw error;
    }
  }

  // Turn promise waits for the generator to complete, then builds the Turn
  const turnPromise = (async (): Promise<Turn> => {
    await generatorDone;

    // Defensive: generatorDone rejects with the same error, so this is
    // normally unreachable; kept as a belt-and-braces check
    if (generatorError) {
      throw generatorError;
    }

    // Use structured data from provider only if structure was requested
    const data = structure ? structuredData : undefined;

    return createTurn(
      allMessages.slice(history.length),
      toolExecutions,
      aggregateUsage(usages),
      cycles,
      data
    );
  })();

  return createStreamResult(generateStream(), turnPromise, abortController);
}
457
+
458
+ /**
459
+ * Execute tools from an assistant message
460
+ */
461
+ async function executeTools(
462
+ message: AssistantMessage,
463
+ tools: Tool[],
464
+ toolStrategy: LLMOptions<unknown>['toolStrategy'],
465
+ executions: ToolExecution[]
466
+ ): Promise<ToolResult[]> {
467
+ const toolCalls = message.toolCalls ?? [];
468
+ const results: ToolResult[] = [];
469
+
470
+ // Build tool map
471
+ const toolMap = new Map(tools.map((t) => [t.name, t]));
472
+
473
+ // Execute tools (in parallel)
474
+ const promises = toolCalls.map(async (call) => {
475
+ const tool = toolMap.get(call.toolName);
476
+ if (!tool) {
477
+ return {
478
+ toolCallId: call.toolCallId,
479
+ result: `Tool '${call.toolName}' not found`,
480
+ isError: true,
481
+ };
482
+ }
483
+
484
+ const startTime = Date.now();
485
+
486
+ // Notify strategy
487
+ await toolStrategy?.onToolCall?.(tool, call.arguments);
488
+
489
+ // Check before call
490
+ if (toolStrategy?.onBeforeCall) {
491
+ const shouldRun = await toolStrategy.onBeforeCall(tool, call.arguments);
492
+ if (!shouldRun) {
493
+ return {
494
+ toolCallId: call.toolCallId,
495
+ result: 'Tool execution skipped',
496
+ isError: true,
497
+ };
498
+ }
499
+ }
500
+
501
+ // Check approval
502
+ let approved = true;
503
+ if (tool.approval) {
504
+ try {
505
+ approved = await tool.approval(call.arguments);
506
+ } catch (error) {
507
+ // Approval threw - propagate
508
+ throw error;
509
+ }
510
+ }
511
+
512
+ if (!approved) {
513
+ const execution: ToolExecution = {
514
+ toolName: tool.name,
515
+ toolCallId: call.toolCallId,
516
+ arguments: call.arguments,
517
+ result: 'Tool execution denied',
518
+ isError: true,
519
+ duration: Date.now() - startTime,
520
+ approved: false,
521
+ };
522
+ executions.push(execution);
523
+
524
+ return {
525
+ toolCallId: call.toolCallId,
526
+ result: 'Tool execution denied by approval handler',
527
+ isError: true,
528
+ };
529
+ }
530
+
531
+ // Execute tool
532
+ try {
533
+ const result = await tool.run(call.arguments);
534
+
535
+ await toolStrategy?.onAfterCall?.(tool, call.arguments, result);
536
+
537
+ const execution: ToolExecution = {
538
+ toolName: tool.name,
539
+ toolCallId: call.toolCallId,
540
+ arguments: call.arguments,
541
+ result,
542
+ isError: false,
543
+ duration: Date.now() - startTime,
544
+ approved,
545
+ };
546
+ executions.push(execution);
547
+
548
+ return {
549
+ toolCallId: call.toolCallId,
550
+ result,
551
+ isError: false,
552
+ };
553
+ } catch (error) {
554
+ await toolStrategy?.onError?.(tool, call.arguments, error as Error);
555
+
556
+ const errorMessage = error instanceof Error ? error.message : String(error);
557
+
558
+ const execution: ToolExecution = {
559
+ toolName: tool.name,
560
+ toolCallId: call.toolCallId,
561
+ arguments: call.arguments,
562
+ result: errorMessage,
563
+ isError: true,
564
+ duration: Date.now() - startTime,
565
+ approved,
566
+ };
567
+ executions.push(execution);
568
+
569
+ return {
570
+ toolCallId: call.toolCallId,
571
+ result: errorMessage,
572
+ isError: true,
573
+ };
574
+ }
575
+ });
576
+
577
+ results.push(...(await Promise.all(promises)));
578
+ return results;
579
+ }
580
+
581
+ /**
582
+ * Check if messages contain media that requires specific capabilities
583
+ */
584
+ function validateMediaCapabilities(
585
+ messages: Message[],
586
+ capabilities: LLMCapabilities,
587
+ providerName: string
588
+ ): void {
589
+ for (const msg of messages) {
590
+ if (!isUserMessage(msg)) continue;
591
+
592
+ for (const block of msg.content) {
593
+ if (block.type === 'image' && !capabilities.imageInput) {
594
+ throw new UPPError(
595
+ `Provider '${providerName}' does not support image input`,
596
+ 'INVALID_REQUEST',
597
+ providerName,
598
+ 'llm'
599
+ );
600
+ }
601
+ if (block.type === 'video' && !capabilities.videoInput) {
602
+ throw new UPPError(
603
+ `Provider '${providerName}' does not support video input`,
604
+ 'INVALID_REQUEST',
605
+ providerName,
606
+ 'llm'
607
+ );
608
+ }
609
+ if (block.type === 'audio' && !capabilities.audioInput) {
610
+ throw new UPPError(
611
+ `Provider '${providerName}' does not support audio input`,
612
+ 'INVALID_REQUEST',
613
+ providerName,
614
+ 'llm'
615
+ );
616
+ }
617
+ }
618
+ }
619
+ }
@@ -0,0 +1,92 @@
1
+ import type {
2
+ Provider,
3
+ ModelReference,
4
+ LLMHandler,
5
+ EmbeddingHandler,
6
+ ImageHandler,
7
+ LLMProvider,
8
+ EmbeddingProvider,
9
+ ImageProvider,
10
+ } from '../types/provider.ts';
11
+
12
+ /**
13
+ * Options for creating a provider
14
+ */
15
+ export interface CreateProviderOptions {
16
+ name: string;
17
+ version: string;
18
+ modalities: {
19
+ llm?: LLMHandler;
20
+ embedding?: EmbeddingHandler;
21
+ image?: ImageHandler;
22
+ };
23
+ }
24
+
25
+ /**
26
+ * Create a provider factory function
27
+ *
28
+ * @typeParam TOptions - Provider-specific options type (defaults to unknown)
29
+ * @param options - Provider configuration
30
+ * @returns Provider function with modalities attached
31
+ *
32
+ * @example
33
+ * ```ts
34
+ * // Basic provider without options
35
+ * const anthropic = createProvider({
36
+ * name: 'anthropic',
37
+ * version: '1.0.0',
38
+ * modalities: { llm: createLLMHandler() },
39
+ * });
40
+ *
41
+ * // Provider with custom options (typically needs custom factory)
42
+ * interface MyProviderOptions { api?: 'v1' | 'v2' }
43
+ * const myProvider = createProvider<MyProviderOptions>({
44
+ * name: 'my-provider',
45
+ * version: '1.0.0',
46
+ * modalities: { llm: createLLMHandler() },
47
+ * });
48
+ * ```
49
+ */
50
+ export function createProvider<TOptions = unknown>(
51
+ options: CreateProviderOptions
52
+ ): Provider<TOptions> {
53
+ // Create the base function that accepts optional provider-specific options
54
+ const fn = function (modelId: string, _options?: TOptions): ModelReference<TOptions> {
55
+ return { modelId, provider };
56
+ };
57
+
58
+ // Define properties, including overriding the read-only 'name' property
59
+ Object.defineProperties(fn, {
60
+ name: {
61
+ value: options.name,
62
+ writable: false,
63
+ configurable: true,
64
+ },
65
+ version: {
66
+ value: options.version,
67
+ writable: false,
68
+ configurable: true,
69
+ },
70
+ modalities: {
71
+ value: options.modalities,
72
+ writable: false,
73
+ configurable: true,
74
+ },
75
+ });
76
+
77
+ const provider = fn as Provider<TOptions>;
78
+
79
+ // Inject provider reference into handlers so bind() can return
80
+ // models with the correct provider reference (spec compliance)
81
+ if (options.modalities.llm?._setProvider) {
82
+ options.modalities.llm._setProvider(provider as unknown as LLMProvider);
83
+ }
84
+ if (options.modalities.embedding?._setProvider) {
85
+ options.modalities.embedding._setProvider(provider as unknown as EmbeddingProvider);
86
+ }
87
+ if (options.modalities.image?._setProvider) {
88
+ options.modalities.image._setProvider(provider as unknown as ImageProvider);
89
+ }
90
+
91
+ return provider;
92
+ }
@@ -0,0 +1,3 @@
1
+ // Re-export from providers/google
2
+ export { google } from '../providers/google/index.ts';
3
+ export type { GoogleLLMParams } from '../providers/google/index.ts';