@providerprotocol/ai 0.0.11 → 0.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/dist/anthropic/index.d.ts +51 -15
  2. package/dist/anthropic/index.js +54 -19
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-SUNYWHTH.js → chunk-MOU4U3PO.js} +55 -3
  5. package/dist/chunk-MOU4U3PO.js.map +1 -0
  6. package/dist/{chunk-Y6Q7JCNP.js → chunk-MSR5P65T.js} +1 -1
  7. package/dist/chunk-MSR5P65T.js.map +1 -0
  8. package/dist/{chunk-W4BB4BG2.js → chunk-SVYROCLD.js} +31 -11
  9. package/dist/chunk-SVYROCLD.js.map +1 -0
  10. package/dist/chunk-U4JJC2YX.js +234 -0
  11. package/dist/chunk-U4JJC2YX.js.map +1 -0
  12. package/dist/{chunk-X5G4EHL7.js → chunk-Z7RBRCRN.js} +1 -1
  13. package/dist/chunk-Z7RBRCRN.js.map +1 -0
  14. package/dist/google/index.d.ts +376 -7
  15. package/dist/google/index.js +127 -15
  16. package/dist/google/index.js.map +1 -1
  17. package/dist/http/index.d.ts +222 -25
  18. package/dist/http/index.js +3 -3
  19. package/dist/index.d.ts +1482 -198
  20. package/dist/index.js +233 -49
  21. package/dist/index.js.map +1 -1
  22. package/dist/ollama/index.d.ts +92 -20
  23. package/dist/ollama/index.js +17 -7
  24. package/dist/ollama/index.js.map +1 -1
  25. package/dist/openai/index.d.ts +340 -61
  26. package/dist/openai/index.js +57 -15
  27. package/dist/openai/index.js.map +1 -1
  28. package/dist/openrouter/index.d.ts +107 -51
  29. package/dist/openrouter/index.js +36 -8
  30. package/dist/openrouter/index.js.map +1 -1
  31. package/dist/provider-mKkz7Q9U.d.ts +488 -0
  32. package/dist/retry-Dh70lgr0.d.ts +508 -0
  33. package/dist/xai/index.d.ts +97 -22
  34. package/dist/xai/index.js +55 -19
  35. package/dist/xai/index.js.map +1 -1
  36. package/package.json +8 -12
  37. package/dist/chunk-CUCRF5W6.js +0 -136
  38. package/dist/chunk-CUCRF5W6.js.map +0 -1
  39. package/dist/chunk-SUNYWHTH.js.map +0 -1
  40. package/dist/chunk-W4BB4BG2.js.map +0 -1
  41. package/dist/chunk-X5G4EHL7.js.map +0 -1
  42. package/dist/chunk-Y6Q7JCNP.js.map +0 -1
  43. package/dist/provider-CUJWjgNl.d.ts +0 -192
  44. package/dist/retry-I2661_rv.d.ts +0 -118
  45. package/src/anthropic/index.ts +0 -3
  46. package/src/core/image.ts +0 -188
  47. package/src/core/llm.ts +0 -650
  48. package/src/core/provider.ts +0 -92
  49. package/src/google/index.ts +0 -3
  50. package/src/http/errors.ts +0 -112
  51. package/src/http/fetch.ts +0 -210
  52. package/src/http/index.ts +0 -31
  53. package/src/http/keys.ts +0 -136
  54. package/src/http/retry.ts +0 -205
  55. package/src/http/sse.ts +0 -136
  56. package/src/index.ts +0 -32
  57. package/src/ollama/index.ts +0 -3
  58. package/src/openai/index.ts +0 -39
  59. package/src/openrouter/index.ts +0 -11
  60. package/src/providers/anthropic/index.ts +0 -17
  61. package/src/providers/anthropic/llm.ts +0 -196
  62. package/src/providers/anthropic/transform.ts +0 -434
  63. package/src/providers/anthropic/types.ts +0 -213
  64. package/src/providers/google/index.ts +0 -17
  65. package/src/providers/google/llm.ts +0 -203
  66. package/src/providers/google/transform.ts +0 -447
  67. package/src/providers/google/types.ts +0 -214
  68. package/src/providers/ollama/index.ts +0 -43
  69. package/src/providers/ollama/llm.ts +0 -272
  70. package/src/providers/ollama/transform.ts +0 -434
  71. package/src/providers/ollama/types.ts +0 -260
  72. package/src/providers/openai/index.ts +0 -186
  73. package/src/providers/openai/llm.completions.ts +0 -201
  74. package/src/providers/openai/llm.responses.ts +0 -211
  75. package/src/providers/openai/transform.completions.ts +0 -561
  76. package/src/providers/openai/transform.responses.ts +0 -708
  77. package/src/providers/openai/types.ts +0 -1249
  78. package/src/providers/openrouter/index.ts +0 -177
  79. package/src/providers/openrouter/llm.completions.ts +0 -201
  80. package/src/providers/openrouter/llm.responses.ts +0 -211
  81. package/src/providers/openrouter/transform.completions.ts +0 -538
  82. package/src/providers/openrouter/transform.responses.ts +0 -742
  83. package/src/providers/openrouter/types.ts +0 -717
  84. package/src/providers/xai/index.ts +0 -223
  85. package/src/providers/xai/llm.completions.ts +0 -201
  86. package/src/providers/xai/llm.messages.ts +0 -195
  87. package/src/providers/xai/llm.responses.ts +0 -211
  88. package/src/providers/xai/transform.completions.ts +0 -565
  89. package/src/providers/xai/transform.messages.ts +0 -448
  90. package/src/providers/xai/transform.responses.ts +0 -678
  91. package/src/providers/xai/types.ts +0 -938
  92. package/src/types/content.ts +0 -133
  93. package/src/types/errors.ts +0 -85
  94. package/src/types/index.ts +0 -105
  95. package/src/types/llm.ts +0 -211
  96. package/src/types/messages.ts +0 -205
  97. package/src/types/provider.ts +0 -195
  98. package/src/types/schema.ts +0 -58
  99. package/src/types/stream.ts +0 -188
  100. package/src/types/thread.ts +0 -226
  101. package/src/types/tool.ts +0 -88
  102. package/src/types/turn.ts +0 -118
  103. package/src/utils/id.ts +0 -28
  104. package/src/xai/index.ts +0 -41
package/src/core/llm.ts DELETED
@@ -1,650 +0,0 @@
- import type {
-   LLMOptions,
-   LLMInstance,
-   LLMRequest,
-   LLMResponse,
-   InferenceInput,
-   BoundLLMModel,
-   LLMCapabilities,
- } from '../types/llm.ts';
- import type { UserMessage, AssistantMessage } from '../types/messages.ts';
- import type { ContentBlock, TextBlock } from '../types/content.ts';
- import type { Tool, ToolExecution, ToolResult } from '../types/tool.ts';
- import type { Turn, TokenUsage } from '../types/turn.ts';
- import type { StreamResult, StreamEvent } from '../types/stream.ts';
- import type { Thread } from '../types/thread.ts';
- import type { ProviderConfig } from '../types/provider.ts';
- import { UPPError } from '../types/errors.ts';
- import {
-   Message,
-   UserMessage as UserMessageClass,
-   ToolResultMessage,
-   isUserMessage,
-   isAssistantMessage,
- } from '../types/messages.ts';
- import { createTurn, aggregateUsage, emptyUsage } from '../types/turn.ts';
- import {
-   createStreamResult,
-   toolExecutionStart,
-   toolExecutionEnd,
- } from '../types/stream.ts';
- import { generateShortId } from '../utils/id.ts';
-
- /**
-  * Default maximum iterations for tool execution
-  */
- const DEFAULT_MAX_ITERATIONS = 10;
-
- /**
-  * Create an LLM instance
-  */
- export function llm<TParams = unknown>(
-   options: LLMOptions<TParams>
- ): LLMInstance<TParams> {
-   const { model: modelRef, config = {}, params, system, tools, toolStrategy, structure } = options;
-
-   // Validate that the provider supports LLM
-   const provider = modelRef.provider;
-   if (!provider.modalities.llm) {
-     throw new UPPError(
-       `Provider '${provider.name}' does not support LLM modality`,
-       'INVALID_REQUEST',
-       provider.name,
-       'llm'
-     );
-   }
-
-   // Bind the model
-   const boundModel = provider.modalities.llm.bind(modelRef.modelId) as BoundLLMModel<TParams>;
-
-   // Validate capabilities at bind time
-   const capabilities = boundModel.capabilities;
-
-   // Check for structured output capability
-   if (structure && !capabilities.structuredOutput) {
-     throw new UPPError(
-       `Provider '${provider.name}' does not support structured output`,
-       'INVALID_REQUEST',
-       provider.name,
-       'llm'
-     );
-   }
-
-   // Check for tools capability
-   if (tools && tools.length > 0 && !capabilities.tools) {
-     throw new UPPError(
-       `Provider '${provider.name}' does not support tools`,
-       'INVALID_REQUEST',
-       provider.name,
-       'llm'
-     );
-   }
-
-   // Build the instance
-   const instance: LLMInstance<TParams> = {
-     model: boundModel,
-     system,
-     params,
-     capabilities,
-
-     async generate(
-       historyOrInput: Message[] | Thread | InferenceInput,
-       ...inputs: InferenceInput[]
-     ): Promise<Turn> {
-       const { history, messages } = parseInputs(historyOrInput, inputs);
-       return executeGenerate(
-         boundModel,
-         config,
-         system,
-         params,
-         tools,
-         toolStrategy,
-         structure,
-         history,
-         messages
-       );
-     },
-
-     stream(
-       historyOrInput: Message[] | Thread | InferenceInput,
-       ...inputs: InferenceInput[]
-     ): StreamResult {
-       // Check streaming capability
-       if (!capabilities.streaming) {
-         throw new UPPError(
-           `Provider '${provider.name}' does not support streaming`,
-           'INVALID_REQUEST',
-           provider.name,
-           'llm'
-         );
-       }
-       const { history, messages } = parseInputs(historyOrInput, inputs);
-       return executeStream(
-         boundModel,
-         config,
-         system,
-         params,
-         tools,
-         toolStrategy,
-         structure,
-         history,
-         messages
-       );
-     },
-   };
-
-   return instance;
- }
-
- /**
-  * Type guard to check if a value is a Message instance.
-  * Uses instanceof for class instances, with fallback to timestamp check
-  * for deserialized/reconstructed Message objects.
-  */
- function isMessageInstance(value: unknown): value is Message {
-   if (value instanceof Message) {
-     return true;
-   }
-   // Fallback for deserialized Messages that aren't class instances:
-   // Messages have 'timestamp' (Date), ContentBlocks don't
-   if (
-     typeof value === 'object' &&
-     value !== null &&
-     'timestamp' in value &&
-     'type' in value &&
-     'id' in value
-   ) {
-     const obj = value as Record<string, unknown>;
-     // Message types are 'user', 'assistant', 'tool_result'
-     // ContentBlock types are 'text', 'image', 'audio', 'video', 'binary'
-     const messageTypes = ['user', 'assistant', 'tool_result'];
-     return messageTypes.includes(obj.type as string);
-   }
-   return false;
- }
-
- /**
-  * Parse inputs to determine history and new messages
-  */
- function parseInputs(
-   historyOrInput: Message[] | Thread | InferenceInput,
-   inputs: InferenceInput[]
- ): { history: Message[]; messages: Message[] } {
-   // Check if it's a Thread first (has 'messages' array property)
-   if (
-     typeof historyOrInput === 'object' &&
-     historyOrInput !== null &&
-     'messages' in historyOrInput &&
-     Array.isArray((historyOrInput as Thread).messages)
-   ) {
-     const thread = historyOrInput as Thread;
-     const newMessages = inputs.map(inputToMessage);
-     return { history: [...thread.messages], messages: newMessages };
-   }
-
-   // Check if first arg is Message[] (history)
-   if (Array.isArray(historyOrInput)) {
-     // Empty array is empty history
-     if (historyOrInput.length === 0) {
-       const newMessages = inputs.map(inputToMessage);
-       return { history: [], messages: newMessages };
-     }
-     const first = historyOrInput[0];
-     if (isMessageInstance(first)) {
-       // It's history (Message[])
-       const newMessages = inputs.map(inputToMessage);
-       return { history: historyOrInput as Message[], messages: newMessages };
-     }
-   }
-
-   // It's input (no history) - could be string, single Message, or ContentBlock
-   const allInputs = [historyOrInput as InferenceInput, ...inputs];
-   const newMessages = allInputs.map(inputToMessage);
-   return { history: [], messages: newMessages };
- }
-
- /**
-  * Convert an InferenceInput to a Message
-  */
- function inputToMessage(input: InferenceInput): Message {
-   if (typeof input === 'string') {
-     return new UserMessageClass(input);
-   }
-
-   // It's already a Message
-   if ('type' in input && 'id' in input && 'timestamp' in input) {
-     return input as Message;
-   }
-
-   // It's a ContentBlock - wrap in UserMessage
-   const block = input as ContentBlock;
-   if (block.type === 'text') {
-     return new UserMessageClass((block as TextBlock).text);
-   }
-
-   return new UserMessageClass([block as any]);
- }
-
- /**
-  * Execute a non-streaming generate call with tool loop
-  */
- async function executeGenerate<TParams>(
-   model: BoundLLMModel<TParams>,
-   config: ProviderConfig,
-   system: string | undefined,
-   params: TParams | undefined,
-   tools: Tool[] | undefined,
-   toolStrategy: LLMOptions<TParams>['toolStrategy'],
-   structure: LLMOptions<TParams>['structure'],
-   history: Message[],
-   newMessages: Message[]
- ): Promise<Turn> {
-   // Validate media capabilities for all input messages
-   validateMediaCapabilities(
-     [...history, ...newMessages],
-     model.capabilities,
-     model.provider.name
-   );
-   const maxIterations = toolStrategy?.maxIterations ?? DEFAULT_MAX_ITERATIONS;
-   const allMessages: Message[] = [...history, ...newMessages];
-   const toolExecutions: ToolExecution[] = [];
-   const usages: TokenUsage[] = [];
-   let cycles = 0;
-
-   // Track structured data from responses (providers handle extraction)
-   let structuredData: unknown;
-
-   // Tool loop
-   while (cycles < maxIterations + 1) {
-     cycles++;
-
-     const request: LLMRequest<TParams> = {
-       messages: allMessages,
-       system,
-       params,
-       tools,
-       structure,
-       config,
-     };
-
-     const response = await model.complete(request);
-     usages.push(response.usage);
-     allMessages.push(response.message);
-
-     // Track structured data from provider (if present)
-     if (response.data !== undefined) {
-       structuredData = response.data;
-     }
-
-     // Check for tool calls
-     if (response.message.hasToolCalls && tools && tools.length > 0) {
-       // If provider already extracted structured data, don't try to execute tool calls
-       // (some providers use tool calls internally for structured output)
-       if (response.data !== undefined) {
-         break;
-       }
-
-       // Check if we've hit max iterations (subtract 1 because we already incremented)
-       if (cycles >= maxIterations) {
-         await toolStrategy?.onMaxIterations?.(maxIterations);
-         throw new UPPError(
-           `Tool execution exceeded maximum iterations (${maxIterations})`,
-           'INVALID_REQUEST',
-           model.provider.name,
-           'llm'
-         );
-       }
-
-       // Execute tools
-       const results = await executeTools(
-         response.message,
-         tools,
-         toolStrategy,
-         toolExecutions
-       );
-
-       // Add tool results
-       allMessages.push(new ToolResultMessage(results));
-
-       continue;
-     }
-
-     // No tool calls - we're done
-     break;
-   }
-
-   // Use structured data from provider if structure was requested
-   const data = structure ? structuredData : undefined;
-
-   return createTurn(
-     allMessages.slice(history.length), // Only messages from this turn
-     toolExecutions,
-     aggregateUsage(usages),
-     cycles,
-     data
-   );
- }
-
- /**
-  * Execute a streaming generate call with tool loop
-  */
- function executeStream<TParams>(
-   model: BoundLLMModel<TParams>,
-   config: ProviderConfig,
-   system: string | undefined,
-   params: TParams | undefined,
-   tools: Tool[] | undefined,
-   toolStrategy: LLMOptions<TParams>['toolStrategy'],
-   structure: LLMOptions<TParams>['structure'],
-   history: Message[],
-   newMessages: Message[]
- ): StreamResult {
-   // Validate media capabilities for all input messages
-   validateMediaCapabilities(
-     [...history, ...newMessages],
-     model.capabilities,
-     model.provider.name
-   );
-
-   const abortController = new AbortController();
-
-   // Shared state between generator and turn promise
-   const allMessages: Message[] = [...history, ...newMessages];
-   const toolExecutions: ToolExecution[] = [];
-   const usages: TokenUsage[] = [];
-   let cycles = 0;
-   let generatorError: Error | null = null;
-   let structuredData: unknown; // Providers extract this
-
-   // Deferred to signal when generator completes
-   let resolveGenerator: () => void;
-   let rejectGenerator: (error: Error) => void;
-   const generatorDone = new Promise<void>((resolve, reject) => {
-     resolveGenerator = resolve;
-     rejectGenerator = reject;
-   });
-
-   const maxIterations = toolStrategy?.maxIterations ?? DEFAULT_MAX_ITERATIONS;
-
-   // Create the async generator - this is the ONLY place that calls the API
-   async function* generateStream(): AsyncGenerator<StreamEvent, void, unknown> {
-     try {
-       while (cycles < maxIterations + 1) {
-         cycles++;
-
-         const request: LLMRequest<TParams> = {
-           messages: allMessages,
-           system,
-           params,
-           tools,
-           structure,
-           config,
-           signal: abortController.signal,
-         };
-
-         const streamResult = model.stream(request);
-
-         // Forward stream events
-         for await (const event of streamResult) {
-           yield event;
-         }
-
-         // Get the response
-         const response = await streamResult.response;
-         usages.push(response.usage);
-         allMessages.push(response.message);
-
-         // Track structured data from provider (if present)
-         if (response.data !== undefined) {
-           structuredData = response.data;
-         }
-
-         // Check for tool calls
-         if (response.message.hasToolCalls && tools && tools.length > 0) {
-           // If provider already extracted structured data, don't try to execute tool calls
-           // (some providers use tool calls internally for structured output)
-           if (response.data !== undefined) {
-             break;
-           }
-
-           if (cycles >= maxIterations) {
-             await toolStrategy?.onMaxIterations?.(maxIterations);
-             throw new UPPError(
-               `Tool execution exceeded maximum iterations (${maxIterations})`,
-               'INVALID_REQUEST',
-               model.provider.name,
-               'llm'
-             );
-           }
-
-           // Execute tools with event emission
-           const toolEvents: StreamEvent[] = [];
-           const results = await executeTools(
-             response.message,
-             tools,
-             toolStrategy,
-             toolExecutions,
-             (event) => toolEvents.push(event)
-           );
-
-           // Yield tool execution events
-           for (const event of toolEvents) {
-             yield event;
-           }
-
-           // Add tool results
-           allMessages.push(new ToolResultMessage(results));
-
-           continue;
-         }
-
-         break;
-       }
-       resolveGenerator();
-     } catch (error) {
-       generatorError = error as Error;
-       rejectGenerator(error as Error);
-       throw error;
-     }
-   }
-
-   // Turn promise waits for the generator to complete, then builds the Turn
-   const turnPromise = (async (): Promise<Turn> => {
-     await generatorDone;
-
-     if (generatorError) {
-       throw generatorError;
-     }
-
-     // Use structured data from provider if structure was requested
-     const data = structure ? structuredData : undefined;
-
-     return createTurn(
-       allMessages.slice(history.length),
-       toolExecutions,
-       aggregateUsage(usages),
-       cycles,
-       data
-     );
-   })();
-
-   return createStreamResult(generateStream(), turnPromise, abortController);
- }
-
- /**
-  * Execute tools from an assistant message
-  */
- async function executeTools(
-   message: AssistantMessage,
-   tools: Tool[],
-   toolStrategy: LLMOptions<unknown>['toolStrategy'],
-   executions: ToolExecution[],
-   onEvent?: (event: StreamEvent) => void
- ): Promise<ToolResult[]> {
-   const toolCalls = message.toolCalls ?? [];
-   const results: ToolResult[] = [];
-
-   // Build tool map
-   const toolMap = new Map(tools.map((t) => [t.name, t]));
-
-   // Execute tools (in parallel)
-   const promises = toolCalls.map(async (call, index) => {
-     const tool = toolMap.get(call.toolName);
-     if (!tool) {
-       return {
-         toolCallId: call.toolCallId,
-         result: `Tool '${call.toolName}' not found`,
-         isError: true,
-       };
-     }
-
-     const startTime = Date.now();
-
-     // Emit start event
-     onEvent?.(toolExecutionStart(call.toolCallId, tool.name, startTime, index));
-
-     // Notify strategy
-     await toolStrategy?.onToolCall?.(tool, call.arguments);
-
-     // Check before call
-     if (toolStrategy?.onBeforeCall) {
-       const shouldRun = await toolStrategy.onBeforeCall(tool, call.arguments);
-       if (!shouldRun) {
-         const endTime = Date.now();
-         onEvent?.(toolExecutionEnd(call.toolCallId, tool.name, 'Tool execution skipped', true, endTime, index));
-         return {
-           toolCallId: call.toolCallId,
-           result: 'Tool execution skipped',
-           isError: true,
-         };
-       }
-     }
-
-     // Check approval
-     let approved = true;
-     if (tool.approval) {
-       try {
-         approved = await tool.approval(call.arguments);
-       } catch (error) {
-         // Approval threw - propagate
-         throw error;
-       }
-     }
-
-     if (!approved) {
-       const endTime = Date.now();
-       const execution: ToolExecution = {
-         toolName: tool.name,
-         toolCallId: call.toolCallId,
-         arguments: call.arguments,
-         result: 'Tool execution denied',
-         isError: true,
-         duration: endTime - startTime,
-         approved: false,
-       };
-       executions.push(execution);
-
-       onEvent?.(toolExecutionEnd(call.toolCallId, tool.name, 'Tool execution denied by approval handler', true, endTime, index));
-
-       return {
-         toolCallId: call.toolCallId,
-         result: 'Tool execution denied by approval handler',
-         isError: true,
-       };
-     }
-
-     // Execute tool
-     try {
-       const result = await tool.run(call.arguments);
-       const endTime = Date.now();
-
-       await toolStrategy?.onAfterCall?.(tool, call.arguments, result);
-
-       const execution: ToolExecution = {
-         toolName: tool.name,
-         toolCallId: call.toolCallId,
-         arguments: call.arguments,
-         result,
-         isError: false,
-         duration: endTime - startTime,
-         approved,
-       };
-       executions.push(execution);
-
-       onEvent?.(toolExecutionEnd(call.toolCallId, tool.name, result, false, endTime, index));
-
-       return {
-         toolCallId: call.toolCallId,
-         result,
-         isError: false,
-       };
-     } catch (error) {
-       const endTime = Date.now();
-       await toolStrategy?.onError?.(tool, call.arguments, error as Error);
-
-       const errorMessage = error instanceof Error ? error.message : String(error);
-
-       const execution: ToolExecution = {
-         toolName: tool.name,
-         toolCallId: call.toolCallId,
-         arguments: call.arguments,
-         result: errorMessage,
-         isError: true,
-         duration: endTime - startTime,
-         approved,
-       };
-       executions.push(execution);
-
-       onEvent?.(toolExecutionEnd(call.toolCallId, tool.name, errorMessage, true, endTime, index));
-
-       return {
-         toolCallId: call.toolCallId,
-         result: errorMessage,
-         isError: true,
-       };
-     }
-   });
-
-   results.push(...(await Promise.all(promises)));
-   return results;
- }
-
- /**
-  * Check if messages contain media that requires specific capabilities
-  */
- function validateMediaCapabilities(
-   messages: Message[],
-   capabilities: LLMCapabilities,
-   providerName: string
- ): void {
-   for (const msg of messages) {
-     if (!isUserMessage(msg)) continue;
-
-     for (const block of msg.content) {
-       if (block.type === 'image' && !capabilities.imageInput) {
-         throw new UPPError(
-           `Provider '${providerName}' does not support image input`,
-           'INVALID_REQUEST',
-           providerName,
-           'llm'
-         );
-       }
-       if (block.type === 'video' && !capabilities.videoInput) {
-         throw new UPPError(
-           `Provider '${providerName}' does not support video input`,
-           'INVALID_REQUEST',
-           providerName,
-           'llm'
-         );
-       }
-       if (block.type === 'audio' && !capabilities.audioInput) {
-         throw new UPPError(
-           `Provider '${providerName}' does not support audio input`,
-           'INVALID_REQUEST',
-           providerName,
-           'llm'
-         );
-       }
-     }
-   }
- }
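
For orientation, here is a minimal sketch of how the deleted llm() entry point appears to have been consumed, inferred only from the signatures in the file above. The root import path, the modelRef value, and the tool object are assumptions; the concrete provider factories and the full Tool shape live in other deleted files of this diff and are not reproduced here.

// Usage sketch (assumptions: root export path, modelRef shape, tool shape).
import { llm } from '@providerprotocol/ai';

// A model reference as expected by llm(): a provider plus a model id.
// Declared ambiently here because the provider factories are not shown in this file.
declare const modelRef: { provider: any; modelId: string };

const assistant = llm({
  model: modelRef,
  system: 'You are a concise assistant.',
  tools: [
    {
      // Hypothetical tool; the full Tool interface is defined in the deleted src/types/tool.ts.
      name: 'current_time',
      run: async () => new Date().toISOString(),
    } as any,
  ],
  toolStrategy: { maxIterations: 3 }, // caps the tool loop implemented above
});

// Non-streaming: runs the tool loop and resolves to a Turn.
const turn = await assistant.generate('What time is it?');

// Streaming: iterate StreamEvents as they arrive; tool executions surface as events too.
for await (const event of assistant.stream('And in UTC?')) {
  console.log(event);
}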