@providerprotocol/ai 0.0.11 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/dist/index.js +3 -3
  2. package/dist/index.js.map +1 -1
  3. package/package.json +1 -10
  4. package/src/anthropic/index.ts +0 -3
  5. package/src/core/image.ts +0 -188
  6. package/src/core/llm.ts +0 -650
  7. package/src/core/provider.ts +0 -92
  8. package/src/google/index.ts +0 -3
  9. package/src/http/errors.ts +0 -112
  10. package/src/http/fetch.ts +0 -210
  11. package/src/http/index.ts +0 -31
  12. package/src/http/keys.ts +0 -136
  13. package/src/http/retry.ts +0 -205
  14. package/src/http/sse.ts +0 -136
  15. package/src/index.ts +0 -32
  16. package/src/ollama/index.ts +0 -3
  17. package/src/openai/index.ts +0 -39
  18. package/src/openrouter/index.ts +0 -11
  19. package/src/providers/anthropic/index.ts +0 -17
  20. package/src/providers/anthropic/llm.ts +0 -196
  21. package/src/providers/anthropic/transform.ts +0 -434
  22. package/src/providers/anthropic/types.ts +0 -213
  23. package/src/providers/google/index.ts +0 -17
  24. package/src/providers/google/llm.ts +0 -203
  25. package/src/providers/google/transform.ts +0 -447
  26. package/src/providers/google/types.ts +0 -214
  27. package/src/providers/ollama/index.ts +0 -43
  28. package/src/providers/ollama/llm.ts +0 -272
  29. package/src/providers/ollama/transform.ts +0 -434
  30. package/src/providers/ollama/types.ts +0 -260
  31. package/src/providers/openai/index.ts +0 -186
  32. package/src/providers/openai/llm.completions.ts +0 -201
  33. package/src/providers/openai/llm.responses.ts +0 -211
  34. package/src/providers/openai/transform.completions.ts +0 -561
  35. package/src/providers/openai/transform.responses.ts +0 -708
  36. package/src/providers/openai/types.ts +0 -1249
  37. package/src/providers/openrouter/index.ts +0 -177
  38. package/src/providers/openrouter/llm.completions.ts +0 -201
  39. package/src/providers/openrouter/llm.responses.ts +0 -211
  40. package/src/providers/openrouter/transform.completions.ts +0 -538
  41. package/src/providers/openrouter/transform.responses.ts +0 -742
  42. package/src/providers/openrouter/types.ts +0 -717
  43. package/src/providers/xai/index.ts +0 -223
  44. package/src/providers/xai/llm.completions.ts +0 -201
  45. package/src/providers/xai/llm.messages.ts +0 -195
  46. package/src/providers/xai/llm.responses.ts +0 -211
  47. package/src/providers/xai/transform.completions.ts +0 -565
  48. package/src/providers/xai/transform.messages.ts +0 -448
  49. package/src/providers/xai/transform.responses.ts +0 -678
  50. package/src/providers/xai/types.ts +0 -938
  51. package/src/types/content.ts +0 -133
  52. package/src/types/errors.ts +0 -85
  53. package/src/types/index.ts +0 -105
  54. package/src/types/llm.ts +0 -211
  55. package/src/types/messages.ts +0 -205
  56. package/src/types/provider.ts +0 -195
  57. package/src/types/schema.ts +0 -58
  58. package/src/types/stream.ts +0 -188
  59. package/src/types/thread.ts +0 -226
  60. package/src/types/tool.ts +0 -88
  61. package/src/types/turn.ts +0 -118
  62. package/src/utils/id.ts +0 -28
  63. package/src/xai/index.ts +0 -41
@@ -1,203 +0,0 @@
1
- import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
2
- import type { StreamEvent } from '../../types/stream.ts';
3
- import type { LLMProvider } from '../../types/provider.ts';
4
- import { UPPError } from '../../types/errors.ts';
5
- import { resolveApiKey } from '../../http/keys.ts';
6
- import { doFetch, doStreamFetch } from '../../http/fetch.ts';
7
- import { parseSSEStream } from '../../http/sse.ts';
8
- import { normalizeHttpError } from '../../http/errors.ts';
9
- import type { GoogleLLMParams, GoogleResponse, GoogleStreamChunk } from './types.ts';
10
- import {
11
- transformRequest,
12
- transformResponse,
13
- transformStreamChunk,
14
- createStreamState,
15
- buildResponseFromState,
16
- } from './transform.ts';
17
-
18
- const GOOGLE_API_BASE = 'https://generativelanguage.googleapis.com/v1beta';
19
-
20
- /**
21
- * Google API capabilities
22
- */
23
- const GOOGLE_CAPABILITIES: LLMCapabilities = {
24
- streaming: true,
25
- tools: true,
26
- structuredOutput: true,
27
- imageInput: true,
28
- videoInput: true,
29
- audioInput: true,
30
- };
31
-
32
- /**
33
- * Build Google API URL for a model
34
- */
35
- function buildUrl(modelId: string, action: 'generateContent' | 'streamGenerateContent', apiKey: string): string {
36
- const base = `${GOOGLE_API_BASE}/models/${modelId}:${action}`;
37
- return `${base}?key=${apiKey}`;
38
- }
39
-
40
- /**
41
- * Create Google LLM handler
42
- */
43
- export function createLLMHandler(): LLMHandler<GoogleLLMParams> {
44
- // Provider reference injected by createProvider() after construction
45
- let providerRef: LLMProvider<GoogleLLMParams> | null = null;
46
-
47
- return {
48
- _setProvider(provider: LLMProvider<GoogleLLMParams>) {
49
- providerRef = provider;
50
- },
51
-
52
- bind(modelId: string): BoundLLMModel<GoogleLLMParams> {
53
- // Use the injected provider reference (set by createProvider)
54
- if (!providerRef) {
55
- throw new UPPError(
56
- 'Provider reference not set. Handler must be used with createProvider().',
57
- 'INVALID_REQUEST',
58
- 'google',
59
- 'llm'
60
- );
61
- }
62
-
63
- const model: BoundLLMModel<GoogleLLMParams> = {
64
- modelId,
65
- capabilities: GOOGLE_CAPABILITIES,
66
-
67
- get provider(): LLMProvider<GoogleLLMParams> {
68
- return providerRef!;
69
- },
70
-
71
- async complete(request: LLMRequest<GoogleLLMParams>): Promise<LLMResponse> {
72
- const apiKey = await resolveApiKey(
73
- request.config,
74
- 'GOOGLE_API_KEY',
75
- 'google',
76
- 'llm'
77
- );
78
-
79
- const url = request.config.baseUrl
80
- ? `${request.config.baseUrl}/models/${modelId}:generateContent?key=${apiKey}`
81
- : buildUrl(modelId, 'generateContent', apiKey);
82
-
83
- const body = transformRequest(request, modelId);
84
-
85
- const response = await doFetch(
86
- url,
87
- {
88
- method: 'POST',
89
- headers: {
90
- 'Content-Type': 'application/json',
91
- },
92
- body: JSON.stringify(body),
93
- signal: request.signal,
94
- },
95
- request.config,
96
- 'google',
97
- 'llm'
98
- );
99
-
100
- const data = (await response.json()) as GoogleResponse;
101
- return transformResponse(data);
102
- },
103
-
104
- stream(request: LLMRequest<GoogleLLMParams>): LLMStreamResult {
105
- const state = createStreamState();
106
- let responseResolve: (value: LLMResponse) => void;
107
- let responseReject: (error: Error) => void;
108
-
109
- const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
110
- responseResolve = resolve;
111
- responseReject = reject;
112
- });
113
-
114
- async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
115
- try {
116
- const apiKey = await resolveApiKey(
117
- request.config,
118
- 'GOOGLE_API_KEY',
119
- 'google',
120
- 'llm'
121
- );
122
-
123
- const url = request.config.baseUrl
124
- ? `${request.config.baseUrl}/models/${modelId}:streamGenerateContent?alt=sse&key=${apiKey}`
125
- : `${buildUrl(modelId, 'streamGenerateContent', apiKey)}&alt=sse`;
126
-
127
- const body = transformRequest(request, modelId);
128
-
129
- const response = await doStreamFetch(
130
- url,
131
- {
132
- method: 'POST',
133
- headers: {
134
- 'Content-Type': 'application/json',
135
- },
136
- body: JSON.stringify(body),
137
- signal: request.signal,
138
- },
139
- request.config,
140
- 'google',
141
- 'llm'
142
- );
143
-
144
- if (!response.ok) {
145
- const error = await normalizeHttpError(response, 'google', 'llm');
146
- responseReject(error);
147
- throw error;
148
- }
149
-
150
- if (!response.body) {
151
- const error = new UPPError(
152
- 'No response body for streaming request',
153
- 'PROVIDER_ERROR',
154
- 'google',
155
- 'llm'
156
- );
157
- responseReject(error);
158
- throw error;
159
- }
160
-
161
- for await (const data of parseSSEStream(response.body)) {
162
- if (typeof data === 'object' && data !== null) {
163
- const chunk = data as GoogleStreamChunk;
164
-
165
- // Check for error
166
- if ('error' in chunk) {
167
- const error = new UPPError(
168
- (chunk as any).error.message,
169
- 'PROVIDER_ERROR',
170
- 'google',
171
- 'llm'
172
- );
173
- responseReject(error);
174
- throw error;
175
- }
176
-
177
- const events = transformStreamChunk(chunk, state);
178
- for (const event of events) {
179
- yield event;
180
- }
181
- }
182
- }
183
-
184
- responseResolve(buildResponseFromState(state));
185
- } catch (error) {
186
- responseReject(error as Error);
187
- throw error;
188
- }
189
- }
190
-
191
- return {
192
- [Symbol.asyncIterator]() {
193
- return generateEvents();
194
- },
195
- response: responsePromise,
196
- };
197
- },
198
- };
199
-
200
- return model;
201
- },
202
- };
203
- }
@@ -1,447 +0,0 @@
1
- import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
2
- import type { Message } from '../../types/messages.ts';
3
- import type { StreamEvent } from '../../types/stream.ts';
4
- import type { Tool, ToolCall } from '../../types/tool.ts';
5
- import type { TokenUsage } from '../../types/turn.ts';
6
- import type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';
7
- import {
8
- AssistantMessage,
9
- isUserMessage,
10
- isAssistantMessage,
11
- isToolResultMessage,
12
- } from '../../types/messages.ts';
13
- import type {
14
- GoogleLLMParams,
15
- GoogleRequest,
16
- GoogleContent,
17
- GooglePart,
18
- GoogleTool,
19
- GoogleResponse,
20
- GoogleStreamChunk,
21
- GoogleFunctionCallPart,
22
- } from './types.ts';
23
-
24
- /**
25
- * Transform UPP request to Google format
26
- *
27
- * Params are spread into generationConfig to allow pass-through of any Google API fields,
28
- * even those not explicitly defined in our type. This enables developers to
29
- * use new API features without waiting for library updates.
30
- */
31
- export function transformRequest<TParams extends GoogleLLMParams>(
32
- request: LLMRequest<TParams>,
33
- modelId: string
34
- ): GoogleRequest {
35
- const params = (request.params ?? {}) as GoogleLLMParams;
36
-
37
- const googleRequest: GoogleRequest = {
38
- contents: transformMessages(request.messages),
39
- };
40
-
41
- // System instruction (separate from contents in Google)
42
- if (request.system) {
43
- googleRequest.systemInstruction = {
44
- parts: [{ text: request.system }],
45
- };
46
- }
47
-
48
- // Spread params into generationConfig to pass through all fields
49
- const generationConfig: NonNullable<GoogleRequest['generationConfig']> = {
50
- ...params,
51
- };
52
-
53
- // Protocol-level structured output (overrides provider-specific settings)
54
- if (request.structure) {
55
- generationConfig.responseMimeType = 'application/json';
56
- generationConfig.responseSchema = request.structure as unknown as Record<string, unknown>;
57
- }
58
-
59
- if (Object.keys(generationConfig).length > 0) {
60
- googleRequest.generationConfig = generationConfig;
61
- }
62
-
63
- // Tools come from request, not params
64
- if (request.tools && request.tools.length > 0) {
65
- googleRequest.tools = [
66
- {
67
- functionDeclarations: request.tools.map(transformTool),
68
- },
69
- ];
70
- }
71
-
72
- return googleRequest;
73
- }
74
-
75
- /**
76
- * Filter to only valid content blocks with a type property
77
- */
78
- function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
79
- return content.filter((c) => c && typeof c.type === 'string');
80
- }
81
-
82
- /**
83
- * Transform UPP Messages to Google contents
84
- */
85
- function transformMessages(messages: Message[]): GoogleContent[] {
86
- const contents: GoogleContent[] = [];
87
-
88
- for (const msg of messages) {
89
- if (isUserMessage(msg)) {
90
- const validContent = filterValidContent(msg.content);
91
- const parts = validContent.map(transformContentBlock);
92
- // Google requires at least one part - add placeholder if empty
93
- if (parts.length === 0) {
94
- parts.push({ text: '' });
95
- }
96
- contents.push({
97
- role: 'user',
98
- parts,
99
- });
100
- } else if (isAssistantMessage(msg)) {
101
- const validContent = filterValidContent(msg.content);
102
- const parts: GooglePart[] = validContent.map(transformContentBlock);
103
-
104
- // Add function calls - use stored parts with thought signatures if available
105
- const googleMeta = msg.metadata?.google as {
106
- functionCallParts?: Array<{
107
- name: string;
108
- args: Record<string, unknown>;
109
- thoughtSignature?: string;
110
- }>;
111
- } | undefined;
112
-
113
- if (googleMeta?.functionCallParts && googleMeta.functionCallParts.length > 0) {
114
- // Use stored function call parts with thought signatures
115
- for (const fc of googleMeta.functionCallParts) {
116
- const part: GoogleFunctionCallPart = {
117
- functionCall: {
118
- name: fc.name,
119
- args: fc.args,
120
- },
121
- };
122
- if (fc.thoughtSignature) {
123
- part.thoughtSignature = fc.thoughtSignature;
124
- }
125
- parts.push(part);
126
- }
127
- } else if (msg.toolCalls) {
128
- // Fallback: reconstruct from tool calls (no thought signatures)
129
- for (const call of msg.toolCalls) {
130
- parts.push({
131
- functionCall: {
132
- name: call.toolName,
133
- args: call.arguments,
134
- },
135
- });
136
- }
137
- }
138
-
139
- // Google requires at least one part - add placeholder if empty
140
- if (parts.length === 0) {
141
- parts.push({ text: '' });
142
- }
143
-
144
- contents.push({
145
- role: 'model',
146
- parts,
147
- });
148
- } else if (isToolResultMessage(msg)) {
149
- // Function results are sent as user messages in Google
150
- contents.push({
151
- role: 'user',
152
- parts: msg.results.map((result) => ({
153
- functionResponse: {
154
- name: result.toolCallId, // Google uses the function name, but we store it in toolCallId
155
- response:
156
- typeof result.result === 'object'
157
- ? (result.result as Record<string, unknown>)
158
- : { result: result.result },
159
- },
160
- })),
161
- });
162
- }
163
- }
164
-
165
- return contents;
166
- }
167
-
168
- /**
169
- * Transform a content block to Google format
170
- */
171
- function transformContentBlock(block: ContentBlock): GooglePart {
172
- switch (block.type) {
173
- case 'text':
174
- return { text: block.text };
175
-
176
- case 'image': {
177
- const imageBlock = block as ImageBlock;
178
- let data: string;
179
-
180
- if (imageBlock.source.type === 'base64') {
181
- data = imageBlock.source.data;
182
- } else if (imageBlock.source.type === 'bytes') {
183
- data = btoa(
184
- Array.from(imageBlock.source.data)
185
- .map((b) => String.fromCharCode(b))
186
- .join('')
187
- );
188
- } else {
189
- throw new Error('Google API does not support URL image sources directly');
190
- }
191
-
192
- return {
193
- inlineData: {
194
- mimeType: imageBlock.mimeType,
195
- data,
196
- },
197
- };
198
- }
199
-
200
- default:
201
- throw new Error(`Unsupported content type: ${block.type}`);
202
- }
203
- }
204
-
205
- /**
206
- * Transform a UPP Tool to Google format
207
- */
208
- function transformTool(tool: Tool): GoogleTool['functionDeclarations'][0] {
209
- return {
210
- name: tool.name,
211
- description: tool.description,
212
- parameters: {
213
- type: 'object',
214
- properties: tool.parameters.properties,
215
- required: tool.parameters.required,
216
- },
217
- };
218
- }
219
-
220
- /**
221
- * Transform Google response to UPP LLMResponse
222
- */
223
- export function transformResponse(data: GoogleResponse): LLMResponse {
224
- const candidate = data.candidates?.[0];
225
- if (!candidate) {
226
- throw new Error('No candidates in Google response');
227
- }
228
-
229
- const textContent: TextBlock[] = [];
230
- const toolCalls: ToolCall[] = [];
231
- let structuredData: unknown;
232
- // Store original function call parts with thought signatures for echoing back
233
- const functionCallParts: Array<{
234
- name: string;
235
- args: Record<string, unknown>;
236
- thoughtSignature?: string;
237
- }> = [];
238
-
239
- for (const part of candidate.content.parts) {
240
- if ('text' in part) {
241
- textContent.push({ type: 'text', text: part.text });
242
- // Try to parse as JSON for structured output (native JSON mode)
243
- if (structuredData === undefined) {
244
- try {
245
- structuredData = JSON.parse(part.text);
246
- } catch {
247
- // Not valid JSON - that's fine, might not be structured output
248
- }
249
- }
250
- } else if ('functionCall' in part) {
251
- const fc = part as GoogleFunctionCallPart;
252
- toolCalls.push({
253
- toolCallId: fc.functionCall.name, // Google doesn't have call IDs, use name
254
- toolName: fc.functionCall.name,
255
- arguments: fc.functionCall.args,
256
- });
257
- // Store the full part including thought signature
258
- functionCallParts.push({
259
- name: fc.functionCall.name,
260
- args: fc.functionCall.args,
261
- thoughtSignature: fc.thoughtSignature,
262
- });
263
- }
264
- }
265
-
266
- const message = new AssistantMessage(
267
- textContent,
268
- toolCalls.length > 0 ? toolCalls : undefined,
269
- {
270
- metadata: {
271
- google: {
272
- finishReason: candidate.finishReason,
273
- safetyRatings: candidate.safetyRatings,
274
- // Store function call parts with thought signatures for multi-turn
275
- functionCallParts: functionCallParts.length > 0 ? functionCallParts : undefined,
276
- },
277
- },
278
- }
279
- );
280
-
281
- const usage: TokenUsage = {
282
- inputTokens: data.usageMetadata?.promptTokenCount ?? 0,
283
- outputTokens: data.usageMetadata?.candidatesTokenCount ?? 0,
284
- totalTokens: data.usageMetadata?.totalTokenCount ?? 0,
285
- };
286
-
287
- return {
288
- message,
289
- usage,
290
- stopReason: candidate.finishReason ?? 'STOP',
291
- data: structuredData,
292
- };
293
- }
294
-
295
- /**
296
- * State for accumulating streaming response
297
- */
298
- export interface StreamState {
299
- content: string;
300
- toolCalls: Array<{ name: string; args: Record<string, unknown>; thoughtSignature?: string }>;
301
- finishReason: string | null;
302
- inputTokens: number;
303
- outputTokens: number;
304
- isFirstChunk: boolean;
305
- }
306
-
307
- /**
308
- * Create initial stream state
309
- */
310
- export function createStreamState(): StreamState {
311
- return {
312
- content: '',
313
- toolCalls: [],
314
- finishReason: null,
315
- inputTokens: 0,
316
- outputTokens: 0,
317
- isFirstChunk: true,
318
- };
319
- }
320
-
321
- /**
322
- * Transform Google stream chunk to UPP StreamEvents
323
- */
324
- export function transformStreamChunk(
325
- chunk: GoogleStreamChunk,
326
- state: StreamState
327
- ): StreamEvent[] {
328
- const events: StreamEvent[] = [];
329
-
330
- // First chunk - emit message start
331
- if (state.isFirstChunk) {
332
- events.push({ type: 'message_start', index: 0, delta: {} });
333
- state.isFirstChunk = false;
334
- }
335
-
336
- // Usage metadata
337
- if (chunk.usageMetadata) {
338
- state.inputTokens = chunk.usageMetadata.promptTokenCount;
339
- state.outputTokens = chunk.usageMetadata.candidatesTokenCount;
340
- }
341
-
342
- const candidate = chunk.candidates?.[0];
343
- if (!candidate) {
344
- return events;
345
- }
346
-
347
- // Process parts
348
- for (const part of candidate.content?.parts ?? []) {
349
- if ('text' in part) {
350
- state.content += part.text;
351
- events.push({
352
- type: 'text_delta',
353
- index: 0,
354
- delta: { text: part.text },
355
- });
356
- } else if ('functionCall' in part) {
357
- const fc = part as GoogleFunctionCallPart;
358
- // Store with thought signature for echoing back
359
- state.toolCalls.push({
360
- name: fc.functionCall.name,
361
- args: fc.functionCall.args,
362
- thoughtSignature: fc.thoughtSignature,
363
- });
364
- events.push({
365
- type: 'tool_call_delta',
366
- index: state.toolCalls.length - 1,
367
- delta: {
368
- toolCallId: fc.functionCall.name,
369
- toolName: fc.functionCall.name,
370
- argumentsJson: JSON.stringify(fc.functionCall.args),
371
- },
372
- });
373
- }
374
- }
375
-
376
- // Finish reason
377
- if (candidate.finishReason) {
378
- state.finishReason = candidate.finishReason;
379
- events.push({ type: 'message_stop', index: 0, delta: {} });
380
- }
381
-
382
- return events;
383
- }
384
-
385
- /**
386
- * Build LLMResponse from accumulated stream state
387
- */
388
- export function buildResponseFromState(state: StreamState): LLMResponse {
389
- const textContent: TextBlock[] = [];
390
- const toolCalls: ToolCall[] = [];
391
- let structuredData: unknown;
392
- const functionCallParts: Array<{
393
- name: string;
394
- args: Record<string, unknown>;
395
- thoughtSignature?: string;
396
- }> = [];
397
-
398
- if (state.content) {
399
- textContent.push({ type: 'text', text: state.content });
400
- // Try to parse as JSON for structured output (native JSON mode)
401
- try {
402
- structuredData = JSON.parse(state.content);
403
- } catch {
404
- // Not valid JSON - that's fine, might not be structured output
405
- }
406
- }
407
-
408
- for (const tc of state.toolCalls) {
409
- toolCalls.push({
410
- toolCallId: tc.name,
411
- toolName: tc.name,
412
- arguments: tc.args,
413
- });
414
- functionCallParts.push({
415
- name: tc.name,
416
- args: tc.args,
417
- thoughtSignature: tc.thoughtSignature,
418
- });
419
- }
420
-
421
- const message = new AssistantMessage(
422
- textContent,
423
- toolCalls.length > 0 ? toolCalls : undefined,
424
- {
425
- metadata: {
426
- google: {
427
- finishReason: state.finishReason,
428
- // Store function call parts with thought signatures for multi-turn
429
- functionCallParts: functionCallParts.length > 0 ? functionCallParts : undefined,
430
- },
431
- },
432
- }
433
- );
434
-
435
- const usage: TokenUsage = {
436
- inputTokens: state.inputTokens,
437
- outputTokens: state.outputTokens,
438
- totalTokens: state.inputTokens + state.outputTokens,
439
- };
440
-
441
- return {
442
- message,
443
- usage,
444
- stopReason: state.finishReason ?? 'STOP',
445
- data: structuredData,
446
- };
447
- }