@providerprotocol/ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/LICENSE +21 -0
  2. package/README.md +84 -0
  3. package/dist/anthropic/index.d.ts +41 -0
  4. package/dist/anthropic/index.js +500 -0
  5. package/dist/anthropic/index.js.map +1 -0
  6. package/dist/chunk-CUCRF5W6.js +136 -0
  7. package/dist/chunk-CUCRF5W6.js.map +1 -0
  8. package/dist/chunk-FTFX2VET.js +424 -0
  9. package/dist/chunk-FTFX2VET.js.map +1 -0
  10. package/dist/chunk-QUUX4G7U.js +117 -0
  11. package/dist/chunk-QUUX4G7U.js.map +1 -0
  12. package/dist/chunk-Y6Q7JCNP.js +39 -0
  13. package/dist/chunk-Y6Q7JCNP.js.map +1 -0
  14. package/dist/google/index.d.ts +69 -0
  15. package/dist/google/index.js +517 -0
  16. package/dist/google/index.js.map +1 -0
  17. package/dist/http/index.d.ts +61 -0
  18. package/dist/http/index.js +43 -0
  19. package/dist/http/index.js.map +1 -0
  20. package/dist/index.d.ts +792 -0
  21. package/dist/index.js +898 -0
  22. package/dist/index.js.map +1 -0
  23. package/dist/openai/index.d.ts +204 -0
  24. package/dist/openai/index.js +1340 -0
  25. package/dist/openai/index.js.map +1 -0
  26. package/dist/provider-CUJWjgNl.d.ts +192 -0
  27. package/dist/retry-I2661_rv.d.ts +118 -0
  28. package/package.json +88 -0
  29. package/src/anthropic/index.ts +3 -0
  30. package/src/core/image.ts +188 -0
  31. package/src/core/llm.ts +619 -0
  32. package/src/core/provider.ts +92 -0
  33. package/src/google/index.ts +3 -0
  34. package/src/http/errors.ts +112 -0
  35. package/src/http/fetch.ts +210 -0
  36. package/src/http/index.ts +31 -0
  37. package/src/http/keys.ts +136 -0
  38. package/src/http/retry.ts +205 -0
  39. package/src/http/sse.ts +136 -0
  40. package/src/index.ts +32 -0
  41. package/src/openai/index.ts +9 -0
  42. package/src/providers/anthropic/index.ts +17 -0
  43. package/src/providers/anthropic/llm.ts +196 -0
  44. package/src/providers/anthropic/transform.ts +452 -0
  45. package/src/providers/anthropic/types.ts +213 -0
  46. package/src/providers/google/index.ts +17 -0
  47. package/src/providers/google/llm.ts +203 -0
  48. package/src/providers/google/transform.ts +487 -0
  49. package/src/providers/google/types.ts +214 -0
  50. package/src/providers/openai/index.ts +151 -0
  51. package/src/providers/openai/llm.completions.ts +201 -0
  52. package/src/providers/openai/llm.responses.ts +211 -0
  53. package/src/providers/openai/transform.completions.ts +628 -0
  54. package/src/providers/openai/transform.responses.ts +718 -0
  55. package/src/providers/openai/types.ts +711 -0
  56. package/src/types/content.ts +133 -0
  57. package/src/types/errors.ts +85 -0
  58. package/src/types/index.ts +105 -0
  59. package/src/types/llm.ts +211 -0
  60. package/src/types/messages.ts +182 -0
  61. package/src/types/provider.ts +195 -0
  62. package/src/types/schema.ts +58 -0
  63. package/src/types/stream.ts +146 -0
  64. package/src/types/thread.ts +226 -0
  65. package/src/types/tool.ts +88 -0
  66. package/src/types/turn.ts +118 -0
  67. package/src/utils/id.ts +28 -0
package/src/providers/anthropic/transform.ts
@@ -0,0 +1,452 @@
+ import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
+ import type { Message } from '../../types/messages.ts';
+ import type { StreamEvent } from '../../types/stream.ts';
+ import type { Tool, ToolCall } from '../../types/tool.ts';
+ import type { TokenUsage } from '../../types/turn.ts';
+ import type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';
+ import {
+   AssistantMessage,
+   UserMessage,
+   ToolResultMessage,
+   isUserMessage,
+   isAssistantMessage,
+   isToolResultMessage,
+ } from '../../types/messages.ts';
+ import type {
+   AnthropicLLMParams,
+   AnthropicRequest,
+   AnthropicMessage,
+   AnthropicContent,
+   AnthropicTool,
+   AnthropicResponse,
+   AnthropicStreamEvent,
+   AnthropicContentBlockDeltaEvent,
+ } from './types.ts';
+
+ /**
+  * Transform UPP request to Anthropic format
+  */
+ export function transformRequest<TParams extends AnthropicLLMParams>(
+   request: LLMRequest<TParams>,
+   modelId: string
+ ): AnthropicRequest {
+   const params = (request.params ?? { max_tokens: 4096 }) as AnthropicLLMParams;
+
+   const anthropicRequest: AnthropicRequest = {
+     model: modelId,
+     max_tokens: params.max_tokens ?? 4096,
+     messages: request.messages.map(transformMessage),
+   };
+
+   // System prompt (top-level in Anthropic)
+   if (request.system) {
+     anthropicRequest.system = request.system;
+   }
+
+   // Model parameters
+   if (params.temperature !== undefined) {
+     anthropicRequest.temperature = params.temperature;
+   }
+   if (params.top_p !== undefined) {
+     anthropicRequest.top_p = params.top_p;
+   }
+   if (params.top_k !== undefined) {
+     anthropicRequest.top_k = params.top_k;
+   }
+   if (params.stop_sequences) {
+     anthropicRequest.stop_sequences = params.stop_sequences;
+   }
+   if (params.metadata) {
+     anthropicRequest.metadata = params.metadata;
+   }
+   if (params.thinking) {
+     anthropicRequest.thinking = params.thinking;
+   }
+   if (params.service_tier !== undefined) {
+     anthropicRequest.service_tier = params.service_tier;
+   }
+
+   // Tools
+   if (request.tools && request.tools.length > 0) {
+     anthropicRequest.tools = request.tools.map(transformTool);
+     anthropicRequest.tool_choice = { type: 'auto' };
+   }
+
+   // Structured output via tool-based approach
+   // Anthropic doesn't have native structured output, so we use a tool to enforce the schema
+   if (request.structure) {
+     const structuredTool: AnthropicTool = {
+       name: 'json_response',
+       description: 'Return the response in the specified JSON format. You MUST use this tool to provide your response.',
+       input_schema: {
+         type: 'object',
+         properties: request.structure.properties,
+         required: request.structure.required,
+       },
+     };
+
+     // Add the structured output tool (may coexist with user tools)
+     anthropicRequest.tools = [...(anthropicRequest.tools ?? []), structuredTool];
+     // Force the model to use the json_response tool
+     anthropicRequest.tool_choice = { type: 'tool', name: 'json_response' };
+   }
+
+   return anthropicRequest;
+ }
+
+ /**
+  * Filter to only valid content blocks with a type property
+  */
+ function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
+   return content.filter((c) => c && typeof c.type === 'string');
+ }
+
+ /**
+  * Transform a UPP Message to Anthropic format
+  */
+ function transformMessage(message: Message): AnthropicMessage {
+   if (isUserMessage(message)) {
+     const validContent = filterValidContent(message.content);
+     return {
+       role: 'user',
+       content: validContent.map(transformContentBlock),
+     };
+   }
+
+   if (isAssistantMessage(message)) {
+     const validContent = filterValidContent(message.content);
+     const content: AnthropicContent[] = validContent.map(transformContentBlock);
+
+     // Add tool calls as tool_use content blocks
+     if (message.toolCalls) {
+       for (const call of message.toolCalls) {
+         content.push({
+           type: 'tool_use',
+           id: call.toolCallId,
+           name: call.toolName,
+           input: call.arguments,
+         });
+       }
+     }
+
+     return {
+       role: 'assistant',
+       content,
+     };
+   }
+
+   if (isToolResultMessage(message)) {
+     // Tool results are sent as user messages with tool_result content
+     return {
+       role: 'user',
+       content: message.results.map((result) => ({
+         type: 'tool_result' as const,
+         tool_use_id: result.toolCallId,
+         content:
+           typeof result.result === 'string'
+             ? result.result
+             : JSON.stringify(result.result),
+         is_error: result.isError,
+       })),
+     };
+   }
+
+   throw new Error(`Unknown message type: ${message.type}`);
+ }
+
+ /**
+  * Transform a content block to Anthropic format
+  */
+ function transformContentBlock(block: ContentBlock): AnthropicContent {
+   switch (block.type) {
+     case 'text':
+       return { type: 'text', text: block.text };
+
+     case 'image': {
+       const imageBlock = block as ImageBlock;
+       if (imageBlock.source.type === 'base64') {
+         return {
+           type: 'image',
+           source: {
+             type: 'base64',
+             media_type: imageBlock.mimeType,
+             data: imageBlock.source.data,
+           },
+         };
+       }
+       if (imageBlock.source.type === 'url') {
+         return {
+           type: 'image',
+           source: {
+             type: 'url',
+             url: imageBlock.source.url,
+           },
+         };
+       }
+       if (imageBlock.source.type === 'bytes') {
+         // Convert bytes to base64
+         const base64 = btoa(
+           Array.from(imageBlock.source.data)
+             .map((b) => String.fromCharCode(b))
+             .join('')
+         );
+         return {
+           type: 'image',
+           source: {
+             type: 'base64',
+             media_type: imageBlock.mimeType,
+             data: base64,
+           },
+         };
+       }
+       throw new Error(`Unknown image source type`);
+     }
+
+     default:
+       throw new Error(`Unsupported content type: ${block.type}`);
+   }
+ }
+
+ /**
+  * Transform a UPP Tool to Anthropic format
+  */
+ function transformTool(tool: Tool): AnthropicTool {
+   return {
+     name: tool.name,
+     description: tool.description,
+     input_schema: {
+       type: 'object',
+       properties: tool.parameters.properties,
+       required: tool.parameters.required,
+     },
+   };
+ }
+
+ /**
+  * Transform Anthropic response to UPP LLMResponse
+  */
+ export function transformResponse(data: AnthropicResponse): LLMResponse {
+   // Extract text content
+   const textContent: TextBlock[] = [];
+   const toolCalls: ToolCall[] = [];
+   let structuredData: unknown;
+
+   for (const block of data.content) {
+     if (block.type === 'text') {
+       textContent.push({ type: 'text', text: block.text });
+     } else if (block.type === 'tool_use') {
+       // Check if this is the json_response tool (structured output)
+       if (block.name === 'json_response') {
+         // Extract structured data from tool arguments
+         structuredData = block.input;
+       }
+       toolCalls.push({
+         toolCallId: block.id,
+         toolName: block.name,
+         arguments: block.input,
+       });
+     }
+     // Skip thinking blocks for now
+   }
+
+   const message = new AssistantMessage(
+     textContent,
+     toolCalls.length > 0 ? toolCalls : undefined,
+     {
+       id: data.id,
+       metadata: {
+         anthropic: {
+           stop_reason: data.stop_reason,
+           stop_sequence: data.stop_sequence,
+           model: data.model,
+         },
+       },
+     }
+   );
+
+   const usage: TokenUsage = {
+     inputTokens: data.usage.input_tokens,
+     outputTokens: data.usage.output_tokens,
+     totalTokens: data.usage.input_tokens + data.usage.output_tokens,
+   };
+
+   return {
+     message,
+     usage,
+     stopReason: data.stop_reason ?? 'end_turn',
+     data: structuredData,
+   };
+ }
+
+ /**
+  * State for accumulating streaming response
+  */
+ export interface StreamState {
+   messageId: string;
+   model: string;
+   content: Array<{ type: string; text?: string; id?: string; name?: string; input?: string }>;
+   stopReason: string | null;
+   inputTokens: number;
+   outputTokens: number;
+ }
+
+ /**
+  * Create initial stream state
+  */
+ export function createStreamState(): StreamState {
+   return {
+     messageId: '',
+     model: '',
+     content: [],
+     stopReason: null,
+     inputTokens: 0,
+     outputTokens: 0,
+   };
+ }
+
+ /**
+  * Transform Anthropic stream event to UPP StreamEvent
+  * Returns null for events that don't produce UPP events
+  */
+ export function transformStreamEvent(
+   event: AnthropicStreamEvent,
+   state: StreamState
+ ): StreamEvent | null {
+   switch (event.type) {
+     case 'message_start':
+       state.messageId = event.message.id;
+       state.model = event.message.model;
+       state.inputTokens = event.message.usage.input_tokens;
+       return { type: 'message_start', index: 0, delta: {} };
+
+     case 'content_block_start':
+       // Initialize content block
+       if (event.content_block.type === 'text') {
+         state.content[event.index] = { type: 'text', text: '' };
+       } else if (event.content_block.type === 'tool_use') {
+         state.content[event.index] = {
+           type: 'tool_use',
+           id: event.content_block.id,
+           name: event.content_block.name,
+           input: '',
+         };
+       }
+       return { type: 'content_block_start', index: event.index, delta: {} };
+
+     case 'content_block_delta': {
+       const delta = event.delta;
+       if (delta.type === 'text_delta') {
+         if (state.content[event.index]) {
+           state.content[event.index]!.text =
+             (state.content[event.index]!.text ?? '') + delta.text;
+         }
+         return {
+           type: 'text_delta',
+           index: event.index,
+           delta: { text: delta.text },
+         };
+       }
+       if (delta.type === 'input_json_delta') {
+         if (state.content[event.index]) {
+           state.content[event.index]!.input =
+             (state.content[event.index]!.input ?? '') + delta.partial_json;
+         }
+         return {
+           type: 'tool_call_delta',
+           index: event.index,
+           delta: {
+             argumentsJson: delta.partial_json,
+             toolCallId: state.content[event.index]?.id,
+             toolName: state.content[event.index]?.name,
+           },
+         };
+       }
+       if (delta.type === 'thinking_delta') {
+         return {
+           type: 'reasoning_delta',
+           index: event.index,
+           delta: { text: delta.thinking },
+         };
+       }
+       return null;
+     }
+
+     case 'content_block_stop':
+       return { type: 'content_block_stop', index: event.index, delta: {} };
+
+     case 'message_delta':
+       state.stopReason = event.delta.stop_reason;
+       state.outputTokens = event.usage.output_tokens;
+       return null;
+
+     case 'message_stop':
+       return { type: 'message_stop', index: 0, delta: {} };
+
+     case 'ping':
+     case 'error':
+       return null;
+
+     default:
+       return null;
+   }
+ }
+
+ /**
+  * Build LLMResponse from accumulated stream state
+  */
+ export function buildResponseFromState(state: StreamState): LLMResponse {
+   const textContent: TextBlock[] = [];
+   const toolCalls: ToolCall[] = [];
+   let structuredData: unknown;
+
+   for (const block of state.content) {
+     if (block.type === 'text' && block.text) {
+       textContent.push({ type: 'text', text: block.text });
+     } else if (block.type === 'tool_use' && block.id && block.name) {
+       let args: Record<string, unknown> = {};
+       if (block.input) {
+         try {
+           args = JSON.parse(block.input);
+         } catch {
+           // Invalid JSON - use empty object
+         }
+       }
+       // Check if this is the json_response tool (structured output)
+       if (block.name === 'json_response') {
+         structuredData = args;
+       }
+       toolCalls.push({
+         toolCallId: block.id,
+         toolName: block.name,
+         arguments: args,
+       });
+     }
+   }
+
+   const message = new AssistantMessage(
+     textContent,
+     toolCalls.length > 0 ? toolCalls : undefined,
+     {
+       id: state.messageId,
+       metadata: {
+         anthropic: {
+           stop_reason: state.stopReason,
+           model: state.model,
+         },
+       },
+     }
+   );
+
+   const usage: TokenUsage = {
+     inputTokens: state.inputTokens,
+     outputTokens: state.outputTokens,
+     totalTokens: state.inputTokens + state.outputTokens,
+   };
+
+   return {
+     message,
+     usage,
+     stopReason: state.stopReason ?? 'end_turn',
+     data: structuredData,
+   };
+ }
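
The streaming helpers above (createStreamState, transformStreamEvent, buildResponseFromState) accumulate Anthropic SSE events into a final LLMResponse. The following sketch is not part of the package; it shows how those helpers might be driven over a hand-written event sequence, with the event payloads and model id invented for illustration and shaped after the AnthropicStreamEvent types defined in types.ts below.

```ts
// Illustrative sketch only; import paths assume a file sitting next to transform.ts.
import { createStreamState, transformStreamEvent, buildResponseFromState } from './transform.ts';
import type { AnthropicStreamEvent } from './types.ts';

// A minimal, hand-written event sequence for a short text-only reply.
const events: AnthropicStreamEvent[] = [
  {
    type: 'message_start',
    message: {
      id: 'msg_01', type: 'message', role: 'assistant', content: [],
      model: 'claude-example', stop_reason: null, stop_sequence: null,
      usage: { input_tokens: 12, output_tokens: 0 },
    },
  },
  { type: 'content_block_start', index: 0, content_block: { type: 'text', text: '' } },
  { type: 'content_block_delta', index: 0, delta: { type: 'text_delta', text: 'Hello' } },
  { type: 'content_block_stop', index: 0 },
  { type: 'message_delta', delta: { stop_reason: 'end_turn', stop_sequence: null }, usage: { output_tokens: 3 } },
  { type: 'message_stop' },
];

const state = createStreamState();
for (const event of events) {
  // Returns null for events with no UPP equivalent (ping, message_delta, ...).
  const uppEvent = transformStreamEvent(event, state);
  if (uppEvent) {
    // forward uppEvent to the stream consumer here
  }
}

// Once the stream ends, collapse the accumulated state into an LLMResponse:
// one text block ("Hello"), usage of 12 input + 3 output tokens.
const response = buildResponseFromState(state);
```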
package/src/providers/anthropic/types.ts
@@ -0,0 +1,213 @@
+ /**
+  * Anthropic-specific LLM parameters
+  * These are passed through to the Anthropic Messages API
+  */
+ export interface AnthropicLLMParams {
+   /** Maximum number of tokens to generate (required by Anthropic) */
+   max_tokens: number;
+
+   /** Temperature for randomness (0.0 - 1.0) */
+   temperature?: number;
+
+   /** Top-p (nucleus) sampling */
+   top_p?: number;
+
+   /** Top-k sampling */
+   top_k?: number;
+
+   /** Custom stop sequences */
+   stop_sequences?: string[];
+
+   /** Metadata for the request */
+   metadata?: {
+     user_id?: string;
+   };
+
+   /** Extended thinking configuration */
+   thinking?: {
+     type: 'enabled';
+     budget_tokens: number;
+   };
+
+   /**
+    * Service tier for priority/standard capacity
+    * - "auto": Automatically select based on availability (default)
+    * - "standard_only": Only use standard capacity
+    */
+   service_tier?: 'auto' | 'standard_only';
+ }
+
+ /**
+  * Anthropic API request body
+  */
+ export interface AnthropicRequest {
+   model: string;
+   max_tokens: number;
+   messages: AnthropicMessage[];
+   system?: string;
+   temperature?: number;
+   top_p?: number;
+   top_k?: number;
+   stop_sequences?: string[];
+   stream?: boolean;
+   tools?: AnthropicTool[];
+   tool_choice?: { type: 'auto' | 'any' | 'tool'; name?: string };
+   metadata?: { user_id?: string };
+   thinking?: { type: 'enabled'; budget_tokens: number };
+   service_tier?: 'auto' | 'standard_only';
+ }
+
+ /**
+  * Anthropic message format
+  */
+ export interface AnthropicMessage {
+   role: 'user' | 'assistant';
+   content: AnthropicContent[] | string;
+ }
+
+ /**
+  * Anthropic content types
+  */
+ export type AnthropicContent =
+   | AnthropicTextContent
+   | AnthropicImageContent
+   | AnthropicToolUseContent
+   | AnthropicToolResultContent;
+
+ export interface AnthropicTextContent {
+   type: 'text';
+   text: string;
+ }
+
+ export interface AnthropicImageContent {
+   type: 'image';
+   source: {
+     type: 'base64' | 'url';
+     media_type?: string;
+     data?: string;
+     url?: string;
+   };
+ }
+
+ export interface AnthropicToolUseContent {
+   type: 'tool_use';
+   id: string;
+   name: string;
+   input: Record<string, unknown>;
+ }
+
+ export interface AnthropicToolResultContent {
+   type: 'tool_result';
+   tool_use_id: string;
+   content: string | AnthropicContent[];
+   is_error?: boolean;
+ }
+
+ /**
+  * Anthropic tool format
+  */
+ export interface AnthropicTool {
+   name: string;
+   description: string;
+   input_schema: {
+     type: 'object';
+     properties: Record<string, unknown>;
+     required?: string[];
+   };
+ }
+
+ /**
+  * Anthropic response format
+  */
+ export interface AnthropicResponse {
+   id: string;
+   type: 'message';
+   role: 'assistant';
+   content: AnthropicResponseContent[];
+   model: string;
+   stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use' | 'pause_turn' | 'refusal' | null;
+   stop_sequence: string | null;
+   usage: {
+     input_tokens: number;
+     output_tokens: number;
+     cache_creation_input_tokens?: number;
+     cache_read_input_tokens?: number;
+   };
+ }
+
+ export type AnthropicResponseContent =
+   | AnthropicTextContent
+   | AnthropicToolUseContent
+   | AnthropicThinkingContent;
+
+ export interface AnthropicThinkingContent {
+   type: 'thinking';
+   thinking: string;
+   signature?: string;
+ }
+
+ /**
+  * Anthropic streaming event types
+  */
+ export type AnthropicStreamEvent =
+   | AnthropicMessageStartEvent
+   | AnthropicContentBlockStartEvent
+   | AnthropicContentBlockDeltaEvent
+   | AnthropicContentBlockStopEvent
+   | AnthropicMessageDeltaEvent
+   | AnthropicMessageStopEvent
+   | AnthropicPingEvent
+   | AnthropicErrorEvent;
+
+ export interface AnthropicMessageStartEvent {
+   type: 'message_start';
+   message: AnthropicResponse;
+ }
+
+ export interface AnthropicContentBlockStartEvent {
+   type: 'content_block_start';
+   index: number;
+   content_block: AnthropicResponseContent;
+ }
+
+ export interface AnthropicContentBlockDeltaEvent {
+   type: 'content_block_delta';
+   index: number;
+   delta:
+     | { type: 'text_delta'; text: string }
+     | { type: 'thinking_delta'; thinking: string }
+     | { type: 'signature_delta'; signature: string }
+     | { type: 'input_json_delta'; partial_json: string };
+ }
+
+ export interface AnthropicContentBlockStopEvent {
+   type: 'content_block_stop';
+   index: number;
+ }
+
+ export interface AnthropicMessageDeltaEvent {
+   type: 'message_delta';
+   delta: {
+     stop_reason: string | null;
+     stop_sequence: string | null;
+   };
+   usage: {
+     output_tokens: number;
+   };
+ }
+
+ export interface AnthropicMessageStopEvent {
+   type: 'message_stop';
+ }
+
+ export interface AnthropicPingEvent {
+   type: 'ping';
+ }
+
+ export interface AnthropicErrorEvent {
+   type: 'error';
+   error: {
+     type: string;
+     message: string;
+   };
+ }
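
For reference, a hedged example (not part of the package) of a request literal conforming to the AnthropicRequest interface above, in the shape transformRequest produces when request.structure is set: the synthetic json_response tool is appended and tool_choice forces the model to use it. The model id and schema fields are invented for illustration.

```ts
import type { AnthropicRequest } from './types.ts';

// Illustrative only: the structured-output request shape built by transform.ts.
const example: AnthropicRequest = {
  model: 'claude-example',
  max_tokens: 4096,
  messages: [{ role: 'user', content: 'Summarize the weather report.' }],
  tools: [
    {
      name: 'json_response',
      description:
        'Return the response in the specified JSON format. You MUST use this tool to provide your response.',
      input_schema: {
        type: 'object',
        properties: { summary: { type: 'string' } }, // invented example schema
        required: ['summary'],
      },
    },
  ],
  // Force the model to answer through the json_response tool.
  tool_choice: { type: 'tool', name: 'json_response' },
};
```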
package/src/providers/google/index.ts
@@ -0,0 +1,17 @@
+ import { createProvider } from '../../core/provider.ts';
+ import { createLLMHandler } from './llm.ts';
+
+ /**
+  * Google Gemini provider
+  * Supports LLM modality with Gemini models
+  */
+ export const google = createProvider({
+   name: 'google',
+   version: '1.0.0',
+   modalities: {
+     llm: createLLMHandler(),
+   },
+ });
+
+ // Re-export types
+ export type { GoogleLLMParams } from './types.ts';
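
As a usage note, a hedged sketch (not part of this package) of assembling another provider with the same createProvider call used above; the provider name, the handler factory, and the relative import paths are hypothetical stand-ins.

```ts
// Hypothetical file at src/providers/acme/index.ts, mirroring the google provider above.
import { createProvider } from '../../core/provider.ts';
// Stand-in for a real modality handler factory such as createLLMHandler in ./llm.ts above.
import { createLLMHandler } from './llm.ts';

// A provider is a name, a version, and a map of modality handlers.
export const acme = createProvider({
  name: 'acme',
  version: '1.0.0',
  modalities: {
    llm: createLLMHandler(),
  },
});
```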