@providerprotocol/ai 0.0.11 → 0.0.13

Files changed (104)
  1. package/dist/anthropic/index.d.ts +51 -15
  2. package/dist/anthropic/index.js +54 -19
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-SUNYWHTH.js → chunk-MOU4U3PO.js} +55 -3
  5. package/dist/chunk-MOU4U3PO.js.map +1 -0
  6. package/dist/{chunk-Y6Q7JCNP.js → chunk-MSR5P65T.js} +1 -1
  7. package/dist/chunk-MSR5P65T.js.map +1 -0
  8. package/dist/{chunk-W4BB4BG2.js → chunk-SVYROCLD.js} +31 -11
  9. package/dist/chunk-SVYROCLD.js.map +1 -0
  10. package/dist/chunk-U4JJC2YX.js +234 -0
  11. package/dist/chunk-U4JJC2YX.js.map +1 -0
  12. package/dist/{chunk-X5G4EHL7.js → chunk-Z7RBRCRN.js} +1 -1
  13. package/dist/chunk-Z7RBRCRN.js.map +1 -0
  14. package/dist/google/index.d.ts +376 -7
  15. package/dist/google/index.js +127 -15
  16. package/dist/google/index.js.map +1 -1
  17. package/dist/http/index.d.ts +222 -25
  18. package/dist/http/index.js +3 -3
  19. package/dist/index.d.ts +1482 -198
  20. package/dist/index.js +233 -49
  21. package/dist/index.js.map +1 -1
  22. package/dist/ollama/index.d.ts +92 -20
  23. package/dist/ollama/index.js +17 -7
  24. package/dist/ollama/index.js.map +1 -1
  25. package/dist/openai/index.d.ts +340 -61
  26. package/dist/openai/index.js +57 -15
  27. package/dist/openai/index.js.map +1 -1
  28. package/dist/openrouter/index.d.ts +107 -51
  29. package/dist/openrouter/index.js +36 -8
  30. package/dist/openrouter/index.js.map +1 -1
  31. package/dist/provider-mKkz7Q9U.d.ts +488 -0
  32. package/dist/retry-Dh70lgr0.d.ts +508 -0
  33. package/dist/xai/index.d.ts +97 -22
  34. package/dist/xai/index.js +55 -19
  35. package/dist/xai/index.js.map +1 -1
  36. package/package.json +8 -12
  37. package/dist/chunk-CUCRF5W6.js +0 -136
  38. package/dist/chunk-CUCRF5W6.js.map +0 -1
  39. package/dist/chunk-SUNYWHTH.js.map +0 -1
  40. package/dist/chunk-W4BB4BG2.js.map +0 -1
  41. package/dist/chunk-X5G4EHL7.js.map +0 -1
  42. package/dist/chunk-Y6Q7JCNP.js.map +0 -1
  43. package/dist/provider-CUJWjgNl.d.ts +0 -192
  44. package/dist/retry-I2661_rv.d.ts +0 -118
  45. package/src/anthropic/index.ts +0 -3
  46. package/src/core/image.ts +0 -188
  47. package/src/core/llm.ts +0 -650
  48. package/src/core/provider.ts +0 -92
  49. package/src/google/index.ts +0 -3
  50. package/src/http/errors.ts +0 -112
  51. package/src/http/fetch.ts +0 -210
  52. package/src/http/index.ts +0 -31
  53. package/src/http/keys.ts +0 -136
  54. package/src/http/retry.ts +0 -205
  55. package/src/http/sse.ts +0 -136
  56. package/src/index.ts +0 -32
  57. package/src/ollama/index.ts +0 -3
  58. package/src/openai/index.ts +0 -39
  59. package/src/openrouter/index.ts +0 -11
  60. package/src/providers/anthropic/index.ts +0 -17
  61. package/src/providers/anthropic/llm.ts +0 -196
  62. package/src/providers/anthropic/transform.ts +0 -434
  63. package/src/providers/anthropic/types.ts +0 -213
  64. package/src/providers/google/index.ts +0 -17
  65. package/src/providers/google/llm.ts +0 -203
  66. package/src/providers/google/transform.ts +0 -447
  67. package/src/providers/google/types.ts +0 -214
  68. package/src/providers/ollama/index.ts +0 -43
  69. package/src/providers/ollama/llm.ts +0 -272
  70. package/src/providers/ollama/transform.ts +0 -434
  71. package/src/providers/ollama/types.ts +0 -260
  72. package/src/providers/openai/index.ts +0 -186
  73. package/src/providers/openai/llm.completions.ts +0 -201
  74. package/src/providers/openai/llm.responses.ts +0 -211
  75. package/src/providers/openai/transform.completions.ts +0 -561
  76. package/src/providers/openai/transform.responses.ts +0 -708
  77. package/src/providers/openai/types.ts +0 -1249
  78. package/src/providers/openrouter/index.ts +0 -177
  79. package/src/providers/openrouter/llm.completions.ts +0 -201
  80. package/src/providers/openrouter/llm.responses.ts +0 -211
  81. package/src/providers/openrouter/transform.completions.ts +0 -538
  82. package/src/providers/openrouter/transform.responses.ts +0 -742
  83. package/src/providers/openrouter/types.ts +0 -717
  84. package/src/providers/xai/index.ts +0 -223
  85. package/src/providers/xai/llm.completions.ts +0 -201
  86. package/src/providers/xai/llm.messages.ts +0 -195
  87. package/src/providers/xai/llm.responses.ts +0 -211
  88. package/src/providers/xai/transform.completions.ts +0 -565
  89. package/src/providers/xai/transform.messages.ts +0 -448
  90. package/src/providers/xai/transform.responses.ts +0 -678
  91. package/src/providers/xai/types.ts +0 -938
  92. package/src/types/content.ts +0 -133
  93. package/src/types/errors.ts +0 -85
  94. package/src/types/index.ts +0 -105
  95. package/src/types/llm.ts +0 -211
  96. package/src/types/messages.ts +0 -205
  97. package/src/types/provider.ts +0 -195
  98. package/src/types/schema.ts +0 -58
  99. package/src/types/stream.ts +0 -188
  100. package/src/types/thread.ts +0 -226
  101. package/src/types/tool.ts +0 -88
  102. package/src/types/turn.ts +0 -118
  103. package/src/utils/id.ts +0 -28
  104. package/src/xai/index.ts +0 -41
package/src/providers/google/types.ts
@@ -1,214 +0,0 @@
- /**
-  * Google Gemini-specific LLM parameters
-  */
- export interface GoogleLLMParams {
-   /** Maximum number of tokens to generate */
-   maxOutputTokens?: number;
-
-   /** Temperature for randomness (0.0 - 2.0) */
-   temperature?: number;
-
-   /** Top-p (nucleus) sampling */
-   topP?: number;
-
-   /** Top-k sampling */
-   topK?: number;
-
-   /** Stop sequences */
-   stopSequences?: string[];
-
-   /** Number of candidates to generate */
-   candidateCount?: number;
-
-   /** Response MIME type */
-   responseMimeType?: 'text/plain' | 'application/json';
-
-   /** Response schema for structured output */
-   responseSchema?: Record<string, unknown>;
-
-   /**
-    * Presence penalty for new topics
-    * Positive values encourage discussing new topics
-    */
-   presencePenalty?: number;
-
-   /**
-    * Frequency penalty for repeated tokens
-    * Positive values discourage repetition
-    */
-   frequencyPenalty?: number;
-
-   /**
-    * Seed for deterministic sampling
-    * Same seed with same parameters should produce same results
-    */
-   seed?: number;
-
-   /**
-    * Whether to return log probabilities in response
-    */
-   responseLogprobs?: boolean;
-
-   /**
-    * Number of log probabilities to return (requires responseLogprobs: true)
-    */
-   logprobs?: number;
-
-   /**
-    * Whether to include audio timestamps in response
-    */
-   audioTimestamp?: boolean;
-
-   /**
-    * Thinking/reasoning configuration for Gemini 3+ models
-    */
-   thinkingConfig?: GoogleThinkingConfig;
- }
-
- /**
-  * Thinking configuration for Gemini 3+ models
-  */
- export interface GoogleThinkingConfig {
-   /** Token budget for thinking */
-   thinkingBudget?: number;
- }
-
- /**
-  * Google API request body
-  */
- export interface GoogleRequest {
-   contents: GoogleContent[];
-   systemInstruction?: {
-     parts: GooglePart[];
-   };
-   generationConfig?: {
-     maxOutputTokens?: number;
-     temperature?: number;
-     topP?: number;
-     topK?: number;
-     stopSequences?: string[];
-     candidateCount?: number;
-     responseMimeType?: string;
-     responseSchema?: Record<string, unknown>;
-     presencePenalty?: number;
-     frequencyPenalty?: number;
-     seed?: number;
-     responseLogprobs?: boolean;
-     logprobs?: number;
-     audioTimestamp?: boolean;
-     thinkingConfig?: GoogleThinkingConfig;
-   };
-   tools?: GoogleTool[];
-   safetySettings?: GoogleSafetySetting[];
- }
-
- /**
-  * Google content (message) format
-  */
- export interface GoogleContent {
-   role: 'user' | 'model';
-   parts: GooglePart[];
- }
-
- /**
-  * Google content part types
-  */
- export type GooglePart =
-   | GoogleTextPart
-   | GoogleImagePart
-   | GoogleFunctionCallPart
-   | GoogleFunctionResponsePart;
-
- export interface GoogleTextPart {
-   text: string;
- }
-
- export interface GoogleImagePart {
-   inlineData: {
-     mimeType: string;
-     data: string;
-   };
- }
-
- export interface GoogleFunctionCallPart {
-   functionCall: {
-     name: string;
-     args: Record<string, unknown>;
-   };
-   /** Gemini 3+ thought signature for multi-turn tool calls */
-   thoughtSignature?: string;
- }
-
- export interface GoogleFunctionResponsePart {
-   functionResponse: {
-     name: string;
-     response: Record<string, unknown>;
-   };
- }
-
- /**
-  * Google tool format
-  */
- export interface GoogleTool {
-   functionDeclarations: GoogleFunctionDeclaration[];
- }
-
- export interface GoogleFunctionDeclaration {
-   name: string;
-   description: string;
-   parameters: {
-     type: 'object';
-     properties: Record<string, unknown>;
-     required?: string[];
-   };
- }
-
- /**
-  * Google safety setting
-  */
- export interface GoogleSafetySetting {
-   category: string;
-   threshold: string;
- }
-
- /**
-  * Google response format
-  */
- export interface GoogleResponse {
-   candidates: GoogleCandidate[];
-   usageMetadata?: {
-     promptTokenCount: number;
-     candidatesTokenCount: number;
-     totalTokenCount: number;
-   };
- }
-
- export interface GoogleCandidate {
-   content: {
-     role: 'model';
-     parts: GoogleResponsePart[];
-   };
-   finishReason: 'STOP' | 'MAX_TOKENS' | 'SAFETY' | 'RECITATION' | 'OTHER' | 'TOOL_USE' | null;
-   index: number;
-   safetyRatings?: GoogleSafetyRating[];
- }
-
- export type GoogleResponsePart = GoogleTextPart | GoogleFunctionCallPart;
-
- export interface GoogleSafetyRating {
-   category: string;
-   probability: string;
- }
-
- /**
-  * Google streaming response chunk
-  * Same structure as regular response but may be partial
-  */
- export interface GoogleStreamChunk {
-   candidates?: GoogleCandidate[];
-   usageMetadata?: {
-     promptTokenCount: number;
-     candidatesTokenCount: number;
-     totalTokenCount: number;
-   };
- }
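The removed types mirror the Gemini `generateContent` wire format. As a minimal sketch (the values and the base64 placeholder are illustrative, not taken from this package), a request literal that type-checks against the deleted interfaces would look like:

```ts
import type { GoogleRequest } from './types.ts';

// Illustrative request body conforming to the removed GoogleRequest shape.
const request: GoogleRequest = {
  contents: [
    { role: 'user', parts: [{ text: 'Describe this image in one sentence.' }] },
    // Images travel as base64 inline data with an explicit MIME type.
    { role: 'user', parts: [{ inlineData: { mimeType: 'image/png', data: '<base64>' } }] },
  ],
  systemInstruction: { parts: [{ text: 'Answer concisely.' }] },
  generationConfig: {
    maxOutputTokens: 1024,
    temperature: 0.7,
    responseMimeType: 'application/json',
    // thinkingBudget is a token count, not an on/off flag.
    thinkingConfig: { thinkingBudget: 2048 },
  },
};
```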
package/src/providers/ollama/index.ts
@@ -1,43 +0,0 @@
- import { createProvider } from '../../core/provider.ts';
- import { createLLMHandler } from './llm.ts';
-
- /**
-  * Ollama provider
-  * Supports LLM modality with local Ollama models
-  *
-  * Ollama is a local LLM server that supports many open-source models including:
-  * - Llama 3.x
-  * - Mistral
-  * - Mixtral
-  * - Gemma
-  * - Qwen
-  * - DeepSeek
-  * - Phi
-  * - And many more
-  *
-  * @example
-  * ```ts
-  * import { llm } from 'provider-protocol';
-  * import { ollama } from 'provider-protocol/ollama';
-  *
-  * const model = llm(ollama('llama3.2'));
-  * const result = await model.generate('Hello, how are you?');
-  * ```
-  *
-  * @example Custom server URL
-  * ```ts
-  * const model = llm(ollama('llama3.2'), {
-  *   baseUrl: 'http://my-ollama-server:11434',
-  * });
-  * ```
-  */
- export const ollama = createProvider({
-   name: 'ollama',
-   version: '1.0.0',
-   modalities: {
-     llm: createLLMHandler(),
-   },
- });
-
- // Re-export types
- export type { OllamaLLMParams } from './types.ts';
package/src/providers/ollama/llm.ts
@@ -1,272 +0,0 @@
- import type {
-   LLMHandler,
-   BoundLLMModel,
-   LLMRequest,
-   LLMResponse,
-   LLMStreamResult,
-   LLMCapabilities,
- } from '../../types/llm.ts';
- import type { StreamEvent } from '../../types/stream.ts';
- import type { LLMProvider } from '../../types/provider.ts';
- import { UPPError } from '../../types/errors.ts';
- import { resolveApiKey } from '../../http/keys.ts';
- import { doFetch, doStreamFetch } from '../../http/fetch.ts';
- import { normalizeHttpError } from '../../http/errors.ts';
- import type { OllamaLLMParams, OllamaResponse, OllamaStreamChunk } from './types.ts';
- import {
-   transformRequest,
-   transformResponse,
-   transformStreamChunk,
-   createStreamState,
-   buildResponseFromState,
- } from './transform.ts';
-
- const OLLAMA_DEFAULT_URL = 'http://localhost:11434';
-
- /**
-  * Ollama API capabilities
-  * Note: Tool calling is disabled - Ollama recommends using their
-  * OpenAI-compatible API (/v1/chat/completions) for tool calling.
-  * Use the OpenAI provider with baseUrl pointed to Ollama for tools.
-  */
- const OLLAMA_CAPABILITIES: LLMCapabilities = {
-   streaming: true,
-   tools: false,
-   structuredOutput: true,
-   imageInput: true,
-   videoInput: false,
-   audioInput: false,
- };
-
- /**
-  * Parse Ollama's newline-delimited JSON stream
-  */
- async function* parseOllamaStream(
-   body: ReadableStream<Uint8Array>
- ): AsyncGenerator<OllamaStreamChunk, void, unknown> {
-   const reader = body.getReader();
-   const decoder = new TextDecoder();
-   let buffer = '';
-
-   try {
-     while (true) {
-       const { done, value } = await reader.read();
-       if (done) break;
-
-       buffer += decoder.decode(value, { stream: true });
-
-       // Process complete lines (Ollama uses newline-delimited JSON)
-       const lines = buffer.split('\n');
-       buffer = lines.pop() ?? ''; // Keep incomplete line in buffer
-
-       for (const line of lines) {
-         const trimmed = line.trim();
-         if (!trimmed) continue;
-
-         try {
-           const chunk = JSON.parse(trimmed) as OllamaStreamChunk;
-           yield chunk;
-         } catch {
-           // Skip invalid JSON lines
-         }
-       }
-     }
-
-     // Process any remaining buffer
-     if (buffer.trim()) {
-       try {
-         const chunk = JSON.parse(buffer.trim()) as OllamaStreamChunk;
-         yield chunk;
-       } catch {
-         // Skip invalid JSON
-       }
-     }
-   } finally {
-     reader.releaseLock();
-   }
- }
-
- /**
-  * Create Ollama LLM handler
-  */
- export function createLLMHandler(): LLMHandler<OllamaLLMParams> {
-   // Provider reference injected by createProvider() after construction
-   let providerRef: LLMProvider<OllamaLLMParams> | null = null;
-
-   return {
-     _setProvider(provider: LLMProvider<OllamaLLMParams>) {
-       providerRef = provider;
-     },
-
-     bind(modelId: string): BoundLLMModel<OllamaLLMParams> {
-       // Use the injected provider reference (set by createProvider)
-       if (!providerRef) {
-         throw new UPPError(
-           'Provider reference not set. Handler must be used with createProvider().',
-           'INVALID_REQUEST',
-           'ollama',
-           'llm'
-         );
-       }
-
-       const model: BoundLLMModel<OllamaLLMParams> = {
-         modelId,
-         capabilities: OLLAMA_CAPABILITIES,
-
-         get provider(): LLMProvider<OllamaLLMParams> {
-           return providerRef!;
-         },
-
-         async complete(request: LLMRequest<OllamaLLMParams>): Promise<LLMResponse> {
-           // Ollama doesn't require an API key by default, but may use one for auth
-           let apiKey: string | undefined;
-           try {
-             apiKey = await resolveApiKey(
-               request.config,
-               'OLLAMA_API_KEY',
-               'ollama',
-               'llm'
-             );
-           } catch {
-             // API key is optional for Ollama
-           }
-
-           const baseUrl = request.config.baseUrl ?? OLLAMA_DEFAULT_URL;
-           const url = `${baseUrl}/api/chat`;
-           const body = transformRequest(request, modelId);
-           body.stream = false;
-
-           const headers: Record<string, string> = {
-             'Content-Type': 'application/json',
-           };
-
-           if (apiKey) {
-             headers['Authorization'] = `Bearer ${apiKey}`;
-           }
-
-           const response = await doFetch(
-             url,
-             {
-               method: 'POST',
-               headers,
-               body: JSON.stringify(body),
-               signal: request.signal,
-             },
-             request.config,
-             'ollama',
-             'llm'
-           );
-
-           const data = (await response.json()) as OllamaResponse;
-           return transformResponse(data);
-         },
-
-         stream(request: LLMRequest<OllamaLLMParams>): LLMStreamResult {
-           const state = createStreamState();
-           let responseResolve: (value: LLMResponse) => void;
-           let responseReject: (error: Error) => void;
-
-           const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
-             responseResolve = resolve;
-             responseReject = reject;
-           });
-
-           async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
-             try {
-               // Ollama doesn't require an API key by default
-               let apiKey: string | undefined;
-               try {
-                 apiKey = await resolveApiKey(
-                   request.config,
-                   'OLLAMA_API_KEY',
-                   'ollama',
-                   'llm'
-                 );
-               } catch {
-                 // API key is optional for Ollama
-               }
-
-               const baseUrl = request.config.baseUrl ?? OLLAMA_DEFAULT_URL;
-               const url = `${baseUrl}/api/chat`;
-               const body = transformRequest(request, modelId);
-               body.stream = true;
-
-               const headers: Record<string, string> = {
-                 'Content-Type': 'application/json',
-               };
-
-               if (apiKey) {
-                 headers['Authorization'] = `Bearer ${apiKey}`;
-               }
-
-               const response = await doStreamFetch(
-                 url,
-                 {
-                   method: 'POST',
-                   headers,
-                   body: JSON.stringify(body),
-                   signal: request.signal,
-                 },
-                 request.config,
-                 'ollama',
-                 'llm'
-               );
-
-               if (!response.ok) {
-                 const error = await normalizeHttpError(response, 'ollama', 'llm');
-                 responseReject(error);
-                 throw error;
-               }
-
-               if (!response.body) {
-                 const error = new UPPError(
-                   'No response body for streaming request',
-                   'PROVIDER_ERROR',
-                   'ollama',
-                   'llm'
-                 );
-                 responseReject(error);
-                 throw error;
-               }
-
-               // Parse Ollama's newline-delimited JSON stream
-               for await (const chunk of parseOllamaStream(response.body)) {
-                 // Check for error in chunk
-                 if ('error' in chunk && typeof (chunk as Record<string, unknown>).error === 'string') {
-                   const error = new UPPError(
-                     (chunk as Record<string, unknown>).error as string,
-                     'PROVIDER_ERROR',
-                     'ollama',
-                     'llm'
-                   );
-                   responseReject(error);
-                   throw error;
-                 }
-
-                 const events = transformStreamChunk(chunk, state);
-                 for (const event of events) {
-                   yield event;
-                 }
-               }
-
-               // Build final response
-               responseResolve(buildResponseFromState(state));
-             } catch (error) {
-               responseReject(error as Error);
-               throw error;
-             }
-           }
-
-           return {
-             [Symbol.asyncIterator]() {
-               return generateEvents();
-             },
-             response: responsePromise,
-           };
-         },
-       };
-
-       return model;
-     },
-   };
- }
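The deleted `stream()` implementation returns a dual-surface result: the object is itself an async iterable of `StreamEvent`s driven by `parseOllamaStream`, and it also carries a `response` promise that resolves via `buildResponseFromState` once the NDJSON stream ends, or rejects if an HTTP error occurs or a chunk carries an `error` field. A minimal consumption sketch, assuming the `llm`/`ollama` entry points shown in the deleted index.ts and that `model.stream()` accepts a prompt the same way `generate()` does (both assumptions, not confirmed by this diff):

```ts
import { llm } from 'provider-protocol';
import { ollama } from 'provider-protocol/ollama';

const model = llm(ollama('llama3.2'));

// Assumption: stream() surfaces the handler's LLMStreamResult shape:
// an AsyncIterable<StreamEvent> plus a `response` promise.
const stream = model.stream('Write a haiku about local inference.');

for await (const event of stream) {
  // Each event originates from one newline-delimited JSON chunk.
  console.log(event);
}

// Resolves with the accumulated LLMResponse; rejects on HTTP or chunk errors.
const final = await stream.response;
console.log(final);
```

Since `OLLAMA_CAPABILITIES.tools` is `false`, the capabilities comment above suggests tool calling goes through Ollama's OpenAI-compatible endpoint instead, i.e. using the OpenAI provider with `baseUrl` pointed at `http://localhost:11434/v1`.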