@providerprotocol/ai 0.0.10 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/dist/index.d.ts +7 -1
  2. package/dist/index.js +37 -9
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -10
  5. package/src/anthropic/index.ts +0 -3
  6. package/src/core/image.ts +0 -188
  7. package/src/core/llm.ts +0 -624
  8. package/src/core/provider.ts +0 -92
  9. package/src/google/index.ts +0 -3
  10. package/src/http/errors.ts +0 -112
  11. package/src/http/fetch.ts +0 -210
  12. package/src/http/index.ts +0 -31
  13. package/src/http/keys.ts +0 -136
  14. package/src/http/retry.ts +0 -205
  15. package/src/http/sse.ts +0 -136
  16. package/src/index.ts +0 -32
  17. package/src/ollama/index.ts +0 -3
  18. package/src/openai/index.ts +0 -39
  19. package/src/openrouter/index.ts +0 -11
  20. package/src/providers/anthropic/index.ts +0 -17
  21. package/src/providers/anthropic/llm.ts +0 -196
  22. package/src/providers/anthropic/transform.ts +0 -434
  23. package/src/providers/anthropic/types.ts +0 -213
  24. package/src/providers/google/index.ts +0 -17
  25. package/src/providers/google/llm.ts +0 -203
  26. package/src/providers/google/transform.ts +0 -447
  27. package/src/providers/google/types.ts +0 -214
  28. package/src/providers/ollama/index.ts +0 -43
  29. package/src/providers/ollama/llm.ts +0 -272
  30. package/src/providers/ollama/transform.ts +0 -434
  31. package/src/providers/ollama/types.ts +0 -260
  32. package/src/providers/openai/index.ts +0 -186
  33. package/src/providers/openai/llm.completions.ts +0 -201
  34. package/src/providers/openai/llm.responses.ts +0 -211
  35. package/src/providers/openai/transform.completions.ts +0 -561
  36. package/src/providers/openai/transform.responses.ts +0 -708
  37. package/src/providers/openai/types.ts +0 -1249
  38. package/src/providers/openrouter/index.ts +0 -177
  39. package/src/providers/openrouter/llm.completions.ts +0 -201
  40. package/src/providers/openrouter/llm.responses.ts +0 -211
  41. package/src/providers/openrouter/transform.completions.ts +0 -538
  42. package/src/providers/openrouter/transform.responses.ts +0 -742
  43. package/src/providers/openrouter/types.ts +0 -717
  44. package/src/providers/xai/index.ts +0 -223
  45. package/src/providers/xai/llm.completions.ts +0 -201
  46. package/src/providers/xai/llm.messages.ts +0 -195
  47. package/src/providers/xai/llm.responses.ts +0 -211
  48. package/src/providers/xai/transform.completions.ts +0 -565
  49. package/src/providers/xai/transform.messages.ts +0 -448
  50. package/src/providers/xai/transform.responses.ts +0 -678
  51. package/src/providers/xai/types.ts +0 -938
  52. package/src/types/content.ts +0 -133
  53. package/src/types/errors.ts +0 -85
  54. package/src/types/index.ts +0 -105
  55. package/src/types/llm.ts +0 -211
  56. package/src/types/messages.ts +0 -205
  57. package/src/types/provider.ts +0 -195
  58. package/src/types/schema.ts +0 -58
  59. package/src/types/stream.ts +0 -146
  60. package/src/types/thread.ts +0 -226
  61. package/src/types/tool.ts +0 -88
  62. package/src/types/turn.ts +0 -118
  63. package/src/utils/id.ts +0 -28
  64. package/src/xai/index.ts +0 -41
package/src/providers/google/types.ts
@@ -1,214 +0,0 @@
- /**
-  * Google Gemini-specific LLM parameters
-  */
- export interface GoogleLLMParams {
-   /** Maximum number of tokens to generate */
-   maxOutputTokens?: number;
-
-   /** Temperature for randomness (0.0 - 2.0) */
-   temperature?: number;
-
-   /** Top-p (nucleus) sampling */
-   topP?: number;
-
-   /** Top-k sampling */
-   topK?: number;
-
-   /** Stop sequences */
-   stopSequences?: string[];
-
-   /** Number of candidates to generate */
-   candidateCount?: number;
-
-   /** Response MIME type */
-   responseMimeType?: 'text/plain' | 'application/json';
-
-   /** Response schema for structured output */
-   responseSchema?: Record<string, unknown>;
-
-   /**
-    * Presence penalty for new topics
-    * Positive values encourage discussing new topics
-    */
-   presencePenalty?: number;
-
-   /**
-    * Frequency penalty for repeated tokens
-    * Positive values discourage repetition
-    */
-   frequencyPenalty?: number;
-
-   /**
-    * Seed for deterministic sampling
-    * Same seed with same parameters should produce same results
-    */
-   seed?: number;
-
-   /**
-    * Whether to return log probabilities in response
-    */
-   responseLogprobs?: boolean;
-
-   /**
-    * Number of log probabilities to return (requires responseLogprobs: true)
-    */
-   logprobs?: number;
-
-   /**
-    * Whether to include audio timestamps in response
-    */
-   audioTimestamp?: boolean;
-
-   /**
-    * Thinking/reasoning configuration for Gemini 3+ models
-    */
-   thinkingConfig?: GoogleThinkingConfig;
- }
-
- /**
-  * Thinking configuration for Gemini 3+ models
-  */
- export interface GoogleThinkingConfig {
-   /** Whether thinking is enabled */
-   thinkingBudget?: number;
- }
-
- /**
-  * Google API request body
-  */
- export interface GoogleRequest {
-   contents: GoogleContent[];
-   systemInstruction?: {
-     parts: GooglePart[];
-   };
-   generationConfig?: {
-     maxOutputTokens?: number;
-     temperature?: number;
-     topP?: number;
-     topK?: number;
-     stopSequences?: string[];
-     candidateCount?: number;
-     responseMimeType?: string;
-     responseSchema?: Record<string, unknown>;
-     presencePenalty?: number;
-     frequencyPenalty?: number;
-     seed?: number;
-     responseLogprobs?: boolean;
-     logprobs?: number;
-     audioTimestamp?: boolean;
-     thinkingConfig?: GoogleThinkingConfig;
-   };
-   tools?: GoogleTool[];
-   safetySettings?: GoogleSafetySetting[];
- }
-
- /**
-  * Google content (message) format
-  */
- export interface GoogleContent {
-   role: 'user' | 'model';
-   parts: GooglePart[];
- }
-
- /**
-  * Google content part types
-  */
- export type GooglePart =
-   | GoogleTextPart
-   | GoogleImagePart
-   | GoogleFunctionCallPart
-   | GoogleFunctionResponsePart;
-
- export interface GoogleTextPart {
-   text: string;
- }
-
- export interface GoogleImagePart {
-   inlineData: {
-     mimeType: string;
-     data: string;
-   };
- }
-
- export interface GoogleFunctionCallPart {
-   functionCall: {
-     name: string;
-     args: Record<string, unknown>;
-   };
-   /** Gemini 3+ thought signature for multi-turn tool calls */
-   thoughtSignature?: string;
- }
-
- export interface GoogleFunctionResponsePart {
-   functionResponse: {
-     name: string;
-     response: Record<string, unknown>;
-   };
- }
-
- /**
-  * Google tool format
-  */
- export interface GoogleTool {
-   functionDeclarations: GoogleFunctionDeclaration[];
- }
-
- export interface GoogleFunctionDeclaration {
-   name: string;
-   description: string;
-   parameters: {
-     type: 'object';
-     properties: Record<string, unknown>;
-     required?: string[];
-   };
- }
-
- /**
-  * Google safety setting
-  */
- export interface GoogleSafetySetting {
-   category: string;
-   threshold: string;
- }
-
- /**
-  * Google response format
-  */
- export interface GoogleResponse {
-   candidates: GoogleCandidate[];
-   usageMetadata?: {
-     promptTokenCount: number;
-     candidatesTokenCount: number;
-     totalTokenCount: number;
-   };
- }
-
- export interface GoogleCandidate {
-   content: {
-     role: 'model';
-     parts: GoogleResponsePart[];
-   };
-   finishReason: 'STOP' | 'MAX_TOKENS' | 'SAFETY' | 'RECITATION' | 'OTHER' | 'TOOL_USE' | null;
-   index: number;
-   safetyRatings?: GoogleSafetyRating[];
- }
-
- export type GoogleResponsePart = GoogleTextPart | GoogleFunctionCallPart;
-
- export interface GoogleSafetyRating {
-   category: string;
-   probability: string;
- }
-
- /**
-  * Google streaming response chunk
-  * Same structure as regular response but may be partial
-  */
- export interface GoogleStreamChunk {
-   candidates?: GoogleCandidate[];
-   usageMetadata?: {
-     promptTokenCount: number;
-     candidatesTokenCount: number;
-     totalTokenCount: number;
-   };
- }
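For reference, a minimal sketch of how the removed interfaces composed a request body; the prompt, parameter values, and thinking budget below are illustrative only and are not taken from the package:

```ts
// Sketch: a GoogleRequest literal built from the interfaces deleted above.
// All concrete values here are made up for illustration.
const request: GoogleRequest = {
  contents: [
    { role: 'user', parts: [{ text: 'Summarize this document.' }] },
  ],
  systemInstruction: { parts: [{ text: 'Be concise.' }] },
  generationConfig: {
    maxOutputTokens: 1024,
    temperature: 0.7,
    responseMimeType: 'application/json',
    thinkingConfig: { thinkingBudget: 2048 },
  },
};
```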
package/src/providers/ollama/index.ts
@@ -1,43 +0,0 @@
- import { createProvider } from '../../core/provider.ts';
- import { createLLMHandler } from './llm.ts';
-
- /**
-  * Ollama provider
-  * Supports LLM modality with local Ollama models
-  *
-  * Ollama is a local LLM server that supports many open-source models including:
-  * - Llama 3.x
-  * - Mistral
-  * - Mixtral
-  * - Gemma
-  * - Qwen
-  * - DeepSeek
-  * - Phi
-  * - And many more
-  *
-  * @example
-  * ```ts
-  * import { llm } from 'provider-protocol';
-  * import { ollama } from 'provider-protocol/ollama';
-  *
-  * const model = llm(ollama('llama3.2'));
-  * const result = await model.generate('Hello, how are you?');
-  * ```
-  *
-  * @example Custom server URL
-  * ```ts
-  * const model = llm(ollama('llama3.2'), {
-  *   baseUrl: 'http://my-ollama-server:11434',
-  * });
-  * ```
-  */
- export const ollama = createProvider({
-   name: 'ollama',
-   version: '1.0.0',
-   modalities: {
-     llm: createLLMHandler(),
-   },
- });
-
- // Re-export types
- export type { OllamaLLMParams } from './types.ts';
package/src/providers/ollama/llm.ts
@@ -1,272 +0,0 @@
- import type {
-   LLMHandler,
-   BoundLLMModel,
-   LLMRequest,
-   LLMResponse,
-   LLMStreamResult,
-   LLMCapabilities,
- } from '../../types/llm.ts';
- import type { StreamEvent } from '../../types/stream.ts';
- import type { LLMProvider } from '../../types/provider.ts';
- import { UPPError } from '../../types/errors.ts';
- import { resolveApiKey } from '../../http/keys.ts';
- import { doFetch, doStreamFetch } from '../../http/fetch.ts';
- import { normalizeHttpError } from '../../http/errors.ts';
- import type { OllamaLLMParams, OllamaResponse, OllamaStreamChunk } from './types.ts';
- import {
-   transformRequest,
-   transformResponse,
-   transformStreamChunk,
-   createStreamState,
-   buildResponseFromState,
- } from './transform.ts';
-
- const OLLAMA_DEFAULT_URL = 'http://localhost:11434';
-
- /**
-  * Ollama API capabilities
-  * Note: Tool calling is disabled - Ollama recommends using their
-  * OpenAI-compatible API (/v1/chat/completions) for tool calling.
-  * Use the OpenAI provider with baseUrl pointed to Ollama for tools.
-  */
- const OLLAMA_CAPABILITIES: LLMCapabilities = {
-   streaming: true,
-   tools: false,
-   structuredOutput: true,
-   imageInput: true,
-   videoInput: false,
-   audioInput: false,
- };
-
- /**
-  * Parse Ollama's newline-delimited JSON stream
-  */
- async function* parseOllamaStream(
-   body: ReadableStream<Uint8Array>
- ): AsyncGenerator<OllamaStreamChunk, void, unknown> {
-   const reader = body.getReader();
-   const decoder = new TextDecoder();
-   let buffer = '';
-
-   try {
-     while (true) {
-       const { done, value } = await reader.read();
-       if (done) break;
-
-       buffer += decoder.decode(value, { stream: true });
-
-       // Process complete lines (Ollama uses newline-delimited JSON)
-       const lines = buffer.split('\n');
-       buffer = lines.pop() ?? ''; // Keep incomplete line in buffer
-
-       for (const line of lines) {
-         const trimmed = line.trim();
-         if (!trimmed) continue;
-
-         try {
-           const chunk = JSON.parse(trimmed) as OllamaStreamChunk;
-           yield chunk;
-         } catch {
-           // Skip invalid JSON lines
-         }
-       }
-     }
-
-     // Process any remaining buffer
-     if (buffer.trim()) {
-       try {
-         const chunk = JSON.parse(buffer.trim()) as OllamaStreamChunk;
-         yield chunk;
-       } catch {
-         // Skip invalid JSON
-       }
-     }
-   } finally {
-     reader.releaseLock();
-   }
- }
-
- /**
-  * Create Ollama LLM handler
-  */
- export function createLLMHandler(): LLMHandler<OllamaLLMParams> {
-   // Provider reference injected by createProvider() after construction
-   let providerRef: LLMProvider<OllamaLLMParams> | null = null;
-
-   return {
-     _setProvider(provider: LLMProvider<OllamaLLMParams>) {
-       providerRef = provider;
-     },
-
-     bind(modelId: string): BoundLLMModel<OllamaLLMParams> {
-       // Use the injected provider reference (set by createProvider)
-       if (!providerRef) {
-         throw new UPPError(
-           'Provider reference not set. Handler must be used with createProvider().',
-           'INVALID_REQUEST',
-           'ollama',
-           'llm'
-         );
-       }
-
-       const model: BoundLLMModel<OllamaLLMParams> = {
-         modelId,
-         capabilities: OLLAMA_CAPABILITIES,
-
-         get provider(): LLMProvider<OllamaLLMParams> {
-           return providerRef!;
-         },
-
-         async complete(request: LLMRequest<OllamaLLMParams>): Promise<LLMResponse> {
-           // Ollama doesn't require an API key by default, but may use one for auth
-           let apiKey: string | undefined;
-           try {
-             apiKey = await resolveApiKey(
-               request.config,
-               'OLLAMA_API_KEY',
-               'ollama',
-               'llm'
-             );
-           } catch {
-             // API key is optional for Ollama
-           }
-
-           const baseUrl = request.config.baseUrl ?? OLLAMA_DEFAULT_URL;
-           const url = `${baseUrl}/api/chat`;
-           const body = transformRequest(request, modelId);
-           body.stream = false;
-
-           const headers: Record<string, string> = {
-             'Content-Type': 'application/json',
-           };
-
-           if (apiKey) {
-             headers['Authorization'] = `Bearer ${apiKey}`;
-           }
-
-           const response = await doFetch(
-             url,
-             {
-               method: 'POST',
-               headers,
-               body: JSON.stringify(body),
-               signal: request.signal,
-             },
-             request.config,
-             'ollama',
-             'llm'
-           );
-
-           const data = (await response.json()) as OllamaResponse;
-           return transformResponse(data);
-         },
-
-         stream(request: LLMRequest<OllamaLLMParams>): LLMStreamResult {
-           const state = createStreamState();
-           let responseResolve: (value: LLMResponse) => void;
-           let responseReject: (error: Error) => void;
-
-           const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
-             responseResolve = resolve;
-             responseReject = reject;
-           });
-
-           async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
-             try {
-               // Ollama doesn't require an API key by default
-               let apiKey: string | undefined;
-               try {
-                 apiKey = await resolveApiKey(
-                   request.config,
-                   'OLLAMA_API_KEY',
-                   'ollama',
-                   'llm'
-                 );
-               } catch {
-                 // API key is optional for Ollama
-               }
-
-               const baseUrl = request.config.baseUrl ?? OLLAMA_DEFAULT_URL;
-               const url = `${baseUrl}/api/chat`;
-               const body = transformRequest(request, modelId);
-               body.stream = true;
-
-               const headers: Record<string, string> = {
-                 'Content-Type': 'application/json',
-               };
-
-               if (apiKey) {
-                 headers['Authorization'] = `Bearer ${apiKey}`;
-               }
-
-               const response = await doStreamFetch(
-                 url,
-                 {
-                   method: 'POST',
-                   headers,
-                   body: JSON.stringify(body),
-                   signal: request.signal,
-                 },
-                 request.config,
-                 'ollama',
-                 'llm'
-               );
-
-               if (!response.ok) {
-                 const error = await normalizeHttpError(response, 'ollama', 'llm');
-                 responseReject(error);
-                 throw error;
-               }
-
-               if (!response.body) {
-                 const error = new UPPError(
-                   'No response body for streaming request',
-                   'PROVIDER_ERROR',
-                   'ollama',
-                   'llm'
-                 );
-                 responseReject(error);
-                 throw error;
-               }
-
-               // Parse Ollama's newline-delimited JSON stream
-               for await (const chunk of parseOllamaStream(response.body)) {
-                 // Check for error in chunk
-                 if ('error' in chunk && typeof (chunk as Record<string, unknown>).error === 'string') {
-                   const error = new UPPError(
-                     (chunk as Record<string, unknown>).error as string,
-                     'PROVIDER_ERROR',
-                     'ollama',
-                     'llm'
-                   );
-                   responseReject(error);
-                   throw error;
-                 }
-
-                 const events = transformStreamChunk(chunk, state);
-                 for (const event of events) {
-                   yield event;
-                 }
-               }
-
-               // Build final response
-               responseResolve(buildResponseFromState(state));
-             } catch (error) {
-               responseReject(error as Error);
-               throw error;
-             }
-           }
-
-           return {
-             [Symbol.asyncIterator]() {
-               return generateEvents();
-             },
-             response: responsePromise,
-           };
-         },
-       };
-
-       return model;
-     },
-   };
- }
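The removed capability note recommends routing tool calls through Ollama's OpenAI-compatible endpoint rather than the native /api/chat API. A minimal sketch of that workaround follows, assuming the openai provider is exported from 'provider-protocol/openai' and accepts a baseUrl option the same way the ollama provider does (both assumptions are inferred from the examples in this diff, not confirmed against the package):

```ts
// Sketch: use the OpenAI-compatible /v1 endpoint that Ollama exposes
// when tool calling is needed; authentication requirements may differ
// and are not shown here.
import { llm } from 'provider-protocol';
import { openai } from 'provider-protocol/openai'; // import path assumed by analogy with /ollama

const model = llm(openai('llama3.2'), {
  baseUrl: 'http://localhost:11434/v1', // Ollama's OpenAI-compatible API
});
```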