@providerprotocol/ai 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,272 @@
+ import type {
+   LLMHandler,
+   BoundLLMModel,
+   LLMRequest,
+   LLMResponse,
+   LLMStreamResult,
+   LLMCapabilities,
+ } from '../../types/llm.ts';
+ import type { StreamEvent } from '../../types/stream.ts';
+ import type { LLMProvider } from '../../types/provider.ts';
+ import { UPPError } from '../../types/errors.ts';
+ import { resolveApiKey } from '../../http/keys.ts';
+ import { doFetch, doStreamFetch } from '../../http/fetch.ts';
+ import { normalizeHttpError } from '../../http/errors.ts';
+ import type { OllamaLLMParams, OllamaResponse, OllamaStreamChunk } from './types.ts';
+ import {
+   transformRequest,
+   transformResponse,
+   transformStreamChunk,
+   createStreamState,
+   buildResponseFromState,
+ } from './transform.ts';
+
+ const OLLAMA_DEFAULT_URL = 'http://localhost:11434';
+
+ /**
+  * Ollama API capabilities
+  * Note: Tool calling is disabled - Ollama recommends using their
+  * OpenAI-compatible API (/v1/chat/completions) for tool calling.
+  * Use the OpenAI provider with baseUrl pointed to Ollama for tools.
+  */
+ const OLLAMA_CAPABILITIES: LLMCapabilities = {
+   streaming: true,
+   tools: false,
+   structuredOutput: true,
+   imageInput: true,
+   videoInput: false,
+   audioInput: false,
+ };
+
+ /**
+  * Parse Ollama's newline-delimited JSON stream
+  */
+ async function* parseOllamaStream(
+   body: ReadableStream<Uint8Array>
+ ): AsyncGenerator<OllamaStreamChunk, void, unknown> {
+   const reader = body.getReader();
+   const decoder = new TextDecoder();
+   let buffer = '';
+
+   try {
+     while (true) {
+       const { done, value } = await reader.read();
+       if (done) break;
+
+       buffer += decoder.decode(value, { stream: true });
+
+       // Process complete lines (Ollama uses newline-delimited JSON)
+       const lines = buffer.split('\n');
+       buffer = lines.pop() ?? ''; // Keep incomplete line in buffer
+
+       for (const line of lines) {
+         const trimmed = line.trim();
+         if (!trimmed) continue;
+
+         try {
+           const chunk = JSON.parse(trimmed) as OllamaStreamChunk;
+           yield chunk;
+         } catch {
+           // Skip invalid JSON lines
+         }
+       }
+     }
+
+     // Process any remaining buffer
+     if (buffer.trim()) {
+       try {
+         const chunk = JSON.parse(buffer.trim()) as OllamaStreamChunk;
+         yield chunk;
+       } catch {
+         // Skip invalid JSON
+       }
+     }
+   } finally {
+     reader.releaseLock();
+   }
+ }
+
+ /**
+  * Create Ollama LLM handler
+  */
+ export function createLLMHandler(): LLMHandler<OllamaLLMParams> {
+   // Provider reference injected by createProvider() after construction
+   let providerRef: LLMProvider<OllamaLLMParams> | null = null;
+
+   return {
+     _setProvider(provider: LLMProvider<OllamaLLMParams>) {
+       providerRef = provider;
+     },
+
+     bind(modelId: string): BoundLLMModel<OllamaLLMParams> {
+       // Use the injected provider reference (set by createProvider)
+       if (!providerRef) {
+         throw new UPPError(
+           'Provider reference not set. Handler must be used with createProvider().',
+           'INVALID_REQUEST',
+           'ollama',
+           'llm'
+         );
+       }
+
+       const model: BoundLLMModel<OllamaLLMParams> = {
+         modelId,
+         capabilities: OLLAMA_CAPABILITIES,
+
+         get provider(): LLMProvider<OllamaLLMParams> {
+           return providerRef!;
+         },
+
+         async complete(request: LLMRequest<OllamaLLMParams>): Promise<LLMResponse> {
+           // Ollama doesn't require an API key by default, but may use one for auth
+           let apiKey: string | undefined;
+           try {
+             apiKey = await resolveApiKey(
+               request.config,
+               'OLLAMA_API_KEY',
+               'ollama',
+               'llm'
+             );
+           } catch {
+             // API key is optional for Ollama
+           }
+
+           const baseUrl = request.config.baseUrl ?? OLLAMA_DEFAULT_URL;
+           const url = `${baseUrl}/api/chat`;
+           const body = transformRequest(request, modelId);
+           body.stream = false;
+
+           const headers: Record<string, string> = {
+             'Content-Type': 'application/json',
+           };
+
+           if (apiKey) {
+             headers['Authorization'] = `Bearer ${apiKey}`;
+           }
+
+           const response = await doFetch(
+             url,
+             {
+               method: 'POST',
+               headers,
+               body: JSON.stringify(body),
+               signal: request.signal,
+             },
+             request.config,
+             'ollama',
+             'llm'
+           );
+
+           const data = (await response.json()) as OllamaResponse;
+           return transformResponse(data);
+         },
+
+         stream(request: LLMRequest<OllamaLLMParams>): LLMStreamResult {
+           const state = createStreamState();
+           let responseResolve: (value: LLMResponse) => void;
+           let responseReject: (error: Error) => void;
+
+           const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
+             responseResolve = resolve;
+             responseReject = reject;
+           });
+
+           async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
+             try {
+               // Ollama doesn't require an API key by default
+               let apiKey: string | undefined;
+               try {
+                 apiKey = await resolveApiKey(
+                   request.config,
+                   'OLLAMA_API_KEY',
+                   'ollama',
+                   'llm'
+                 );
+               } catch {
+                 // API key is optional for Ollama
+               }
+
+               const baseUrl = request.config.baseUrl ?? OLLAMA_DEFAULT_URL;
+               const url = `${baseUrl}/api/chat`;
+               const body = transformRequest(request, modelId);
+               body.stream = true;
+
+               const headers: Record<string, string> = {
+                 'Content-Type': 'application/json',
+               };
+
+               if (apiKey) {
+                 headers['Authorization'] = `Bearer ${apiKey}`;
+               }
+
+               const response = await doStreamFetch(
+                 url,
+                 {
+                   method: 'POST',
+                   headers,
+                   body: JSON.stringify(body),
+                   signal: request.signal,
+                 },
+                 request.config,
+                 'ollama',
+                 'llm'
+               );
+
+               if (!response.ok) {
+                 const error = await normalizeHttpError(response, 'ollama', 'llm');
+                 responseReject(error);
+                 throw error;
+               }
+
+               if (!response.body) {
+                 const error = new UPPError(
+                   'No response body for streaming request',
+                   'PROVIDER_ERROR',
+                   'ollama',
+                   'llm'
+                 );
+                 responseReject(error);
+                 throw error;
+               }
+
+               // Parse Ollama's newline-delimited JSON stream
+               for await (const chunk of parseOllamaStream(response.body)) {
+                 // Check for error in chunk
+                 if ('error' in chunk && typeof (chunk as Record<string, unknown>).error === 'string') {
+                   const error = new UPPError(
+                     (chunk as Record<string, unknown>).error as string,
+                     'PROVIDER_ERROR',
+                     'ollama',
+                     'llm'
+                   );
+                   responseReject(error);
+                   throw error;
+                 }
+
+                 const events = transformStreamChunk(chunk, state);
+                 for (const event of events) {
+                   yield event;
+                 }
+               }
+
+               // Build final response
+               responseResolve(buildResponseFromState(state));
+             } catch (error) {
+               responseReject(error as Error);
+               throw error;
+             }
+           }
+
+           return {
+             [Symbol.asyncIterator]() {
+               return generateEvents();
+             },
+             response: responsePromise,
+           };
+         },
+       };
+
+       return model;
+     },
+   };
+ }
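
For reference, stream() in the file above returns a value that is both an async iterable of StreamEvent objects and a holder of a final response promise; Ollama's /api/chat endpoint typically emits newline-delimited JSON chunks such as {"message":{"role":"assistant","content":"..."},"done":false}, with a final "done":true chunk carrying timing and token counters. The sketch below is a hypothetical consumer, not part of the package: the structural types are re-declared locally rather than imported, and the event shape is assumed.

// Illustrative consumer sketch, not part of the published package.
// StreamEventLike / LLMResponseLike are assumed, locally declared stand-ins
// for the package's StreamEvent and LLMResponse types.
type StreamEventLike = { type: string; [key: string]: unknown };
type LLMResponseLike = Record<string, unknown>;

interface StreamResultLike extends AsyncIterable<StreamEventLike> {
  response: Promise<LLMResponseLike>;
}

async function consumeStream(result: StreamResultLike): Promise<LLMResponseLike> {
  // Each iteration corresponds to events transformed from one NDJSON chunk.
  for await (const event of result) {
    console.log('stream event:', event.type);
  }
  // Resolved from the accumulated stream state once the generator finishes.
  return result.response;
}

Note that, as written in the diff, the response promise is settled inside generateEvents(), so it resolves or rejects only once the event iterator is actually consumed.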
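
The capabilities block disables tools and points users at Ollama's OpenAI-compatible /v1/chat/completions endpoint instead. The package's own OpenAI provider is not part of this diff, so the following sketch illustrates the idea with the standalone openai npm client; the model name and tool definition are placeholders.

// Illustrative only: tool calling via Ollama's OpenAI-compatible API, using the
// standalone `openai` npm client rather than this package's OpenAI provider.
// Model name and tool definition are placeholders.
import OpenAI from 'openai';

const client = new OpenAI({
  baseURL: 'http://localhost:11434/v1', // Ollama's OpenAI-compatible endpoint
  apiKey: 'ollama', // a local Ollama server ignores the key, but the client requires one
});

async function main(): Promise<void> {
  const completion = await client.chat.completions.create({
    model: 'llama3.1', // placeholder model name
    messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
    tools: [
      {
        type: 'function',
        function: {
          name: 'get_weather', // placeholder tool
          parameters: {
            type: 'object',
            properties: { city: { type: 'string' } },
            required: ['city'],
          },
        },
      },
    ],
  });

  console.log(completion.choices[0].message.tool_calls);
}

main();

The same approach should carry over to this package's OpenAI provider by setting its baseUrl to the Ollama server's /v1 path, as the comment in the file suggests.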