@providerprotocol/ai 0.0.10 → 0.0.12

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (64)
  1. package/dist/index.d.ts +7 -1
  2. package/dist/index.js +37 -9
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -10
  5. package/src/anthropic/index.ts +0 -3
  6. package/src/core/image.ts +0 -188
  7. package/src/core/llm.ts +0 -624
  8. package/src/core/provider.ts +0 -92
  9. package/src/google/index.ts +0 -3
  10. package/src/http/errors.ts +0 -112
  11. package/src/http/fetch.ts +0 -210
  12. package/src/http/index.ts +0 -31
  13. package/src/http/keys.ts +0 -136
  14. package/src/http/retry.ts +0 -205
  15. package/src/http/sse.ts +0 -136
  16. package/src/index.ts +0 -32
  17. package/src/ollama/index.ts +0 -3
  18. package/src/openai/index.ts +0 -39
  19. package/src/openrouter/index.ts +0 -11
  20. package/src/providers/anthropic/index.ts +0 -17
  21. package/src/providers/anthropic/llm.ts +0 -196
  22. package/src/providers/anthropic/transform.ts +0 -434
  23. package/src/providers/anthropic/types.ts +0 -213
  24. package/src/providers/google/index.ts +0 -17
  25. package/src/providers/google/llm.ts +0 -203
  26. package/src/providers/google/transform.ts +0 -447
  27. package/src/providers/google/types.ts +0 -214
  28. package/src/providers/ollama/index.ts +0 -43
  29. package/src/providers/ollama/llm.ts +0 -272
  30. package/src/providers/ollama/transform.ts +0 -434
  31. package/src/providers/ollama/types.ts +0 -260
  32. package/src/providers/openai/index.ts +0 -186
  33. package/src/providers/openai/llm.completions.ts +0 -201
  34. package/src/providers/openai/llm.responses.ts +0 -211
  35. package/src/providers/openai/transform.completions.ts +0 -561
  36. package/src/providers/openai/transform.responses.ts +0 -708
  37. package/src/providers/openai/types.ts +0 -1249
  38. package/src/providers/openrouter/index.ts +0 -177
  39. package/src/providers/openrouter/llm.completions.ts +0 -201
  40. package/src/providers/openrouter/llm.responses.ts +0 -211
  41. package/src/providers/openrouter/transform.completions.ts +0 -538
  42. package/src/providers/openrouter/transform.responses.ts +0 -742
  43. package/src/providers/openrouter/types.ts +0 -717
  44. package/src/providers/xai/index.ts +0 -223
  45. package/src/providers/xai/llm.completions.ts +0 -201
  46. package/src/providers/xai/llm.messages.ts +0 -195
  47. package/src/providers/xai/llm.responses.ts +0 -211
  48. package/src/providers/xai/transform.completions.ts +0 -565
  49. package/src/providers/xai/transform.messages.ts +0 -448
  50. package/src/providers/xai/transform.responses.ts +0 -678
  51. package/src/providers/xai/types.ts +0 -938
  52. package/src/types/content.ts +0 -133
  53. package/src/types/errors.ts +0 -85
  54. package/src/types/index.ts +0 -105
  55. package/src/types/llm.ts +0 -211
  56. package/src/types/messages.ts +0 -205
  57. package/src/types/provider.ts +0 -195
  58. package/src/types/schema.ts +0 -58
  59. package/src/types/stream.ts +0 -146
  60. package/src/types/thread.ts +0 -226
  61. package/src/types/tool.ts +0 -88
  62. package/src/types/turn.ts +0 -118
  63. package/src/utils/id.ts +0 -28
  64. package/src/xai/index.ts +0 -41
package/src/providers/openrouter/index.ts
@@ -1,177 +0,0 @@
- import type {
-   Provider,
-   ModelReference,
-   LLMHandler,
-   LLMProvider,
- } from '../../types/provider.ts';
- import { createCompletionsLLMHandler } from './llm.completions.ts';
- import { createResponsesLLMHandler } from './llm.responses.ts';
- import type { OpenRouterCompletionsParams, OpenRouterResponsesParams, OpenRouterConfig } from './types.ts';
-
- /** Union type for modalities interface */
- type OpenRouterLLMParamsUnion = OpenRouterCompletionsParams | OpenRouterResponsesParams;
-
- /**
-  * OpenRouter provider options
-  */
- export interface OpenRouterProviderOptions {
-   /**
-    * Which API to use:
-    * - 'completions': Chat Completions API (default, recommended)
-    * - 'responses': Responses API (beta)
-    */
-   api?: 'completions' | 'responses';
- }
-
- /**
-  * OpenRouter provider with configurable API mode
-  *
-  * @example
-  * // Using the Chat Completions API (default)
-  * const model = openrouter('openai/gpt-4o');
-  *
-  * @example
-  * // Using the Responses API (beta)
-  * const model = openrouter('openai/gpt-4o', { api: 'responses' });
-  *
-  * @example
-  * // Explicit Completions API
-  * const model = openrouter('anthropic/claude-3.5-sonnet', { api: 'completions' });
-  */
- export interface OpenRouterProvider extends Provider<OpenRouterProviderOptions> {
-   /**
-    * Create a model reference
-    * @param modelId - The model identifier (e.g., 'openai/gpt-4o', 'anthropic/claude-3.5-sonnet', 'meta-llama/llama-3.1-70b-instruct')
-    * @param options - Provider options including API selection
-    */
-   (modelId: string, options?: OpenRouterProviderOptions): ModelReference<OpenRouterProviderOptions>;
-
-   /** Provider name */
-   readonly name: 'openrouter';
-
-   /** Provider version */
-   readonly version: string;
-
-   /** Supported modalities */
-   readonly modalities: {
-     llm: LLMHandler<OpenRouterLLMParamsUnion>;
-   };
- }
-
- /**
-  * Create the OpenRouter provider
-  */
- function createOpenRouterProvider(): OpenRouterProvider {
-   // Track which API mode is currently active for the modalities
-   // Default to 'completions' (unlike OpenAI which defaults to 'responses')
-   let currentApiMode: 'completions' | 'responses' = 'completions';
-
-   // Create handlers eagerly so we can inject provider reference
-   const completionsHandler = createCompletionsLLMHandler();
-   const responsesHandler = createResponsesLLMHandler();
-
-   const fn = function (
-     modelId: string,
-     options?: OpenRouterProviderOptions
-   ): ModelReference<OpenRouterProviderOptions> {
-     const apiMode = options?.api ?? 'completions';
-     currentApiMode = apiMode;
-     return { modelId, provider };
-   };
-
-   // Create a dynamic modalities object that returns the correct handler
-   const modalities = {
-     get llm(): LLMHandler<OpenRouterLLMParamsUnion> {
-       return currentApiMode === 'responses'
-         ? (responsesHandler as unknown as LLMHandler<OpenRouterLLMParamsUnion>)
-         : (completionsHandler as unknown as LLMHandler<OpenRouterLLMParamsUnion>);
-     },
-   };
-
-   // Define properties
-   Object.defineProperties(fn, {
-     name: {
-       value: 'openrouter',
-       writable: false,
-       configurable: true,
-     },
-     version: {
-       value: '1.0.0',
-       writable: false,
-       configurable: true,
-     },
-     modalities: {
-       value: modalities,
-       writable: false,
-       configurable: true,
-     },
-   });
-
-   const provider = fn as OpenRouterProvider;
-
-   // Inject provider reference into both handlers (spec compliance)
-   completionsHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterCompletionsParams>);
-   responsesHandler._setProvider?.(provider as unknown as LLMProvider<OpenRouterResponsesParams>);
-
-   return provider;
- }
-
- /**
-  * OpenRouter provider
-  *
-  * Supports both the Chat Completions API (default) and Responses API (beta).
-  *
-  * OpenRouter is a unified API that provides access to hundreds of AI models
-  * through a single endpoint, including models from OpenAI, Anthropic, Google,
-  * Meta, Mistral, and many others.
-  *
-  * @example
-  * ```ts
-  * import { openrouter } from './providers/openrouter';
-  * import { llm } from './core/llm';
-  *
-  * // Using Chat Completions API (default, recommended)
-  * const model = llm({
-  *   model: openrouter('openai/gpt-4o'),
-  *   params: { max_tokens: 1000 }
-  * });
-  *
-  * // Using Responses API (beta)
-  * const betaModel = llm({
-  *   model: openrouter('openai/gpt-4o', { api: 'responses' }),
-  *   params: { max_output_tokens: 1000 }
-  * });
-  *
-  * // Using OpenRouter-specific features
-  * const routedModel = llm({
-  *   model: openrouter('openai/gpt-4o'),
-  *   params: {
-  *     max_tokens: 1000,
-  *     // Fallback routing
-  *     models: ['openai/gpt-4o', 'anthropic/claude-3.5-sonnet'],
-  *     route: 'fallback',
-  *     // Provider preferences
-  *     provider: {
-  *       allow_fallbacks: true,
-  *       require_parameters: true,
-  *     },
-  *   }
-  * });
-  *
-  * // Generate
-  * const turn = await model.generate('Hello!');
-  * console.log(turn.response.text);
-  * ```
-  */
- export const openrouter = createOpenRouterProvider();
-
- // Re-export types
- export type {
-   OpenRouterCompletionsParams,
-   OpenRouterResponsesParams,
-   OpenRouterConfig,
-   OpenRouterAPIMode,
-   OpenRouterModelOptions,
-   OpenRouterModelReference,
-   OpenRouterProviderPreferences,
- } from './types.ts';
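Note: in the removed factory above, the `api` option is never stored on the returned `ModelReference`; it flips a provider-global `currentApiMode` that the `modalities.llm` getter reads on each access. A minimal sketch of the consequence, using only names from the deleted `index.ts`:

```ts
// Two references created with different API modes...
const completionsRef = openrouter('openai/gpt-4o');                     // sets mode 'completions'
const responsesRef = openrouter('openai/gpt-4o', { api: 'responses' }); // sets mode 'responses'

// ...but the getter reflects only the most recent call: this lookup
// resolves to the Responses handler until openrouter() runs again.
const handler = openrouter.modalities.llm;
```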
package/src/providers/openrouter/llm.completions.ts
@@ -1,201 +0,0 @@
- import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
- import type { StreamEvent } from '../../types/stream.ts';
- import type { LLMProvider } from '../../types/provider.ts';
- import { UPPError } from '../../types/errors.ts';
- import { resolveApiKey } from '../../http/keys.ts';
- import { doFetch, doStreamFetch } from '../../http/fetch.ts';
- import { parseSSEStream } from '../../http/sse.ts';
- import { normalizeHttpError } from '../../http/errors.ts';
- import type { OpenRouterCompletionsParams, OpenRouterCompletionsResponse, OpenRouterCompletionsStreamChunk } from './types.ts';
- import {
-   transformRequest,
-   transformResponse,
-   transformStreamEvent,
-   createStreamState,
-   buildResponseFromState,
- } from './transform.completions.ts';
-
- const OPENROUTER_API_URL = 'https://openrouter.ai/api/v1/chat/completions';
-
- /**
-  * OpenRouter API capabilities
-  */
- const OPENROUTER_CAPABILITIES: LLMCapabilities = {
-   streaming: true,
-   tools: true,
-   structuredOutput: true,
-   imageInput: true,
-   videoInput: false,
-   audioInput: false,
- };
-
- /**
-  * Create OpenRouter Chat Completions LLM handler
-  */
- export function createCompletionsLLMHandler(): LLMHandler<OpenRouterCompletionsParams> {
-   // Provider reference injected by createProvider() or OpenRouter's custom factory
-   let providerRef: LLMProvider<OpenRouterCompletionsParams> | null = null;
-
-   return {
-     _setProvider(provider: LLMProvider<OpenRouterCompletionsParams>) {
-       providerRef = provider;
-     },
-
-     bind(modelId: string): BoundLLMModel<OpenRouterCompletionsParams> {
-       // Use the injected provider reference
-       if (!providerRef) {
-         throw new UPPError(
-           'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
-           'INVALID_REQUEST',
-           'openrouter',
-           'llm'
-         );
-       }
-
-       const model: BoundLLMModel<OpenRouterCompletionsParams> = {
-         modelId,
-         capabilities: OPENROUTER_CAPABILITIES,
-
-         get provider(): LLMProvider<OpenRouterCompletionsParams> {
-           return providerRef!;
-         },
-
-         async complete(request: LLMRequest<OpenRouterCompletionsParams>): Promise<LLMResponse> {
-           const apiKey = await resolveApiKey(
-             request.config,
-             'OPENROUTER_API_KEY',
-             'openrouter',
-             'llm'
-           );
-
-           const baseUrl = request.config.baseUrl ?? OPENROUTER_API_URL;
-           const body = transformRequest(request, modelId);
-
-           const response = await doFetch(
-             baseUrl,
-             {
-               method: 'POST',
-               headers: {
-                 'Content-Type': 'application/json',
-                 Authorization: `Bearer ${apiKey}`,
-               },
-               body: JSON.stringify(body),
-               signal: request.signal,
-             },
-             request.config,
-             'openrouter',
-             'llm'
-           );
-
-           const data = (await response.json()) as OpenRouterCompletionsResponse;
-           return transformResponse(data);
-         },
-
-         stream(request: LLMRequest<OpenRouterCompletionsParams>): LLMStreamResult {
-           const state = createStreamState();
-           let responseResolve: (value: LLMResponse) => void;
-           let responseReject: (error: Error) => void;
-
-           const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
-             responseResolve = resolve;
-             responseReject = reject;
-           });
-
-           async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
-             try {
-               const apiKey = await resolveApiKey(
-                 request.config,
-                 'OPENROUTER_API_KEY',
-                 'openrouter',
-                 'llm'
-               );
-
-               const baseUrl = request.config.baseUrl ?? OPENROUTER_API_URL;
-               const body = transformRequest(request, modelId);
-               body.stream = true;
-               body.stream_options = { include_usage: true };
-
-               const response = await doStreamFetch(
-                 baseUrl,
-                 {
-                   method: 'POST',
-                   headers: {
-                     'Content-Type': 'application/json',
-                     Authorization: `Bearer ${apiKey}`,
-                   },
-                   body: JSON.stringify(body),
-                   signal: request.signal,
-                 },
-                 request.config,
-                 'openrouter',
-                 'llm'
-               );
-
-               if (!response.ok) {
-                 const error = await normalizeHttpError(response, 'openrouter', 'llm');
-                 responseReject(error);
-                 throw error;
-               }
-
-               if (!response.body) {
-                 const error = new UPPError(
-                   'No response body for streaming request',
-                   'PROVIDER_ERROR',
-                   'openrouter',
-                   'llm'
-                 );
-                 responseReject(error);
-                 throw error;
-               }
-
-               for await (const data of parseSSEStream(response.body)) {
-                 // Skip [DONE] marker
-                 if (data === '[DONE]') {
-                   continue;
-                 }
-
-                 // Check for OpenRouter error event
-                 if (typeof data === 'object' && data !== null) {
-                   const chunk = data as OpenRouterCompletionsStreamChunk;
-
-                   // Check for error in chunk
-                   if ('error' in chunk && chunk.error) {
-                     const errorData = chunk.error as { message?: string; type?: string };
-                     const error = new UPPError(
-                       errorData.message ?? 'Unknown error',
-                       'PROVIDER_ERROR',
-                       'openrouter',
-                       'llm'
-                     );
-                     responseReject(error);
-                     throw error;
-                   }
-
-                   const uppEvents = transformStreamEvent(chunk, state);
-                   for (const event of uppEvents) {
-                     yield event;
-                   }
-                 }
-               }
-
-               // Build final response
-               responseResolve(buildResponseFromState(state));
-             } catch (error) {
-               responseReject(error as Error);
-               throw error;
-             }
-           }
-
-           return {
-             [Symbol.asyncIterator]() {
-               return generateEvents();
-             },
-             response: responsePromise,
-           };
-         },
-       };
-
-       return model;
-     },
-   };
- }
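The removed `stream()` above returns an `LLMStreamResult` that is both an async iterable of `StreamEvent`s and a carrier of a `response` promise resolved from the accumulated stream state. A minimal consumption sketch under that contract; `boundModel` and `request` are hypothetical placeholders for a model from `bind()` and an `LLMRequest`:

```ts
const result = boundModel.stream(request);

// Drain the event stream. generateEvents() is a lazy async generator,
// so no HTTP request is sent until iteration begins.
for await (const event of result) {
  console.log(event.type);
}

// Settles with buildResponseFromState(state), or rejects if the
// generator hit an HTTP, SSE, or provider error along the way.
const finalResponse = await result.response;
```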
package/src/providers/openrouter/llm.responses.ts
@@ -1,211 +0,0 @@
- import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
- import type { StreamEvent } from '../../types/stream.ts';
- import type { LLMProvider } from '../../types/provider.ts';
- import { UPPError } from '../../types/errors.ts';
- import { resolveApiKey } from '../../http/keys.ts';
- import { doFetch, doStreamFetch } from '../../http/fetch.ts';
- import { parseSSEStream } from '../../http/sse.ts';
- import { normalizeHttpError } from '../../http/errors.ts';
- import type { OpenRouterResponsesParams, OpenRouterResponsesResponse, OpenRouterResponsesStreamEvent, OpenRouterResponseErrorEvent } from './types.ts';
- import {
-   transformRequest,
-   transformResponse,
-   transformStreamEvent,
-   createStreamState,
-   buildResponseFromState,
- } from './transform.responses.ts';
-
- const OPENROUTER_RESPONSES_API_URL = 'https://openrouter.ai/api/v1/responses';
-
- /**
-  * OpenRouter API capabilities
-  */
- const OPENROUTER_CAPABILITIES: LLMCapabilities = {
-   streaming: true,
-   tools: true,
-   structuredOutput: true,
-   imageInput: true,
-   videoInput: false,
-   audioInput: false,
- };
-
- /**
-  * Create OpenRouter Responses API LLM handler
-  */
- export function createResponsesLLMHandler(): LLMHandler<OpenRouterResponsesParams> {
-   // Provider reference injected by createProvider() or OpenRouter's custom factory
-   let providerRef: LLMProvider<OpenRouterResponsesParams> | null = null;
-
-   return {
-     _setProvider(provider: LLMProvider<OpenRouterResponsesParams>) {
-       providerRef = provider;
-     },
-
-     bind(modelId: string): BoundLLMModel<OpenRouterResponsesParams> {
-       // Use the injected provider reference
-       if (!providerRef) {
-         throw new UPPError(
-           'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
-           'INVALID_REQUEST',
-           'openrouter',
-           'llm'
-         );
-       }
-
-       const model: BoundLLMModel<OpenRouterResponsesParams> = {
-         modelId,
-         capabilities: OPENROUTER_CAPABILITIES,
-
-         get provider(): LLMProvider<OpenRouterResponsesParams> {
-           return providerRef!;
-         },
-
-         async complete(request: LLMRequest<OpenRouterResponsesParams>): Promise<LLMResponse> {
-           const apiKey = await resolveApiKey(
-             request.config,
-             'OPENROUTER_API_KEY',
-             'openrouter',
-             'llm'
-           );
-
-           const baseUrl = request.config.baseUrl ?? OPENROUTER_RESPONSES_API_URL;
-           const body = transformRequest(request, modelId);
-
-           const response = await doFetch(
-             baseUrl,
-             {
-               method: 'POST',
-               headers: {
-                 'Content-Type': 'application/json',
-                 Authorization: `Bearer ${apiKey}`,
-               },
-               body: JSON.stringify(body),
-               signal: request.signal,
-             },
-             request.config,
-             'openrouter',
-             'llm'
-           );
-
-           const data = (await response.json()) as OpenRouterResponsesResponse;
-
-           // Check for error in response
-           if (data.status === 'failed' && data.error) {
-             throw new UPPError(
-               data.error.message,
-               'PROVIDER_ERROR',
-               'openrouter',
-               'llm'
-             );
-           }
-
-           return transformResponse(data);
-         },
-
-         stream(request: LLMRequest<OpenRouterResponsesParams>): LLMStreamResult {
-           const state = createStreamState();
-           let responseResolve: (value: LLMResponse) => void;
-           let responseReject: (error: Error) => void;
-
-           const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
-             responseResolve = resolve;
-             responseReject = reject;
-           });
-
-           async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
-             try {
-               const apiKey = await resolveApiKey(
-                 request.config,
-                 'OPENROUTER_API_KEY',
-                 'openrouter',
-                 'llm'
-               );
-
-               const baseUrl = request.config.baseUrl ?? OPENROUTER_RESPONSES_API_URL;
-               const body = transformRequest(request, modelId);
-               body.stream = true;
-
-               const response = await doStreamFetch(
-                 baseUrl,
-                 {
-                   method: 'POST',
-                   headers: {
-                     'Content-Type': 'application/json',
-                     Authorization: `Bearer ${apiKey}`,
-                   },
-                   body: JSON.stringify(body),
-                   signal: request.signal,
-                 },
-                 request.config,
-                 'openrouter',
-                 'llm'
-               );
-
-               if (!response.ok) {
-                 const error = await normalizeHttpError(response, 'openrouter', 'llm');
-                 responseReject(error);
-                 throw error;
-               }
-
-               if (!response.body) {
-                 const error = new UPPError(
-                   'No response body for streaming request',
-                   'PROVIDER_ERROR',
-                   'openrouter',
-                   'llm'
-                 );
-                 responseReject(error);
-                 throw error;
-               }
-
-               for await (const data of parseSSEStream(response.body)) {
-                 // Skip [DONE] marker
-                 if (data === '[DONE]') {
-                   continue;
-                 }
-
-                 // Check for OpenRouter error event
-                 if (typeof data === 'object' && data !== null) {
-                   const event = data as OpenRouterResponsesStreamEvent;
-
-                   // Check for error event
-                   if (event.type === 'error') {
-                     const errorEvent = event as OpenRouterResponseErrorEvent;
-                     const error = new UPPError(
-                       errorEvent.error.message,
-                       'PROVIDER_ERROR',
-                       'openrouter',
-                       'llm'
-                     );
-                     responseReject(error);
-                     throw error;
-                   }
-
-                   const uppEvents = transformStreamEvent(event, state);
-                   for (const uppEvent of uppEvents) {
-                     yield uppEvent;
-                   }
-                 }
-               }
-
-               // Build final response
-               responseResolve(buildResponseFromState(state));
-             } catch (error) {
-               responseReject(error as Error);
-               throw error;
-             }
-           }
-
-           return {
-             [Symbol.asyncIterator]() {
-               return generateEvents();
-             },
-             response: responsePromise,
-           };
-         },
-       };
-
-       return model;
-     },
-   };
- }
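Both removed handlers share the same injection handshake: the factory in `index.ts` calls `_setProvider()` once at construction, and `bind()` refuses to produce a model until that has happened. A minimal sketch of the contract, reusing the deleted names (the cast is illustrative only):

```ts
import { createResponsesLLMHandler } from './llm.responses.ts';

const handler = createResponsesLLMHandler();

// Calling handler.bind() at this point would throw
// UPPError('Provider reference not set...', 'INVALID_REQUEST', ...).
handler._setProvider?.(openrouter as unknown as LLMProvider<OpenRouterResponsesParams>);

const bound = handler.bind('openai/gpt-4o');
// bound.provider now returns the injected reference, and bound.capabilities
// reports the static OPENROUTER_CAPABILITIES table.
```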