@providerprotocol/ai 0.0.11 → 0.0.12
- package/dist/index.js +3 -3
- package/dist/index.js.map +1 -1
- package/package.json +1 -10
- package/src/anthropic/index.ts +0 -3
- package/src/core/image.ts +0 -188
- package/src/core/llm.ts +0 -650
- package/src/core/provider.ts +0 -92
- package/src/google/index.ts +0 -3
- package/src/http/errors.ts +0 -112
- package/src/http/fetch.ts +0 -210
- package/src/http/index.ts +0 -31
- package/src/http/keys.ts +0 -136
- package/src/http/retry.ts +0 -205
- package/src/http/sse.ts +0 -136
- package/src/index.ts +0 -32
- package/src/ollama/index.ts +0 -3
- package/src/openai/index.ts +0 -39
- package/src/openrouter/index.ts +0 -11
- package/src/providers/anthropic/index.ts +0 -17
- package/src/providers/anthropic/llm.ts +0 -196
- package/src/providers/anthropic/transform.ts +0 -434
- package/src/providers/anthropic/types.ts +0 -213
- package/src/providers/google/index.ts +0 -17
- package/src/providers/google/llm.ts +0 -203
- package/src/providers/google/transform.ts +0 -447
- package/src/providers/google/types.ts +0 -214
- package/src/providers/ollama/index.ts +0 -43
- package/src/providers/ollama/llm.ts +0 -272
- package/src/providers/ollama/transform.ts +0 -434
- package/src/providers/ollama/types.ts +0 -260
- package/src/providers/openai/index.ts +0 -186
- package/src/providers/openai/llm.completions.ts +0 -201
- package/src/providers/openai/llm.responses.ts +0 -211
- package/src/providers/openai/transform.completions.ts +0 -561
- package/src/providers/openai/transform.responses.ts +0 -708
- package/src/providers/openai/types.ts +0 -1249
- package/src/providers/openrouter/index.ts +0 -177
- package/src/providers/openrouter/llm.completions.ts +0 -201
- package/src/providers/openrouter/llm.responses.ts +0 -211
- package/src/providers/openrouter/transform.completions.ts +0 -538
- package/src/providers/openrouter/transform.responses.ts +0 -742
- package/src/providers/openrouter/types.ts +0 -717
- package/src/providers/xai/index.ts +0 -223
- package/src/providers/xai/llm.completions.ts +0 -201
- package/src/providers/xai/llm.messages.ts +0 -195
- package/src/providers/xai/llm.responses.ts +0 -211
- package/src/providers/xai/transform.completions.ts +0 -565
- package/src/providers/xai/transform.messages.ts +0 -448
- package/src/providers/xai/transform.responses.ts +0 -678
- package/src/providers/xai/types.ts +0 -938
- package/src/types/content.ts +0 -133
- package/src/types/errors.ts +0 -85
- package/src/types/index.ts +0 -105
- package/src/types/llm.ts +0 -211
- package/src/types/messages.ts +0 -205
- package/src/types/provider.ts +0 -195
- package/src/types/schema.ts +0 -58
- package/src/types/stream.ts +0 -188
- package/src/types/thread.ts +0 -226
- package/src/types/tool.ts +0 -88
- package/src/types/turn.ts +0 -118
- package/src/utils/id.ts +0 -28
- package/src/xai/index.ts +0 -41
package/src/providers/openai/index.ts
@@ -1,186 +0,0 @@
-import type {
-  Provider,
-  ModelReference,
-  LLMHandler,
-  LLMProvider,
-} from '../../types/provider.ts';
-import { createCompletionsLLMHandler } from './llm.completions.ts';
-import { createResponsesLLMHandler } from './llm.responses.ts';
-import type { OpenAICompletionsParams, OpenAIResponsesParams, OpenAIConfig } from './types.ts';
-
-/** Union type for modalities interface */
-type OpenAILLMParamsUnion = OpenAICompletionsParams | OpenAIResponsesParams;
-
-/**
- * OpenAI provider options
- */
-export interface OpenAIProviderOptions {
-  /**
-   * Which API to use:
-   * - 'responses': Modern Responses API (default, recommended)
-   * - 'completions': Legacy Chat Completions API
-   */
-  api?: 'responses' | 'completions';
-}
-
-/**
- * OpenAI provider with configurable API mode
- *
- * @example
- * // Using the modern Responses API (default)
- * const model = openai('gpt-4o');
- *
- * @example
- * // Using the legacy Chat Completions API
- * const model = openai('gpt-4o', { api: 'completions' });
-
 *
- * @example
- * // Explicit Responses API
- * const model = openai('gpt-4o', { api: 'responses' });
- */
-export interface OpenAIProvider extends Provider<OpenAIProviderOptions> {
-  /**
-   * Create a model reference
-   * @param modelId - The model identifier (e.g., 'gpt-4o', 'gpt-4-turbo', 'o1-preview')
-   * @param options - Provider options including API selection
-   */
-  (modelId: string, options?: OpenAIProviderOptions): ModelReference<OpenAIProviderOptions>;
-
-  /** Provider name */
-  readonly name: 'openai';
-
-  /** Provider version */
-  readonly version: string;
-
-  /** Supported modalities */
-  readonly modalities: {
-    llm: LLMHandler<OpenAILLMParamsUnion>;
-  };
-}
-
-/**
- * Create the OpenAI provider
- */
-function createOpenAIProvider(): OpenAIProvider {
-  // Track which API mode is currently active for the modalities
-  let currentApiMode: 'responses' | 'completions' = 'responses';
-
-  // Create handlers eagerly so we can inject provider reference
-  const responsesHandler = createResponsesLLMHandler();
-  const completionsHandler = createCompletionsLLMHandler();
-
-  const fn = function (
-    modelId: string,
-    options?: OpenAIProviderOptions
-  ): ModelReference<OpenAIProviderOptions> {
-    const apiMode = options?.api ?? 'responses';
-    currentApiMode = apiMode;
-    return { modelId, provider };
-  };
-
-  // Create a dynamic modalities object that returns the correct handler
-  const modalities = {
-    get llm(): LLMHandler<OpenAILLMParamsUnion> {
-      return currentApiMode === 'completions'
-        ? (completionsHandler as unknown as LLMHandler<OpenAILLMParamsUnion>)
-        : (responsesHandler as unknown as LLMHandler<OpenAILLMParamsUnion>);
-    },
-  };
-
-  // Define properties
-  Object.defineProperties(fn, {
-    name: {
-      value: 'openai',
-      writable: false,
-      configurable: true,
-    },
-    version: {
-      value: '1.0.0',
-      writable: false,
-      configurable: true,
-    },
-    modalities: {
-      value: modalities,
-      writable: false,
-      configurable: true,
-    },
-  });
-
-  const provider = fn as OpenAIProvider;
-
-  // Inject provider reference into both handlers (spec compliance)
-  responsesHandler._setProvider?.(provider as unknown as LLMProvider<OpenAIResponsesParams>);
-  completionsHandler._setProvider?.(provider as unknown as LLMProvider<OpenAICompletionsParams>);
-
-  return provider;
-}
-
-/**
- * OpenAI provider
- *
- * Supports both the modern Responses API (default) and legacy Chat Completions API.
- *
- * @example
- * ```ts
- * import { openai } from './providers/openai';
- * import { llm } from './core/llm';
- *
- * // Using Responses API (default, modern, recommended)
- * const model = llm({
- *   model: openai('gpt-4o'),
- *   params: { max_tokens: 1000 }
- * });
- *
- * // Using Chat Completions API (legacy)
- * const legacyModel = llm({
- *   model: openai('gpt-4o', { api: 'completions' }),
- *   params: { max_tokens: 1000 }
- * });
- *
- * // Generate
- * const turn = await model.generate('Hello!');
- * console.log(turn.response.text);
- * ```
- */
-export const openai = createOpenAIProvider();
-
-// Re-export types
-export type {
-  OpenAICompletionsParams,
-  OpenAIResponsesParams,
-  OpenAIConfig,
-  OpenAIAPIMode,
-  OpenAIModelOptions,
-  OpenAIModelReference,
-  // Audio and web search types
-  OpenAIAudioConfig,
-  OpenAIWebSearchOptions,
-  OpenAIWebSearchUserLocation,
-  OpenAICompletionsWebSearchUserLocation,
-  // Built-in tool types
-  OpenAIBuiltInTool,
-  OpenAIWebSearchTool,
-  OpenAIFileSearchTool,
-  OpenAICodeInterpreterTool,
-  OpenAICodeInterpreterContainer,
-  OpenAIComputerTool,
-  OpenAIComputerEnvironment,
-  OpenAIImageGenerationTool,
-  OpenAIMcpTool,
-  OpenAIMcpServerConfig,
-  OpenAIResponsesToolUnion,
-  // Conversation and prompt types
-  OpenAIConversation,
-  OpenAIPromptTemplate,
-} from './types.ts';
-
-// Re-export tool helper constructors
-export {
-  tools,
-  webSearchTool,
-  fileSearchTool,
-  codeInterpreterTool,
-  computerTool,
-  imageGenerationTool,
-  mcpTool,
-} from './types.ts';
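The deleted index.ts rests on one notable pattern: the provider is a plain function made to satisfy a callable interface by attaching read-only name, version, and modalities properties via Object.defineProperties. A minimal sketch of that pattern, with the UPP-specific types stubbed out (MiniProvider and MiniModelRef are illustrative names, not part of the package):

// Sketch of the callable-provider pattern used above; types are stubbed.
interface MiniModelRef {
  modelId: string;
  provider: MiniProvider;
}

interface MiniProvider {
  (modelId: string): MiniModelRef;
  readonly name: string;
  readonly version: string;
}

function createMiniProvider(providerName: string): MiniProvider {
  const fn = function (modelId: string): MiniModelRef {
    // `provider` is the decorated function itself, assigned below;
    // this body only runs after the assignment has happened.
    return { modelId, provider };
  };

  // Attach read-only metadata without losing callability.
  Object.defineProperties(fn, {
    name: { value: providerName, writable: false, configurable: true },
    version: { value: '1.0.0', writable: false, configurable: true },
  });

  const provider = fn as MiniProvider;
  return provider;
}

const mini = createMiniProvider('openai');
console.log(mini('gpt-4o').provider.name); // prints: openai

Overriding name this way works because a function's built-in name property is configurable but not writable, so defineProperty can replace it even though plain assignment cannot.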
package/src/providers/openai/llm.completions.ts
@@ -1,201 +0,0 @@
-import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
-import type { StreamEvent } from '../../types/stream.ts';
-import type { LLMProvider } from '../../types/provider.ts';
-import { UPPError } from '../../types/errors.ts';
-import { resolveApiKey } from '../../http/keys.ts';
-import { doFetch, doStreamFetch } from '../../http/fetch.ts';
-import { parseSSEStream } from '../../http/sse.ts';
-import { normalizeHttpError } from '../../http/errors.ts';
-import type { OpenAICompletionsParams, OpenAICompletionsResponse, OpenAICompletionsStreamChunk } from './types.ts';
-import {
-  transformRequest,
-  transformResponse,
-  transformStreamEvent,
-  createStreamState,
-  buildResponseFromState,
-} from './transform.completions.ts';
-
-const OPENAI_API_URL = 'https://api.openai.com/v1/chat/completions';
-
-/**
- * OpenAI API capabilities
- */
-const OPENAI_CAPABILITIES: LLMCapabilities = {
-  streaming: true,
-  tools: true,
-  structuredOutput: true,
-  imageInput: true,
-  videoInput: false,
-  audioInput: false,
-};
-
-/**
- * Create OpenAI Chat Completions LLM handler
- */
-export function createCompletionsLLMHandler(): LLMHandler<OpenAICompletionsParams> {
-  // Provider reference injected by createProvider() or OpenAI's custom factory
-  let providerRef: LLMProvider<OpenAICompletionsParams> | null = null;
-
-  return {
-    _setProvider(provider: LLMProvider<OpenAICompletionsParams>) {
-      providerRef = provider;
-    },
-
-    bind(modelId: string): BoundLLMModel<OpenAICompletionsParams> {
-      // Use the injected provider reference
-      if (!providerRef) {
-        throw new UPPError(
-          'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
-          'INVALID_REQUEST',
-          'openai',
-          'llm'
-        );
-      }
-
-      const model: BoundLLMModel<OpenAICompletionsParams> = {
-        modelId,
-        capabilities: OPENAI_CAPABILITIES,
-
-        get provider(): LLMProvider<OpenAICompletionsParams> {
-          return providerRef!;
-        },
-
-        async complete(request: LLMRequest<OpenAICompletionsParams>): Promise<LLMResponse> {
-          const apiKey = await resolveApiKey(
-            request.config,
-            'OPENAI_API_KEY',
-            'openai',
-            'llm'
-          );
-
-          const baseUrl = request.config.baseUrl ?? OPENAI_API_URL;
-          const body = transformRequest(request, modelId);
-
-          const response = await doFetch(
-            baseUrl,
-            {
-              method: 'POST',
-              headers: {
-                'Content-Type': 'application/json',
-                Authorization: `Bearer ${apiKey}`,
-              },
-              body: JSON.stringify(body),
-              signal: request.signal,
-            },
-            request.config,
-            'openai',
-            'llm'
-          );
-
-          const data = (await response.json()) as OpenAICompletionsResponse;
-          return transformResponse(data);
-        },
-
-        stream(request: LLMRequest<OpenAICompletionsParams>): LLMStreamResult {
-          const state = createStreamState();
-          let responseResolve: (value: LLMResponse) => void;
-          let responseReject: (error: Error) => void;
-
-          const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
-            responseResolve = resolve;
-            responseReject = reject;
-          });
-
-          async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
-            try {
-              const apiKey = await resolveApiKey(
-                request.config,
-                'OPENAI_API_KEY',
-                'openai',
-                'llm'
-              );
-
-              const baseUrl = request.config.baseUrl ?? OPENAI_API_URL;
-              const body = transformRequest(request, modelId);
-              body.stream = true;
-              body.stream_options = { include_usage: true };
-
-              const response = await doStreamFetch(
-                baseUrl,
-                {
-                  method: 'POST',
-                  headers: {
-                    'Content-Type': 'application/json',
-                    Authorization: `Bearer ${apiKey}`,
-                  },
-                  body: JSON.stringify(body),
-                  signal: request.signal,
-                },
-                request.config,
-                'openai',
-                'llm'
-              );
-
-              if (!response.ok) {
-                const error = await normalizeHttpError(response, 'openai', 'llm');
-                responseReject(error);
-                throw error;
-              }
-
-              if (!response.body) {
-                const error = new UPPError(
-                  'No response body for streaming request',
-                  'PROVIDER_ERROR',
-                  'openai',
-                  'llm'
-                );
-                responseReject(error);
-                throw error;
-              }
-
-              for await (const data of parseSSEStream(response.body)) {
-                // Skip [DONE] marker
-                if (data === '[DONE]') {
-                  continue;
-                }
-
-                // Check for OpenAI error event
-                if (typeof data === 'object' && data !== null) {
-                  const chunk = data as OpenAICompletionsStreamChunk;
-
-                  // Check for error in chunk
-                  if ('error' in chunk && chunk.error) {
-                    const errorData = chunk.error as { message?: string; type?: string };
-                    const error = new UPPError(
-                      errorData.message ?? 'Unknown error',
-                      'PROVIDER_ERROR',
-                      'openai',
-                      'llm'
-                    );
-                    responseReject(error);
-                    throw error;
-                  }
-
-                  const uppEvents = transformStreamEvent(chunk, state);
-                  for (const event of uppEvents) {
-                    yield event;
-                  }
-                }
-              }
-
-              // Build final response
-              responseResolve(buildResponseFromState(state));
-            } catch (error) {
-              responseReject(error as Error);
-              throw error;
-            }
-          }
-
-          return {
-            [Symbol.asyncIterator]() {
-              return generateEvents();
-            },
-            response: responsePromise,
-          };
-        },
-      };
-
-      return model;
-    },
-  };
-}
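The structural idea in stream() above is worth isolating: it returns an AsyncIterable of events alongside a response promise, wired through a deferred that the generator settles when the stream finishes or fails. A stripped-down sketch of that shape, using placeholder string events instead of the package's StreamEvent and LLMResponse types:

// Sketch of the stream-result shape used by stream() above:
// an AsyncIterable of events plus a promise for the final response.
interface MiniStreamResult<E, R> extends AsyncIterable<E> {
  response: Promise<R>;
}

function miniStream(): MiniStreamResult<string, string[]> {
  const seen: string[] = [];
  let resolveResponse!: (value: string[]) => void;
  let rejectResponse!: (error: Error) => void;
  const response = new Promise<string[]>((resolve, reject) => {
    resolveResponse = resolve;
    rejectResponse = reject;
  });

  async function* events(): AsyncGenerator<string, void, unknown> {
    try {
      for (const chunk of ['Hel', 'lo']) {
        seen.push(chunk); // accumulate state, as the real handler does
        yield chunk;
      }
      resolveResponse(seen); // settle the final response when the stream ends
    } catch (error) {
      rejectResponse(error as Error); // mirror the reject-then-rethrow above
      throw error;
    }
  }

  return {
    [Symbol.asyncIterator]() {
      return events();
    },
    response,
  };
}

async function demo(): Promise<void> {
  const result = miniStream();
  for await (const chunk of result) console.log(chunk); // Hel, lo
  console.log(await result.response); // ['Hel', 'lo']
}

The reject-then-rethrow in the catch block matters: rejecting fails the response promise for callers who only await it, while rethrowing still surfaces the error to callers iterating the events.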
package/src/providers/openai/llm.responses.ts
@@ -1,211 +0,0 @@
-import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
-import type { StreamEvent } from '../../types/stream.ts';
-import type { LLMProvider } from '../../types/provider.ts';
-import { UPPError } from '../../types/errors.ts';
-import { resolveApiKey } from '../../http/keys.ts';
-import { doFetch, doStreamFetch } from '../../http/fetch.ts';
-import { parseSSEStream } from '../../http/sse.ts';
-import { normalizeHttpError } from '../../http/errors.ts';
-import type { OpenAIResponsesParams, OpenAIResponsesResponse, OpenAIResponsesStreamEvent, OpenAIResponseErrorEvent } from './types.ts';
-import {
-  transformRequest,
-  transformResponse,
-  transformStreamEvent,
-  createStreamState,
-  buildResponseFromState,
-} from './transform.responses.ts';
-
-const OPENAI_RESPONSES_API_URL = 'https://api.openai.com/v1/responses';
-
-/**
- * OpenAI API capabilities
- */
-const OPENAI_CAPABILITIES: LLMCapabilities = {
-  streaming: true,
-  tools: true,
-  structuredOutput: true,
-  imageInput: true,
-  videoInput: false,
-  audioInput: false,
-};
-
-/**
- * Create OpenAI Responses API LLM handler
- */
-export function createResponsesLLMHandler(): LLMHandler<OpenAIResponsesParams> {
-  // Provider reference injected by createProvider() or OpenAI's custom factory
-  let providerRef: LLMProvider<OpenAIResponsesParams> | null = null;
-
-  return {
-    _setProvider(provider: LLMProvider<OpenAIResponsesParams>) {
-      providerRef = provider;
-    },
-
-    bind(modelId: string): BoundLLMModel<OpenAIResponsesParams> {
-      // Use the injected provider reference
-      if (!providerRef) {
-        throw new UPPError(
-          'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
-          'INVALID_REQUEST',
-          'openai',
-          'llm'
-        );
-      }
-
-      const model: BoundLLMModel<OpenAIResponsesParams> = {
-        modelId,
-        capabilities: OPENAI_CAPABILITIES,
-
-        get provider(): LLMProvider<OpenAIResponsesParams> {
-          return providerRef!;
-        },
-
-        async complete(request: LLMRequest<OpenAIResponsesParams>): Promise<LLMResponse> {
-          const apiKey = await resolveApiKey(
-            request.config,
-            'OPENAI_API_KEY',
-            'openai',
-            'llm'
-          );
-
-          const baseUrl = request.config.baseUrl ?? OPENAI_RESPONSES_API_URL;
-          const body = transformRequest(request, modelId);
-
-          const response = await doFetch(
-            baseUrl,
-            {
-              method: 'POST',
-              headers: {
-                'Content-Type': 'application/json',
-                Authorization: `Bearer ${apiKey}`,
-              },
-              body: JSON.stringify(body),
-              signal: request.signal,
-            },
-            request.config,
-            'openai',
-            'llm'
-          );
-
-          const data = (await response.json()) as OpenAIResponsesResponse;
-
-          // Check for error in response
-          if (data.status === 'failed' && data.error) {
-            throw new UPPError(
-              data.error.message,
-              'PROVIDER_ERROR',
-              'openai',
-              'llm'
-            );
-          }
-
-          return transformResponse(data);
-        },
-
-        stream(request: LLMRequest<OpenAIResponsesParams>): LLMStreamResult {
-          const state = createStreamState();
-          let responseResolve: (value: LLMResponse) => void;
-          let responseReject: (error: Error) => void;
-
-          const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
-            responseResolve = resolve;
-            responseReject = reject;
-          });
-
-          async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
-            try {
-              const apiKey = await resolveApiKey(
-                request.config,
-                'OPENAI_API_KEY',
-                'openai',
-                'llm'
-              );
-
-              const baseUrl = request.config.baseUrl ?? OPENAI_RESPONSES_API_URL;
-              const body = transformRequest(request, modelId);
-              body.stream = true;
-
-              const response = await doStreamFetch(
-                baseUrl,
-                {
-                  method: 'POST',
-                  headers: {
-                    'Content-Type': 'application/json',
-                    Authorization: `Bearer ${apiKey}`,
-                  },
-                  body: JSON.stringify(body),
-                  signal: request.signal,
-                },
-                request.config,
-                'openai',
-                'llm'
-              );
-
-              if (!response.ok) {
-                const error = await normalizeHttpError(response, 'openai', 'llm');
-                responseReject(error);
-                throw error;
-              }
-
-              if (!response.body) {
-                const error = new UPPError(
-                  'No response body for streaming request',
-                  'PROVIDER_ERROR',
-                  'openai',
-                  'llm'
-                );
-                responseReject(error);
-                throw error;
-              }
-
-              for await (const data of parseSSEStream(response.body)) {
-                // Skip [DONE] marker
-                if (data === '[DONE]') {
-                  continue;
-                }
-
-                // Check for OpenAI error event
-                if (typeof data === 'object' && data !== null) {
-                  const event = data as OpenAIResponsesStreamEvent;
-
-                  // Check for error event
-                  if (event.type === 'error') {
-                    const errorEvent = event as OpenAIResponseErrorEvent;
-                    const error = new UPPError(
-                      errorEvent.error.message,
-                      'PROVIDER_ERROR',
-                      'openai',
-                      'llm'
-                    );
-                    responseReject(error);
-                    throw error;
-                  }
-
-                  const uppEvents = transformStreamEvent(event, state);
-                  for (const uppEvent of uppEvents) {
-                    yield uppEvent;
-                  }
-                }
-              }
-
-              // Build final response
-              responseResolve(buildResponseFromState(state));
-            } catch (error) {
-              responseReject(error as Error);
-              throw error;
-            }
-          }
-
-          return {
-            [Symbol.asyncIterator]() {
-              return generateEvents();
-            },
-            response: responsePromise,
-          };
-        },
-      };
-
-      return model;
-    },
-  };
-}
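Compared with the completions handler, the responses handler surfaces provider errors in two additional places: a terminal status of 'failed' on the JSON body in complete(), and type: 'error' events mid-stream. Both reduce to narrowing a tagged union on its discriminant. A small self-contained sketch of that narrowing (the event shapes are simplified stand-ins, not the package's real OpenAIResponsesStreamEvent definitions):

// Simplified stand-ins for the Responses stream event union.
type MiniEvent =
  | { type: 'response.output_text.delta'; delta: string }
  | { type: 'error'; error: { message: string } };

function handleEvent(event: MiniEvent): string {
  // Narrowing on the `type` tag mirrors the `event.type === 'error'` check above.
  if (event.type === 'error') {
    throw new Error(event.error.message);
  }
  // TypeScript has narrowed `event` to the delta variant here.
  return event.delta;
}

console.log(handleEvent({ type: 'response.output_text.delta', delta: 'Hi' })); // prints: Hi

Because the check is on the discriminant property itself, no cast is needed for the happy path; the deleted code only casts to OpenAIResponseErrorEvent to reach the typed error payload.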