@amux.ai/adapter-openai 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,390 @@
1
+ import { LLMAdapter, ToolCall, JSONSchema } from '@amux.ai/llm-bridge';
2
+
3
/**
 * OpenAI adapter implementation
 * This adapter handles conversion between OpenAI API format and IR
 * (IR types come from @amux.ai/llm-bridge; the concrete implementation
 * lives in the package source — this file is the public type surface only).
 */
declare const openaiAdapter: LLMAdapter;
8
+
9
/**
 * OpenAI Responses API adapter implementation
 * This adapter uses the new /v1/responses endpoint.
 *
 * The Responses API is OpenAI's newer API that supports:
 * - Built-in tools (web search, code interpreter, file search)
 * - Agents and multi-turn conversations with state
 * - Enhanced streaming capabilities
 * - Reasoning models with thinking process
 * - Stateful context management via previous_response_id
 *
 * Key differences from the Chat Completions API:
 * - Uses `input` instead of `messages`
 * - Uses `instructions` instead of a system message
 * - Uses `max_output_tokens` instead of `max_tokens`
 * - Different response structure with an `output` array
 * - Different streaming event format (typed `response.*` events)
 * - Uses `text.format` for JSON mode instead of `response_format`
 * - Supports `previous_response_id` for multi-turn state management
 */
declare const openaiResponsesAdapter: LLMAdapter;
30
+
31
+ /**
32
+ * OpenAI message content part types
33
+ */
34
+ type OpenAIContentPart = {
35
+ type: 'text';
36
+ text: string;
37
+ } | {
38
+ type: 'image_url';
39
+ image_url: {
40
+ url: string;
41
+ detail?: 'auto' | 'low' | 'high';
42
+ };
43
+ };
44
/**
 * OpenAI message format (one entry of a Chat Completions `messages` array)
 */
interface OpenAIMessage {
    /** Originator of the message. */
    role: 'system' | 'user' | 'assistant' | 'tool';
    /** Plain text, multimodal parts, or null (the type permits null, e.g. for tool-call-only assistant turns — confirm against API usage). */
    content?: string | OpenAIContentPart[] | null;
    /** Optional participant name. */
    name?: string;
    /** Tool calls issued by the assistant; uses the IR ToolCall shape from @amux.ai/llm-bridge. */
    tool_calls?: ToolCall[];
    /** For role 'tool': id of the tool call this message is the result of. */
    tool_call_id?: string;
}
54
/**
 * OpenAI tool format
 */
interface OpenAITool {
    /** Only function tools are representable in this Chat Completions shape. */
    type: 'function';
    function: {
        name: string;
        description?: string;
        /** JSON Schema describing the function's arguments. */
        parameters?: JSONSchema;
        /** Presumably OpenAI's strict structured-output mode — confirm against OpenAI docs. */
        strict?: boolean;
    };
}
66
/**
 * OpenAI response format configuration
 */
interface OpenAIResponseFormat {
    /** 'json_object' enables JSON mode; 'json_schema' enables schema-constrained output. */
    type: 'text' | 'json_object' | 'json_schema';
    /** Used when type === 'json_schema'. */
    json_schema?: {
        name: string;
        description?: string;
        /** The JSON Schema the model output must conform to. */
        schema: Record<string, unknown>;
        strict?: boolean;
    };
}
78
/**
 * OpenAI stream options
 */
interface OpenAIStreamOptions {
    /** Requests token-usage reporting in the stream (per OpenAI docs, as a final usage chunk — confirm). */
    include_usage?: boolean;
}
84
/**
 * OpenAI Chat Completions API request format
 */
interface OpenAIRequest {
    /** Model identifier, e.g. "gpt-4o". */
    model: string;
    /** Conversation history, oldest first. */
    messages: OpenAIMessage[];
    /** Function tools the model may call. */
    tools?: OpenAITool[];
    /** 'auto'/'none'/'required', or an object forcing one named function. */
    tool_choice?: 'auto' | 'none' | 'required' | {
        type: 'function';
        function: {
            name: string;
        };
    };
    /** Enable server-sent-event streaming (see OpenAIStreamChunk). */
    stream?: boolean;
    stream_options?: OpenAIStreamOptions;
    temperature?: number;
    top_p?: number;
    /** Older completion-token cap; max_completion_tokens is the newer equivalent — confirm against OpenAI docs. */
    max_tokens?: number;
    max_completion_tokens?: number;
    /** Stop sequence(s): a single string or an array. */
    stop?: string | string[];
    presence_penalty?: number;
    frequency_penalty?: number;
    /** Number of choices to generate. */
    n?: number;
    /** Best-effort determinism seed. */
    seed?: number;
    /** Opaque end-user identifier. */
    user?: string;
    /** Plain text, JSON mode, or JSON-schema structured output. */
    response_format?: OpenAIResponseFormat;
    logprobs?: boolean;
    /** Number of alternatives per token position when logprobs is enabled. */
    top_logprobs?: number;
}
113
/**
 * OpenAI response usage
 */
interface OpenAIUsage {
    /** Tokens consumed by the prompt/input. */
    prompt_tokens: number;
    /** Tokens generated in the completion. */
    completion_tokens: number;
    /** Total tokens (prompt + completion). */
    total_tokens: number;
    completion_tokens_details?: {
        /** Tokens spent on hidden reasoning — presumably reasoning models only; confirm. */
        reasoning_tokens?: number;
    };
}
124
/**
 * OpenAI Chat Completions API response format
 */
interface OpenAIResponse {
    /** Completion id. */
    id: string;
    /** Object type discriminator (e.g. "chat.completion"). */
    object: string;
    /** Unix timestamp (seconds). */
    created: number;
    model: string;
    system_fingerprint?: string;
    /** One entry per requested choice (`n`). */
    choices: Array<{
        index: number;
        message: {
            role: string;
            /** May be null (e.g. when the assistant answered only with tool calls — confirm). */
            content: string | null;
            /** IR ToolCall shape from @amux.ai/llm-bridge. */
            tool_calls?: ToolCall[];
        };
        /** e.g. 'stop', 'length', 'tool_calls'. */
        finish_reason: string;
        /** Present only when logprobs were requested. */
        logprobs?: {
            content?: Array<{
                token: string;
                logprob: number;
                top_logprobs?: Array<{
                    token: string;
                    logprob: number;
                }>;
            }>;
        };
    }>;
    usage?: OpenAIUsage;
}
154
/**
 * OpenAI stream chunk format (one server-sent event of a streamed completion)
 */
interface OpenAIStreamChunk {
    id: string;
    object: string;
    created: number;
    model: string;
    system_fingerprint?: string;
    choices: Array<{
        index: number;
        /** Incremental diff — fields are present only when they change. */
        delta: {
            role?: string;
            content?: string;
            /** Tool-call fragments; `index` correlates fragments of the same call across chunks. */
            tool_calls?: Array<{
                index: number;
                id?: string;
                type?: string;
                function?: {
                    name?: string;
                    /** Argument JSON arrives as string fragments to be concatenated by the consumer. */
                    arguments?: string;
                };
            }>;
        };
        /** Null/absent until the final chunk for this choice. */
        finish_reason?: string | null;
        logprobs?: {
            content?: Array<{
                token: string;
                logprob: number;
                top_logprobs?: Array<{
                    token: string;
                    logprob: number;
                }>;
            }>;
        };
    }>;
    /** Presumably only on the final chunk when stream_options.include_usage is set — confirm. */
    usage?: OpenAIUsage;
}
192
/**
 * OpenAI error format (body of a non-2xx API response)
 */
interface OpenAIError {
    error: {
        /** Human-readable description. */
        message: string;
        /** Error category, e.g. 'invalid_request_error'. */
        type: string;
        /** Request parameter that caused the error, if any. */
        param?: string;
        /** Machine-readable error code. */
        code?: string;
    };
}
203
+ /**
204
+ * Responses API input content item
205
+ * Supports both explicit type format and shorthand format
206
+ */
207
+ type ResponsesInputItem = {
208
+ type: 'message';
209
+ role: 'user' | 'assistant' | 'system' | 'developer';
210
+ content: string | ResponsesContentPart[];
211
+ } | {
212
+ type: 'item_reference';
213
+ id: string;
214
+ } | {
215
+ role: 'user' | 'assistant' | 'system' | 'developer';
216
+ content: string | ResponsesContentPart[];
217
+ type?: undefined;
218
+ };
219
+ /**
220
+ * Responses API content part
221
+ */
222
+ type ResponsesContentPart = {
223
+ type: 'input_text';
224
+ text: string;
225
+ } | {
226
+ type: 'input_image';
227
+ image_url: string;
228
+ detail?: 'auto' | 'low' | 'high';
229
+ } | {
230
+ type: 'input_file';
231
+ file_id: string;
232
+ };
233
+ /**
234
+ * Responses API tool definition
235
+ */
236
+ type ResponsesTool = {
237
+ type: 'function';
238
+ name: string;
239
+ description?: string;
240
+ parameters?: JSONSchema;
241
+ strict?: boolean;
242
+ } | {
243
+ type: 'web_search_preview';
244
+ search_context_size?: 'low' | 'medium' | 'high';
245
+ } | {
246
+ type: 'file_search';
247
+ vector_store_ids: string[];
248
+ max_num_results?: number;
249
+ } | {
250
+ type: 'code_interpreter';
251
+ };
252
/**
 * Responses API text format configuration
 * (the Responses-API counterpart of Chat Completions' `response_format`)
 */
interface ResponsesTextFormat {
    format?: {
        type: 'text' | 'json_object' | 'json_schema';
        /** Used when type === 'json_schema'. NOTE(review): the live Responses API may expect these fields flattened directly into `format` — verify against OpenAI docs before changing. */
        json_schema?: {
            name: string;
            description?: string;
            schema: Record<string, unknown>;
            strict?: boolean;
        };
    };
}
266
/**
 * Responses API request format
 */
interface ResponsesRequest {
    model: string;
    /** Replaces Chat Completions' `messages`: a plain string or structured items. */
    input: string | ResponsesInputItem[];
    /** Replaces the system message. */
    instructions?: string;
    /** Function tools plus built-in tools (web search, file search, code interpreter). */
    tools?: ResponsesTool[];
    /** Note: the forced-function object is flat here (`name` at top level), unlike Chat Completions. */
    tool_choice?: 'auto' | 'none' | 'required' | {
        type: 'function';
        name: string;
    };
    parallel_tool_calls?: boolean;
    stream?: boolean;
    temperature?: number;
    top_p?: number;
    /** Replaces `max_tokens`. */
    max_output_tokens?: number;
    truncation?: 'auto' | 'disabled';
    /** Arbitrary string key/value pairs attached to the response. */
    metadata?: Record<string, string>;
    /** Whether to persist the response for later retrieval/chaining — presumably; confirm. */
    store?: boolean;
    /** Reasoning-model controls. */
    reasoning?: {
        effort?: 'low' | 'medium' | 'high';
        summary?: 'auto' | 'concise' | 'detailed';
    };
    /** JSON mode / structured output via text.format. */
    text?: ResponsesTextFormat;
    /** Chains onto a prior response for stateful multi-turn conversations. */
    previous_response_id?: string;
    /** Opaque end-user identifier. */
    user?: string;
}
294
+ /**
295
+ * Responses API output item
296
+ */
297
+ type ResponsesOutputItem = {
298
+ type: 'message';
299
+ id: string;
300
+ role: 'assistant';
301
+ content: ResponsesOutputContent[];
302
+ status: 'completed' | 'incomplete';
303
+ } | {
304
+ type: 'function_call';
305
+ id: string;
306
+ call_id: string;
307
+ name: string;
308
+ arguments: string;
309
+ status: 'completed' | 'incomplete';
310
+ } | {
311
+ type: 'function_call_output';
312
+ id: string;
313
+ call_id: string;
314
+ output: string;
315
+ } | {
316
+ type: 'web_search_call';
317
+ id: string;
318
+ status: 'completed' | 'searching' | 'incomplete';
319
+ } | {
320
+ type: 'reasoning';
321
+ id: string;
322
+ content: Array<{
323
+ type: 'reasoning_text';
324
+ text: string;
325
+ }>;
326
+ };
327
+ /**
328
+ * Responses API output content
329
+ */
330
+ type ResponsesOutputContent = {
331
+ type: 'output_text';
332
+ text: string;
333
+ annotations?: unknown[];
334
+ logprobs?: unknown[];
335
+ } | {
336
+ type: 'refusal';
337
+ refusal: string;
338
+ };
339
/**
 * Responses API response format
 */
interface ResponsesResponse {
    id: string;
    object: 'response';
    /** Unix timestamp (seconds). */
    created_at: number;
    model: string;
    status: 'completed' | 'failed' | 'incomplete' | 'in_progress';
    /** Ordered output items: messages, tool calls, reasoning, etc. */
    output: ResponsesOutputItem[];
    /** Presumably a convenience concatenation of all output_text parts — confirm. */
    output_text?: string;
    usage?: {
        input_tokens: number;
        input_tokens_details?: {
            /** Tokens served from the prompt cache. */
            cached_tokens?: number;
        };
        output_tokens: number;
        output_tokens_details?: {
            reasoning_tokens?: number;
        };
        total_tokens: number;
    };
    /** Presumably populated when status === 'failed' — confirm. */
    error?: {
        type?: string;
        code: string;
        message: string;
        param?: string;
    };
    /** Presumably populated when status === 'incomplete' — confirm. */
    incomplete_details?: {
        reason: string;
    };
}
371
/**
 * Responses API stream event.
 * One server-sent event; `type` discriminates which optional fields are populated.
 */
interface ResponsesStreamEvent {
    type: 'response.created' | 'response.in_progress' | 'response.completed' | 'response.failed' | 'response.incomplete' | 'response.output_item.added' | 'response.output_item.done' | 'response.content_part.added' | 'response.content_part.done' | 'response.output_text.delta' | 'response.output_text.done' | 'response.function_call_arguments.delta' | 'response.function_call_arguments.done' | 'response.reasoning_summary_text.delta' | 'response.reasoning_summary_text.done' | 'error';
    /** Full response snapshot — presumably on lifecycle events (created/completed/...); confirm. */
    response?: ResponsesResponse;
    /** Index into the response's output array. */
    output_index?: number;
    /** Index into the output item's content array. */
    content_index?: number;
    /** The affected output item (output_item.* events). */
    item?: ResponsesOutputItem;
    /** The affected content part (content_part.* events). */
    part?: ResponsesOutputContent;
    /** Incremental text/argument fragment (*.delta events). */
    delta?: string;
    /** Final accumulated text (*.done events). */
    text?: string;
    /** Populated when type === 'error'. */
    error?: {
        type: string;
        code: string;
        message: string;
    };
}
389
+
390
+ export { type OpenAIContentPart, type OpenAIError, type OpenAIMessage, type OpenAIRequest, type OpenAIResponse, type OpenAIResponseFormat, type OpenAIStreamChunk, type OpenAIStreamOptions, type OpenAITool, type OpenAIUsage, type ResponsesContentPart, type ResponsesInputItem, type ResponsesOutputContent, type ResponsesOutputItem, type ResponsesRequest, type ResponsesResponse, type ResponsesStreamEvent, type ResponsesTextFormat, type ResponsesTool, openaiAdapter, openaiResponsesAdapter };