@providerprotocol/ai 0.0.7 → 0.0.8

@@ -6,7 +6,7 @@ import {
  isAssistantMessage,
  isToolResultMessage,
  isUserMessage
- } from "../chunk-QUUX4G7U.js";
+ } from "../chunk-W4BB4BG2.js";
  import {
  parseSSEStream
  } from "../chunk-X5G4EHL7.js";
@@ -30,6 +30,24 @@ var Message = class {
  get text() {
    return this.getContent().filter((block) => block.type === "text").map((block) => block.text).join("\n\n");
  }
+ /**
+  * Convenience accessor for image content blocks
+  */
+ get images() {
+   return this.getContent().filter((block) => block.type === "image");
+ }
+ /**
+  * Convenience accessor for audio content blocks
+  */
+ get audio() {
+   return this.getContent().filter((block) => block.type === "audio");
+ }
+ /**
+  * Convenience accessor for video content blocks
+  */
+ get video() {
+   return this.getContent().filter((block) => block.type === "video");
+ }
  };
  var UserMessage = class extends Message {
  type = "user";
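The three new getters mirror the existing `text` accessor: each one filters `getContent()` by block type. A minimal usage sketch, assuming `Message` and `isAssistantMessage` are re-exported from the package root as the import hunks in this diff suggest:

```ts
import { type Message, isAssistantMessage } from "@providerprotocol/ai";

// Summarize attached media without walking getContent() by hand —
// each new getter filters the content blocks by their type tag.
function describeMedia(msg: Message): string {
  if (!isAssistantMessage(msg)) return "not an assistant message";
  return (
    `${msg.images.length} image(s), ` +
    `${msg.audio.length} audio block(s), ` +
    `${msg.video.length} video block(s), ` +
    `text: ${JSON.stringify(msg.text)}`
  );
}
```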
@@ -114,4 +132,4 @@ export {
  isAssistantMessage,
  isToolResultMessage
  };
- //# sourceMappingURL=chunk-QUUX4G7U.js.map
+ //# sourceMappingURL=chunk-W4BB4BG2.js.map
package/dist/chunk-W4BB4BG2.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/utils/id.ts","../src/types/messages.ts"],"sourcesContent":["/**\n * Generate a unique ID\n * Uses crypto.randomUUID if available, falls back to a simple implementation\n */\nexport function generateId(): string {\n if (typeof crypto !== 'undefined' && crypto.randomUUID) {\n return crypto.randomUUID();\n }\n\n // Fallback for environments without crypto.randomUUID\n return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {\n const r = (Math.random() * 16) | 0;\n const v = c === 'x' ? r : (r & 0x3) | 0x8;\n return v.toString(16);\n });\n}\n\n/**\n * Generate a short ID (for tool call IDs, etc.)\n */\nexport function generateShortId(prefix = ''): string {\n const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';\n let result = prefix;\n for (let i = 0; i < 12; i++) {\n result += chars.charAt(Math.floor(Math.random() * chars.length));\n }\n return result;\n}\n","import { generateId } from '../utils/id.ts';\nimport type {\n ContentBlock,\n TextBlock,\n ImageBlock,\n AudioBlock,\n VideoBlock,\n UserContent,\n AssistantContent,\n} from './content.ts';\nimport type { ToolCall, ToolResult } from './tool.ts';\n\n/**\n * Message type discriminator\n */\nexport type MessageType = 'user' | 'assistant' | 'tool_result';\n\n/**\n * Provider-namespaced metadata\n * Each provider uses its own namespace\n */\nexport interface MessageMetadata {\n [provider: string]: Record<string, unknown> | undefined;\n}\n\n/**\n * Options for message construction\n */\nexport interface MessageOptions {\n id?: string;\n metadata?: MessageMetadata;\n}\n\n/**\n * Base message class\n * All messages inherit from this\n */\nexport abstract class Message {\n /** Unique message identifier */\n readonly id: string;\n\n /** Timestamp */\n readonly timestamp: Date;\n\n /** Provider-specific metadata, namespaced by provider */\n readonly metadata?: MessageMetadata;\n\n /** Message type discriminator */\n abstract readonly type: MessageType;\n\n /** Raw content - implemented by subclasses */\n protected abstract getContent(): ContentBlock[];\n\n constructor(options?: MessageOptions) {\n this.id = options?.id ?? 
generateId();\n this.timestamp = new Date();\n this.metadata = options?.metadata;\n }\n\n /**\n * Convenience accessor for text content\n * Concatenates all text blocks with '\\n\\n'\n */\n get text(): string {\n return this.getContent()\n .filter((block): block is TextBlock => block.type === 'text')\n .map((block) => block.text)\n .join('\\n\\n');\n }\n\n /**\n * Convenience accessor for image content blocks\n */\n get images(): ImageBlock[] {\n return this.getContent().filter((block): block is ImageBlock => block.type === 'image');\n }\n\n /**\n * Convenience accessor for audio content blocks\n */\n get audio(): AudioBlock[] {\n return this.getContent().filter((block): block is AudioBlock => block.type === 'audio');\n }\n\n /**\n * Convenience accessor for video content blocks\n */\n get video(): VideoBlock[] {\n return this.getContent().filter((block): block is VideoBlock => block.type === 'video');\n }\n}\n\n/**\n * User input message\n */\nexport class UserMessage extends Message {\n readonly type = 'user' as const;\n readonly content: UserContent[];\n\n /**\n * @param content - String (converted to TextBlock) or array of content blocks\n */\n constructor(content: string | UserContent[], options?: MessageOptions) {\n super(options);\n if (typeof content === 'string') {\n this.content = [{ type: 'text', text: content }];\n } else {\n this.content = content;\n }\n }\n\n protected getContent(): ContentBlock[] {\n return this.content;\n }\n}\n\n/**\n * Assistant response message\n * May contain text, media, and/or tool calls\n */\nexport class AssistantMessage extends Message {\n readonly type = 'assistant' as const;\n readonly content: AssistantContent[];\n\n /** Tool calls requested by the model (if any) */\n readonly toolCalls?: ToolCall[];\n\n /**\n * @param content - String (converted to TextBlock) or array of content blocks\n * @param toolCalls - Tool calls requested by the model\n * @param options - Message ID and metadata\n */\n constructor(\n content: string | AssistantContent[],\n toolCalls?: ToolCall[],\n options?: MessageOptions\n ) {\n super(options);\n if (typeof content === 'string') {\n this.content = [{ type: 'text', text: content }];\n } else {\n this.content = content;\n }\n this.toolCalls = toolCalls;\n }\n\n protected getContent(): ContentBlock[] {\n return this.content;\n }\n\n /** Check if this message requests tool execution */\n get hasToolCalls(): boolean {\n return this.toolCalls !== undefined && this.toolCalls.length > 0;\n }\n}\n\n/**\n * Result of tool execution (sent back to model)\n */\nexport class ToolResultMessage extends Message {\n readonly type = 'tool_result' as const;\n readonly results: ToolResult[];\n\n /**\n * @param results - Array of tool execution results\n * @param options - Message ID and metadata\n */\n constructor(results: ToolResult[], options?: MessageOptions) {\n super(options);\n this.results = results;\n }\n\n protected getContent(): ContentBlock[] {\n // Tool results don't have traditional content blocks\n // Return text representations of results\n return this.results.map((result) => ({\n type: 'text' as const,\n text:\n typeof result.result === 'string'\n ? 
result.result\n : JSON.stringify(result.result),\n }));\n }\n}\n\n/**\n * Type guard for UserMessage\n */\nexport function isUserMessage(msg: Message): msg is UserMessage {\n return msg.type === 'user';\n}\n\n/**\n * Type guard for AssistantMessage\n */\nexport function isAssistantMessage(msg: Message): msg is AssistantMessage {\n return msg.type === 'assistant';\n}\n\n/**\n * Type guard for ToolResultMessage\n */\nexport function isToolResultMessage(msg: Message): msg is ToolResultMessage {\n return msg.type === 'tool_result';\n}\n"],"mappings":";AAIO,SAAS,aAAqB;AACnC,MAAI,OAAO,WAAW,eAAe,OAAO,YAAY;AACtD,WAAO,OAAO,WAAW;AAAA,EAC3B;AAGA,SAAO,uCAAuC,QAAQ,SAAS,CAAC,MAAM;AACpE,UAAM,IAAK,KAAK,OAAO,IAAI,KAAM;AACjC,UAAM,IAAI,MAAM,MAAM,IAAK,IAAI,IAAO;AACtC,WAAO,EAAE,SAAS,EAAE;AAAA,EACtB,CAAC;AACH;;;ACsBO,IAAe,UAAf,MAAuB;AAAA;AAAA,EAEnB;AAAA;AAAA,EAGA;AAAA;AAAA,EAGA;AAAA,EAQT,YAAY,SAA0B;AACpC,SAAK,KAAK,SAAS,MAAM,WAAW;AACpC,SAAK,YAAY,oBAAI,KAAK;AAC1B,SAAK,WAAW,SAAS;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,IAAI,OAAe;AACjB,WAAO,KAAK,WAAW,EACpB,OAAO,CAAC,UAA8B,MAAM,SAAS,MAAM,EAC3D,IAAI,CAAC,UAAU,MAAM,IAAI,EACzB,KAAK,MAAM;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKA,IAAI,SAAuB;AACzB,WAAO,KAAK,WAAW,EAAE,OAAO,CAAC,UAA+B,MAAM,SAAS,OAAO;AAAA,EACxF;AAAA;AAAA;AAAA;AAAA,EAKA,IAAI,QAAsB;AACxB,WAAO,KAAK,WAAW,EAAE,OAAO,CAAC,UAA+B,MAAM,SAAS,OAAO;AAAA,EACxF;AAAA;AAAA;AAAA;AAAA,EAKA,IAAI,QAAsB;AACxB,WAAO,KAAK,WAAW,EAAE,OAAO,CAAC,UAA+B,MAAM,SAAS,OAAO;AAAA,EACxF;AACF;AAKO,IAAM,cAAN,cAA0B,QAAQ;AAAA,EAC9B,OAAO;AAAA,EACP;AAAA;AAAA;AAAA;AAAA,EAKT,YAAY,SAAiC,SAA0B;AACrE,UAAM,OAAO;AACb,QAAI,OAAO,YAAY,UAAU;AAC/B,WAAK,UAAU,CAAC,EAAE,MAAM,QAAQ,MAAM,QAAQ,CAAC;AAAA,IACjD,OAAO;AACL,WAAK,UAAU;AAAA,IACjB;AAAA,EACF;AAAA,EAEU,aAA6B;AACrC,WAAO,KAAK;AAAA,EACd;AACF;AAMO,IAAM,mBAAN,cAA+B,QAAQ;AAAA,EACnC,OAAO;AAAA,EACP;AAAA;AAAA,EAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOT,YACE,SACA,WACA,SACA;AACA,UAAM,OAAO;AACb,QAAI,OAAO,YAAY,UAAU;AAC/B,WAAK,UAAU,CAAC,EAAE,MAAM,QAAQ,MAAM,QAAQ,CAAC;AAAA,IACjD,OAAO;AACL,WAAK,UAAU;AAAA,IACjB;AACA,SAAK,YAAY;AAAA,EACnB;AAAA,EAEU,aAA6B;AACrC,WAAO,KAAK;AAAA,EACd;AAAA;AAAA,EAGA,IAAI,eAAwB;AAC1B,WAAO,KAAK,cAAc,UAAa,KAAK,UAAU,SAAS;AAAA,EACjE;AACF;AAKO,IAAM,oBAAN,cAAgC,QAAQ;AAAA,EACpC,OAAO;AAAA,EACP;AAAA;AAAA;AAAA;AAAA;AAAA,EAMT,YAAY,SAAuB,SAA0B;AAC3D,UAAM,OAAO;AACb,SAAK,UAAU;AAAA,EACjB;AAAA,EAEU,aAA6B;AAGrC,WAAO,KAAK,QAAQ,IAAI,CAAC,YAAY;AAAA,MACnC,MAAM;AAAA,MACN,MACE,OAAO,OAAO,WAAW,WACrB,OAAO,SACP,KAAK,UAAU,OAAO,MAAM;AAAA,IACpC,EAAE;AAAA,EACJ;AACF;AAKO,SAAS,cAAc,KAAkC;AAC9D,SAAO,IAAI,SAAS;AACtB;AAKO,SAAS,mBAAmB,KAAuC;AACxE,SAAO,IAAI,SAAS;AACtB;AAKO,SAAS,oBAAoB,KAAwC;AAC1E,SAAO,IAAI,SAAS;AACtB;","names":[]}
@@ -6,7 +6,7 @@ import {
  isAssistantMessage,
  isToolResultMessage,
  isUserMessage
- } from "../chunk-QUUX4G7U.js";
+ } from "../chunk-W4BB4BG2.js";
  import {
  parseSSEStream
  } from "../chunk-X5G4EHL7.js";
package/dist/index.d.ts CHANGED
@@ -249,6 +249,18 @@ declare abstract class Message {
   * Concatenates all text blocks with '\n\n'
   */
  get text(): string;
+ /**
+  * Convenience accessor for image content blocks
+  */
+ get images(): ImageBlock[];
+ /**
+  * Convenience accessor for audio content blocks
+  */
+ get audio(): AudioBlock[];
+ /**
+  * Convenience accessor for video content blocks
+  */
+ get video(): VideoBlock[];
  }
  /**
   * User input message
package/dist/index.js CHANGED
@@ -10,7 +10,7 @@ import {
  isAssistantMessage,
  isToolResultMessage,
  isUserMessage
- } from "./chunk-QUUX4G7U.js";
+ } from "./chunk-W4BB4BG2.js";
  import {
  ExponentialBackoff,
  LinearBackoff,
@@ -6,7 +6,7 @@ import {
  isAssistantMessage,
  isToolResultMessage,
  isUserMessage
- } from "../chunk-QUUX4G7U.js";
+ } from "../chunk-W4BB4BG2.js";
  import {
  UPPError,
  doFetch,
@@ -1,5 +1,42 @@
  import { b as Provider, M as ModelReference, a as LLMHandler } from '../provider-CUJWjgNl.js';

+ /**
+  * Audio output configuration for Chat Completions
+  */
+ interface OpenAIAudioConfig {
+     /** Audio format */
+     format: 'wav' | 'aac' | 'mp3' | 'flac' | 'opus' | 'pcm16';
+     /** Voice to use for audio generation */
+     voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse' | 'marin' | 'cedar';
+ }
+ /**
+  * User location for web search context
+  * Requires type: 'approximate' with location fields at the same level
+  */
+ interface OpenAIWebSearchUserLocation {
+     /** Location type - must be 'approximate' */
+     type: 'approximate';
+     /** City name */
+     city?: string;
+     /** ISO 3166-1 country code (e.g., "US") */
+     country?: string;
+     /** Region/state name */
+     region?: string;
+     /** IANA timezone (e.g., "America/New_York") */
+     timezone?: string;
+ }
+ /**
+  * Web search options for Chat Completions API
+  */
+ interface OpenAIWebSearchOptions {
+     /**
+      * Context size for search results
+      * Controls how much context from web results to include
+      */
+     search_context_size?: 'low' | 'medium' | 'high';
+     /** User location for localizing search results */
+     user_location?: OpenAIWebSearchUserLocation | null;
+ }
  /**
   * OpenAI Chat Completions API parameters
   * These are passed through to the /v1/chat/completions endpoint
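Note that Chat Completions gains web search via a request parameter rather than a tool entry. A hedged sketch of a localized configuration typed against the interfaces above — the `@providerprotocol/ai/openai` subpath is assumed from the dist layout, and the location values are placeholders:

```ts
import type { OpenAIWebSearchOptions } from "@providerprotocol/ai/openai";

// The user_location shape is flat: type: 'approximate' sits at the
// same level as the location fields, not nested under a sub-object.
const webSearchOptions: OpenAIWebSearchOptions = {
  search_context_size: "medium",
  user_location: {
    type: "approximate",
    city: "Portland",                // placeholder
    country: "US",                   // ISO 3166-1 code
    region: "Oregon",                // placeholder
    timezone: "America/Los_Angeles", // IANA timezone
  },
};
```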
@@ -25,9 +62,9 @@ interface OpenAICompletionsParams {
      logprobs?: boolean;
      /** Number of top logprobs to return (0-20) */
      top_logprobs?: number;
-     /** Seed for deterministic sampling (beta) */
+     /** Seed for deterministic sampling (beta, deprecated) */
      seed?: number;
-     /** User identifier for abuse detection */
+     /** User identifier (deprecated, use safety_identifier or prompt_cache_key) */
      user?: string;
      /** Logit bias map */
      logit_bias?: Record<string, number>;
@@ -38,10 +75,10 @@ interface OpenAICompletionsParams {
      /** Reasoning effort for reasoning models */
      reasoning_effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
      /** Service tier */
-     service_tier?: 'auto' | 'default' | 'flex' | 'priority';
+     service_tier?: 'auto' | 'default' | 'flex' | 'scale' | 'priority';
      /** Store completion for distillation */
      store?: boolean;
-     /** Metadata key-value pairs */
+     /** Metadata key-value pairs (max 16, keys max 64 chars, values max 512 chars) */
      metadata?: Record<string, string>;
      /** Response format for structured output */
      response_format?: OpenAIResponseFormat;
@@ -58,19 +95,51 @@ interface OpenAICompletionsParams {
      };
      /**
       * Stable identifier for caching similar requests
-      * Used to optimize cache hit rates
+      * Used to optimize cache hit rates (replaces user field)
       */
      prompt_cache_key?: string;
      /**
       * Retention policy for prompt cache
       * Set to "24h" to enable extended prompt caching up to 24 hours
       */
-     prompt_cache_retention?: '24h';
+     prompt_cache_retention?: 'in-memory' | '24h';
      /**
       * Stable identifier for abuse detection
       * Recommend hashing username or email address
       */
      safety_identifier?: string;
+     /**
+      * Output modalities to generate
+      * Default: ["text"]. Use ["text", "audio"] for audio-capable models
+      */
+     modalities?: Array<'text' | 'audio'>;
+     /**
+      * Audio output configuration
+      * Required when modalities includes "audio"
+      */
+     audio?: OpenAIAudioConfig | null;
+     /**
+      * Web search configuration
+      * Enables the model to search the web for up-to-date information
+      */
+     web_search_options?: OpenAIWebSearchOptions;
+ }
+ /**
+  * Prompt template reference for Responses API
+  */
+ interface OpenAIPromptTemplate {
+     /** Prompt template ID */
+     id: string;
+     /** Variables to fill into the template */
+     variables?: Record<string, string>;
+ }
+ /**
+  * Conversation reference for Responses API
+  * Items from this conversation are prepended to input_items
+  */
+ interface OpenAIConversation {
+     /** Conversation ID */
+     id: string;
  }
  /**
   * OpenAI Responses API parameters
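The new Completions fields compose as follows: audio output requires both `modalities` and `audio`, while `prompt_cache_key` and `safety_identifier` replace the deprecated `user`. A sketch under the same subpath assumption; the hashing choice follows the doc comment's recommendation and is otherwise illustrative:

```ts
import { createHash } from "node:crypto";
import type { OpenAICompletionsParams } from "@providerprotocol/ai/openai";

const params: OpenAICompletionsParams = {
  // Audio output needs both: list the modality AND configure it.
  modalities: ["text", "audio"],
  audio: { format: "mp3", voice: "coral" },
  // Successors to the deprecated `user` field:
  prompt_cache_key: "support-bot-v2",      // groups similar requests for caching
  safety_identifier: createHash("sha256")  // hash, don't send raw PII
    .update("user@example.com")
    .digest("hex"),
  prompt_cache_retention: "24h",           // extended prompt caching
};
```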
@@ -83,27 +152,91 @@ interface OpenAIResponsesParams {
      temperature?: number;
      /** Top-p (nucleus) sampling (0.0 - 1.0) */
      top_p?: number;
+     /** Number of top logprobs to return (0-20) */
+     top_logprobs?: number;
      /** Whether to enable parallel tool calls */
      parallel_tool_calls?: boolean;
-     /** Reasoning configuration */
+     /** Reasoning configuration (for gpt-5 and o-series models) */
      reasoning?: {
          effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
-         summary?: string;
+         /** Include summary of reasoning */
+         summary?: 'auto' | 'concise' | 'detailed';
      };
      /** Service tier */
-     service_tier?: 'auto' | 'default' | 'flex' | 'priority';
+     service_tier?: 'auto' | 'default' | 'flex' | 'scale' | 'priority';
      /** Truncation strategy */
      truncation?: 'auto' | 'disabled';
-     /** Fields to include in output */
+     /**
+      * Fields to include in output
+      * Supported values:
+      * - 'web_search_call.action.sources': Include web search sources
+      * - 'code_interpreter_call.outputs': Include code execution outputs
+      * - 'computer_call_output.output.image_url': Include computer call images
+      * - 'file_search_call.results': Include file search results
+      * - 'message.input_image.image_url': Include input image URLs
+      * - 'message.output_text.logprobs': Include logprobs with messages
+      * - 'reasoning.encrypted_content': Include encrypted reasoning tokens
+      */
      include?: string[];
-     /** Background processing */
+     /** Background processing - run response asynchronously */
      background?: boolean;
-     /** Continue from a previous response */
+     /** Continue from a previous response (cannot use with conversation) */
      previous_response_id?: string;
+     /**
+      * Conversation context - items prepended to input_items
+      * Cannot be used with previous_response_id
+      */
+     conversation?: string | OpenAIConversation;
      /** Store response for continuation */
      store?: boolean;
-     /** Metadata key-value pairs */
+     /** Metadata key-value pairs (max 16, keys max 64 chars, values max 512 chars) */
      metadata?: Record<string, string>;
+     /**
+      * Maximum total calls to built-in tools in a response
+      * Applies across all built-in tool calls, not per tool
+      */
+     max_tool_calls?: number;
+     /**
+      * Reference to a prompt template and its variables
+      */
+     prompt?: OpenAIPromptTemplate;
+     /**
+      * Stable identifier for caching similar requests
+      * Used to optimize cache hit rates (replaces user field)
+      */
+     prompt_cache_key?: string;
+     /**
+      * Retention policy for prompt cache
+      * Set to "24h" to enable extended prompt caching up to 24 hours
+      */
+     prompt_cache_retention?: 'in-memory' | '24h';
+     /**
+      * Stable identifier for abuse detection
+      * Recommend hashing username or email address
+      */
+     safety_identifier?: string;
+     /** User identifier (deprecated, use safety_identifier or prompt_cache_key) */
+     user?: string;
+     /**
+      * Built-in tools for the Responses API
+      * Use the tool helper constructors: tools.webSearch(), tools.imageGeneration(), etc.
+      *
+      * @example
+      * ```ts
+      * import { tools } from 'provider-protocol/openai';
+      *
+      * const model = llm({
+      *   model: openai('gpt-4o'),
+      *   params: {
+      *     tools: [
+      *       tools.webSearch(),
+      *       tools.imageGeneration({ quality: 'high' }),
+      *     ],
+      *   },
+      * });
+      * ```
+      */
+     tools?: OpenAIBuiltInTool[];
  }
  /**
   * API mode for OpenAI provider
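Two of the new Responses fields interact: `conversation` and `previous_response_id` are documented as mutually exclusive ways to thread state. A hedged sketch of each path (IDs are placeholders):

```ts
import type { OpenAIResponsesParams } from "@providerprotocol/ai/openai";

// Option A: thread through a server-side conversation; its items are
// prepended to input_items. Must NOT also set previous_response_id.
const viaConversation: OpenAIResponsesParams = {
  conversation: { id: "conv_123" }, // a bare string "conv_123" also works
  max_tool_calls: 5,                // budget shared across ALL built-in tools
  store: true,
};

// Option B: chain from an earlier stored response instead.
const viaPreviousResponse: OpenAIResponsesParams = {
  previous_response_id: "resp_456",
  top_logprobs: 5,
  include: ["message.output_text.logprobs"], // surface the logprobs
};
```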
@@ -146,6 +279,244 @@ type OpenAIResponseFormat = {
          strict?: boolean;
      };
  };
+ /**
+  * Tool definition for Responses API
+  */
+ interface OpenAIResponsesTool {
+     type: 'function';
+     name: string;
+     description: string;
+     parameters: {
+         type: 'object';
+         properties: Record<string, unknown>;
+         required?: string[];
+         additionalProperties?: boolean;
+     };
+     strict?: boolean;
+ }
+ /**
+  * Web search tool for Responses API
+  * Enables the model to search the web for up-to-date information
+  */
+ interface OpenAIWebSearchTool {
+     type: 'web_search';
+     /**
+      * Context size for search results
+      * Controls how much context from web results to include
+      */
+     search_context_size?: 'low' | 'medium' | 'high';
+     /** User location for localizing search results */
+     user_location?: OpenAIWebSearchUserLocation | null;
+ }
+ /**
+  * File search tool for Responses API
+  * Enables the model to search through uploaded files
+  */
+ interface OpenAIFileSearchTool {
+     type: 'file_search';
+     /** File search configuration */
+     file_search?: {
+         /** Vector store IDs to search */
+         vector_store_ids: string[];
+         /** Maximum number of results to return */
+         max_num_results?: number;
+         /** Ranking options for search results */
+         ranking_options?: {
+             /** Ranker to use */
+             ranker?: 'auto' | 'default_2024_08_21';
+             /** Score threshold (0-1) */
+             score_threshold?: number;
+         };
+         /** Filters to apply */
+         filters?: Record<string, unknown>;
+     };
+ }
+ /**
+  * Code interpreter container configuration
+  */
+ interface OpenAICodeInterpreterContainer {
+     /** Container type - 'auto' creates a new container */
+     type: 'auto';
+     /** Memory limit for the container (e.g., '1g', '4g') */
+     memory_limit?: string;
+     /** File IDs to make available in the container */
+     file_ids?: string[];
+ }
+ /**
+  * Code interpreter tool for Responses API
+  * Allows the model to write and run Python code
+  */
+ interface OpenAICodeInterpreterTool {
+     type: 'code_interpreter';
+     /** Code interpreter configuration */
+     code_interpreter?: {
+         /** Container configuration */
+         container: string | OpenAICodeInterpreterContainer;
+     };
+ }
+ /**
+  * Computer tool environment configuration
+  */
+ interface OpenAIComputerEnvironment {
+     /** Environment type */
+     type: 'browser' | 'mac' | 'windows' | 'linux' | 'ubuntu';
+ }
+ /**
+  * Computer tool for Responses API
+  * Enables the model to interact with computer interfaces
+  */
+ interface OpenAIComputerTool {
+     type: 'computer';
+     /** Computer tool configuration */
+     computer?: {
+         /** Display width in pixels */
+         display_width: number;
+         /** Display height in pixels */
+         display_height: number;
+         /** Environment configuration */
+         environment?: OpenAIComputerEnvironment;
+     };
+ }
+ /**
+  * Image generation tool for Responses API
+  */
+ interface OpenAIImageGenerationTool {
+     type: 'image_generation';
+     /** Background transparency */
+     background?: 'transparent' | 'opaque' | 'auto';
+     /** Input image formats supported */
+     input_image_mask?: boolean;
+     /** Model to use for generation */
+     model?: string;
+     /** Moderation level */
+     moderation?: 'auto' | 'low';
+     /** Output compression */
+     output_compression?: number;
+     /** Output format */
+     output_format?: 'png' | 'jpeg' | 'webp';
+     /** Partial images during streaming */
+     partial_images?: number;
+     /** Image quality */
+     quality?: 'auto' | 'high' | 'medium' | 'low';
+     /** Image size */
+     size?: 'auto' | '1024x1024' | '1024x1536' | '1536x1024';
+ }
+ /**
+  * MCP (Model Context Protocol) server configuration
+  */
+ interface OpenAIMcpServerConfig {
+     /** Server URL */
+     url: string;
+     /** Server name for identification */
+     name?: string;
+     /** Tool configuration for the server */
+     tool_configuration?: {
+         /** Allowed tools from this server */
+         allowed_tools?: string[] | {
+             type: 'all';
+         };
+     };
+     /** Headers to send with requests */
+     headers?: Record<string, string>;
+     /** Allowed resources */
+     allowed_resources?: string[];
+     /** Require approval for tool calls */
+     require_approval?: 'always' | 'never' | {
+         type: 'except';
+         tools: string[];
+     };
+ }
+ /**
+  * MCP tool for Responses API
+  * Enables connections to MCP servers
+  */
+ interface OpenAIMcpTool {
+     type: 'mcp';
+     /** MCP server configurations */
+     mcp?: {
+         /** Server configuration */
+         server: OpenAIMcpServerConfig;
+     };
+ }
+ /**
+  * Union type for all Responses API built-in tools
+  */
+ type OpenAIBuiltInTool = OpenAIWebSearchTool | OpenAIFileSearchTool | OpenAICodeInterpreterTool | OpenAIComputerTool | OpenAIImageGenerationTool | OpenAIMcpTool;
+ /**
+  * Combined tool type for Responses API (built-in or function)
+  */
+ type OpenAIResponsesToolUnion = OpenAIResponsesTool | OpenAIBuiltInTool;
+ /**
+  * Helper to create a web search tool
+  * Note: Configuration options are passed at the top level, not nested
+  */
+ declare function webSearchTool(options?: {
+     search_context_size?: 'low' | 'medium' | 'high';
+     user_location?: OpenAIWebSearchUserLocation | null;
+ }): OpenAIWebSearchTool;
+ /**
+  * Helper to create a file search tool
+  */
+ declare function fileSearchTool(options: {
+     vector_store_ids: string[];
+     max_num_results?: number;
+     ranking_options?: {
+         ranker?: 'auto' | 'default_2024_08_21';
+         score_threshold?: number;
+     };
+     filters?: Record<string, unknown>;
+ }): OpenAIFileSearchTool;
+ /**
+  * Helper to create a code interpreter tool
+  */
+ declare function codeInterpreterTool(options?: {
+     container?: string | OpenAICodeInterpreterContainer;
+ }): OpenAICodeInterpreterTool;
+ /**
+  * Helper to create a computer tool
+  */
+ declare function computerTool(options: {
+     display_width: number;
+     display_height: number;
+     environment?: OpenAIComputerEnvironment;
+ }): OpenAIComputerTool;
+ /**
+  * Helper to create an image generation tool
+  * Note: Configuration options are passed at the top level, not nested
+  */
+ declare function imageGenerationTool(options?: {
+     background?: 'transparent' | 'opaque' | 'auto';
+     model?: string;
+     quality?: 'auto' | 'high' | 'medium' | 'low';
+     size?: 'auto' | '1024x1024' | '1024x1536' | '1536x1024';
+     output_format?: 'png' | 'jpeg' | 'webp';
+ }): OpenAIImageGenerationTool;
+ /**
+  * Helper to create an MCP tool
+  */
+ declare function mcpTool(options: {
+     url: string;
+     name?: string;
+     allowed_tools?: string[] | {
+         type: 'all';
+     };
+     headers?: Record<string, string>;
+     require_approval?: 'always' | 'never' | {
+         type: 'except';
+         tools: string[];
+     };
+ }): OpenAIMcpTool;
+ /**
+  * Namespace for tool helper constructors
+  */
+ declare const tools: {
+     webSearch: typeof webSearchTool;
+     fileSearch: typeof fileSearchTool;
+     codeInterpreter: typeof codeInterpreterTool;
+     computer: typeof computerTool;
+     imageGeneration: typeof imageGenerationTool;
+     mcp: typeof mcpTool;
+ };

  /** Union type for modalities interface */
  type OpenAILLMParamsUnion = OpenAICompletionsParams | OpenAIResponsesParams;
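Beyond the `webSearch`/`imageGeneration` pairing in the `tools` doc comment above, the same namespace covers the remaining built-ins. A sketch combining file search, MCP, and code interpreter — vector-store ID, server URL, and tool names are placeholders, and the import path follows the @example:

```ts
import { tools } from "provider-protocol/openai";

const builtIns = [
  tools.fileSearch({
    vector_store_ids: ["vs_123"],   // placeholder vector store
    max_num_results: 8,
    ranking_options: { ranker: "auto", score_threshold: 0.5 },
  }),
  tools.mcp({
    url: "https://mcp.example.com", // placeholder server
    name: "docs",
    allowed_tools: ["search_docs"], // placeholder tool name
    require_approval: "never",
  }),
  tools.codeInterpreter(),          // container defaults when omitted
];
```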
@@ -220,4 +591,4 @@ interface OpenAIProvider extends Provider<OpenAIProviderOptions> {
   */
  declare const openai: OpenAIProvider;

- export { type OpenAIAPIMode, type OpenAICompletionsParams, type OpenAIConfig, type OpenAIModelOptions, type OpenAIModelReference, type OpenAIResponsesParams, openai };
+ export { type OpenAIAPIMode, type OpenAIAudioConfig, type OpenAIBuiltInTool, type OpenAICodeInterpreterContainer, type OpenAICodeInterpreterTool, type OpenAICompletionsParams, type OpenAIComputerEnvironment, type OpenAIComputerTool, type OpenAIConfig, type OpenAIConversation, type OpenAIFileSearchTool, type OpenAIImageGenerationTool, type OpenAIMcpServerConfig, type OpenAIMcpTool, type OpenAIModelOptions, type OpenAIModelReference, type OpenAIPromptTemplate, type OpenAIResponsesParams, type OpenAIResponsesToolUnion, type OpenAIWebSearchOptions, type OpenAIWebSearchTool, type OpenAIWebSearchUserLocation, codeInterpreterTool, computerTool, fileSearchTool, imageGenerationTool, mcpTool, openai, tools, webSearchTool };
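The widened export list makes each new tool type and helper importable directly. A quick smoke test of the new surface, again assuming the `@providerprotocol/ai/openai` subpath:

```ts
import {
  webSearchTool,
  type OpenAIAudioConfig,
  type OpenAIBuiltInTool,
} from "@providerprotocol/ai/openai";

const search: OpenAIBuiltInTool = webSearchTool({ search_context_size: "low" });
const voice: OpenAIAudioConfig = { format: "wav", voice: "verse" };
```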