@zenning/openai 2.3.0 → 3.0.5

This diff shows the publicly available contents of these package versions as published to their respective public registries, and is provided for informational purposes only.
@@ -1,40 +1,28 @@
- import { LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@zenning/provider';
+ import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@zenning/provider';
  import * as _zenning_provider_utils from '@zenning/provider-utils';
- import { FetchFunction } from '@zenning/provider-utils';
- import { z } from 'zod/v4';
+ import { InferSchema, FetchFunction } from '@zenning/provider-utils';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
- declare const openaiChatLanguageModelOptions: z.ZodObject<{
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
- logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
- parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
- user: z.ZodOptional<z.ZodString>;
- reasoningEffort: z.ZodOptional<z.ZodEnum<{
- minimal: "minimal";
- low: "low";
- medium: "medium";
- high: "high";
- }>>;
- maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
- store: z.ZodOptional<z.ZodBoolean>;
- metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
- prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
- structuredOutputs: z.ZodOptional<z.ZodBoolean>;
- serviceTier: z.ZodOptional<z.ZodEnum<{
- auto: "auto";
- flex: "flex";
- priority: "priority";
- }>>;
- strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
- textVerbosity: z.ZodOptional<z.ZodEnum<{
- low: "low";
- medium: "medium";
- high: "high";
- }>>;
- promptCacheKey: z.ZodOptional<z.ZodString>;
- safetyIdentifier: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>;
- type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | (string & {});
+ declare const openaiChatLanguageModelOptions: _zenning_provider_utils.LazySchema<{
+ logitBias?: Record<number, number> | undefined;
+ logprobs?: number | boolean | undefined;
+ parallelToolCalls?: boolean | undefined;
+ user?: string | undefined;
+ reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined;
+ maxCompletionTokens?: number | undefined;
+ store?: boolean | undefined;
+ metadata?: Record<string, string> | undefined;
+ prediction?: Record<string, any> | undefined;
+ serviceTier?: "default" | "auto" | "flex" | "priority" | undefined;
+ strictJsonSchema?: boolean | undefined;
+ textVerbosity?: "low" | "medium" | "high" | undefined;
+ promptCacheKey?: string | undefined;
+ promptCacheRetention?: "in_memory" | "24h" | undefined;
+ safetyIdentifier?: string | undefined;
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
+ forceReasoning?: boolean | undefined;
+ }>;
+ type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
 
  type OpenAIChatConfig = {
  provider: string;
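
In 3.0, provider-option schemas move from concrete `z.ZodObject` declarations to `LazySchema<...>` carrying the plain option shape, and `z.infer` becomes `InferSchema`. Note that `structuredOutputs` is gone from the chat options, `promptCacheRetention`, `systemMessageMode`, and `forceReasoning` are new, `reasoningEffort` gains `'none'` and `'xhigh'`, and `serviceTier` newly accepts `'default'`. A minimal sketch of an options object that type-checks against the 3.x shape (the import path from the package root is an assumption):

```ts
import type { OpenAIChatLanguageModelOptions } from '@zenning/openai';

// Every field is optional; the values below come straight from the 3.x shape.
const chatOptions: OpenAIChatLanguageModelOptions = {
  reasoningEffort: 'xhigh',        // 'none' and 'xhigh' are new in 3.x
  serviceTier: 'priority',         // 'default' is newly accepted in 3.x
  promptCacheKey: 'support-bot-v1',
  promptCacheRetention: '24h',     // new in 3.x
  systemMessageMode: 'developer',  // new in 3.x
  maxCompletionTokens: 4096,
};
```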
@@ -55,19 +43,19 @@ declare class OpenAIChatLanguageModel implements LanguageModelV3 {
  constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
  get provider(): string;
  private getArgs;
- doGenerate(options: Parameters<LanguageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doGenerate']>>>;
- doStream(options: Parameters<LanguageModelV3['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doStream']>>>;
+ doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
+ doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
  }
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
- declare const openaiCompletionProviderOptions: z.ZodObject<{
- echo: z.ZodOptional<z.ZodBoolean>;
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
- suffix: z.ZodOptional<z.ZodString>;
- user: z.ZodOptional<z.ZodString>;
- logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
- }, z.core.$strip>;
- type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;
+ declare const openaiCompletionProviderOptions: _zenning_provider_utils.LazySchema<{
+ echo?: boolean | undefined;
+ logitBias?: Record<string, number> | undefined;
+ suffix?: string | undefined;
+ user?: string | undefined;
+ logprobs?: number | boolean | undefined;
+ }>;
+ type OpenAICompletionProviderOptions = InferSchema<typeof openaiCompletionProviderOptions>;
 
  type OpenAICompletionConfig = {
  provider: string;
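
`doGenerate` and `doStream` drop the `Parameters<...>[0]` / `Awaited<ReturnType<...>>` projections in favor of the named `LanguageModelV3CallOptions`, `LanguageModelV3GenerateResult`, and `LanguageModelV3StreamResult` types. A sketch of the kind of typed wrapper this enables; nothing here beyond the declared signatures is taken from the package:

```ts
import type {
  LanguageModelV3CallOptions,
  LanguageModelV3GenerateResult,
} from '@zenning/provider';
import { OpenAIChatLanguageModel } from '@zenning/openai';

// With named call/result types, middleware-style wrappers no longer need
// Parameters<LanguageModelV3['doGenerate']>[0] gymnastics.
async function timedGenerate(
  model: OpenAIChatLanguageModel,
  options: LanguageModelV3CallOptions,
): Promise<LanguageModelV3GenerateResult> {
  const start = Date.now();
  const result = await model.doGenerate(options);
  console.log(`doGenerate took ${Date.now() - start}ms`);
  return result;
}
```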
@@ -87,8 +75,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
  get provider(): string;
  readonly supportedUrls: Record<string, RegExp[]>;
  private getArgs;
- doGenerate(options: Parameters<LanguageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doGenerate']>>>;
- doStream(options: Parameters<LanguageModelV3['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doStream']>>>;
+ doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
+ doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
  }
 
  type OpenAIConfig = {
@@ -112,13 +100,13 @@ type OpenAIConfig = {
  };
 
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
- declare const openaiEmbeddingProviderOptions: z.ZodObject<{
- dimensions: z.ZodOptional<z.ZodNumber>;
- user: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>;
- type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;
+ declare const openaiEmbeddingProviderOptions: _zenning_provider_utils.LazySchema<{
+ dimensions?: number | undefined;
+ user?: string | undefined;
+ }>;
+ type OpenAIEmbeddingProviderOptions = InferSchema<typeof openaiEmbeddingProviderOptions>;
 
- declare class OpenAIEmbeddingModel implements EmbeddingModelV3<string> {
+ declare class OpenAIEmbeddingModel implements EmbeddingModelV3 {
  readonly specificationVersion = "v3";
  readonly modelId: OpenAIEmbeddingModelId;
  readonly maxEmbeddingsPerCall = 2048;
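
The embedding class drops the `<string>` type argument (`EmbeddingModelV3` is no longer generic here), and its provider options follow the same `LazySchema`/`InferSchema` migration. A small sketch against the inferred shape (import path assumed):

```ts
import type { OpenAIEmbeddingProviderOptions } from '@zenning/openai';

// Both fields are optional in the inferred shape.
const embeddingOptions: OpenAIEmbeddingProviderOptions = {
  dimensions: 512, // shorten vectors; supported by the text-embedding-3-* models
  user: 'user-1234',
};
```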
@@ -126,10 +114,10 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV3<string> {
  private readonly config;
  get provider(): string;
  constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3<string>['doEmbed']>>>;
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
  }
 
- type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | (string & {});
  declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
  declare const hasDefaultResponseFormat: Set<string>;
 
@@ -145,23 +133,20 @@ declare class OpenAIImageModel implements ImageModelV3 {
  get maxImagesPerCall(): number;
  get provider(): string;
  constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
+ doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
  }
 
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
- declare const openAITranscriptionProviderOptions: z.ZodObject<{
- include: z.ZodOptional<z.ZodArray<z.ZodString>>;
- language: z.ZodOptional<z.ZodString>;
- prompt: z.ZodOptional<z.ZodString>;
- temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
- timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<{
- word: "word";
- segment: "segment";
- }>>>>;
- }, z.core.$strip>;
- type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
+ declare const openAITranscriptionProviderOptions: _zenning_provider_utils.LazySchema<{
+ include?: string[] | undefined;
+ language?: string | undefined;
+ prompt?: string | undefined;
+ temperature?: number | undefined;
+ timestampGranularities?: ("word" | "segment")[] | undefined;
+ }>;
+ type OpenAITranscriptionProviderOptions = InferSchema<typeof openAITranscriptionProviderOptions>;
 
- type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
+ type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
  providerOptions?: {
  openai?: OpenAITranscriptionProviderOptions;
  };
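
`OpenAITranscriptionCallOptions` is now built on `TranscriptionModelV3CallOptions`, with the OpenAI-specific fields nested under `providerOptions.openai`. A sketch of just that slice (the base V3 call-option fields are not part of this diff, so they are omitted):

```ts
import type { OpenAITranscriptionCallOptions } from '@zenning/openai';

// Only the provider-options slice is shown; the base V3 call options
// (audio payload etc.) live in @zenning/provider.
const transcriptionSlice: Pick<OpenAITranscriptionCallOptions, 'providerOptions'> = {
  providerOptions: {
    openai: {
      language: 'en',
      temperature: 0,
      timestampGranularities: ['word', 'segment'],
    },
  },
};
```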
@@ -171,39 +156,39 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
  currentDate?: () => Date;
  };
  }
- declare class OpenAITranscriptionModel implements TranscriptionModelV2 {
+ declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
  readonly modelId: OpenAITranscriptionModelId;
  private readonly config;
- readonly specificationVersion = "v2";
+ readonly specificationVersion = "v3";
  get provider(): string;
  constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
  private getArgs;
- doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV2['doGenerate']>>>;
+ doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>>;
  }
 
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
+ declare const openaiSpeechProviderOptionsSchema: _zenning_provider_utils.LazySchema<{
+ instructions?: string | null | undefined;
+ speed?: number | null | undefined;
+ }>;
+ type OpenAISpeechCallOptions = InferSchema<typeof openaiSpeechProviderOptionsSchema>;
 
- declare const OpenAIProviderOptionsSchema: z.ZodObject<{
- instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
- }, z.core.$strip>;
- type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;
  interface OpenAISpeechModelConfig extends OpenAIConfig {
  _internal?: {
  currentDate?: () => Date;
  };
  }
- declare class OpenAISpeechModel implements SpeechModelV2 {
+ declare class OpenAISpeechModel implements SpeechModelV3 {
  readonly modelId: OpenAISpeechModelId;
  private readonly config;
- readonly specificationVersion = "v2";
+ readonly specificationVersion = "v3";
  get provider(): string;
  constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
  private getArgs;
- doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
+ doGenerate(options: Parameters<SpeechModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>>;
  }
 
- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 
  declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
  readonly specificationVersion = "v3";
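
On the speech side, the removed internal `OpenAIProviderOptionsSchema` is replaced by the exported `openaiSpeechProviderOptionsSchema`, and both the transcription and speech models move from specification version "v2" to "v3". The inferred call options stay small:

```ts
import type { OpenAISpeechCallOptions } from '@zenning/openai';

// Both fields are optional and nullable in the inferred shape.
const speechOptions: OpenAISpeechCallOptions = {
  instructions: 'Speak in a calm, measured tone.',
  speed: 1.1,
};
```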
@@ -213,91 +198,153 @@ declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
  readonly supportedUrls: Record<string, RegExp[]>;
  get provider(): string;
  private getArgs;
- doGenerate(options: Parameters<LanguageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doGenerate']>>>;
- doStream(options: Parameters<LanguageModelV3['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doStream']>>>;
+ doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
+ doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
  }
- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
- include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
- "file_search_call.results": "file_search_call.results";
- "message.output_text.logprobs": "message.output_text.logprobs";
- "reasoning.encrypted_content": "reasoning.encrypted_content";
- }>>>>;
- instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
- maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
- metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
- parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
- auto: "auto";
- flex: "flex";
- priority: "priority";
- }>>>;
- store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
- low: "low";
- medium: "medium";
- high: "high";
- }>>>;
- user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, z.core.$strip>;
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
- declare const openaiResponsesTextUIPartProviderMetadataSchema: z.ZodObject<{
- openai: z.ZodObject<{
- itemId: z.ZodString;
- annotations: z.ZodArray<z.ZodDiscriminatedUnion<[z.ZodObject<{
- type: z.ZodLiteral<"url_citation">;
- url: z.ZodString;
- title: z.ZodString;
- }, z.core.$strip>, z.ZodObject<{
- type: z.ZodLiteral<"file_citation">;
- file_id: z.ZodString;
- filename: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- index: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
- start_index: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
- end_index: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
- quote: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, z.core.$strip>, z.ZodObject<{
- type: z.ZodLiteral<"container_file_citation">;
- container_id: z.ZodString;
- end_index: z.ZodNumber;
- file_id: z.ZodString;
- filename: z.ZodString;
- start_index: z.ZodNumber;
- }, z.core.$strip>]>>;
- }, z.core.$strip>;
- }, z.core.$strip>;
- declare const openaiSourceExecutionFileProviderMetadataSchema: z.ZodObject<{
- openai: z.ZodObject<{
- containerId: z.ZodString;
- fileId: z.ZodString;
- filename: z.ZodString;
- }, z.core.$strip>;
- }, z.core.$strip>;
 
- declare const codeInterpreterInputSchema: z.ZodObject<{
- code: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- containerId: z.ZodString;
- }, z.core.$strip>;
- declare const codeInterpreterOutputSchema: z.ZodObject<{
- outputs: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodDiscriminatedUnion<[z.ZodObject<{
- type: z.ZodLiteral<"logs">;
- logs: z.ZodString;
- }, z.core.$strip>, z.ZodObject<{
- type: z.ZodLiteral<"image">;
- url: z.ZodString;
- }, z.core.$strip>]>>>>;
- }, z.core.$strip>;
- declare const codeInterpreterArgsSchema: z.ZodObject<{
- container: z.ZodOptional<z.ZodUnion<readonly [z.ZodString, z.ZodObject<{
- fileIds: z.ZodOptional<z.ZodArray<z.ZodString>>;
- }, z.core.$strip>]>>;
- }, z.core.$strip>;
+ /**
+ * Schema for the apply_patch input - what the model sends.
+ *
+ * Refer the official spec here: https://platform.openai.com/docs/api-reference/responses/create#responses_create-input-input_item_list-item-apply_patch_tool_call
+ *
+ */
+ declare const applyPatchInputSchema: _zenning_provider_utils.LazySchema<{
+ callId: string;
+ operation: {
+ type: "create_file";
+ path: string;
+ diff: string;
+ } | {
+ type: "delete_file";
+ path: string;
+ } | {
+ type: "update_file";
+ path: string;
+ diff: string;
+ };
+ }>;
+ /**
+ * Schema for the apply_patch output - what we send back.
+ */
+ declare const applyPatchOutputSchema: _zenning_provider_utils.LazySchema<{
+ status: "completed" | "failed";
+ output?: string | undefined;
+ }>;
+ /**
+ * Schema for tool arguments (configuration options).
+ * The apply_patch tool doesn't require any configuration options.
+ */
+ declare const applyPatchArgsSchema: _zenning_provider_utils.LazySchema<Record<string, never>>;
+ /**
+ * Type definitions for the apply_patch operations.
+ */
+ type ApplyPatchOperation = {
+ type: 'create_file';
+ /**
+ * Path of the file to create relative to the workspace root.
+ */
+ path: string;
+ /**
+ * Unified diff content to apply when creating the file.
+ */
+ diff: string;
+ } | {
+ type: 'delete_file';
+ /**
+ * Path of the file to delete relative to the workspace root.
+ */
+ path: string;
+ } | {
+ type: 'update_file';
+ /**
+ * Path of the file to update relative to the workspace root.
+ */
+ path: string;
+ /**
+ * Unified diff content to apply to the existing file.
+ */
+ diff: string;
+ };
+ /**
+ * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
+ * codebase using structured diffs. Instead of just suggesting edits, the model
+ * emits patch operations that your application applies and then reports back on,
+ * enabling iterative, multi-step code editing workflows.
+ *
+ * The tool factory creates a provider-defined tool that:
+ * - Receives patch operations from the model (create_file, update_file, delete_file)
+ * - Returns the status of applying those patches (completed or failed)
+ *
+ */
+ declare const applyPatchToolFactory: _zenning_provider_utils.ProviderToolFactoryWithOutputSchema<{
+ /**
+ * The unique ID of the apply patch tool call generated by the model.
+ */
+ callId: string;
+ /**
+ * The specific create, delete, or update instruction for the apply_patch tool call.
+ */
+ operation: ApplyPatchOperation;
+ }, {
+ /**
+ * The status of the apply patch tool call output.
+ * - 'completed': The patch was applied successfully.
+ * - 'failed': The patch failed to apply.
+ */
+ status: "completed" | "failed";
+ /**
+ * Optional human-readable log text from the apply patch tool
+ * (e.g., patch results or errors).
+ */
+ output?: string;
+ }, {}>;
+ /**
+ * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
+ * codebase using structured diffs. Instead of just suggesting edits, the model
+ * emits patch operations that your application applies and then reports back on,
+ * enabling iterative, multi-step code editing workflows.
+ */
+ declare const applyPatch: _zenning_provider_utils.ProviderToolFactoryWithOutputSchema<{
+ /**
+ * The unique ID of the apply patch tool call generated by the model.
+ */
+ callId: string;
+ /**
+ * The specific create, delete, or update instruction for the apply_patch tool call.
+ */
+ operation: ApplyPatchOperation;
+ }, {
+ /**
+ * The status of the apply patch tool call output.
+ * - 'completed': The patch was applied successfully.
+ * - 'failed': The patch failed to apply.
+ */
+ status: "completed" | "failed";
+ /**
+ * Optional human-readable log text from the apply patch tool
+ * (e.g., patch results or errors).
+ */
+ output?: string;
+ }, {}>;
+
+ declare const codeInterpreterInputSchema: _zenning_provider_utils.LazySchema<{
+ containerId: string;
+ code?: string | null | undefined;
+ }>;
+ declare const codeInterpreterOutputSchema: _zenning_provider_utils.LazySchema<{
+ outputs?: ({
+ type: "logs";
+ logs: string;
+ } | {
+ type: "image";
+ url: string;
+ })[] | null | undefined;
+ }>;
+ declare const codeInterpreterArgsSchema: _zenning_provider_utils.LazySchema<{
+ container?: string | {
+ fileIds?: string[] | undefined;
+ } | undefined;
+ }>;
  type CodeInterpreterArgs = {
  /**
  * The code interpreter container.
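
The declarations above fix the apply_patch contract: the model emits `create_file`, `update_file`, or `delete_file` operations, and the application replies with `completed` or `failed` plus optional log output. A minimal sketch of the application-side handler; `applyUnifiedDiff` and `removeFile` are hypothetical helpers, and only `ApplyPatchOperation` and the output shape come from the package:

```ts
import type { ApplyPatchOperation } from '@zenning/openai';

// Hypothetical filesystem helpers; not part of @zenning/openai.
declare function applyUnifiedDiff(path: string, diff: string): Promise<void>;
declare function removeFile(path: string): Promise<void>;

// Return type mirrors applyPatchOutputSchema.
async function handleApplyPatch(
  operation: ApplyPatchOperation,
): Promise<{ status: 'completed' | 'failed'; output?: string }> {
  try {
    switch (operation.type) {
      case 'create_file':
      case 'update_file':
        // Both variants carry a unified diff to apply at `path`.
        await applyUnifiedDiff(operation.path, operation.diff);
        break;
      case 'delete_file':
        await removeFile(operation.path);
        break;
    }
    return { status: 'completed', output: `${operation.type}: ${operation.path}` };
  } catch (error) {
    return { status: 'failed', output: String(error) };
  }
}
```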
@@ -308,7 +355,7 @@ type CodeInterpreterArgs = {
  fileIds?: string[];
  };
  };
- declare const codeInterpreterToolFactory: _zenning_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
+ declare const codeInterpreterToolFactory: _zenning_provider_utils.ProviderToolFactoryWithOutputSchema<{
  /**
  * The code to run, or null if not available.
  */
@@ -374,13 +421,13 @@ type OpenAIResponsesFileSearchToolComparisonFilter = {
  */
  key: string;
  /**
- * Specifies the comparison operator: eq, ne, gt, gte, lt, lte.
+ * Specifies the comparison operator: eq, ne, gt, gte, lt, lte, in, nin.
  */
- type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
+ type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte' | 'in' | 'nin';
  /**
- * The value to compare against the attribute key; supports string, number, or boolean types.
+ * The value to compare against the attribute key; supports string, number, boolean, or array of string types.
  */
- value: string | number | boolean;
+ value: string | number | boolean | string[];
  };
  /**
  * Combine multiple filters using and or or.
@@ -396,37 +443,26 @@ type OpenAIResponsesFileSearchToolCompoundFilter = {
  filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>;
  };
 
- declare const fileSearchArgsSchema: z.ZodObject<{
- vectorStoreIds: z.ZodArray<z.ZodString>;
- maxNumResults: z.ZodOptional<z.ZodNumber>;
- ranking: z.ZodOptional<z.ZodObject<{
- ranker: z.ZodOptional<z.ZodString>;
- scoreThreshold: z.ZodOptional<z.ZodNumber>;
- }, z.core.$strip>>;
- filters: z.ZodOptional<z.ZodUnion<readonly [z.ZodObject<{
- key: z.ZodString;
- type: z.ZodEnum<{
- lt: "lt";
- ne: "ne";
- eq: "eq";
- gt: "gt";
- gte: "gte";
- lte: "lte";
- }>;
- value: z.ZodUnion<readonly [z.ZodString, z.ZodNumber, z.ZodBoolean]>;
- }, z.core.$strip>, z.ZodType<any, unknown, z.core.$ZodTypeInternals<any, unknown>>]>>;
- }, z.core.$strip>;
- declare const fileSearchOutputSchema: z.ZodObject<{
- queries: z.ZodArray<z.ZodString>;
- results: z.ZodNullable<z.ZodArray<z.ZodObject<{
- attributes: z.ZodRecord<z.ZodString, z.ZodUnknown>;
- fileId: z.ZodString;
- filename: z.ZodString;
- score: z.ZodNumber;
- text: z.ZodString;
- }, z.core.$strip>>>;
- }, z.core.$strip>;
- declare const fileSearch: _zenning_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{}, {
+ declare const fileSearchArgsSchema: _zenning_provider_utils.LazySchema<{
+ vectorStoreIds: string[];
+ maxNumResults?: number | undefined;
+ ranking?: {
+ ranker?: string | undefined;
+ scoreThreshold?: number | undefined;
+ } | undefined;
+ filters?: any;
+ }>;
+ declare const fileSearchOutputSchema: _zenning_provider_utils.LazySchema<{
+ queries: string[];
+ results: {
+ attributes: Record<string, unknown>;
+ fileId: string;
+ filename: string;
+ score: number;
+ text: string;
+ }[] | null;
+ }>;
+ declare const fileSearch: _zenning_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
  /**
  * The search query to execute. If not provided, the tool will use the conversation context to determine the query.
  */
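
With the widened comparison filter, the `in` and `nin` operators accept `string[]` values. A sketch of an args object matching `fileSearchArgsSchema` above; note that the declared `filters` field is loosely typed as `any` in 3.x, so the shape shown follows the documented filter types rather than being compiler-enforced:

```ts
// Matches fileSearchArgsSchema; 'in' with a string[] value is new in 3.x.
const fileSearchArgs = {
  vectorStoreIds: ['vs_abc123'],
  maxNumResults: 8,
  filters: {
    type: 'in' as const,
    key: 'region',
    value: ['us-east', 'eu-west'],
  },
};
```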
@@ -490,47 +526,24 @@ declare const fileSearch: _zenning_provider_utils.ProviderDefinedToolFactoryWith
  filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
  }>;
 
- declare const imageGenerationArgsSchema: z.ZodObject<{
- background: z.ZodOptional<z.ZodEnum<{
- auto: "auto";
- opaque: "opaque";
- transparent: "transparent";
- }>>;
- inputFidelity: z.ZodOptional<z.ZodEnum<{
- low: "low";
- high: "high";
- }>>;
- inputImageMask: z.ZodOptional<z.ZodObject<{
- fileId: z.ZodOptional<z.ZodString>;
- imageUrl: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>>;
- model: z.ZodOptional<z.ZodString>;
- moderation: z.ZodOptional<z.ZodEnum<{
- auto: "auto";
- }>>;
- outputCompression: z.ZodOptional<z.ZodNumber>;
- outputFormat: z.ZodOptional<z.ZodEnum<{
- png: "png";
- jpeg: "jpeg";
- webp: "webp";
- }>>;
- partialImages: z.ZodOptional<z.ZodNumber>;
- quality: z.ZodOptional<z.ZodEnum<{
- low: "low";
- medium: "medium";
- high: "high";
- auto: "auto";
- }>>;
- size: z.ZodOptional<z.ZodEnum<{
- auto: "auto";
- "1024x1024": "1024x1024";
- "1024x1536": "1024x1536";
- "1536x1024": "1536x1024";
- }>>;
- }, z.core.$strict>;
- declare const imageGenerationOutputSchema: z.ZodObject<{
- result: z.ZodString;
- }, z.core.$strip>;
+ declare const imageGenerationArgsSchema: _zenning_provider_utils.LazySchema<{
+ background?: "auto" | "transparent" | "opaque" | undefined;
+ inputFidelity?: "low" | "high" | undefined;
+ inputImageMask?: {
+ fileId?: string | undefined;
+ imageUrl?: string | undefined;
+ } | undefined;
+ model?: string | undefined;
+ moderation?: "auto" | undefined;
+ outputCompression?: number | undefined;
+ outputFormat?: "png" | "jpeg" | "webp" | undefined;
+ partialImages?: number | undefined;
+ quality?: "auto" | "low" | "medium" | "high" | undefined;
+ size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024" | undefined;
+ }>;
+ declare const imageGenerationOutputSchema: _zenning_provider_utils.LazySchema<{
+ result: string;
+ }>;
  type ImageGenerationArgs = {
  /**
  * Background type for the generated image. Default is 'auto'.
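
The next hunk header shows that `imageGeneration` is a plain factory, `(args?: ImageGenerationArgs) => ...`, so a usage sketch is straightforward (only the import path is assumed):

```ts
import { imageGeneration } from '@zenning/openai';

// Args correspond to imageGenerationArgsSchema above.
const imageTool = imageGeneration({
  size: '1024x1536',
  quality: 'high',
  outputFormat: 'webp',
  background: 'transparent',
});
```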
@@ -594,4 +607,87 @@ declare const imageGeneration: (args?: ImageGenerationArgs) => _zenning_provider
  result: string;
  }>;
 
- export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiResponsesTextUIPartProviderMetadataSchema, openaiSourceExecutionFileProviderMetadataSchema };
+ declare const webSearchPreviewArgsSchema: _zenning_provider_utils.LazySchema<{
+ searchContextSize?: "low" | "medium" | "high" | undefined;
+ userLocation?: {
+ type: "approximate";
+ country?: string | undefined;
+ city?: string | undefined;
+ region?: string | undefined;
+ timezone?: string | undefined;
+ } | undefined;
+ }>;
+ declare const webSearchPreviewInputSchema: _zenning_provider_utils.LazySchema<Record<string, never>>;
+ declare const webSearchPreview: _zenning_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
+ /**
+ * An object describing the specific action taken in this web search call.
+ * Includes details on how the model used the web (search, open_page, find_in_page).
+ */
+ action: {
+ /**
+ * Action type "search" - Performs a web search query.
+ */
+ type: "search";
+ /**
+ * The search query.
+ */
+ query?: string;
+ } | {
+ /**
+ * Action type "openPage" - Opens a specific URL from search results.
+ */
+ type: "openPage";
+ /**
+ * The URL opened by the model.
+ */
+ url?: string | null;
+ } | {
+ /**
+ * Action type "findInPage": Searches for a pattern within a loaded page.
+ */
+ type: "findInPage";
+ /**
+ * The URL of the page searched for the pattern.
+ */
+ url?: string | null;
+ /**
+ * The pattern or text to search for within the page.
+ */
+ pattern?: string | null;
+ };
+ }, {
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
+ searchContextSize?: "low" | "medium" | "high";
+ /**
+ * User location information to provide geographically relevant search results.
+ */
+ userLocation?: {
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: "approximate";
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
+ country?: string;
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
+ city?: string;
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
+ region?: string;
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
+ timezone?: string;
+ };
+ }>;
+
+ export { type ApplyPatchOperation, OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, applyPatch, applyPatchArgsSchema, applyPatchInputSchema, applyPatchOutputSchema, applyPatchToolFactory, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiSpeechProviderOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };
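
The new `webSearchPreview` tool ships with its args and input schemas and reports the action the model took (`search`, `openPage`, `findInPage`). A sketch of an args object matching `webSearchPreviewArgsSchema`; whether the factory is invoked with this object exactly the way `imageGeneration(args)` is remains an assumption:

```ts
// Matches webSearchPreviewArgsSchema.
const webSearchArgs = {
  searchContextSize: 'medium' as const,
  userLocation: {
    type: 'approximate' as const,
    country: 'US',
    city: 'Minneapolis',
    region: 'Minnesota',
    timezone: 'America/Chicago',
  },
};
```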