@luketandjung/dedalus-labs 1.0.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/dist/index.d.cts +3104 -0
  2. package/dist/index.d.ts +2665 -553
  3. package/package.json +16 -20
@@ -0,0 +1,3104 @@
1
+ declare namespace exports_Generated {
2
+ export { make, ValidationError, UrlCitation, TopLogprob, ToolChoiceEnum, ToolChoice, ThinkingConfigEnabled, ThinkingConfigDisabled, ReasoningSummaryEnum, ReasoningGenerateSummaryEnum, ReasoningEffortEnum, Reasoning, QueryParams, PromptTokensDetails, Models2 as Models, ModelSettingsTruncationEnum, ModelSettings, ModelId, MCPToolChoice, HeaderParams, HTTPValidationError, FunctionCall, Function, Embedding, DedalusModelChoice2 as DedalusModelChoice, DedalusModel, Custom, CreateEmbeddingResponse2 as CreateEmbeddingResponse, CreateEmbeddingRequestModelEnum2 as CreateEmbeddingRequestModelEnum, CreateEmbeddingRequestEncodingFormat, CreateEmbeddingRequest2 as CreateEmbeddingRequest, CompletionUsage2 as CompletionUsage, CompletionTokensDetails, Client2 as Client, ChoiceLogprobs2 as ChoiceLogprobs, ChoiceFinishReasonEnum2 as ChoiceFinishReasonEnum, Choice, ChatCompletionTokenLogprob, ChatCompletionServiceTierEnum2 as ChatCompletionServiceTierEnum, ChatCompletionResponseMessage, ChatCompletionRequestVerbosityEnum, ChatCompletionRequestServiceTierEnum, ChatCompletionRequestReasoningEffortEnum, ChatCompletionRequest2 as ChatCompletionRequest, ChatCompletionMessageToolCall, ChatCompletionMessageCustomToolCall, ChatCompletion2 as ChatCompletion, Audio, AnnotationsItem };
3
+ }
4
+ import * as HttpClient from "@effect/platform/HttpClient";
5
+ import * as HttpClientError from "@effect/platform/HttpClientError";
6
+ import { AiError } from "@luketandjung/ariadne";
7
+ import * as Effect from "effect/Effect";
8
+ import { ParseError } from "effect/ParseResult";
9
+ import * as S from "effect/Schema";
10
+ /**
11
+ * Model identifier string (e.g., 'openai/gpt-5', 'anthropic/claude-3-5-sonnet').
12
+ */
13
+ declare class ModelId extends S.String {}
14
+ declare const ToolChoiceEnum_base: S.Literal<["auto", "required", "none"]>;
15
+ declare class ToolChoiceEnum extends ToolChoiceEnum_base {}
16
+ declare const MCPToolChoice_base: S.Class<MCPToolChoice, {
17
+ server_label: typeof S.String;
18
+ name: typeof S.String;
19
+ }, S.Struct.Encoded<{
20
+ server_label: typeof S.String;
21
+ name: typeof S.String;
22
+ }>, never, {
23
+ readonly server_label: string;
24
+ } & {
25
+ readonly name: string;
26
+ }, {}, {}>;
27
+ declare class MCPToolChoice extends MCPToolChoice_base {}
28
+ declare const ToolChoice_base: S.Union<[typeof ToolChoiceEnum, typeof S.String, S.Record$<typeof S.String, typeof S.Unknown>, typeof MCPToolChoice, typeof S.Null]>;
29
+ declare class ToolChoice extends ToolChoice_base {}
30
+ declare const ModelSettingsTruncationEnum_base: S.Literal<["auto", "disabled"]>;
31
+ declare class ModelSettingsTruncationEnum extends ModelSettingsTruncationEnum_base {}
32
+ declare const ReasoningEffortEnum_base: S.Literal<["minimal", "low", "medium", "high"]>;
33
+ declare class ReasoningEffortEnum extends ReasoningEffortEnum_base {}
34
+ declare const ReasoningGenerateSummaryEnum_base: S.Literal<["auto", "concise", "detailed"]>;
35
+ declare class ReasoningGenerateSummaryEnum extends ReasoningGenerateSummaryEnum_base {}
36
+ declare const ReasoningSummaryEnum_base: S.Literal<["auto", "concise", "detailed"]>;
37
+ declare class ReasoningSummaryEnum extends ReasoningSummaryEnum_base {}
38
+ declare const Reasoning_base: S.Class<Reasoning, {
39
+ effort: S.optionalWith<typeof ReasoningEffortEnum, {
40
+ nullable: true;
41
+ }>;
42
+ generate_summary: S.optionalWith<typeof ReasoningGenerateSummaryEnum, {
43
+ nullable: true;
44
+ }>;
45
+ summary: S.optionalWith<typeof ReasoningSummaryEnum, {
46
+ nullable: true;
47
+ }>;
48
+ }, S.Struct.Encoded<{
49
+ effort: S.optionalWith<typeof ReasoningEffortEnum, {
50
+ nullable: true;
51
+ }>;
52
+ generate_summary: S.optionalWith<typeof ReasoningGenerateSummaryEnum, {
53
+ nullable: true;
54
+ }>;
55
+ summary: S.optionalWith<typeof ReasoningSummaryEnum, {
56
+ nullable: true;
57
+ }>;
58
+ }>, never, {
59
+ readonly effort?: "minimal" | "low" | "medium" | "high" | undefined;
60
+ } & {
61
+ readonly generate_summary?: "auto" | "concise" | "detailed" | undefined;
62
+ } & {
63
+ readonly summary?: "auto" | "concise" | "detailed" | undefined;
64
+ }, {}, {}>;
65
+ declare class Reasoning extends Reasoning_base {}
66
+ declare const QueryParams_base: S.Record$<typeof S.String, typeof S.Unknown>;
67
+ declare class QueryParams extends QueryParams_base {}
68
+ declare const HeaderParams_base: S.Record$<typeof S.String, typeof S.Unknown>;
69
+ declare class HeaderParams extends HeaderParams_base {}
70
+ declare const ModelSettings_base: S.Class<ModelSettings, {
71
+ temperature: S.optionalWith<typeof S.Number, {
72
+ nullable: true;
73
+ }>;
74
+ top_p: S.optionalWith<typeof S.Number, {
75
+ nullable: true;
76
+ }>;
77
+ frequency_penalty: S.optionalWith<typeof S.Number, {
78
+ nullable: true;
79
+ }>;
80
+ presence_penalty: S.optionalWith<typeof S.Number, {
81
+ nullable: true;
82
+ }>;
83
+ stop: S.optionalWith<S.Union<[typeof S.String, S.Array$<typeof S.String>]>, {
84
+ nullable: true;
85
+ }>;
86
+ seed: S.optionalWith<typeof S.Int, {
87
+ nullable: true;
88
+ }>;
89
+ logit_bias: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
90
+ nullable: true;
91
+ }>;
92
+ logprobs: S.optionalWith<typeof S.Boolean, {
93
+ nullable: true;
94
+ }>;
95
+ top_logprobs: S.optionalWith<typeof S.Int, {
96
+ nullable: true;
97
+ }>;
98
+ n: S.optionalWith<typeof S.Int, {
99
+ nullable: true;
100
+ }>;
101
+ user: S.optionalWith<typeof S.String, {
102
+ nullable: true;
103
+ }>;
104
+ response_format: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
105
+ nullable: true;
106
+ }>;
107
+ stream: S.optionalWith<typeof S.Boolean, {
108
+ nullable: true;
109
+ }>;
110
+ stream_options: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
111
+ nullable: true;
112
+ }>;
113
+ audio: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
114
+ nullable: true;
115
+ }>;
116
+ service_tier: S.optionalWith<typeof S.String, {
117
+ nullable: true;
118
+ }>;
119
+ prediction: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
120
+ nullable: true;
121
+ }>;
122
+ tool_choice: S.optionalWith<typeof ToolChoice, {
123
+ nullable: true;
124
+ }>;
125
+ parallel_tool_calls: S.optionalWith<typeof S.Boolean, {
126
+ nullable: true;
127
+ }>;
128
+ truncation: S.optionalWith<typeof ModelSettingsTruncationEnum, {
129
+ nullable: true;
130
+ }>;
131
+ max_tokens: S.optionalWith<typeof S.Int, {
132
+ nullable: true;
133
+ }>;
134
+ max_completion_tokens: S.optionalWith<typeof S.Int, {
135
+ nullable: true;
136
+ }>;
137
+ reasoning: S.optionalWith<typeof Reasoning, {
138
+ nullable: true;
139
+ }>;
140
+ reasoning_effort: S.optionalWith<typeof S.String, {
141
+ nullable: true;
142
+ }>;
143
+ metadata: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
144
+ nullable: true;
145
+ }>;
146
+ store: S.optionalWith<typeof S.Boolean, {
147
+ nullable: true;
148
+ }>;
149
+ include_usage: S.optionalWith<typeof S.Boolean, {
150
+ nullable: true;
151
+ }>;
152
+ timeout: S.optionalWith<typeof S.Number, {
153
+ nullable: true;
154
+ }>;
155
+ prompt_cache_key: S.optionalWith<typeof S.String, {
156
+ nullable: true;
157
+ }>;
158
+ safety_identifier: S.optionalWith<typeof S.String, {
159
+ nullable: true;
160
+ }>;
161
+ verbosity: S.optionalWith<typeof S.String, {
162
+ nullable: true;
163
+ }>;
164
+ web_search_options: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
165
+ nullable: true;
166
+ }>;
167
+ response_include: S.optionalWith<S.Array$<S.Literal<["code_interpreter_call.outputs", "computer_call_output.output.image_url", "file_search_call.results", "message.input_image.image_url", "message.output_text.logprobs", "reasoning.encrypted_content"]>>, {
168
+ nullable: true;
169
+ }>;
170
+ use_responses: S.optionalWith<typeof S.Boolean, {
171
+ nullable: true;
172
+ default: () => false;
173
+ }>;
174
+ extra_query: S.optionalWith<typeof QueryParams, {
175
+ nullable: true;
176
+ }>;
177
+ extra_headers: S.optionalWith<typeof HeaderParams, {
178
+ nullable: true;
179
+ }>;
180
+ extra_args: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
181
+ nullable: true;
182
+ }>;
183
+ attributes: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
184
+ nullable: true;
185
+ }>;
186
+ voice: S.optionalWith<typeof S.String, {
187
+ nullable: true;
188
+ }>;
189
+ modalities: S.optionalWith<S.Array$<typeof S.String>, {
190
+ nullable: true;
191
+ }>;
192
+ input_audio_format: S.optionalWith<typeof S.String, {
193
+ nullable: true;
194
+ }>;
195
+ output_audio_format: S.optionalWith<typeof S.String, {
196
+ nullable: true;
197
+ }>;
198
+ input_audio_transcription: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
199
+ nullable: true;
200
+ }>;
201
+ turn_detection: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
202
+ nullable: true;
203
+ }>;
204
+ thinking: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
205
+ nullable: true;
206
+ }>;
207
+ top_k: S.optionalWith<typeof S.Int, {
208
+ nullable: true;
209
+ }>;
210
+ generation_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
211
+ nullable: true;
212
+ }>;
213
+ system_instruction: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
214
+ nullable: true;
215
+ }>;
216
+ safety_settings: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
217
+ nullable: true;
218
+ }>;
219
+ tool_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
220
+ nullable: true;
221
+ }>;
222
+ disable_automatic_function_calling: S.optionalWith<typeof S.Boolean, {
223
+ nullable: true;
224
+ default: () => true;
225
+ }>;
226
+ search_parameters: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
227
+ nullable: true;
228
+ }>;
229
+ deferred: S.optionalWith<typeof S.Boolean, {
230
+ nullable: true;
231
+ }>;
232
+ }, S.Struct.Encoded<{
233
+ temperature: S.optionalWith<typeof S.Number, {
234
+ nullable: true;
235
+ }>;
236
+ top_p: S.optionalWith<typeof S.Number, {
237
+ nullable: true;
238
+ }>;
239
+ frequency_penalty: S.optionalWith<typeof S.Number, {
240
+ nullable: true;
241
+ }>;
242
+ presence_penalty: S.optionalWith<typeof S.Number, {
243
+ nullable: true;
244
+ }>;
245
+ stop: S.optionalWith<S.Union<[typeof S.String, S.Array$<typeof S.String>]>, {
246
+ nullable: true;
247
+ }>;
248
+ seed: S.optionalWith<typeof S.Int, {
249
+ nullable: true;
250
+ }>;
251
+ logit_bias: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
252
+ nullable: true;
253
+ }>;
254
+ logprobs: S.optionalWith<typeof S.Boolean, {
255
+ nullable: true;
256
+ }>;
257
+ top_logprobs: S.optionalWith<typeof S.Int, {
258
+ nullable: true;
259
+ }>;
260
+ n: S.optionalWith<typeof S.Int, {
261
+ nullable: true;
262
+ }>;
263
+ user: S.optionalWith<typeof S.String, {
264
+ nullable: true;
265
+ }>;
266
+ response_format: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
267
+ nullable: true;
268
+ }>;
269
+ stream: S.optionalWith<typeof S.Boolean, {
270
+ nullable: true;
271
+ }>;
272
+ stream_options: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
273
+ nullable: true;
274
+ }>;
275
+ audio: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
276
+ nullable: true;
277
+ }>;
278
+ service_tier: S.optionalWith<typeof S.String, {
279
+ nullable: true;
280
+ }>;
281
+ prediction: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
282
+ nullable: true;
283
+ }>;
284
+ tool_choice: S.optionalWith<typeof ToolChoice, {
285
+ nullable: true;
286
+ }>;
287
+ parallel_tool_calls: S.optionalWith<typeof S.Boolean, {
288
+ nullable: true;
289
+ }>;
290
+ truncation: S.optionalWith<typeof ModelSettingsTruncationEnum, {
291
+ nullable: true;
292
+ }>;
293
+ max_tokens: S.optionalWith<typeof S.Int, {
294
+ nullable: true;
295
+ }>;
296
+ max_completion_tokens: S.optionalWith<typeof S.Int, {
297
+ nullable: true;
298
+ }>;
299
+ reasoning: S.optionalWith<typeof Reasoning, {
300
+ nullable: true;
301
+ }>;
302
+ reasoning_effort: S.optionalWith<typeof S.String, {
303
+ nullable: true;
304
+ }>;
305
+ metadata: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
306
+ nullable: true;
307
+ }>;
308
+ store: S.optionalWith<typeof S.Boolean, {
309
+ nullable: true;
310
+ }>;
311
+ include_usage: S.optionalWith<typeof S.Boolean, {
312
+ nullable: true;
313
+ }>;
314
+ timeout: S.optionalWith<typeof S.Number, {
315
+ nullable: true;
316
+ }>;
317
+ prompt_cache_key: S.optionalWith<typeof S.String, {
318
+ nullable: true;
319
+ }>;
320
+ safety_identifier: S.optionalWith<typeof S.String, {
321
+ nullable: true;
322
+ }>;
323
+ verbosity: S.optionalWith<typeof S.String, {
324
+ nullable: true;
325
+ }>;
326
+ web_search_options: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
327
+ nullable: true;
328
+ }>;
329
+ response_include: S.optionalWith<S.Array$<S.Literal<["code_interpreter_call.outputs", "computer_call_output.output.image_url", "file_search_call.results", "message.input_image.image_url", "message.output_text.logprobs", "reasoning.encrypted_content"]>>, {
330
+ nullable: true;
331
+ }>;
332
+ use_responses: S.optionalWith<typeof S.Boolean, {
333
+ nullable: true;
334
+ default: () => false;
335
+ }>;
336
+ extra_query: S.optionalWith<typeof QueryParams, {
337
+ nullable: true;
338
+ }>;
339
+ extra_headers: S.optionalWith<typeof HeaderParams, {
340
+ nullable: true;
341
+ }>;
342
+ extra_args: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
343
+ nullable: true;
344
+ }>;
345
+ attributes: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
346
+ nullable: true;
347
+ }>;
348
+ voice: S.optionalWith<typeof S.String, {
349
+ nullable: true;
350
+ }>;
351
+ modalities: S.optionalWith<S.Array$<typeof S.String>, {
352
+ nullable: true;
353
+ }>;
354
+ input_audio_format: S.optionalWith<typeof S.String, {
355
+ nullable: true;
356
+ }>;
357
+ output_audio_format: S.optionalWith<typeof S.String, {
358
+ nullable: true;
359
+ }>;
360
+ input_audio_transcription: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
361
+ nullable: true;
362
+ }>;
363
+ turn_detection: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
364
+ nullable: true;
365
+ }>;
366
+ thinking: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
367
+ nullable: true;
368
+ }>;
369
+ top_k: S.optionalWith<typeof S.Int, {
370
+ nullable: true;
371
+ }>;
372
+ generation_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
373
+ nullable: true;
374
+ }>;
375
+ system_instruction: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
376
+ nullable: true;
377
+ }>;
378
+ safety_settings: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
379
+ nullable: true;
380
+ }>;
381
+ tool_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
382
+ nullable: true;
383
+ }>;
384
+ disable_automatic_function_calling: S.optionalWith<typeof S.Boolean, {
385
+ nullable: true;
386
+ default: () => true;
387
+ }>;
388
+ search_parameters: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
389
+ nullable: true;
390
+ }>;
391
+ deferred: S.optionalWith<typeof S.Boolean, {
392
+ nullable: true;
393
+ }>;
394
+ }>, never, {
395
+ readonly temperature?: number | undefined;
396
+ } & {
397
+ readonly top_p?: number | undefined;
398
+ } & {
399
+ readonly frequency_penalty?: number | undefined;
400
+ } & {
401
+ readonly presence_penalty?: number | undefined;
402
+ } & {
403
+ readonly stop?: string | readonly string[] | undefined;
404
+ } & {
405
+ readonly seed?: number | undefined;
406
+ } & {
407
+ readonly logit_bias?: {
408
+ readonly [x: string]: unknown;
409
+ } | undefined;
410
+ } & {
411
+ readonly logprobs?: boolean | undefined;
412
+ } & {
413
+ readonly top_logprobs?: number | undefined;
414
+ } & {
415
+ readonly n?: number | undefined;
416
+ } & {
417
+ readonly user?: string | undefined;
418
+ } & {
419
+ readonly response_format?: {
420
+ readonly [x: string]: unknown;
421
+ } | undefined;
422
+ } & {
423
+ readonly stream?: boolean | undefined;
424
+ } & {
425
+ readonly stream_options?: {
426
+ readonly [x: string]: unknown;
427
+ } | undefined;
428
+ } & {
429
+ readonly audio?: {
430
+ readonly [x: string]: unknown;
431
+ } | undefined;
432
+ } & {
433
+ readonly service_tier?: string | undefined;
434
+ } & {
435
+ readonly prediction?: {
436
+ readonly [x: string]: unknown;
437
+ } | undefined;
438
+ } & {
439
+ readonly tool_choice?: string | MCPToolChoice | {
440
+ readonly [x: string]: unknown;
441
+ } | null | undefined;
442
+ } & {
443
+ readonly parallel_tool_calls?: boolean | undefined;
444
+ } & {
445
+ readonly truncation?: "auto" | "disabled" | undefined;
446
+ } & {
447
+ readonly max_tokens?: number | undefined;
448
+ } & {
449
+ readonly max_completion_tokens?: number | undefined;
450
+ } & {
451
+ readonly reasoning?: Reasoning | undefined;
452
+ } & {
453
+ readonly reasoning_effort?: string | undefined;
454
+ } & {
455
+ readonly metadata?: {
456
+ readonly [x: string]: unknown;
457
+ } | undefined;
458
+ } & {
459
+ readonly store?: boolean | undefined;
460
+ } & {
461
+ readonly include_usage?: boolean | undefined;
462
+ } & {
463
+ readonly timeout?: number | undefined;
464
+ } & {
465
+ readonly prompt_cache_key?: string | undefined;
466
+ } & {
467
+ readonly safety_identifier?: string | undefined;
468
+ } & {
469
+ readonly verbosity?: string | undefined;
470
+ } & {
471
+ readonly web_search_options?: {
472
+ readonly [x: string]: unknown;
473
+ } | undefined;
474
+ } & {
475
+ readonly response_include?: readonly ("code_interpreter_call.outputs" | "computer_call_output.output.image_url" | "file_search_call.results" | "message.input_image.image_url" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | undefined;
476
+ } & {
477
+ readonly use_responses?: boolean | undefined;
478
+ } & {
479
+ readonly extra_query?: {
480
+ readonly [x: string]: unknown;
481
+ } | undefined;
482
+ } & {
483
+ readonly extra_headers?: {
484
+ readonly [x: string]: unknown;
485
+ } | undefined;
486
+ } & {
487
+ readonly extra_args?: {
488
+ readonly [x: string]: unknown;
489
+ } | undefined;
490
+ } & {
491
+ readonly attributes?: {
492
+ readonly [x: string]: unknown;
493
+ } | undefined;
494
+ } & {
495
+ readonly voice?: string | undefined;
496
+ } & {
497
+ readonly modalities?: readonly string[] | undefined;
498
+ } & {
499
+ readonly input_audio_format?: string | undefined;
500
+ } & {
501
+ readonly output_audio_format?: string | undefined;
502
+ } & {
503
+ readonly input_audio_transcription?: {
504
+ readonly [x: string]: unknown;
505
+ } | undefined;
506
+ } & {
507
+ readonly turn_detection?: {
508
+ readonly [x: string]: unknown;
509
+ } | undefined;
510
+ } & {
511
+ readonly thinking?: {
512
+ readonly [x: string]: unknown;
513
+ } | undefined;
514
+ } & {
515
+ readonly top_k?: number | undefined;
516
+ } & {
517
+ readonly generation_config?: {
518
+ readonly [x: string]: unknown;
519
+ } | undefined;
520
+ } & {
521
+ readonly system_instruction?: {
522
+ readonly [x: string]: unknown;
523
+ } | undefined;
524
+ } & {
525
+ readonly safety_settings?: readonly {
526
+ readonly [x: string]: unknown;
527
+ }[] | undefined;
528
+ } & {
529
+ readonly tool_config?: {
530
+ readonly [x: string]: unknown;
531
+ } | undefined;
532
+ } & {
533
+ readonly disable_automatic_function_calling?: boolean | undefined;
534
+ } & {
535
+ readonly search_parameters?: {
536
+ readonly [x: string]: unknown;
537
+ } | undefined;
538
+ } & {
539
+ readonly deferred?: boolean | undefined;
540
+ }, {}, {}>;
541
+ declare class ModelSettings extends ModelSettings_base {}
542
+ declare const DedalusModel_base: S.Class<DedalusModel, {
543
+ /**
544
+ * Model identifier with provider prefix (e.g., 'openai/gpt-5', 'anthropic/claude-3-5-sonnet').
545
+ */
546
+ model: typeof S.String;
547
+ /**
548
+ * Optional default generation settings (e.g., temperature, max_tokens) applied when this model is selected.
549
+ */
550
+ settings: S.optionalWith<typeof ModelSettings, {
551
+ nullable: true;
552
+ }>;
553
+ }, S.Struct.Encoded<{
554
+ /**
555
+ * Model identifier with provider prefix (e.g., 'openai/gpt-5', 'anthropic/claude-3-5-sonnet').
556
+ */
557
+ model: typeof S.String;
558
+ /**
559
+ * Optional default generation settings (e.g., temperature, max_tokens) applied when this model is selected.
560
+ */
561
+ settings: S.optionalWith<typeof ModelSettings, {
562
+ nullable: true;
563
+ }>;
564
+ }>, never, {
565
+ readonly model: string;
566
+ } & {
567
+ readonly settings?: ModelSettings | undefined;
568
+ }, {}, {}>;
569
+ /**
570
+ * Structured model selection entry used in request payloads.
571
+ *
572
+ * Supports OpenAI-style semantics (string model id) while enabling
573
+ * optional per-model default settings for Dedalus multi-model routing.
574
+ */
575
+ declare class DedalusModel extends DedalusModel_base {}
576
+ declare const DedalusModelChoice_base: S.Union<[typeof ModelId, typeof DedalusModel]>;
577
+ /**
578
+ * Dedalus model choice - either a string ID or DedalusModel configuration object.
579
+ */
580
+ declare class DedalusModelChoice2 extends DedalusModelChoice_base {}
581
+ declare const Models_base: S.Array$<typeof DedalusModelChoice2>;
582
+ /**
583
+ * List of models for multi-model routing.
584
+ */
585
+ declare class Models2 extends Models_base {}
586
+ declare const ThinkingConfigDisabled_base: S.Class<ThinkingConfigDisabled, {
587
+ type: S.Literal<["disabled"]>;
588
+ }, S.Struct.Encoded<{
589
+ type: S.Literal<["disabled"]>;
590
+ }>, never, {
591
+ readonly type: "disabled";
592
+ }, {}, {}>;
593
+ /**
594
+ * Fields:
595
+ * - type (required): Literal['disabled']
596
+ */
597
+ declare class ThinkingConfigDisabled extends ThinkingConfigDisabled_base {}
598
+ declare const ThinkingConfigEnabled_base: S.Class<ThinkingConfigEnabled, {
599
+ /**
600
+ * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough analysis for complex problems, improving response quality.
601
+ *
602
+ * Must be ≥1024 and less than `max_tokens`.
603
+ *
604
+ * See [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) for details.
605
+ */
606
+ budget_tokens: S.filter<typeof S.Int>;
607
+ type: S.Literal<["enabled"]>;
608
+ }, S.Struct.Encoded<{
609
+ /**
610
+ * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough analysis for complex problems, improving response quality.
611
+ *
612
+ * Must be ≥1024 and less than `max_tokens`.
613
+ *
614
+ * See [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) for details.
615
+ */
616
+ budget_tokens: S.filter<typeof S.Int>;
617
+ type: S.Literal<["enabled"]>;
618
+ }>, never, {
619
+ readonly type: "enabled";
620
+ } & {
621
+ readonly budget_tokens: number;
622
+ }, {}, {}>;
623
+ /**
624
+ * Fields:
625
+ * - budget_tokens (required): int
626
+ * - type (required): Literal['enabled']
627
+ */
628
+ declare class ThinkingConfigEnabled extends ThinkingConfigEnabled_base {}
629
+ declare const ChatCompletionRequestReasoningEffortEnum_base: S.Literal<["low", "medium", "high"]>;
630
+ declare class ChatCompletionRequestReasoningEffortEnum extends ChatCompletionRequestReasoningEffortEnum_base {}
631
+ declare const ChatCompletionRequestServiceTierEnum_base: S.Literal<["auto", "default"]>;
632
+ declare class ChatCompletionRequestServiceTierEnum extends ChatCompletionRequestServiceTierEnum_base {}
633
+ declare const ChatCompletionRequestVerbosityEnum_base: S.Literal<["low", "medium", "high"]>;
634
+ declare class ChatCompletionRequestVerbosityEnum extends ChatCompletionRequestVerbosityEnum_base {}
635
+ declare const ChatCompletionRequest_base: S.Class<ChatCompletionRequest2, {
636
+ /**
637
+ * Model ID or list of model IDs for multi-model routing.
638
+ */
639
+ model: S.Union<[typeof DedalusModelChoice2, typeof Models2]>;
640
+ /**
641
+ * Conversation history. Accepts either a list of message objects or a string, which is treated as a single user message.
642
+ */
643
+ messages: S.Union<[S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, typeof S.String]>;
644
+ /**
645
+ * Convenience alias for Responses-style `input`. Used when `messages` is omitted to provide the user prompt directly.
646
+ */
647
+ input: S.optionalWith<S.Union<[S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, typeof S.String]>, {
648
+ nullable: true;
649
+ }>;
650
+ /**
651
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic. We generally recommend altering this or 'top_p' but not both.
652
+ */
653
+ temperature: S.optionalWith<S.filter<S.filter<typeof S.Number>>, {
654
+ nullable: true;
655
+ }>;
656
+ /**
657
+ * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or 'temperature' but not both.
658
+ */
659
+ top_p: S.optionalWith<S.filter<S.filter<typeof S.Number>>, {
660
+ nullable: true;
661
+ }>;
662
+ /**
663
+ * The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API. This value is now deprecated in favor of 'max_completion_tokens' and is not compatible with o-series models.
664
+ */
665
+ max_tokens: S.optionalWith<S.filter<typeof S.Int>, {
666
+ nullable: true;
667
+ }>;
668
+ /**
669
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
670
+ */
671
+ presence_penalty: S.optionalWith<S.filter<S.filter<typeof S.Number>>, {
672
+ nullable: true;
673
+ }>;
674
+ /**
675
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
676
+ */
677
+ frequency_penalty: S.optionalWith<S.filter<S.filter<typeof S.Number>>, {
678
+ nullable: true;
679
+ }>;
680
+ /**
681
+ * Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object mapping token IDs (as strings) to bias values from -100 to 100. The bias is added to the logits before sampling; values between -1 and 1 nudge selection probability, while values like -100 or 100 effectively ban or require a token.
682
+ */
683
+ logit_bias: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
684
+ nullable: true;
685
+ }>;
686
+ /**
687
+ * Not supported with latest reasoning models 'o3' and 'o4-mini'.
688
+ *
689
+ * Up to 4 sequences where the API will stop generating further tokens; the returned text will not contain the stop sequence.
690
+ */
691
+ stop: S.optionalWith<S.Array$<typeof S.String>, {
692
+ nullable: true;
693
+ }>;
694
+ /**
695
+ * Extended thinking configuration (Anthropic only). Set type to 'enabled' or 'disabled'. When enabled, shows reasoning process in thinking blocks. Requires min 1,024 token budget.
696
+ */
697
+ thinking: S.optionalWith<S.Union<[typeof ThinkingConfigDisabled, typeof ThinkingConfigEnabled]>, {
698
+ nullable: true;
699
+ }>;
700
+ /**
701
+ * Top-k sampling. Anthropic: pass-through. Google: injected into generationConfig.topK.
702
+ */
703
+ top_k: S.optionalWith<S.filter<typeof S.Int>, {
704
+ nullable: true;
705
+ }>;
706
+ /**
707
+ * System prompt/instructions. Anthropic: pass-through. Google: converted to systemInstruction. OpenAI: extracted from messages.
708
+ */
709
+ system: S.optionalWith<S.Union<[typeof S.String, S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>]>, {
710
+ nullable: true;
711
+ }>;
712
+ /**
713
+ * Convenience alias for Responses-style `instructions`. Takes precedence over `system` and over system-role messages when provided.
714
+ */
715
+ instructions: S.optionalWith<S.Union<[typeof S.String, S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>]>, {
716
+ nullable: true;
717
+ }>;
718
+ /**
719
+ * Google generationConfig object. Merged with auto-generated config. Use for Google-specific params (candidateCount, responseMimeType, etc.).
720
+ */
721
+ generation_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
722
+ nullable: true;
723
+ }>;
724
+ /**
725
+ * Google safety settings (harm categories and thresholds).
726
+ */
727
+ safety_settings: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
728
+ nullable: true;
729
+ }>;
730
+ /**
731
+ * Google tool configuration (function calling mode, etc.).
732
+ */
733
+ tool_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
734
+ nullable: true;
735
+ }>;
736
+ /**
737
+ * Google-only flag to disable the SDK's automatic function execution. When true, the model returns function calls for the client to execute manually.
738
+ */
739
+ disable_automatic_function_calling: S.optionalWith<typeof S.Boolean, {
740
+ nullable: true;
741
+ }>;
742
+ /**
743
+ * If specified, system will make a best effort to sample deterministically. Determinism is not guaranteed for the same seed across different models or API versions.
744
+ */
745
+ seed: S.optionalWith<typeof S.Int, {
746
+ nullable: true;
747
+ }>;
748
+ /**
749
+ * Stable identifier for your end-users. Helps OpenAI detect and prevent abuse and may boost cache hit rates. This field is being replaced by 'safety_identifier' and 'prompt_cache_key'.
750
+ */
751
+ user: S.optionalWith<typeof S.String, {
752
+ nullable: true;
753
+ }>;
754
+ /**
755
+ * How many chat completion choices to generate for each input message. Keep 'n' as 1 to minimize costs.
756
+ */
757
+ n: S.optionalWith<S.filter<S.filter<typeof S.Int>>, {
758
+ nullable: true;
759
+ }>;
760
+ /**
761
+ * If true, the model response data is streamed to the client as it is generated using Server-Sent Events.
762
+ */
763
+ stream: S.optionalWith<typeof S.Boolean, {
764
+ nullable: true;
765
+ default: () => false;
766
+ }>;
767
+ /**
768
+ * Options for streaming responses. Only set when 'stream' is true (supports 'include_usage' and 'include_obfuscation').
769
+ */
770
+ stream_options: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
771
+ nullable: true;
772
+ }>;
773
+ /**
774
+ * An object specifying the format that the model must output. Use {'type': 'json_schema', 'json_schema': {...}} for structured outputs or {'type': 'json_object'} for the legacy JSON mode. Currently only OpenAI-prefixed models honour this field; Anthropic and Google requests will return an invalid_request_error if it is supplied.
775
+ */
776
+ response_format: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
777
+ nullable: true;
778
+ }>;
779
+ /**
780
+ * A list of tools the model may call. Supports OpenAI function tools and custom tools; use 'mcp_servers' for Dedalus-managed server-side tools.
781
+ */
782
+ tools: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
783
+ nullable: true;
784
+ }>;
785
+ /**
786
+ * Controls which (if any) tool is called by the model. 'none' stops tool calling, 'auto' lets the model decide, and 'required' forces at least one tool invocation. Specific tool payloads force that tool.
787
+ */
788
+ tool_choice: S.optionalWith<S.Union<[typeof S.String, S.Record$<typeof S.String, typeof S.Unknown>]>, {
789
+ nullable: true;
790
+ }>;
791
+ /**
792
+ * Whether to enable parallel function calling during tool use.
793
+ */
794
+ parallel_tool_calls: S.optionalWith<typeof S.Boolean, {
795
+ nullable: true;
796
+ }>;
797
+ /**
798
+ * Deprecated in favor of 'tools'. Legacy list of function definitions the model may generate JSON inputs for.
799
+ */
800
+ functions: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
801
+ nullable: true;
802
+ }>;
803
+ /**
804
+ * Deprecated in favor of 'tool_choice'. Controls which function is called by the model (none, auto, or specific name).
805
+ */
806
+ function_call: S.optionalWith<S.Union<[typeof S.String, S.Record$<typeof S.String, typeof S.Unknown>]>, {
807
+ nullable: true;
808
+ }>;
809
+ /**
810
+ * Whether to return log probabilities of the output tokens. If true, returns the log probabilities for each token in the response content.
811
+ */
812
+ logprobs: S.optionalWith<typeof S.Boolean, {
813
+ nullable: true;
814
+ }>;
815
+ /**
816
+ * An integer between 0 and 20 specifying how many of the most likely tokens to return at each position, with log probabilities. Requires 'logprobs' to be true.
817
+ */
818
+ top_logprobs: S.optionalWith<S.filter<S.filter<typeof S.Int>>, {
819
+ nullable: true;
820
+ }>;
821
+ /**
822
+ * An upper bound for the number of tokens that can be generated for a completion, including visible output and reasoning tokens.
823
+ */
824
+ max_completion_tokens: S.optionalWith<S.filter<typeof S.Int>, {
825
+ nullable: true;
826
+ }>;
827
+ /**
828
+ * Constrains effort on reasoning for supported reasoning models. Higher values use more compute, potentially improving reasoning quality at the cost of latency and tokens.
829
+ */
830
+ reasoning_effort: S.optionalWith<typeof ChatCompletionRequestReasoningEffortEnum, {
831
+ nullable: true;
832
+ }>;
833
+ /**
834
+ * Parameters for audio output. Required when requesting audio responses (for example, modalities including 'audio').
835
+ */
836
+ audio: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
837
+ nullable: true;
838
+ }>;
839
+ /**
840
+ * Output types you would like the model to generate. Most models default to ['text']; some support ['text', 'audio'].
841
+ */
842
+ modalities: S.optionalWith<S.Array$<typeof S.String>, {
843
+ nullable: true;
844
+ }>;
845
+ /**
846
+ * Configuration for predicted outputs. Improves response times when you already know large portions of the response content.
847
+ */
848
+ prediction: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
849
+ nullable: true;
850
+ }>;
851
+ /**
852
+ * Set of up to 16 key-value string pairs that can be attached to the request for structured metadata.
853
+ */
854
+ metadata: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
855
+ nullable: true;
856
+ }>;
857
+ /**
858
+ * Whether to store the output of this chat completion request for OpenAI model distillation or eval products. Image inputs over 8MB are dropped if storage is enabled.
859
+ */
860
+ store: S.optionalWith<typeof S.Boolean, {
861
+ nullable: true;
862
+ }>;
863
+ /**
864
+ * Specifies the processing tier used for the request. 'auto' uses project defaults, while 'default' forces standard pricing and performance.
865
+ */
866
+ service_tier: S.optionalWith<typeof ChatCompletionRequestServiceTierEnum, {
867
+ nullable: true;
868
+ }>;
869
+ /**
870
+ * Used by OpenAI to cache responses for similar requests and optimize cache hit rates. Replaces the legacy 'user' field for caching.
871
+ */
872
+ prompt_cache_key: S.optionalWith<typeof S.String, {
873
+ nullable: true;
874
+ }>;
875
+ /**
876
+ * Stable identifier used to help detect users who might violate OpenAI usage policies. Consider hashing end-user identifiers before sending.
877
+ */
878
+ safety_identifier: S.optionalWith<typeof S.String, {
879
+ nullable: true;
880
+ }>;
881
+ /**
882
+ * Constrains the verbosity of the model's response. Lower values produce concise answers, higher values allow more detail.
883
+ */
884
+ verbosity: S.optionalWith<typeof ChatCompletionRequestVerbosityEnum, {
885
+ nullable: true;
886
+ }>;
887
+ /**
888
+ * Configuration for OpenAI's web search tool. Learn more at https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat.
889
+ */
890
+ web_search_options: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
891
+ nullable: true;
892
+ }>;
893
+ /**
894
+ * xAI-specific parameter for configuring web search data acquisition. If not set, no data will be acquired by the model.
895
+ */
896
+ search_parameters: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
897
+ nullable: true;
898
+ }>;
899
+ /**
900
+ * xAI-specific parameter. If set to true, the request returns a request_id for async completion retrieval via GET /v1/chat/deferred-completion/{request_id}.
901
+ */
902
+ deferred: S.optionalWith<typeof S.Boolean, {
903
+ nullable: true;
904
+ }>;
905
+ /**
906
+ * MCP (Model Context Protocol) server addresses to make available for server-side tool execution. Entries can be URLs (e.g., 'https://mcp.example.com'), slugs (e.g., 'dedalus-labs/brave-search'), or structured objects specifying slug/version/url. MCP tools are executed server-side and billed separately.
907
+ */
908
+ mcp_servers: S.optionalWith<S.Union<[typeof S.String, S.Array$<typeof S.String>]>, {
909
+ nullable: true;
910
+ }>;
911
+ /**
912
+ * Guardrails to apply to the agent for input/output validation and safety checks. Reserved for future use - guardrails configuration format not yet finalized.
913
+ */
914
+ guardrails: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
915
+ nullable: true;
916
+ }>;
917
+ /**
918
+ * Configuration for multi-model handoffs and agent orchestration. Reserved for future use - handoff configuration format not yet finalized.
919
+ */
920
+ handoff_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
921
+ nullable: true;
922
+ }>;
923
+ /**
924
+ * Attributes for individual models used in routing decisions during multi-model execution. Format: {'model_name': {'attribute': value}}, where values are 0.0-1.0. Common attributes: 'intelligence', 'speed', 'cost', 'creativity', 'accuracy'. Used by agent to select optimal model based on task requirements.
925
+ */
926
+ model_attributes: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
927
+ nullable: true;
928
+ }>;
929
+ /**
930
+ * Attributes for the agent itself, influencing behavior and model selection. Format: {'attribute': value}, where values are 0.0-1.0. Common attributes: 'complexity', 'accuracy', 'efficiency', 'creativity', 'friendliness'. Higher values indicate stronger preference for that characteristic.
931
+ */
932
+ agent_attributes: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
933
+ nullable: true;
934
+ }>;
935
+ /**
936
+ * Maximum number of turns for agent execution before terminating (default: 10). Each turn represents one model inference cycle. Higher values allow more complex reasoning but increase cost and latency.
937
+ */
938
+ max_turns: S.optionalWith<S.filter<S.filter<typeof S.Int>>, {
939
+ nullable: true;
940
+ }>;
941
+ /**
942
+ * When False, skip server-side tool execution and return raw OpenAI-style tool_calls in the response.
943
+ */
944
+ auto_execute_tools: S.optionalWith<typeof S.Boolean, {
945
+ nullable: true;
946
+ default: () => true;
947
+ }>;
948
+ }, S.Struct.Encoded<{
949
+ /**
950
+ * Model ID or list of model IDs for multi-model routing.
951
+ */
952
+ model: S.Union<[typeof DedalusModelChoice2, typeof Models2]>;
953
+ /**
954
+ * Conversation history. Accepts either a list of message objects or a string, which is treated as a single user message.
955
+ */
956
+ messages: S.Union<[S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, typeof S.String]>;
957
+ /**
958
+ * Convenience alias for Responses-style `input`. Used when `messages` is omitted to provide the user prompt directly.
959
+ */
960
+ input: S.optionalWith<S.Union<[S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, typeof S.String]>, {
961
+ nullable: true;
962
+ }>;
963
+ /**
964
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic. We generally recommend altering this or 'top_p' but not both.
965
+ */
966
+ temperature: S.optionalWith<S.filter<S.filter<typeof S.Number>>, {
967
+ nullable: true;
968
+ }>;
969
+ /**
970
+ * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or 'temperature' but not both.
971
+ */
972
+ top_p: S.optionalWith<S.filter<S.filter<typeof S.Number>>, {
973
+ nullable: true;
974
+ }>;
975
+ /**
976
+ * The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API. This value is now deprecated in favor of 'max_completion_tokens' and is not compatible with o-series models.
977
+ */
978
+ max_tokens: S.optionalWith<S.filter<typeof S.Int>, {
979
+ nullable: true;
980
+ }>;
981
+ /**
982
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
983
+ */
984
+ presence_penalty: S.optionalWith<S.filter<S.filter<typeof S.Number>>, {
985
+ nullable: true;
986
+ }>;
987
+ /**
988
+ * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
989
+ */
990
+ frequency_penalty: S.optionalWith<S.filter<S.filter<typeof S.Number>>, {
991
+ nullable: true;
992
+ }>;
993
+ /**
994
+ * Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object mapping token IDs (as strings) to bias values from -100 to 100. The bias is added to the logits before sampling; values between -1 and 1 nudge selection probability, while values like -100 or 100 effectively ban or require a token.
995
+ */
996
+ logit_bias: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
997
+ nullable: true;
998
+ }>;
999
+ /**
1000
+ * Not supported with latest reasoning models 'o3' and 'o4-mini'.
1001
+ *
1002
+ * Up to 4 sequences where the API will stop generating further tokens; the returned text will not contain the stop sequence.
1003
+ */
1004
+ stop: S.optionalWith<S.Array$<typeof S.String>, {
1005
+ nullable: true;
1006
+ }>;
1007
+ /**
1008
+ * Extended thinking configuration (Anthropic only). Set type to 'enabled' or 'disabled'. When enabled, shows reasoning process in thinking blocks. Requires min 1,024 token budget.
1009
+ */
1010
+ thinking: S.optionalWith<S.Union<[typeof ThinkingConfigDisabled, typeof ThinkingConfigEnabled]>, {
1011
+ nullable: true;
1012
+ }>;
1013
+ /**
1014
+ * Top-k sampling. Anthropic: pass-through. Google: injected into generationConfig.topK.
1015
+ */
1016
+ top_k: S.optionalWith<S.filter<typeof S.Int>, {
1017
+ nullable: true;
1018
+ }>;
1019
+ /**
1020
+ * System prompt/instructions. Anthropic: pass-through. Google: converted to systemInstruction. OpenAI: extracted from messages.
1021
+ */
1022
+ system: S.optionalWith<S.Union<[typeof S.String, S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>]>, {
1023
+ nullable: true;
1024
+ }>;
1025
+ /**
1026
+ * Convenience alias for Responses-style `instructions`. Takes precedence over `system` and over system-role messages when provided.
1027
+ */
1028
+ instructions: S.optionalWith<S.Union<[typeof S.String, S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>]>, {
1029
+ nullable: true;
1030
+ }>;
1031
+ /**
1032
+ * Google generationConfig object. Merged with auto-generated config. Use for Google-specific params (candidateCount, responseMimeType, etc.).
1033
+ */
1034
+ generation_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1035
+ nullable: true;
1036
+ }>;
1037
+ /**
1038
+ * Google safety settings (harm categories and thresholds).
1039
+ */
1040
+ safety_settings: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
1041
+ nullable: true;
1042
+ }>;
1043
+ /**
1044
+ * Google tool configuration (function calling mode, etc.).
1045
+ */
1046
+ tool_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1047
+ nullable: true;
1048
+ }>;
1049
+ /**
1050
+ * Google-only flag to disable the SDK's automatic function execution. When true, the model returns function calls for the client to execute manually.
1051
+ */
1052
+ disable_automatic_function_calling: S.optionalWith<typeof S.Boolean, {
1053
+ nullable: true;
1054
+ }>;
1055
+ /**
1056
+ * If specified, system will make a best effort to sample deterministically. Determinism is not guaranteed for the same seed across different models or API versions.
1057
+ */
1058
+ seed: S.optionalWith<typeof S.Int, {
1059
+ nullable: true;
1060
+ }>;
1061
+ /**
1062
+ * Stable identifier for your end-users. Helps OpenAI detect and prevent abuse and may boost cache hit rates. This field is being replaced by 'safety_identifier' and 'prompt_cache_key'.
1063
+ */
1064
+ user: S.optionalWith<typeof S.String, {
1065
+ nullable: true;
1066
+ }>;
1067
+ /**
1068
+ * How many chat completion choices to generate for each input message. Keep 'n' as 1 to minimize costs.
1069
+ */
1070
+ n: S.optionalWith<S.filter<S.filter<typeof S.Int>>, {
1071
+ nullable: true;
1072
+ }>;
1073
+ /**
1074
+ * If true, the model response data is streamed to the client as it is generated using Server-Sent Events.
1075
+ */
1076
+ stream: S.optionalWith<typeof S.Boolean, {
1077
+ nullable: true;
1078
+ default: () => false;
1079
+ }>;
1080
+ /**
1081
+ * Options for streaming responses. Only set when 'stream' is true (supports 'include_usage' and 'include_obfuscation').
1082
+ */
1083
+ stream_options: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1084
+ nullable: true;
1085
+ }>;
1086
+ /**
1087
+ * An object specifying the format that the model must output. Use {'type': 'json_schema', 'json_schema': {...}} for structured outputs or {'type': 'json_object'} for the legacy JSON mode. Currently only OpenAI-prefixed models honour this field; Anthropic and Google requests will return an invalid_request_error if it is supplied.
1088
+ */
1089
+ response_format: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1090
+ nullable: true;
1091
+ }>;
1092
+ /**
1093
+ * A list of tools the model may call. Supports OpenAI function tools and custom tools; use 'mcp_servers' for Dedalus-managed server-side tools.
1094
+ */
1095
+ tools: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
1096
+ nullable: true;
1097
+ }>;
1098
+ /**
1099
+ * Controls which (if any) tool is called by the model. 'none' stops tool calling, 'auto' lets the model decide, and 'required' forces at least one tool invocation. Specific tool payloads force that tool.
1100
+ */
1101
+ tool_choice: S.optionalWith<S.Union<[typeof S.String, S.Record$<typeof S.String, typeof S.Unknown>]>, {
1102
+ nullable: true;
1103
+ }>;
1104
+ /**
1105
+ * Whether to enable parallel function calling during tool use.
1106
+ */
1107
+ parallel_tool_calls: S.optionalWith<typeof S.Boolean, {
1108
+ nullable: true;
1109
+ }>;
1110
+ /**
1111
+ * Deprecated in favor of 'tools'. Legacy list of function definitions the model may generate JSON inputs for.
1112
+ */
1113
+ functions: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
1114
+ nullable: true;
1115
+ }>;
1116
+ /**
1117
+ * Deprecated in favor of 'tool_choice'. Controls which function is called by the model (none, auto, or specific name).
1118
+ */
1119
+ function_call: S.optionalWith<S.Union<[typeof S.String, S.Record$<typeof S.String, typeof S.Unknown>]>, {
1120
+ nullable: true;
1121
+ }>;
1122
+ /**
1123
+ * Whether to return log probabilities of the output tokens. If true, returns the log probabilities for each token in the response content.
1124
+ */
1125
+ logprobs: S.optionalWith<typeof S.Boolean, {
1126
+ nullable: true;
1127
+ }>;
1128
+ /**
1129
+ * An integer between 0 and 20 specifying how many of the most likely tokens to return at each position, with log probabilities. Requires 'logprobs' to be true.
1130
+ */
1131
+ top_logprobs: S.optionalWith<S.filter<S.filter<typeof S.Int>>, {
1132
+ nullable: true;
1133
+ }>;
1134
+ /**
1135
+ * An upper bound for the number of tokens that can be generated for a completion, including visible output and reasoning tokens.
1136
+ */
1137
+ max_completion_tokens: S.optionalWith<S.filter<typeof S.Int>, {
1138
+ nullable: true;
1139
+ }>;
1140
+ /**
1141
+ * Constrains effort on reasoning for supported reasoning models. Higher values use more compute, potentially improving reasoning quality at the cost of latency and tokens.
1142
+ */
1143
+ reasoning_effort: S.optionalWith<typeof ChatCompletionRequestReasoningEffortEnum, {
1144
+ nullable: true;
1145
+ }>;
1146
+ /**
1147
+ * Parameters for audio output. Required when requesting audio responses (for example, modalities including 'audio').
1148
+ */
1149
+ audio: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1150
+ nullable: true;
1151
+ }>;
1152
+ /**
1153
+ * Output types you would like the model to generate. Most models default to ['text']; some support ['text', 'audio'].
1154
+ */
1155
+ modalities: S.optionalWith<S.Array$<typeof S.String>, {
1156
+ nullable: true;
1157
+ }>;
1158
+ /**
1159
+ * Configuration for predicted outputs. Improves response times when you already know large portions of the response content.
1160
+ */
1161
+ prediction: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1162
+ nullable: true;
1163
+ }>;
1164
+ /**
1165
+ * Set of up to 16 key-value string pairs that can be attached to the request for structured metadata.
1166
+ */
1167
+ metadata: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1168
+ nullable: true;
1169
+ }>;
1170
+ /**
1171
+ * Whether to store the output of this chat completion request for OpenAI model distillation or eval products. Image inputs over 8MB are dropped if storage is enabled.
1172
+ */
1173
+ store: S.optionalWith<typeof S.Boolean, {
1174
+ nullable: true;
1175
+ }>;
1176
+ /**
1177
+ * Specifies the processing tier used for the request. 'auto' uses project defaults, while 'default' forces standard pricing and performance.
1178
+ */
1179
+ service_tier: S.optionalWith<typeof ChatCompletionRequestServiceTierEnum, {
1180
+ nullable: true;
1181
+ }>;
1182
+ /**
1183
+ * Used by OpenAI to cache responses for similar requests and optimize cache hit rates. Replaces the legacy 'user' field for caching.
1184
+ */
1185
+ prompt_cache_key: S.optionalWith<typeof S.String, {
1186
+ nullable: true;
1187
+ }>;
1188
+ /**
1189
+ * Stable identifier used to help detect users who might violate OpenAI usage policies. Consider hashing end-user identifiers before sending.
1190
+ */
1191
+ safety_identifier: S.optionalWith<typeof S.String, {
1192
+ nullable: true;
1193
+ }>;
1194
+ /**
1195
+ * Constrains the verbosity of the model's response. Lower values produce concise answers, higher values allow more detail.
1196
+ */
1197
+ verbosity: S.optionalWith<typeof ChatCompletionRequestVerbosityEnum, {
1198
+ nullable: true;
1199
+ }>;
1200
+ /**
1201
+ * Configuration for OpenAI's web search tool. Learn more at https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat.
1202
+ */
1203
+ web_search_options: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1204
+ nullable: true;
1205
+ }>;
1206
+ /**
1207
+ * xAI-specific parameter for configuring web search data acquisition. If not set, no data will be acquired by the model.
1208
+ */
1209
+ search_parameters: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1210
+ nullable: true;
1211
+ }>;
1212
+ /**
1213
+ * xAI-specific parameter. If set to true, the request returns a request_id for async completion retrieval via GET /v1/chat/deferred-completion/{request_id}.
1214
+ */
1215
+ deferred: S.optionalWith<typeof S.Boolean, {
1216
+ nullable: true;
1217
+ }>;
1218
+ /**
1219
+ * MCP (Model Context Protocol) server addresses to make available for server-side tool execution. Entries can be URLs (e.g., 'https://mcp.example.com'), slugs (e.g., 'dedalus-labs/brave-search'), or structured objects specifying slug/version/url. MCP tools are executed server-side and billed separately.
1220
+ */
1221
+ mcp_servers: S.optionalWith<S.Union<[typeof S.String, S.Array$<typeof S.String>]>, {
1222
+ nullable: true;
1223
+ }>;
1224
+ /**
1225
+ * Guardrails to apply to the agent for input/output validation and safety checks. Reserved for future use - guardrails configuration format not yet finalized.
1226
+ */
1227
+ guardrails: S.optionalWith<S.Array$<S.Record$<typeof S.String, typeof S.Unknown>>, {
1228
+ nullable: true;
1229
+ }>;
1230
+ /**
1231
+ * Configuration for multi-model handoffs and agent orchestration. Reserved for future use - handoff configuration format not yet finalized.
1232
+ */
1233
+ handoff_config: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1234
+ nullable: true;
1235
+ }>;
1236
+ /**
1237
+ * Attributes for individual models used in routing decisions during multi-model execution. Format: {'model_name': {'attribute': value}}, where values are 0.0-1.0. Common attributes: 'intelligence', 'speed', 'cost', 'creativity', 'accuracy'. Used by agent to select optimal model based on task requirements.
1238
+ */
1239
+ model_attributes: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1240
+ nullable: true;
1241
+ }>;
1242
+ /**
1243
+ * Attributes for the agent itself, influencing behavior and model selection. Format: {'attribute': value}, where values are 0.0-1.0. Common attributes: 'complexity', 'accuracy', 'efficiency', 'creativity', 'friendliness'. Higher values indicate stronger preference for that characteristic.
1244
+ */
1245
+ agent_attributes: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
1246
+ nullable: true;
1247
+ }>;
1248
+ /**
1249
+ * Maximum number of turns for agent execution before terminating (default: 10). Each turn represents one model inference cycle. Higher values allow more complex reasoning but increase cost and latency.
1250
+ */
1251
+ max_turns: S.optionalWith<S.filter<S.filter<typeof S.Int>>, {
1252
+ nullable: true;
1253
+ }>;
1254
+ /**
1255
+ * When False, skip server-side tool execution and return raw OpenAI-style tool_calls in the response.
1256
+ */
1257
+ auto_execute_tools: S.optionalWith<typeof S.Boolean, {
1258
+ nullable: true;
1259
+ default: () => true;
1260
+ }>;
1261
+ }>, never, {
1262
+ readonly temperature?: number | undefined;
1263
+ } & {
1264
+ readonly top_p?: number | undefined;
1265
+ } & {
1266
+ readonly frequency_penalty?: number | undefined;
1267
+ } & {
1268
+ readonly presence_penalty?: number | undefined;
1269
+ } & {
1270
+ readonly stop?: readonly string[] | undefined;
1271
+ } & {
1272
+ readonly seed?: number | undefined;
1273
+ } & {
1274
+ readonly logit_bias?: {
1275
+ readonly [x: string]: unknown;
1276
+ } | undefined;
1277
+ } & {
1278
+ readonly logprobs?: boolean | undefined;
1279
+ } & {
1280
+ readonly top_logprobs?: number | undefined;
1281
+ } & {
1282
+ readonly n?: number | undefined;
1283
+ } & {
1284
+ readonly user?: string | undefined;
1285
+ } & {
1286
+ readonly response_format?: {
1287
+ readonly [x: string]: unknown;
1288
+ } | undefined;
1289
+ } & {
1290
+ readonly stream?: boolean | undefined;
1291
+ } & {
1292
+ readonly stream_options?: {
1293
+ readonly [x: string]: unknown;
1294
+ } | undefined;
1295
+ } & {
1296
+ readonly audio?: {
1297
+ readonly [x: string]: unknown;
1298
+ } | undefined;
1299
+ } & {
1300
+ readonly service_tier?: "default" | "auto" | undefined;
1301
+ } & {
1302
+ readonly prediction?: {
1303
+ readonly [x: string]: unknown;
1304
+ } | undefined;
1305
+ } & {
1306
+ readonly tool_choice?: string | {
1307
+ readonly [x: string]: unknown;
1308
+ } | undefined;
1309
+ } & {
1310
+ readonly parallel_tool_calls?: boolean | undefined;
1311
+ } & {
1312
+ readonly max_tokens?: number | undefined;
1313
+ } & {
1314
+ readonly max_completion_tokens?: number | undefined;
1315
+ } & {
1316
+ readonly reasoning_effort?: "low" | "medium" | "high" | undefined;
1317
+ } & {
1318
+ readonly metadata?: {
1319
+ readonly [x: string]: unknown;
1320
+ } | undefined;
1321
+ } & {
1322
+ readonly store?: boolean | undefined;
1323
+ } & {
1324
+ readonly prompt_cache_key?: string | undefined;
1325
+ } & {
1326
+ readonly safety_identifier?: string | undefined;
1327
+ } & {
1328
+ readonly verbosity?: "low" | "medium" | "high" | undefined;
1329
+ } & {
1330
+ readonly web_search_options?: {
1331
+ readonly [x: string]: unknown;
1332
+ } | undefined;
1333
+ } & {
1334
+ readonly modalities?: readonly string[] | undefined;
1335
+ } & {
1336
+ readonly thinking?: ThinkingConfigDisabled | ThinkingConfigEnabled | undefined;
1337
+ } & {
1338
+ readonly top_k?: number | undefined;
1339
+ } & {
1340
+ readonly generation_config?: {
1341
+ readonly [x: string]: unknown;
1342
+ } | undefined;
1343
+ } & {
1344
+ readonly safety_settings?: readonly {
1345
+ readonly [x: string]: unknown;
1346
+ }[] | undefined;
1347
+ } & {
1348
+ readonly tool_config?: {
1349
+ readonly [x: string]: unknown;
1350
+ } | undefined;
1351
+ } & {
1352
+ readonly disable_automatic_function_calling?: boolean | undefined;
1353
+ } & {
1354
+ readonly search_parameters?: {
1355
+ readonly [x: string]: unknown;
1356
+ } | undefined;
1357
+ } & {
1358
+ readonly deferred?: boolean | undefined;
1359
+ } & {
1360
+ readonly model: string | DedalusModel | readonly (string | DedalusModel)[];
1361
+ } & {
1362
+ readonly messages: string | readonly {
1363
+ readonly [x: string]: unknown;
1364
+ }[];
1365
+ } & {
1366
+ readonly input?: string | readonly {
1367
+ readonly [x: string]: unknown;
1368
+ }[] | undefined;
1369
+ } & {
1370
+ readonly system?: string | readonly {
1371
+ readonly [x: string]: unknown;
1372
+ }[] | undefined;
1373
+ } & {
1374
+ readonly instructions?: string | readonly {
1375
+ readonly [x: string]: unknown;
1376
+ }[] | undefined;
1377
+ } & {
1378
+ readonly tools?: readonly {
1379
+ readonly [x: string]: unknown;
1380
+ }[] | undefined;
1381
+ } & {
1382
+ readonly functions?: readonly {
1383
+ readonly [x: string]: unknown;
1384
+ }[] | undefined;
1385
+ } & {
1386
+ readonly function_call?: string | {
1387
+ readonly [x: string]: unknown;
1388
+ } | undefined;
1389
+ } & {
1390
+ readonly mcp_servers?: string | readonly string[] | undefined;
1391
+ } & {
1392
+ readonly guardrails?: readonly {
1393
+ readonly [x: string]: unknown;
1394
+ }[] | undefined;
1395
+ } & {
1396
+ readonly handoff_config?: {
1397
+ readonly [x: string]: unknown;
1398
+ } | undefined;
1399
+ } & {
1400
+ readonly model_attributes?: {
1401
+ readonly [x: string]: unknown;
1402
+ } | undefined;
1403
+ } & {
1404
+ readonly agent_attributes?: {
1405
+ readonly [x: string]: unknown;
1406
+ } | undefined;
1407
+ } & {
1408
+ readonly max_turns?: number | undefined;
1409
+ } & {
1410
+ readonly auto_execute_tools?: boolean | undefined;
1411
+ }, {}, {}>;
1412
/**
 * Chat completion request (OpenAI-compatible).
 *
 * Stateless chat completion endpoint. For stateful conversations with threads,
 * use the Responses API instead.
 *
 * Schema class derived from `ChatCompletionRequest_base` (declared above);
 * re-exported from the generated namespace under the public name
 * `ChatCompletionRequest`.
 */
declare class ChatCompletionRequest2 extends ChatCompletionRequest_base {}
1419
/**
 * Literal schema for the reason a completion choice stopped generating:
 * 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call'.
 */
declare const ChoiceFinishReasonEnum_base: S.Literal<["stop", "length", "tool_calls", "content_filter", "function_call"]>;
/**
 * Finish-reason enum schema class; re-exported from the generated namespace
 * under the public name `ChoiceFinishReasonEnum`.
 */
declare class ChoiceFinishReasonEnum2 extends ChoiceFinishReasonEnum_base {}
1421
/**
 * Base schema class for a model-generated function call payload
 * (decoded type: `{ name: string; arguments: string }`).
 */
declare const Function_base: S.Class<Function, {
    /**
     * The name of the function to call.
     */
    name: typeof S.String;
    /**
     * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
     */
    arguments: typeof S.String;
}, S.Struct.Encoded<{
    /**
     * The name of the function to call.
     */
    name: typeof S.String;
    /**
     * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
     */
    arguments: typeof S.String;
}>, never, {
    readonly name: string;
} & {
    readonly arguments: string;
}, {}, {}>;
/**
 * The function that the model called.
 *
 * Fields:
 * - name (required): str
 * - arguments (required): str
 *
 * NOTE(review): this generated class shadows the global `Function` type inside
 * this declaration scope; renaming it would break the published export
 * surface, so it is kept as emitted — avoid using the built-in `Function`
 * type in this namespace.
 */
declare class Function extends Function_base {}
1452
/**
 * Base schema class for an OpenAI-style `function` tool call entry
 * (decoded type: `{ id: string; type: "function"; function: Function }`).
 */
declare const ChatCompletionMessageToolCall_base: S.Class<ChatCompletionMessageToolCall, {
    /**
     * The ID of the tool call.
     */
    id: typeof S.String;
    /**
     * The type of the tool. Currently, only `function` is supported.
     */
    type: S.Literal<["function"]>;
    /**
     * The function that the model called.
     */
    function: typeof Function;
}, S.Struct.Encoded<{
    /**
     * The ID of the tool call.
     */
    id: typeof S.String;
    /**
     * The type of the tool. Currently, only `function` is supported.
     */
    type: S.Literal<["function"]>;
    /**
     * The function that the model called.
     */
    function: typeof Function;
}>, never, {
    readonly function: Function;
} & {
    readonly type: "function";
} & {
    readonly id: string;
}, {}, {}>;
/**
 * A call to a function tool created by the model.
 *
 * Fields:
 * - id (required): str
 * - type (required): Literal['function']
 * - function (required): Function
 */
declare class ChatCompletionMessageToolCall extends ChatCompletionMessageToolCall_base {}
1494
/**
 * Base schema class for a custom (non-function) tool invocation payload
 * (decoded type: `{ name: string; input: string }`).
 */
declare const Custom_base: S.Class<Custom, {
    /**
     * The name of the custom tool to call.
     */
    name: typeof S.String;
    /**
     * The input for the custom tool call generated by the model.
     */
    input: typeof S.String;
}, S.Struct.Encoded<{
    /**
     * The name of the custom tool to call.
     */
    name: typeof S.String;
    /**
     * The input for the custom tool call generated by the model.
     */
    input: typeof S.String;
}>, never, {
    readonly name: string;
} & {
    readonly input: string;
}, {}, {}>;
/**
 * The custom tool that the model called.
 *
 * Fields:
 * - name (required): str
 * - input (required): str
 */
declare class Custom extends Custom_base {}
1525
/**
 * Base schema class for a `custom`-typed tool call entry
 * (decoded type: `{ id: string; type: "custom"; custom: Custom }`).
 */
declare const ChatCompletionMessageCustomToolCall_base: S.Class<ChatCompletionMessageCustomToolCall, {
    /**
     * The ID of the tool call.
     */
    id: typeof S.String;
    /**
     * The type of the tool. Always `custom`.
     */
    type: S.Literal<["custom"]>;
    /**
     * The custom tool that the model called.
     */
    custom: typeof Custom;
}, S.Struct.Encoded<{
    /**
     * The ID of the tool call.
     */
    id: typeof S.String;
    /**
     * The type of the tool. Always `custom`.
     */
    type: S.Literal<["custom"]>;
    /**
     * The custom tool that the model called.
     */
    custom: typeof Custom;
}>, never, {
    readonly type: "custom";
} & {
    readonly id: string;
} & {
    readonly custom: Custom;
}, {}, {}>;
/**
 * A call to a custom tool created by the model.
 *
 * Fields:
 * - id (required): str
 * - type (required): Literal['custom']
 * - custom (required): Custom
 */
declare class ChatCompletionMessageCustomToolCall extends ChatCompletionMessageCustomToolCall_base {}
1567
+ declare const UrlCitation_base: S.Class<UrlCitation, {
1568
+ /**
1569
+ * The index of the last character of the URL citation in the message.
1570
+ */
1571
+ end_index: typeof S.Int;
1572
+ /**
1573
+ * The index of the first character of the URL citation in the message.
1574
+ */
1575
+ start_index: typeof S.Int;
1576
+ /**
1577
+ * The URL of the web resource.
1578
+ */
1579
+ url: typeof S.String;
1580
+ /**
1581
+ * The title of the web resource.
1582
+ */
1583
+ title: typeof S.String;
1584
+ }, S.Struct.Encoded<{
1585
+ /**
1586
+ * The index of the last character of the URL citation in the message.
1587
+ */
1588
+ end_index: typeof S.Int;
1589
+ /**
1590
+ * The index of the first character of the URL citation in the message.
1591
+ */
1592
+ start_index: typeof S.Int;
1593
+ /**
1594
+ * The URL of the web resource.
1595
+ */
1596
+ url: typeof S.String;
1597
+ /**
1598
+ * The title of the web resource.
1599
+ */
1600
+ title: typeof S.String;
1601
+ }>, never, {
1602
+ readonly end_index: number;
1603
+ } & {
1604
+ readonly start_index: number;
1605
+ } & {
1606
+ readonly url: string;
1607
+ } & {
1608
+ readonly title: string;
1609
+ }, {}, {}>;
1610
+ /**
1611
+ * A URL citation when using web search.
1612
+ *
1613
+ * Fields:
1614
+ * - end_index (required): int
1615
+ * - start_index (required): int
1616
+ * - url (required): str
1617
+ * - title (required): str
1618
+ */
1619
+ declare class UrlCitation extends UrlCitation_base {}
1620
+ declare const AnnotationsItem_base: S.Class<AnnotationsItem, {
1621
+ /**
1622
+ * The type of the URL citation. Always `url_citation`.
1623
+ */
1624
+ type: S.Literal<["url_citation"]>;
1625
+ /**
1626
+ * A URL citation when using web search.
1627
+ */
1628
+ url_citation: typeof UrlCitation;
1629
+ }, S.Struct.Encoded<{
1630
+ /**
1631
+ * The type of the URL citation. Always `url_citation`.
1632
+ */
1633
+ type: S.Literal<["url_citation"]>;
1634
+ /**
1635
+ * A URL citation when using web search.
1636
+ */
1637
+ url_citation: typeof UrlCitation;
1638
+ }>, never, {
1639
+ readonly type: "url_citation";
1640
+ } & {
1641
+ readonly url_citation: UrlCitation;
1642
+ }, {}, {}>;
1643
+ /**
1644
+ * A URL citation when using web search.
1645
+ *
1646
+ * Fields:
1647
+ * - type (required): Literal['url_citation']
1648
+ * - url_citation (required): UrlCitation
1649
+ */
1650
+ declare class AnnotationsItem extends AnnotationsItem_base {}
1651
+ declare const FunctionCall_base: S.Class<FunctionCall, {
1652
+ /**
1653
+ * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
1654
+ */
1655
+ arguments: typeof S.String;
1656
+ /**
1657
+ * The name of the function to call.
1658
+ */
1659
+ name: typeof S.String;
1660
+ }, S.Struct.Encoded<{
1661
+ /**
1662
+ * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
1663
+ */
1664
+ arguments: typeof S.String;
1665
+ /**
1666
+ * The name of the function to call.
1667
+ */
1668
+ name: typeof S.String;
1669
+ }>, never, {
1670
+ readonly name: string;
1671
+ } & {
1672
+ readonly arguments: string;
1673
+ }, {}, {}>;
1674
+ /**
1675
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.
1676
+ *
1677
+ * Fields:
1678
+ * - arguments (required): str
1679
+ * - name (required): str
1680
+ */
1681
+ declare class FunctionCall extends FunctionCall_base {}
1682
+ declare const Audio_base: S.Class<Audio, {
1683
+ /**
1684
+ * Unique identifier for this audio response.
1685
+ */
1686
+ id: typeof S.String;
1687
+ /**
1688
+ * The Unix timestamp (in seconds) for when this audio response will
1689
+ * no longer be accessible on the server for use in multi-turn
1690
+ * conversations.
1691
+ */
1692
+ expires_at: typeof S.Int;
1693
+ /**
1694
+ * Base64 encoded audio bytes generated by the model, in the format
1695
+ * specified in the request.
1696
+ */
1697
+ data: typeof S.String;
1698
+ /**
1699
+ * Transcript of the audio generated by the model.
1700
+ */
1701
+ transcript: typeof S.String;
1702
+ }, S.Struct.Encoded<{
1703
+ /**
1704
+ * Unique identifier for this audio response.
1705
+ */
1706
+ id: typeof S.String;
1707
+ /**
1708
+ * The Unix timestamp (in seconds) for when this audio response will
1709
+ * no longer be accessible on the server for use in multi-turn
1710
+ * conversations.
1711
+ */
1712
+ expires_at: typeof S.Int;
1713
+ /**
1714
+ * Base64 encoded audio bytes generated by the model, in the format
1715
+ * specified in the request.
1716
+ */
1717
+ data: typeof S.String;
1718
+ /**
1719
+ * Transcript of the audio generated by the model.
1720
+ */
1721
+ transcript: typeof S.String;
1722
+ }>, never, {
1723
+ readonly id: string;
1724
+ } & {
1725
+ readonly expires_at: number;
1726
+ } & {
1727
+ readonly data: string;
1728
+ } & {
1729
+ readonly transcript: string;
1730
+ }, {}, {}>;
1731
+ /**
1732
+ * If the audio output modality is requested, this object contains data
1733
+ *
1734
+ * about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).
1735
+ *
1736
+ * Fields:
1737
+ * - id (required): str
1738
+ * - expires_at (required): int
1739
+ * - data (required): str
1740
+ * - transcript (required): str
1741
+ */
1742
+ declare class Audio extends Audio_base {}
1743
+ declare const ChatCompletionResponseMessage_base: S.Class<ChatCompletionResponseMessage, {
1744
+ /**
1745
+ * The contents of the message.
1746
+ */
1747
+ content: S.NullOr<typeof S.String>;
1748
+ /**
1749
+ * The refusal message generated by the model.
1750
+ */
1751
+ refusal: S.optionalWith<S.NullOr<typeof S.String>, {
1752
+ nullable: true;
1753
+ }>;
1754
+ /**
1755
+ * The tool calls generated by the model, such as function calls.
1756
+ */
1757
+ tool_calls: S.optionalWith<S.Array$<S.Union<[typeof ChatCompletionMessageToolCall, typeof ChatCompletionMessageCustomToolCall]>>, {
1758
+ nullable: true;
1759
+ }>;
1760
+ /**
1761
+ * Annotations for the message, when applicable, as when using the
1762
+ * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
1763
+ */
1764
+ annotations: S.optionalWith<S.Array$<typeof AnnotationsItem>, {
1765
+ nullable: true;
1766
+ }>;
1767
+ /**
1768
+ * The role of the author of this message.
1769
+ */
1770
+ role: S.Literal<["assistant"]>;
1771
+ /**
1772
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.
1773
+ */
1774
+ function_call: S.optionalWith<typeof FunctionCall, {
1775
+ nullable: true;
1776
+ }>;
1777
+ /**
1778
+ * If the audio output modality is requested, this object contains data
1779
+ * about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).
1780
+ */
1781
+ audio: S.optionalWith<typeof Audio, {
1782
+ nullable: true;
1783
+ }>;
1784
+ }, S.Struct.Encoded<{
1785
+ /**
1786
+ * The contents of the message.
1787
+ */
1788
+ content: S.NullOr<typeof S.String>;
1789
+ /**
1790
+ * The refusal message generated by the model.
1791
+ */
1792
+ refusal: S.optionalWith<S.NullOr<typeof S.String>, {
1793
+ nullable: true;
1794
+ }>;
1795
+ /**
1796
+ * The tool calls generated by the model, such as function calls.
1797
+ */
1798
+ tool_calls: S.optionalWith<S.Array$<S.Union<[typeof ChatCompletionMessageToolCall, typeof ChatCompletionMessageCustomToolCall]>>, {
1799
+ nullable: true;
1800
+ }>;
1801
+ /**
1802
+ * Annotations for the message, when applicable, as when using the
1803
+ * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
1804
+ */
1805
+ annotations: S.optionalWith<S.Array$<typeof AnnotationsItem>, {
1806
+ nullable: true;
1807
+ }>;
1808
+ /**
1809
+ * The role of the author of this message.
1810
+ */
1811
+ role: S.Literal<["assistant"]>;
1812
+ /**
1813
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.
1814
+ */
1815
+ function_call: S.optionalWith<typeof FunctionCall, {
1816
+ nullable: true;
1817
+ }>;
1818
+ /**
1819
+ * If the audio output modality is requested, this object contains data
1820
+ * about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).
1821
+ */
1822
+ audio: S.optionalWith<typeof Audio, {
1823
+ nullable: true;
1824
+ }>;
1825
+ }>, never, {
1826
+ readonly annotations?: readonly AnnotationsItem[] | undefined;
1827
+ } & {
1828
+ readonly audio?: Audio | undefined;
1829
+ } & {
1830
+ readonly function_call?: FunctionCall | undefined;
1831
+ } & {
1832
+ readonly tool_calls?: readonly (ChatCompletionMessageToolCall | ChatCompletionMessageCustomToolCall)[] | undefined;
1833
+ } & {
1834
+ readonly content: string | null;
1835
+ } & {
1836
+ readonly refusal?: string | null | undefined;
1837
+ } & {
1838
+ readonly role: "assistant";
1839
+ }, {}, {}>;
1840
+ /**
1841
+ * A chat completion message generated by the model.
1842
+ *
1843
+ * Fields:
1844
+ * - content (required): str | None
1845
+ * - refusal (required): str | None
1846
+ * - tool_calls (optional): ChatCompletionMessageToolCalls
1847
+ * - annotations (optional): list[AnnotationsItem]
1848
+ * - role (required): Literal['assistant']
1849
+ * - function_call (optional): FunctionCall
1850
+ * - audio (optional): Audio | None
1851
+ */
1852
+ declare class ChatCompletionResponseMessage extends ChatCompletionResponseMessage_base {}
1853
+ declare const TopLogprob_base: S.Class<TopLogprob, {
1854
+ /**
1855
+ * The token.
1856
+ */
1857
+ token: typeof S.String;
1858
+ /**
1859
+ * The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
1860
+ */
1861
+ logprob: typeof S.Number;
1862
+ /**
1863
+ * A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
1864
+ */
1865
+ bytes: S.NullOr<S.Array$<typeof S.Int>>;
1866
+ }, S.Struct.Encoded<{
1867
+ /**
1868
+ * The token.
1869
+ */
1870
+ token: typeof S.String;
1871
+ /**
1872
+ * The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
1873
+ */
1874
+ logprob: typeof S.Number;
1875
+ /**
1876
+ * A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
1877
+ */
1878
+ bytes: S.NullOr<S.Array$<typeof S.Int>>;
1879
+ }>, never, {
1880
+ readonly token: string;
1881
+ } & {
1882
+ readonly logprob: number;
1883
+ } & {
1884
+ readonly bytes: readonly number[] | null;
1885
+ }, {}, {}>;
1886
+ /**
1887
+ * Token and its log probability.
1888
+ */
1889
+ declare class TopLogprob extends TopLogprob_base {}
1890
+ declare const ChatCompletionTokenLogprob_base: S.Class<ChatCompletionTokenLogprob, {
1891
+ /**
1892
+ * The token.
1893
+ */
1894
+ token: typeof S.String;
1895
+ /**
1896
+ * The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
1897
+ */
1898
+ logprob: typeof S.Number;
1899
+ /**
1900
+ * A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
1901
+ */
1902
+ bytes: S.NullOr<S.Array$<typeof S.Int>>;
1903
+ /**
1904
+ * List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.
1905
+ */
1906
+ top_logprobs: S.Array$<typeof TopLogprob>;
1907
+ }, S.Struct.Encoded<{
1908
+ /**
1909
+ * The token.
1910
+ */
1911
+ token: typeof S.String;
1912
+ /**
1913
+ * The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.
1914
+ */
1915
+ logprob: typeof S.Number;
1916
+ /**
1917
+ * A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.
1918
+ */
1919
+ bytes: S.NullOr<S.Array$<typeof S.Int>>;
1920
+ /**
1921
+ * List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.
1922
+ */
1923
+ top_logprobs: S.Array$<typeof TopLogprob>;
1924
+ }>, never, {
1925
+ readonly top_logprobs: readonly TopLogprob[];
1926
+ } & {
1927
+ readonly token: string;
1928
+ } & {
1929
+ readonly logprob: number;
1930
+ } & {
1931
+ readonly bytes: readonly number[] | null;
1932
+ }, {}, {}>;
1933
+ /**
1934
+ * Token log probability information.
1935
+ */
1936
+ declare class ChatCompletionTokenLogprob extends ChatCompletionTokenLogprob_base {}
1937
+ declare const ChoiceLogprobs_base: S.Class<ChoiceLogprobs2, {
1938
+ /**
1939
+ * A list of message content tokens with log probability information.
1940
+ */
1941
+ content: S.optionalWith<S.Array$<typeof ChatCompletionTokenLogprob>, {
1942
+ nullable: true;
1943
+ }>;
1944
+ /**
1945
+ * A list of message refusal tokens with log probability information.
1946
+ */
1947
+ refusal: S.optionalWith<S.Array$<typeof ChatCompletionTokenLogprob>, {
1948
+ nullable: true;
1949
+ }>;
1950
+ }, S.Struct.Encoded<{
1951
+ /**
1952
+ * A list of message content tokens with log probability information.
1953
+ */
1954
+ content: S.optionalWith<S.Array$<typeof ChatCompletionTokenLogprob>, {
1955
+ nullable: true;
1956
+ }>;
1957
+ /**
1958
+ * A list of message refusal tokens with log probability information.
1959
+ */
1960
+ refusal: S.optionalWith<S.Array$<typeof ChatCompletionTokenLogprob>, {
1961
+ nullable: true;
1962
+ }>;
1963
+ }>, never, {
1964
+ readonly content?: readonly ChatCompletionTokenLogprob[] | undefined;
1965
+ } & {
1966
+ readonly refusal?: readonly ChatCompletionTokenLogprob[] | undefined;
1967
+ }, {}, {}>;
1968
+ /**
1969
+ * Log probability information for the choice.
1970
+ */
1971
+ declare class ChoiceLogprobs2 extends ChoiceLogprobs_base {}
1972
+ declare const Choice_base: S.Class<Choice, {
1973
+ /**
1974
+ * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
1975
+ * `length` if the maximum number of tokens specified in the request was reached,
1976
+ * `content_filter` if content was omitted due to a flag from our content filters,
1977
+ * `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.
1978
+ */
1979
+ finish_reason: S.optionalWith<typeof ChoiceFinishReasonEnum2, {
1980
+ nullable: true;
1981
+ }>;
1982
+ /**
1983
+ * The index of the choice in the list of choices.
1984
+ */
1985
+ index: typeof S.Int;
1986
+ /**
1987
+ * A chat completion message generated by the model.
1988
+ */
1989
+ message: typeof ChatCompletionResponseMessage;
1990
+ /**
1991
+ * Log probability information for the choice.
1992
+ */
1993
+ logprobs: S.optionalWith<typeof ChoiceLogprobs2, {
1994
+ nullable: true;
1995
+ }>;
1996
+ }, S.Struct.Encoded<{
1997
+ /**
1998
+ * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
1999
+ * `length` if the maximum number of tokens specified in the request was reached,
2000
+ * `content_filter` if content was omitted due to a flag from our content filters,
2001
+ * `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.
2002
+ */
2003
+ finish_reason: S.optionalWith<typeof ChoiceFinishReasonEnum2, {
2004
+ nullable: true;
2005
+ }>;
2006
+ /**
2007
+ * The index of the choice in the list of choices.
2008
+ */
2009
+ index: typeof S.Int;
2010
+ /**
2011
+ * A chat completion message generated by the model.
2012
+ */
2013
+ message: typeof ChatCompletionResponseMessage;
2014
+ /**
2015
+ * Log probability information for the choice.
2016
+ */
2017
+ logprobs: S.optionalWith<typeof ChoiceLogprobs2, {
2018
+ nullable: true;
2019
+ }>;
2020
+ }>, never, {
2021
+ readonly logprobs?: ChoiceLogprobs2 | undefined;
2022
+ } & {
2023
+ readonly index: number;
2024
+ } & {
2025
+ readonly message: ChatCompletionResponseMessage;
2026
+ } & {
2027
+ readonly finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | undefined;
2028
+ }, {}, {}>;
2029
+ /**
2030
+ * A chat completion choice.
2031
+ *
2032
+ * OpenAI-compatible choice object for non-streaming responses.
2033
+ * Part of the ChatCompletion response.
2034
+ */
2035
+ declare class Choice extends Choice_base {}
2036
+ declare const ChatCompletionServiceTierEnum_base: S.Literal<["auto", "default", "flex", "scale", "priority"]>;
2037
+ declare class ChatCompletionServiceTierEnum2 extends ChatCompletionServiceTierEnum_base {}
2038
+ declare const CompletionTokensDetails_base: S.Class<CompletionTokensDetails, {
2039
+ /**
2040
+ * When using Predicted Outputs, the number of tokens in the
2041
+ * prediction that appeared in the completion.
2042
+ */
2043
+ accepted_prediction_tokens: S.optionalWith<typeof S.Int, {
2044
+ nullable: true;
2045
+ default: () => 0;
2046
+ }>;
2047
+ /**
2048
+ * Audio input tokens generated by the model.
2049
+ */
2050
+ audio_tokens: S.optionalWith<typeof S.Int, {
2051
+ nullable: true;
2052
+ default: () => 0;
2053
+ }>;
2054
+ /**
2055
+ * Tokens generated by the model for reasoning.
2056
+ */
2057
+ reasoning_tokens: S.optionalWith<typeof S.Int, {
2058
+ nullable: true;
2059
+ default: () => 0;
2060
+ }>;
2061
+ /**
2062
+ * When using Predicted Outputs, the number of tokens in the
2063
+ * prediction that did not appear in the completion. However, like
2064
+ * reasoning tokens, these tokens are still counted in the total
2065
+ * completion tokens for purposes of billing, output, and context window
2066
+ * limits.
2067
+ */
2068
+ rejected_prediction_tokens: S.optionalWith<typeof S.Int, {
2069
+ nullable: true;
2070
+ default: () => 0;
2071
+ }>;
2072
+ }, S.Struct.Encoded<{
2073
+ /**
2074
+ * When using Predicted Outputs, the number of tokens in the
2075
+ * prediction that appeared in the completion.
2076
+ */
2077
+ accepted_prediction_tokens: S.optionalWith<typeof S.Int, {
2078
+ nullable: true;
2079
+ default: () => 0;
2080
+ }>;
2081
+ /**
2082
+ * Audio input tokens generated by the model.
2083
+ */
2084
+ audio_tokens: S.optionalWith<typeof S.Int, {
2085
+ nullable: true;
2086
+ default: () => 0;
2087
+ }>;
2088
+ /**
2089
+ * Tokens generated by the model for reasoning.
2090
+ */
2091
+ reasoning_tokens: S.optionalWith<typeof S.Int, {
2092
+ nullable: true;
2093
+ default: () => 0;
2094
+ }>;
2095
+ /**
2096
+ * When using Predicted Outputs, the number of tokens in the
2097
+ * prediction that did not appear in the completion. However, like
2098
+ * reasoning tokens, these tokens are still counted in the total
2099
+ * completion tokens for purposes of billing, output, and context window
2100
+ * limits.
2101
+ */
2102
+ rejected_prediction_tokens: S.optionalWith<typeof S.Int, {
2103
+ nullable: true;
2104
+ default: () => 0;
2105
+ }>;
2106
+ }>, never, {
2107
+ readonly accepted_prediction_tokens?: number | undefined;
2108
+ } & {
2109
+ readonly audio_tokens?: number | undefined;
2110
+ } & {
2111
+ readonly reasoning_tokens?: number | undefined;
2112
+ } & {
2113
+ readonly rejected_prediction_tokens?: number | undefined;
2114
+ }, {}, {}>;
2115
+ /**
2116
+ * Breakdown of tokens used in a completion.
2117
+ *
2118
+ * Fields:
2119
+ * - accepted_prediction_tokens (optional): int
2120
+ * - audio_tokens (optional): int
2121
+ * - reasoning_tokens (optional): int
2122
+ * - rejected_prediction_tokens (optional): int
2123
+ */
2124
+ declare class CompletionTokensDetails extends CompletionTokensDetails_base {}
2125
+ declare const PromptTokensDetails_base: S.Class<PromptTokensDetails, {
2126
+ /**
2127
+ * Audio input tokens present in the prompt.
2128
+ */
2129
+ audio_tokens: S.optionalWith<typeof S.Int, {
2130
+ nullable: true;
2131
+ default: () => 0;
2132
+ }>;
2133
+ /**
2134
+ * Cached tokens present in the prompt.
2135
+ */
2136
+ cached_tokens: S.optionalWith<typeof S.Int, {
2137
+ nullable: true;
2138
+ default: () => 0;
2139
+ }>;
2140
+ }, S.Struct.Encoded<{
2141
+ /**
2142
+ * Audio input tokens present in the prompt.
2143
+ */
2144
+ audio_tokens: S.optionalWith<typeof S.Int, {
2145
+ nullable: true;
2146
+ default: () => 0;
2147
+ }>;
2148
+ /**
2149
+ * Cached tokens present in the prompt.
2150
+ */
2151
+ cached_tokens: S.optionalWith<typeof S.Int, {
2152
+ nullable: true;
2153
+ default: () => 0;
2154
+ }>;
2155
+ }>, never, {
2156
+ readonly audio_tokens?: number | undefined;
2157
+ } & {
2158
+ readonly cached_tokens?: number | undefined;
2159
+ }, {}, {}>;
2160
+ /**
2161
+ * Breakdown of tokens used in the prompt.
2162
+ *
2163
+ * Fields:
2164
+ * - audio_tokens (optional): int
2165
+ * - cached_tokens (optional): int
2166
+ */
2167
+ declare class PromptTokensDetails extends PromptTokensDetails_base {}
2168
+ declare const CompletionUsage_base: S.Class<CompletionUsage2, {
2169
+ /**
2170
+ * Number of tokens in the generated completion.
2171
+ */
2172
+ completion_tokens: typeof S.Int;
2173
+ /**
2174
+ * Number of tokens in the prompt.
2175
+ */
2176
+ prompt_tokens: typeof S.Int;
2177
+ /**
2178
+ * Total number of tokens used in the request (prompt + completion).
2179
+ */
2180
+ total_tokens: typeof S.Int;
2181
+ /**
2182
+ * Breakdown of tokens used in a completion.
2183
+ */
2184
+ completion_tokens_details: S.optionalWith<typeof CompletionTokensDetails, {
2185
+ nullable: true;
2186
+ }>;
2187
+ /**
2188
+ * Breakdown of tokens used in the prompt.
2189
+ */
2190
+ prompt_tokens_details: S.optionalWith<typeof PromptTokensDetails, {
2191
+ nullable: true;
2192
+ }>;
2193
+ }, S.Struct.Encoded<{
2194
+ /**
2195
+ * Number of tokens in the generated completion.
2196
+ */
2197
+ completion_tokens: typeof S.Int;
2198
+ /**
2199
+ * Number of tokens in the prompt.
2200
+ */
2201
+ prompt_tokens: typeof S.Int;
2202
+ /**
2203
+ * Total number of tokens used in the request (prompt + completion).
2204
+ */
2205
+ total_tokens: typeof S.Int;
2206
+ /**
2207
+ * Breakdown of tokens used in a completion.
2208
+ */
2209
+ completion_tokens_details: S.optionalWith<typeof CompletionTokensDetails, {
2210
+ nullable: true;
2211
+ }>;
2212
+ /**
2213
+ * Breakdown of tokens used in the prompt.
2214
+ */
2215
+ prompt_tokens_details: S.optionalWith<typeof PromptTokensDetails, {
2216
+ nullable: true;
2217
+ }>;
2218
+ }>, never, {
2219
+ readonly completion_tokens: number;
2220
+ } & {
2221
+ readonly prompt_tokens: number;
2222
+ } & {
2223
+ readonly total_tokens: number;
2224
+ } & {
2225
+ readonly completion_tokens_details?: CompletionTokensDetails | undefined;
2226
+ } & {
2227
+ readonly prompt_tokens_details?: PromptTokensDetails | undefined;
2228
+ }, {}, {}>;
2229
+ /**
2230
+ * Usage statistics for the completion request.
2231
+ *
2232
+ * Fields:
2233
+ * - completion_tokens (required): int
2234
+ * - prompt_tokens (required): int
2235
+ * - total_tokens (required): int
2236
+ * - completion_tokens_details (optional): CompletionTokensDetails
2237
+ * - prompt_tokens_details (optional): PromptTokensDetails
2238
+ */
2239
+ declare class CompletionUsage2 extends CompletionUsage_base {}
2240
+ declare const ChatCompletion_base: S.Class<ChatCompletion2, {
2241
+ /**
2242
+ * A unique identifier for the chat completion.
2243
+ */
2244
+ id: typeof S.String;
2245
+ /**
2246
+ * A list of chat completion choices. Can be more than one if `n` is greater than 1.
2247
+ */
2248
+ choices: S.Array$<typeof Choice>;
2249
+ /**
2250
+ * The Unix timestamp (in seconds) of when the chat completion was created.
2251
+ */
2252
+ created: typeof S.Int;
2253
+ /**
2254
+ * The model used for the chat completion.
2255
+ */
2256
+ model: typeof S.String;
2257
+ /**
2258
+ * Specifies the processing type used for serving the request.
2259
+ * - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
2260
+ * - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
2261
+ * - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
2262
+ * - When not set, the default behavior is 'auto'.
2263
+ *
2264
+ * When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
2265
+ */
2266
+ service_tier: S.optionalWith<typeof ChatCompletionServiceTierEnum2, {
2267
+ nullable: true;
2268
+ }>;
2269
+ /**
2270
+ * This fingerprint represents the backend configuration that the model runs with.
2271
+ *
2272
+ * Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
2273
+ */
2274
+ system_fingerprint: S.optionalWith<typeof S.String, {
2275
+ nullable: true;
2276
+ }>;
2277
+ /**
2278
+ * The object type, which is always `chat.completion`.
2279
+ */
2280
+ object: S.Literal<["chat.completion"]>;
2281
+ /**
2282
+ * Usage statistics for the completion request.
2283
+ */
2284
+ usage: S.optionalWith<typeof CompletionUsage2, {
2285
+ nullable: true;
2286
+ }>;
2287
+ /**
2288
+ * List of tool names that were executed server-side (e.g., MCP tools). Only present when tools were executed on the server rather than returned for client-side execution.
2289
+ */
2290
+ tools_executed: S.optionalWith<S.Array$<typeof S.String>, {
2291
+ nullable: true;
2292
+ }>;
2293
+ /**
2294
+ * Information about MCP server failures, if any occurred during the request. Contains details about which servers failed and why, along with recommendations for the user. Only present when MCP server failures occurred.
2295
+ */
2296
+ mcp_server_errors: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
2297
+ nullable: true;
2298
+ }>;
2299
+ }, S.Struct.Encoded<{
2300
+ /**
2301
+ * A unique identifier for the chat completion.
2302
+ */
2303
+ id: typeof S.String;
2304
+ /**
2305
+ * A list of chat completion choices. Can be more than one if `n` is greater than 1.
2306
+ */
2307
+ choices: S.Array$<typeof Choice>;
2308
+ /**
2309
+ * The Unix timestamp (in seconds) of when the chat completion was created.
2310
+ */
2311
+ created: typeof S.Int;
2312
+ /**
2313
+ * The model used for the chat completion.
2314
+ */
2315
+ model: typeof S.String;
2316
+ /**
2317
+ * Specifies the processing type used for serving the request.
2318
+ * - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'.
2319
+ * - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model.
2320
+ * - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier.
2321
+ * - When not set, the default behavior is 'auto'.
2322
+ *
2323
+ * When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter.
2324
+ */
2325
+ service_tier: S.optionalWith<typeof ChatCompletionServiceTierEnum2, {
2326
+ nullable: true;
2327
+ }>;
2328
+ /**
2329
+ * This fingerprint represents the backend configuration that the model runs with.
2330
+ *
2331
+ * Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
2332
+ */
2333
+ system_fingerprint: S.optionalWith<typeof S.String, {
2334
+ nullable: true;
2335
+ }>;
2336
+ /**
2337
+ * The object type, which is always `chat.completion`.
2338
+ */
2339
+ object: S.Literal<["chat.completion"]>;
2340
+ /**
2341
+ * Usage statistics for the completion request.
2342
+ */
2343
+ usage: S.optionalWith<typeof CompletionUsage2, {
2344
+ nullable: true;
2345
+ }>;
2346
+ /**
2347
+ * List of tool names that were executed server-side (e.g., MCP tools). Only present when tools were executed on the server rather than returned for client-side execution.
2348
+ */
2349
+ tools_executed: S.optionalWith<S.Array$<typeof S.String>, {
2350
+ nullable: true;
2351
+ }>;
2352
+ /**
2353
+ * Information about MCP server failures, if any occurred during the request. Contains details about which servers failed and why, along with recommendations for the user. Only present when MCP server failures occurred.
2354
+ */
2355
+ mcp_server_errors: S.optionalWith<S.Record$<typeof S.String, typeof S.Unknown>, {
2356
+ nullable: true;
2357
+ }>;
2358
+ }>, never, {
2359
+ readonly object: "chat.completion";
2360
+ } & {
2361
+ readonly service_tier?: "default" | "auto" | "flex" | "scale" | "priority" | undefined;
2362
+ } & {
2363
+ readonly model: string;
2364
+ } & {
2365
+ readonly id: string;
2366
+ } & {
2367
+ readonly created: number;
2368
+ } & {
2369
+ readonly choices: readonly Choice[];
2370
+ } & {
2371
+ readonly system_fingerprint?: string | undefined;
2372
+ } & {
2373
+ readonly usage?: CompletionUsage2 | undefined;
2374
+ } & {
2375
+ readonly tools_executed?: readonly string[] | undefined;
2376
+ } & {
2377
+ readonly mcp_server_errors?: {
2378
+ readonly [x: string]: unknown;
2379
+ } | undefined;
2380
+ }, {}, {}>;
2381
/**
 * Chat completion response for Dedalus API.
 *
 * OpenAI-compatible chat completion response with Dedalus extensions.
 * Maintains full compatibility with OpenAI API while providing additional
 * features like server-side tool execution tracking and MCP error reporting.
 *
 * Re-exported from this namespace under the public name `ChatCompletion`.
 */
declare class ChatCompletion2 extends ChatCompletion_base {}
2389
/** Schema base for a single field-level validation error. */
declare const ValidationError_base: S.Class<ValidationError, {
  loc: S.Array$<S.Union<[typeof S.String, typeof S.Int]>>;
  msg: typeof S.String;
  type: typeof S.String;
}, S.Struct.Encoded<{
  loc: S.Array$<S.Union<[typeof S.String, typeof S.Int]>>;
  msg: typeof S.String;
  type: typeof S.String;
}>, never, {
  readonly type: string;
} & {
  readonly msg: string;
} & {
  readonly loc: readonly (string | number)[];
}, {}, {}>;
/**
 * A single validation error: `loc` is the path (field names and/or indices)
 * to the offending value, `msg` a human-readable message, and `type` an
 * error code string.
 */
declare class ValidationError extends ValidationError_base {}
/** Schema base for an error payload wrapping a list of `ValidationError`s. */
declare const HTTPValidationError_base: S.Class<HTTPValidationError, {
  detail: S.optionalWith<S.Array$<typeof ValidationError>, {
    nullable: true;
  }>;
}, S.Struct.Encoded<{
  detail: S.optionalWith<S.Array$<typeof ValidationError>, {
    nullable: true;
  }>;
}>, never, {
  readonly detail?: readonly ValidationError[] | undefined;
}, {}, {}>;
/** Validation error response body: an optional list of `ValidationError`s. */
declare class HTTPValidationError extends HTTPValidationError_base {}
2417
/**
 * Builds the generated low-level client from an `HttpClient`.
 * `transformClient` may wrap/augment the underlying client before use.
 */
declare const make: (httpClient: HttpClient.HttpClient, options?: {
  readonly transformClient?: ((client: HttpClient.HttpClient) => Effect.Effect<HttpClient.HttpClient>) | undefined;
}) => Client2;
/** Generated low-level client; re-exported from this namespace as `Client`. */
interface Client2 {
  readonly httpClient: HttpClient.HttpClient;
  /**
   * Generate a model response. Supports streaming, tools, and MCP servers.
   */
  readonly createChatCompletionV1ChatCompletionsPost: (options: typeof ChatCompletionRequest2.Encoded) => Effect.Effect<typeof ChatCompletion2.Type, HttpClientError.HttpClientError | ParseError | AiError.MalformedInput>;
  /**
   * Create embeddings using the configured provider.
   */
  readonly createEmbeddingsV1EmbeddingsPost: (options: typeof CreateEmbeddingRequest2.Encoded) => Effect.Effect<typeof CreateEmbeddingResponse2.Type, HttpClientError.HttpClientError | ParseError | AiError.MalformedInput>;
}
2431
declare const CreateEmbeddingRequestModelEnum_base: S.Literal<["openai/text-embedding-ada-002", "openai/text-embedding-3-small", "openai/text-embedding-3-large", "google/text-embedding-004"]>;
/**
 * Known embedding model identifiers. The request schema also accepts an
 * arbitrary string (see `CreateEmbeddingRequest.model`).
 */
declare class CreateEmbeddingRequestModelEnum2 extends CreateEmbeddingRequestModelEnum_base {}
declare const CreateEmbeddingRequestEncodingFormat_base: S.Literal<["float", "base64"]>;
/**
 * The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
 */
declare class CreateEmbeddingRequestEncodingFormat extends CreateEmbeddingRequestEncodingFormat_base {}
2438
/** Schema base for the embeddings request body. */
declare const CreateEmbeddingRequest_base: S.Class<CreateEmbeddingRequest2, {
  /**
   * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for all embedding models), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. In addition to the per-input token limit, all embedding models enforce a maximum of 300,000 tokens summed across all inputs in a single request.
   */
  input: S.Union<[typeof S.String, S.filter<S.filter<S.NonEmptyArray<typeof S.String>>>, S.filter<S.filter<S.NonEmptyArray<typeof S.Int>>>, S.filter<S.filter<S.NonEmptyArray<S.filter<S.NonEmptyArray<typeof S.Int>>>>>]>;
  /**
   * ID of the model to use. See our [Model Providers](/sdk/guides/providers) for available embedding models.
   */
  model: S.Union<[typeof S.String, typeof CreateEmbeddingRequestModelEnum2]>;
  /**
   * The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
   */
  encoding_format: S.optionalWith<typeof CreateEmbeddingRequestEncodingFormat, {
    nullable: true;
    default: () => "float";
  }>;
  /**
   * The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.
   */
  dimensions: S.optionalWith<S.filter<typeof S.Int>, {
    nullable: true;
  }>;
  /**
   * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
   */
  user: S.optionalWith<typeof S.String, {
    nullable: true;
  }>;
}, S.Struct.Encoded<{
  /**
   * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for all embedding models), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. In addition to the per-input token limit, all embedding models enforce a maximum of 300,000 tokens summed across all inputs in a single request.
   */
  input: S.Union<[typeof S.String, S.filter<S.filter<S.NonEmptyArray<typeof S.String>>>, S.filter<S.filter<S.NonEmptyArray<typeof S.Int>>>, S.filter<S.filter<S.NonEmptyArray<S.filter<S.NonEmptyArray<typeof S.Int>>>>>]>;
  /**
   * ID of the model to use. See our [Model Providers](/sdk/guides/providers) for available embedding models.
   */
  model: S.Union<[typeof S.String, typeof CreateEmbeddingRequestModelEnum2]>;
  /**
   * The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
   */
  encoding_format: S.optionalWith<typeof CreateEmbeddingRequestEncodingFormat, {
    nullable: true;
    default: () => "float";
  }>;
  /**
   * The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.
   */
  dimensions: S.optionalWith<S.filter<typeof S.Int>, {
    nullable: true;
  }>;
  /**
   * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
   */
  user: S.optionalWith<typeof S.String, {
    nullable: true;
  }>;
}>, never, {
  readonly user?: string | undefined;
} & {
  readonly model: string;
} & {
  readonly input: string | readonly [string, ...string[]] | readonly [number, ...number[]] | readonly [readonly [number, ...number[]], ...(readonly [number, ...number[]])[]];
} & {
  readonly encoding_format?: "float" | "base64" | undefined;
} & {
  readonly dimensions?: number | undefined;
}, {}, {}>;
/**
 * Fields:
 * - input (required): str | Annotated[list[str], MinLen(1), MaxLen(2048)] | Annotated[list[int], MinLen(1), MaxLen(2048)] | Annotated[list[Annotated[list[int], MinLen(1)]], MinLen(1), MaxLen(2048)]
 * - model (required): str | Literal['openai/text-embedding-ada-002', 'openai/text-embedding-3-small', 'openai/text-embedding-3-large', 'google/text-embedding-004']
 * - encoding_format (optional): Literal['float', 'base64']
 * - dimensions (optional): int
 * - user (optional): str
 */
declare class CreateEmbeddingRequest2 extends CreateEmbeddingRequest_base {}
2514
/** Schema base for a single embedding result item. */
declare const Embedding_base: S.Class<Embedding, {
  /**
   * Object type, always 'embedding'
   */
  object: S.optionalWith<S.Literal<["embedding"]>, {
    nullable: true;
    default: () => "embedding";
  }>;
  /**
   * The embedding vector (float array or base64 string)
   */
  embedding: S.Union<[S.Array$<typeof S.Number>, typeof S.String]>;
  /**
   * Index of the embedding in the list
   */
  index: typeof S.Int;
}, S.Struct.Encoded<{
  /**
   * Object type, always 'embedding'
   */
  object: S.optionalWith<S.Literal<["embedding"]>, {
    nullable: true;
    default: () => "embedding";
  }>;
  /**
   * The embedding vector (float array or base64 string)
   */
  embedding: S.Union<[S.Array$<typeof S.Number>, typeof S.String]>;
  /**
   * Index of the embedding in the list
   */
  index: typeof S.Int;
}>, never, {
  readonly object?: "embedding" | undefined;
} & {
  readonly index: number;
} & {
  readonly embedding: string | readonly number[];
}, {}, {}>;
/**
 * Single embedding object. The vector is a float array, or a base64 string
 * when `encoding_format: "base64"` was requested.
 */
declare class Embedding extends Embedding_base {}
2557
/** Schema base for the embeddings endpoint response. */
declare const CreateEmbeddingResponse_base: S.Class<CreateEmbeddingResponse2, {
  /**
   * Object type, always 'list'
   */
  object: S.optionalWith<S.Literal<["list"]>, {
    nullable: true;
    default: () => "list";
  }>;
  /**
   * List of embedding objects
   */
  data: S.Array$<typeof Embedding>;
  /**
   * The model used for embeddings
   */
  model: typeof S.String;
  /**
   * Usage statistics (prompt_tokens, total_tokens)
   */
  usage: S.Record$<typeof S.String, typeof S.Unknown>;
}, S.Struct.Encoded<{
  /**
   * Object type, always 'list'
   */
  object: S.optionalWith<S.Literal<["list"]>, {
    nullable: true;
    default: () => "list";
  }>;
  /**
   * List of embedding objects
   */
  data: S.Array$<typeof Embedding>;
  /**
   * The model used for embeddings
   */
  model: typeof S.String;
  /**
   * Usage statistics (prompt_tokens, total_tokens)
   */
  usage: S.Record$<typeof S.String, typeof S.Unknown>;
}>, never, {
  readonly object?: "list" | undefined;
} & {
  readonly model: string;
} & {
  readonly data: readonly Embedding[];
} & {
  readonly usage: {
    readonly [x: string]: unknown;
  };
}, {}, {}>;
/**
 * Response from embeddings endpoint.
 * Re-exported from this namespace as `CreateEmbeddingResponse`.
 */
declare class CreateEmbeddingResponse2 extends CreateEmbeddingResponse_base {}
2612
/** Public surface of the `DedalusClient` module (symbols re-exported under their public names). */
declare namespace exports_DedalusClient {
  export { make2 as make, layerConfig, layer, StreamChatCompletionRequest, Service2 as Service, DedalusClient, ChatCompletionChunkDelta, ChatCompletionChunkChoice, ChatCompletionChunk };
}
2615
+ import * as HttpClient2 from "@effect/platform/HttpClient";
2616
+ import { AiError as AiError2 } from "@luketandjung/ariadne";
2617
+ import * as Config from "effect/Config";
2618
+ import { ConfigError } from "effect/ConfigError";
2619
+ import * as Context from "effect/Context";
2620
+ import * as Effect2 from "effect/Effect";
2621
+ import * as Layer from "effect/Layer";
2622
+ import * as Redacted from "effect/Redacted";
2623
+ import * as Schema from "effect/Schema";
2624
+ import * as Scope from "effect/Scope";
2625
+ import * as Stream from "effect/Stream";
2626
/** Context tag identifying the Dedalus client service (`Service2`). */
declare const DedalusClient_base: Context.TagClass<DedalusClient, "@dedalus-labs/DedalusClient", Service2>;
/**
 * @since 1.0.0
 * @category Context
 */
declare class DedalusClient extends DedalusClient_base {}
2632
+ /**
2633
+ * @since 1.0.0
2634
+ * @category Models
2635
+ */
2636
+ interface Service2 {
2637
+ readonly client: exports_Generated.Client;
2638
+ readonly createChatCompletion: (options: typeof exports_Generated.ChatCompletionRequest.Encoded) => Effect2.Effect2<exports_Generated.ChatCompletion, AiError2.AiError2>;
2639
+ readonly createChatCompletionStream: (options: Omit<typeof exports_Generated.ChatCompletionRequest.Encoded, "stream">) => Stream.Stream<ChatCompletionChunk, AiError2.AiError2>;
2640
+ readonly createEmbedding: (options: typeof exports_Generated.CreateEmbeddingRequest.Encoded) => Effect2.Effect2<exports_Generated.CreateEmbeddingResponse, AiError2.AiError2>;
2641
+ }
2642
/**
 * Request options for `createChatCompletionStream`; `stream` is omitted
 * because the streaming method controls it.
 *
 * @since 1.0.0
 * @category Models
 */
type StreamChatCompletionRequest = Omit<typeof exports_Generated.ChatCompletionRequest.Encoded, "stream">;
2647
+ /**
2648
+ * @since 1.0.0
2649
+ * @category Constructors
2650
+ */
2651
+ declare const make2: (options: {
2652
+ /**
2653
+ * Standard OAuth-style API key to use to communicate with the Dedalus API.
2654
+ */
2655
+ readonly apiKey?: Redacted.Redacted | undefined;
2656
+ /**
2657
+ * An alternative API gateway/proxy style API key to use to communicate with the Dedalus API.
2658
+ */
2659
+ readonly xApiKey?: Redacted.Redacted | undefined;
2660
+ /**
2661
+ * The model provider. Only for users with access to and using BYOK API key.
2662
+ */
2663
+ readonly provider?: string | undefined;
2664
+ /**
2665
+ * The model provider key. Only for users with access to and using BYOK API key.
2666
+ */
2667
+ readonly providerKey?: Redacted.Redacted | undefined;
2668
+ /**
2669
+ * The environment to use. Determines the base URL if `apiUrl` is not provided.
2670
+ * - `"production"` uses https://api.dedaluslabs.ai/v1
2671
+ * - `"development"` uses http://localhost:8080/v1
2672
+ * Defaults to `"production"`.
2673
+ */
2674
+ readonly environment?: "production" | "development" | undefined;
2675
+ /**
2676
+ * The URL to use to communicate with the Dedalus API.
2677
+ * Overrides the `environment` setting if provided.
2678
+ */
2679
+ readonly apiUrl?: string | undefined;
2680
+ /**
2681
+ * A method which can be used to transform the underlying `HttpClient` which
2682
+ * will be used to communicate with the Dedalus API.
2683
+ */
2684
+ readonly transformClient?: ((client: HttpClient2.HttpClient2) => HttpClient2.HttpClient2) | undefined;
2685
+ }) => Effect2.Effect2<Service2, never, HttpClient2.HttpClient2 | Scope.Scope>;
2686
+ /**
2687
+ * @since 1.0.0
2688
+ * @category Layers
2689
+ */
2690
+ declare const layer: (options: {
2691
+ readonly apiKey?: Redacted.Redacted | undefined;
2692
+ readonly xApiKey?: Redacted.Redacted | undefined;
2693
+ readonly provider?: string | undefined;
2694
+ readonly providerKey?: Redacted.Redacted | undefined;
2695
+ readonly environment?: "production" | "development" | undefined;
2696
+ readonly apiUrl?: string | undefined;
2697
+ readonly transformClient?: (client: HttpClient2.HttpClient2) => HttpClient2.HttpClient2;
2698
+ }) => Layer.Layer<DedalusClient, never, HttpClient2.HttpClient2>;
2699
+ /**
2700
+ * @since 1.0.0
2701
+ * @category Layers
2702
+ */
2703
+ declare const layerConfig: (options: {
2704
+ readonly apiKey?: Config.Config<Redacted.Redacted | undefined> | undefined;
2705
+ readonly xApiKey?: Config.Config<Redacted.Redacted | undefined> | undefined;
2706
+ readonly provider?: Config.Config<string | undefined> | undefined;
2707
+ readonly providerKey?: Config.Config<Redacted.Redacted | undefined> | undefined;
2708
+ readonly environment?: Config.Config<"production" | "development" | undefined> | undefined;
2709
+ readonly apiUrl?: Config.Config<string | undefined> | undefined;
2710
+ readonly transformClient?: (client: HttpClient2.HttpClient2) => HttpClient2.HttpClient2;
2711
+ }) => Layer.Layer<DedalusClient, ConfigError, HttpClient2.HttpClient2>;
2712
/** Schema base for a streaming delta message. */
declare const ChatCompletionChunkDelta_base: Schema.Class<ChatCompletionChunkDelta, {
  /** The role of the author of this message delta. */
  role: Schema.optional<Schema.Literal<["developer", "system", "user", "assistant", "tool"]>>;
  /** The content delta. */
  content: Schema.optional<Schema.NullOr<typeof Schema.String>>;
  /** The refusal message delta. */
  refusal: Schema.optional<Schema.NullOr<typeof Schema.String>>;
  /** Tool calls delta. */
  tool_calls: Schema.optional<Schema.Array$<Schema.Struct<{
    index: typeof Schema.Int;
    id: Schema.optional<typeof Schema.String>;
    type: Schema.optional<Schema.Literal<["function"]>>;
    function: Schema.optional<Schema.Struct<{
      name: Schema.optional<typeof Schema.String>;
      arguments: Schema.optional<typeof Schema.String>;
    }>>;
  }>>>;
}, Schema.Struct.Encoded<{
  /** The role of the author of this message delta. */
  role: Schema.optional<Schema.Literal<["developer", "system", "user", "assistant", "tool"]>>;
  /** The content delta. */
  content: Schema.optional<Schema.NullOr<typeof Schema.String>>;
  /** The refusal message delta. */
  refusal: Schema.optional<Schema.NullOr<typeof Schema.String>>;
  /** Tool calls delta. */
  tool_calls: Schema.optional<Schema.Array$<Schema.Struct<{
    index: typeof Schema.Int;
    id: Schema.optional<typeof Schema.String>;
    type: Schema.optional<Schema.Literal<["function"]>>;
    function: Schema.optional<Schema.Struct<{
      name: Schema.optional<typeof Schema.String>;
      arguments: Schema.optional<typeof Schema.String>;
    }>>;
  }>>>;
}>, never, {
  readonly tool_calls?: readonly {
    readonly function?: {
      readonly name?: string | undefined;
      readonly arguments?: string | undefined;
    } | undefined;
    readonly type?: "function" | undefined;
    readonly id?: string | undefined;
    readonly index: number;
  }[] | undefined;
} & {
  readonly content?: string | null | undefined;
} & {
  readonly refusal?: string | null | undefined;
} & {
  readonly role?: "user" | "system" | "assistant" | "developer" | "tool" | undefined;
}, {}, {}>;
/**
 * A delta message in a streaming chat completion chunk.
 *
 * @since 1.0.0
 * @category Schemas
 */
declare class ChatCompletionChunkDelta extends ChatCompletionChunkDelta_base {}
2786
/** Schema base for a streaming choice. */
declare const ChatCompletionChunkChoice_base: Schema.Class<ChatCompletionChunkChoice, {
  /** The index of this choice. */
  index: typeof Schema.Int;
  /** The delta content. */
  delta: typeof ChatCompletionChunkDelta;
  /** Log probability information. */
  logprobs: Schema.optional<Schema.NullOr<typeof exports_Generated.ChoiceLogprobs>>;
  /** The reason the model stopped generating tokens. */
  finish_reason: Schema.NullOr<typeof exports_Generated.ChoiceFinishReasonEnum>;
}, Schema.Struct.Encoded<{
  /** The index of this choice. */
  index: typeof Schema.Int;
  /** The delta content. */
  delta: typeof ChatCompletionChunkDelta;
  /** Log probability information. */
  logprobs: Schema.optional<Schema.NullOr<typeof exports_Generated.ChoiceLogprobs>>;
  /** The reason the model stopped generating tokens. */
  finish_reason: Schema.NullOr<typeof exports_Generated.ChoiceFinishReasonEnum>;
}>, never, {
  readonly logprobs?: exports_Generated.ChoiceLogprobs | null | undefined;
} & {
  readonly index: number;
} & {
  readonly finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null;
} & {
  readonly delta: ChatCompletionChunkDelta;
}, {}, {}>;
/**
 * A choice in a streaming chat completion chunk.
 *
 * @since 1.0.0
 * @category Schemas
 */
declare class ChatCompletionChunkChoice extends ChatCompletionChunkChoice_base {}
2836
/** Schema base for one SSE chunk of a streaming chat completion. */
declare const ChatCompletionChunk_base: Schema.Class<ChatCompletionChunk, {
  /** A unique identifier for the chat completion chunk. */
  id: typeof Schema.String;
  /** The object type, which is always `chat.completion.chunk`. */
  object: Schema.Literal<["chat.completion.chunk"]>;
  /** The Unix timestamp (in seconds) of when the chunk was created. */
  created: typeof Schema.Int;
  /** The model used for the chat completion. */
  model: typeof Schema.String;
  /** This fingerprint represents the backend configuration that the model runs with. */
  system_fingerprint: Schema.optional<Schema.NullOr<typeof Schema.String>>;
  /** A list of chat completion choices. */
  choices: Schema.Array$<typeof ChatCompletionChunkChoice>;
  /** Usage statistics (only present in final chunk if requested). */
  usage: Schema.optional<Schema.NullOr<typeof exports_Generated.CompletionUsage>>;
  /** Service tier used for processing the request. */
  service_tier: Schema.optional<Schema.NullOr<typeof exports_Generated.ChatCompletionServiceTierEnum>>;
}, Schema.Struct.Encoded<{
  /** A unique identifier for the chat completion chunk. */
  id: typeof Schema.String;
  /** The object type, which is always `chat.completion.chunk`. */
  object: Schema.Literal<["chat.completion.chunk"]>;
  /** The Unix timestamp (in seconds) of when the chunk was created. */
  created: typeof Schema.Int;
  /** The model used for the chat completion. */
  model: typeof Schema.String;
  /** This fingerprint represents the backend configuration that the model runs with. */
  system_fingerprint: Schema.optional<Schema.NullOr<typeof Schema.String>>;
  /** A list of chat completion choices. */
  choices: Schema.Array$<typeof ChatCompletionChunkChoice>;
  /** Usage statistics (only present in final chunk if requested). */
  usage: Schema.optional<Schema.NullOr<typeof exports_Generated.CompletionUsage>>;
  /** Service tier used for processing the request. */
  service_tier: Schema.optional<Schema.NullOr<typeof exports_Generated.ChatCompletionServiceTierEnum>>;
}>, never, {
  readonly object: "chat.completion.chunk";
} & {
  readonly service_tier?: "default" | "auto" | "flex" | "scale" | "priority" | null | undefined;
} & {
  readonly model: string;
} & {
  readonly id: string;
} & {
  readonly created: number;
} & {
  readonly choices: readonly ChatCompletionChunkChoice[];
} & {
  readonly system_fingerprint?: string | null | undefined;
} & {
  readonly usage?: exports_Generated.CompletionUsage | null | undefined;
}, {}, {}>;
/**
 * A streaming chat completion chunk (OpenAI-compatible).
 *
 * @since 1.0.0
 * @category Schemas
 */
declare class ChatCompletionChunk extends ChatCompletionChunk_base {}
2926
/** Public surface of the `DedalusConfig` module. */
declare namespace exports_DedalusConfig {
  export { withClientTransform, DedalusConfig };
}
2929
+ import { HttpClient as HttpClient3 } from "@effect/platform/HttpClient";
2930
+ import * as Context2 from "effect/Context";
2931
+ import * as Effect3 from "effect/Effect";
2932
+ declare const DedalusConfig_base: Context2.TagClass<DedalusConfig, "@dedalus-labs/DedalusConfig", DedalusConfig.Service>;
2933
+ /**
2934
+ * @since 1.0.0
2935
+ * @category Context
2936
+ */
2937
+ declare class DedalusConfig extends DedalusConfig_base {
2938
+ /**
2939
+ * @since 1.0.0
2940
+ */
2941
+ static readonly getOrUndefined: Effect3.Effect3<typeof DedalusConfig.Service | undefined>;
2942
+ }
2943
/**
 * @since 1.0.0
 */
declare namespace DedalusConfig {
  /**
   * Optional configuration carried by the `DedalusConfig` context tag.
   *
   * @since 1.0.0
   * @category Models
   */
  interface Service {
    /** Wraps/augments the `HttpClient` used to talk to the Dedalus API. */
    readonly transformClient?: (client: HttpClient3) => HttpClient3;
  }
}
2955
+ /**
2956
+ * @since 1.0.0
2957
+ * @category Configuration
2958
+ */
2959
+ declare const withClientTransform: {
2960
+ (transform: (client: HttpClient3) => HttpClient3): <
2961
+ A,
2962
+ E,
2963
+ R
2964
+ >(self: Effect3.Effect3<A, E, R>) => Effect3.Effect3<A, E, R>;
2965
+ <
2966
+ A,
2967
+ E,
2968
+ R
2969
+ >(self: Effect3.Effect3<A, E, R>, transform: (client: HttpClient3) => HttpClient3): Effect3.Effect3<A, E, R>;
2970
+ };
2971
/** Public surface of the `DedalusEmbeddingModel` module. */
declare namespace exports_DedalusEmbeddingModel {
  export { withConfigOverride, model, makeDataLoader, layerDataLoader, layerBatched, Model, Config2 as Config };
}
2974
+ import { Model as AiModel, EmbeddingModel } from "@luketandjung/ariadne";
2975
+ import * as Context3 from "effect/Context";
2976
+ import * as Effect4 from "effect/Effect";
2977
+ import * as Layer2 from "effect/Layer";
2978
+ import { Simplify } from "effect/Types";
2979
/**
 * Union of the known embedding model identifier strings.
 *
 * @since 1.0.0
 * @category Models
 */
type Model = typeof exports_Generated.CreateEmbeddingRequestModelEnum.Encoded;
2984
+ declare const Config_base: Context3.TagClass<Config2, "@dedalus-labs/DedalusEmbeddingModel/Config", Config2.Service>;
2985
+ /**
2986
+ * @since 1.0.0
2987
+ * @category Context
2988
+ */
2989
+ declare class Config2 extends Config_base {
2990
+ /**
2991
+ * @since 1.0.0
2992
+ */
2993
+ static readonly getOrUndefined: Effect4.Effect4<Config2.Service | undefined>;
2994
+ }
2995
/**
 * Builds an embedding `Model` for the given model id, in either `"batched"`
 * or `"data-loader"` mode (remaining options are the mode's config).
 *
 * @since 1.0.0
 * @category Models
 */
declare const model: (model: (string & {}) | Model, { mode,...config }: Simplify<({
  readonly mode: "batched";
} & Config2.Batched) | ({
  readonly mode: "data-loader";
} & Config2.DataLoader)>) => AiModel.Model<"dedalus-labs", EmbeddingModel.EmbeddingModel, DedalusClient>;
3004
+ /**
3005
+ * @since 1.0.0
3006
+ * @category Constructors
3007
+ */
3008
+ declare const makeDataLoader: (options: {
3009
+ readonly model: (string & {}) | Model;
3010
+ readonly config: Config2.DataLoader;
3011
+ }) => Effect4.Effect4<any, unknown, unknown>;
3012
+ /**
3013
+ * @since 1.0.0
3014
+ * @category Layers
3015
+ */
3016
+ declare const layerBatched: (options: {
3017
+ readonly model: (string & {}) | Model;
3018
+ readonly config?: Config2.Batched;
3019
+ }) => Layer2.Layer2<EmbeddingModel.EmbeddingModel, never, DedalusClient>;
3020
+ /**
3021
+ * @since 1.0.0
3022
+ * @category Layers
3023
+ */
3024
+ declare const layerDataLoader: (options: {
3025
+ readonly model: (string & {}) | Model;
3026
+ readonly config: Config2.DataLoader;
3027
+ }) => Layer2.Layer2<EmbeddingModel.EmbeddingModel, never, DedalusClient>;
3028
+ /**
3029
+ * @since 1.0.0
3030
+ * @category Configuration
3031
+ */
3032
+ declare const withConfigOverride: {
3033
+ (config: Config2.Service): <
3034
+ A,
3035
+ E,
3036
+ R
3037
+ >(self: Effect4.Effect4<A, E, R>) => Effect4.Effect4<A, E, R>;
3038
+ <
3039
+ A,
3040
+ E,
3041
+ R
3042
+ >(self: Effect4.Effect4<A, E, R>, config: Config2.Service): Effect4.Effect4<A, E, R>;
3043
+ };
3044
/** Public surface of the `DedalusLanguageModel` module (aliases restore the public names). */
declare namespace exports_DedalusLanguageModel {
  export { withConfigOverride2 as withConfigOverride, model2 as model, make3 as make, layer2 as layer, Model2 as Model, Config3 as Config };
}
3047
+ import { Model as AiModel2, LanguageModel } from "@luketandjung/ariadne";
3048
+ import * as Context4 from "effect/Context";
3049
+ import * as Effect5 from "effect/Effect";
3050
+ import * as Layer3 from "effect/Layer";
3051
/**
 * Acceptable language-model identifiers: a Dedalus model choice or a model
 * id from the Generated schemas.
 *
 * @since 1.0.0
 * @category Models
 */
type Model2 = typeof exports_Generated.DedalusModelChoice.Encoded | typeof exports_Generated.Models.Encoded;
3056
+ declare const Config_base2: Context4.TagClass<Config3, "@dedalus-labs/DedalusLanguageModel/Config", Config3.Service>;
3057
+ /**
3058
+ * @since 1.0.0
3059
+ * @category Context
3060
+ */
3061
+ declare class Config3 extends Config_base2 {
3062
+ /**
3063
+ * @since 1.0.0
3064
+ */
3065
+ static readonly getOrUndefined: Effect5.Effect5<Config3.Service | undefined>;
3066
+ }
3067
+ /**
3068
+ * @since 1.0.0
3069
+ * @category Ai Models
3070
+ */
3071
+ declare const model2: (model2: (string & {}) | Model2, config?: Omit<Config3.Service, "model">) => AiModel2.Model2<"dedalus-labs", LanguageModel.LanguageModel, DedalusClient>;
3072
+ /**
3073
+ * @since 1.0.0
3074
+ * @category Constructors
3075
+ */
3076
+ declare const make3: (options: {
3077
+ readonly model2: (string & {}) | Model2;
3078
+ readonly config?: Omit<Config3.Service, "model">;
3079
+ }) => Effect5.Effect5<any, unknown, unknown>;
3080
+ /**
3081
+ * @since 1.0.0
3082
+ * @category Layers
3083
+ */
3084
+ declare const layer2: (options: {
3085
+ readonly model2: (string & {}) | Model2;
3086
+ readonly config?: Omit<Config3.Service, "model">;
3087
+ }) => Layer3.Layer3<LanguageModel.LanguageModel, never, DedalusClient>;
3088
+ /**
3089
+ * @since 1.0.0
3090
+ * @category Configuration
3091
+ */
3092
+ declare const withConfigOverride2: {
3093
+ (overrides: Config3.Service): <
3094
+ A,
3095
+ E,
3096
+ R
3097
+ >(self: Effect5.Effect5<A, E, R>) => Effect5.Effect5<A, E, R>;
3098
+ <
3099
+ A,
3100
+ E,
3101
+ R
3102
+ >(self: Effect5.Effect5<A, E, R>, overrides: Config3.Service): Effect5.Effect5<A, E, R>;
3103
+ };
3104
+ export { exports_Generated as Generated, exports_DedalusLanguageModel as DedalusLanguageModel, exports_DedalusEmbeddingModel as DedalusEmbeddingModel, exports_DedalusConfig as DedalusConfig, exports_DedalusClient as DedalusClient };