openrouter 0.0.21__py3-none-any.whl → 0.0.22__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,6 +1,7 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from ._schema0 import Schema0, Schema0TypedDict
 from .chatstreamoptions import ChatStreamOptions, ChatStreamOptionsTypedDict
 from .message import Message, MessageTypedDict
 from .reasoningsummaryverbosity import ReasoningSummaryVerbosity
@@ -29,6 +30,347 @@ from typing import Any, Dict, List, Literal, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
+ChatGenerationParamsDataCollection = Union[
+    Literal[
+        "deny",
+        "allow",
+    ],
+    UnrecognizedStr,
+]
+
+
+Quantizations = Union[
+    Literal[
+        "int4",
+        "int8",
+        "fp4",
+        "fp6",
+        "fp8",
+        "fp16",
+        "bf16",
+        "fp32",
+        "unknown",
+    ],
+    UnrecognizedStr,
+]
+
+
+Sort = Union[
+    Literal[
+        "price",
+        "throughput",
+        "latency",
+    ],
+    UnrecognizedStr,
+]
+
+
+class ChatGenerationParamsMaxPriceTypedDict(TypedDict):
+    r"""The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion."""
+
+    prompt: NotRequired[Any]
+    completion: NotRequired[Any]
+    image: NotRequired[Any]
+    audio: NotRequired[Any]
+    request: NotRequired[Any]
+
+
+class ChatGenerationParamsMaxPrice(BaseModel):
+    r"""The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion."""
+
+    prompt: Optional[Any] = None
+
+    completion: Optional[Any] = None
+
+    image: Optional[Any] = None
+
+    audio: Optional[Any] = None
+
+    request: Optional[Any] = None
+
+
+class ChatGenerationParamsProviderTypedDict(TypedDict):
+    allow_fallbacks: NotRequired[Nullable[bool]]
+    r"""Whether to allow backup providers to serve requests
+    - true: (default) when the primary provider (or your custom providers in \"order\") is unavailable, use the next best provider.
+    - false: use only the primary/custom provider, and return the upstream error if it's unavailable.
+
+    """
+    require_parameters: NotRequired[Nullable[bool]]
+    r"""Whether to filter providers to only those that support the parameters you've provided. If this setting is omitted or set to false, then providers will receive only the parameters they support, and ignore the rest."""
+    data_collection: NotRequired[Nullable[ChatGenerationParamsDataCollection]]
+    r"""Data collection setting. If no available model provider meets the requirement, your request will return an error.
+    - allow: (default) allow providers which store user data non-transiently and may train on it
+
+    - deny: use only providers which do not collect user data.
+    """
+    zdr: NotRequired[Nullable[bool]]
+    enforce_distillable_text: NotRequired[Nullable[bool]]
+    order: NotRequired[Nullable[List[Schema0TypedDict]]]
+    r"""An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message."""
+    only: NotRequired[Nullable[List[Schema0TypedDict]]]
+    r"""List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request."""
+    ignore: NotRequired[Nullable[List[Schema0TypedDict]]]
+    r"""List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request."""
+    quantizations: NotRequired[Nullable[List[Quantizations]]]
+    r"""A list of quantization levels to filter the provider by."""
+    sort: NotRequired[Nullable[Sort]]
+    r"""The sorting strategy to use for this request, if \"order\" is not specified. When set, no load balancing is performed."""
+    max_price: NotRequired[ChatGenerationParamsMaxPriceTypedDict]
+    r"""The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion."""
+    min_throughput: NotRequired[Nullable[float]]
+    r"""The minimum throughput (in tokens per second) required for this request. Only providers serving the model with at least this throughput will be used."""
+    max_latency: NotRequired[Nullable[float]]
+    r"""The maximum latency (in seconds) allowed for this request. Only providers serving the model with better than this latency will be used."""
+
+
+class ChatGenerationParamsProvider(BaseModel):
+    allow_fallbacks: OptionalNullable[bool] = UNSET
+    r"""Whether to allow backup providers to serve requests
+    - true: (default) when the primary provider (or your custom providers in \"order\") is unavailable, use the next best provider.
+    - false: use only the primary/custom provider, and return the upstream error if it's unavailable.
+
+    """
+
+    require_parameters: OptionalNullable[bool] = UNSET
+    r"""Whether to filter providers to only those that support the parameters you've provided. If this setting is omitted or set to false, then providers will receive only the parameters they support, and ignore the rest."""
+
+    data_collection: Annotated[
+        OptionalNullable[ChatGenerationParamsDataCollection],
+        PlainValidator(validate_open_enum(False)),
+    ] = UNSET
+    r"""Data collection setting. If no available model provider meets the requirement, your request will return an error.
+    - allow: (default) allow providers which store user data non-transiently and may train on it
+
+    - deny: use only providers which do not collect user data.
+    """
+
+    zdr: OptionalNullable[bool] = UNSET
+
+    enforce_distillable_text: OptionalNullable[bool] = UNSET
+
+    order: OptionalNullable[List[Schema0]] = UNSET
+    r"""An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message."""
+
+    only: OptionalNullable[List[Schema0]] = UNSET
+    r"""List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request."""
+
+    ignore: OptionalNullable[List[Schema0]] = UNSET
+    r"""List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request."""
+
+    quantizations: OptionalNullable[
+        List[Annotated[Quantizations, PlainValidator(validate_open_enum(False))]]
+    ] = UNSET
+    r"""A list of quantization levels to filter the provider by."""
+
+    sort: Annotated[
+        OptionalNullable[Sort], PlainValidator(validate_open_enum(False))
+    ] = UNSET
+    r"""The sorting strategy to use for this request, if \"order\" is not specified. When set, no load balancing is performed."""
+
+    max_price: Optional[ChatGenerationParamsMaxPrice] = None
+    r"""The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion."""
+
+    min_throughput: OptionalNullable[float] = UNSET
+    r"""The minimum throughput (in tokens per second) required for this request. Only providers serving the model with at least this throughput will be used."""
+
+    max_latency: OptionalNullable[float] = UNSET
+    r"""The maximum latency (in seconds) allowed for this request. Only providers serving the model with better than this latency will be used."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "allow_fallbacks",
+            "require_parameters",
+            "data_collection",
+            "zdr",
+            "enforce_distillable_text",
+            "order",
+            "only",
+            "ignore",
+            "quantizations",
+            "sort",
+            "max_price",
+            "min_throughput",
+            "max_latency",
+        ]
+        nullable_fields = [
+            "allow_fallbacks",
+            "require_parameters",
+            "data_collection",
+            "zdr",
+            "enforce_distillable_text",
+            "order",
+            "only",
+            "ignore",
+            "quantizations",
+            "sort",
+            "min_throughput",
+            "max_latency",
+        ]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
+
+
+class ChatGenerationParamsPluginResponseHealingTypedDict(TypedDict):
+    id: Literal["response-healing"]
+    enabled: NotRequired[bool]
+
+
+class ChatGenerationParamsPluginResponseHealing(BaseModel):
+    ID: Annotated[
+        Annotated[
+            Literal["response-healing"],
+            AfterValidator(validate_const("response-healing")),
+        ],
+        pydantic.Field(alias="id"),
+    ] = "response-healing"
+
+    enabled: Optional[bool] = None
+
+
+ChatGenerationParamsPdfEngine = Union[
+    Literal[
+        "mistral-ocr",
+        "pdf-text",
+        "native",
+    ],
+    UnrecognizedStr,
+]
+
+
+class ChatGenerationParamsPdfTypedDict(TypedDict):
+    engine: NotRequired[ChatGenerationParamsPdfEngine]
+
+
+class ChatGenerationParamsPdf(BaseModel):
+    engine: Annotated[
+        Optional[ChatGenerationParamsPdfEngine],
+        PlainValidator(validate_open_enum(False)),
+    ] = None
+
+
+class ChatGenerationParamsPluginFileParserTypedDict(TypedDict):
+    id: Literal["file-parser"]
+    enabled: NotRequired[bool]
+    max_files: NotRequired[float]
+    pdf: NotRequired[ChatGenerationParamsPdfTypedDict]
+
+
+class ChatGenerationParamsPluginFileParser(BaseModel):
+    ID: Annotated[
+        Annotated[
+            Literal["file-parser"], AfterValidator(validate_const("file-parser"))
+        ],
+        pydantic.Field(alias="id"),
+    ] = "file-parser"
+
+    enabled: Optional[bool] = None
+
+    max_files: Optional[float] = None
+
+    pdf: Optional[ChatGenerationParamsPdf] = None
+
+
+ChatGenerationParamsEngine = Union[
+    Literal[
+        "native",
+        "exa",
+    ],
+    UnrecognizedStr,
+]
+
+
+class ChatGenerationParamsPluginWebTypedDict(TypedDict):
+    id: Literal["web"]
+    enabled: NotRequired[bool]
+    max_results: NotRequired[float]
+    search_prompt: NotRequired[str]
+    engine: NotRequired[ChatGenerationParamsEngine]
+
+
+class ChatGenerationParamsPluginWeb(BaseModel):
+    ID: Annotated[
+        Annotated[Literal["web"], AfterValidator(validate_const("web"))],
+        pydantic.Field(alias="id"),
+    ] = "web"
+
+    enabled: Optional[bool] = None
+
+    max_results: Optional[float] = None
+
+    search_prompt: Optional[str] = None
+
+    engine: Annotated[
+        Optional[ChatGenerationParamsEngine], PlainValidator(validate_open_enum(False))
+    ] = None
+
+
+class ChatGenerationParamsPluginModerationTypedDict(TypedDict):
+    id: Literal["moderation"]
+
+
+class ChatGenerationParamsPluginModeration(BaseModel):
+    ID: Annotated[
+        Annotated[Literal["moderation"], AfterValidator(validate_const("moderation"))],
+        pydantic.Field(alias="id"),
+    ] = "moderation"
+
+
+ChatGenerationParamsPluginUnionTypedDict = TypeAliasType(
+    "ChatGenerationParamsPluginUnionTypedDict",
+    Union[
+        ChatGenerationParamsPluginModerationTypedDict,
+        ChatGenerationParamsPluginResponseHealingTypedDict,
+        ChatGenerationParamsPluginFileParserTypedDict,
+        ChatGenerationParamsPluginWebTypedDict,
+    ],
+)
+
+
+ChatGenerationParamsPluginUnion = Annotated[
+    Union[
+        Annotated[ChatGenerationParamsPluginModeration, Tag("moderation")],
+        Annotated[ChatGenerationParamsPluginWeb, Tag("web")],
+        Annotated[ChatGenerationParamsPluginFileParser, Tag("file-parser")],
+        Annotated[ChatGenerationParamsPluginResponseHealing, Tag("response-healing")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "id", "id")),
+]
+
+
+ChatGenerationParamsRoute = Union[
+    Literal[
+        "fallback",
+        "sort",
+    ],
+    UnrecognizedStr,
+]
+
+
 Effort = Union[
     Literal[
         "none",
@@ -36,6 +378,7 @@ Effort = Union[
         "low",
         "medium",
         "high",
+        "xhigh",
     ],
     UnrecognizedStr,
 ]
@@ -166,6 +509,15 @@ class Debug(BaseModel):
 
 class ChatGenerationParamsTypedDict(TypedDict):
     messages: List[MessageTypedDict]
+    provider: NotRequired[Nullable[ChatGenerationParamsProviderTypedDict]]
+    r"""When multiple model providers are available, optionally indicate your routing preference."""
+    plugins: NotRequired[List[ChatGenerationParamsPluginUnionTypedDict]]
+    r"""Plugins you want to enable for this request, including their settings."""
+    route: NotRequired[Nullable[ChatGenerationParamsRoute]]
+    r"""Routing strategy for multiple models: \"fallback\" (default) uses secondary models as backups, \"sort\" sorts all endpoints together by routing criteria."""
+    user: NotRequired[str]
+    session_id: NotRequired[str]
+    r"""A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters."""
     model: NotRequired[str]
     models: NotRequired[List[str]]
     frequency_penalty: NotRequired[Nullable[float]]
@@ -186,13 +538,29 @@ class ChatGenerationParamsTypedDict(TypedDict):
     tool_choice: NotRequired[Any]
     tools: NotRequired[List[ToolDefinitionJSONTypedDict]]
     top_p: NotRequired[Nullable[float]]
-    user: NotRequired[str]
     debug: NotRequired[DebugTypedDict]
 
 
 class ChatGenerationParams(BaseModel):
     messages: List[Message]
 
+    provider: OptionalNullable[ChatGenerationParamsProvider] = UNSET
+    r"""When multiple model providers are available, optionally indicate your routing preference."""
+
+    plugins: Optional[List[ChatGenerationParamsPluginUnion]] = None
+    r"""Plugins you want to enable for this request, including their settings."""
+
+    route: Annotated[
+        OptionalNullable[ChatGenerationParamsRoute],
+        PlainValidator(validate_open_enum(False)),
+    ] = UNSET
+    r"""Routing strategy for multiple models: \"fallback\" (default) uses secondary models as backups, \"sort\" sorts all endpoints together by routing criteria."""
+
+    user: Optional[str] = None
+
+    session_id: Optional[str] = None
+    r"""A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters."""
+
     model: Optional[str] = None
 
     models: Optional[List[str]] = None
@@ -233,13 +601,16 @@ class ChatGenerationParams(BaseModel):
 
     top_p: OptionalNullable[float] = UNSET
 
-    user: Optional[str] = None
-
     debug: Optional[Debug] = None
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
+            "provider",
+            "plugins",
+            "route",
+            "user",
+            "session_id",
             "model",
             "models",
             "frequency_penalty",
@@ -260,10 +631,11 @@ class ChatGenerationParams(BaseModel):
             "tool_choice",
             "tools",
             "top_p",
-            "user",
             "debug",
         ]
         nullable_fields = [
+            "provider",
+            "route",
             "frequency_penalty",
             "logit_bias",
             "logprobs",
@@ -7,6 +7,7 @@ from typing import Literal, Union
 
 OpenAIResponsesReasoningEffort = Union[
     Literal[
+        "xhigh",
         "high",
        "medium",
        "low",