openrouter 0.0.22__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (48)
  1. openrouter/_version.py +2 -2
  2. openrouter/chat.py +70 -12
  3. openrouter/components/__init__.py +328 -81
  4. openrouter/components/_schema0.py +3 -2
  5. openrouter/components/_schema3.py +229 -0
  6. openrouter/components/chatgenerationparams.py +211 -53
  7. openrouter/components/chatgenerationtokenusage.py +3 -0
  8. openrouter/components/chatmessagetokenlogprob.py +4 -4
  9. openrouter/components/chatresponsechoice.py +6 -1
  10. openrouter/components/chatstreamingmessagechunk.py +12 -1
  11. openrouter/components/openresponseseasyinputmessage.py +93 -20
  12. openrouter/components/openresponsesinput.py +2 -2
  13. openrouter/components/openresponsesinputmessageitem.py +87 -14
  14. openrouter/components/openresponsesnonstreamingresponse.py +20 -9
  15. openrouter/components/openresponsesreasoning.py +1 -0
  16. openrouter/components/openresponsesrequest.py +141 -88
  17. openrouter/components/parameter.py +1 -0
  18. openrouter/components/pdfparserengine.py +16 -0
  19. openrouter/components/pdfparseroptions.py +25 -0
  20. openrouter/components/percentilelatencycutoffs.py +71 -0
  21. openrouter/components/percentilestats.py +34 -0
  22. openrouter/components/percentilethroughputcutoffs.py +71 -0
  23. openrouter/components/preferredmaxlatency.py +21 -0
  24. openrouter/components/preferredminthroughput.py +22 -0
  25. openrouter/components/providername.py +3 -2
  26. openrouter/components/providerpreferences.py +355 -0
  27. openrouter/components/providersort.py +0 -1
  28. openrouter/components/providersortconfig.py +71 -0
  29. openrouter/components/providersortunion.py +23 -0
  30. openrouter/components/publicendpoint.py +11 -0
  31. openrouter/components/responseinputvideo.py +26 -0
  32. openrouter/components/responseoutputtext.py +36 -1
  33. openrouter/components/responsesoutputitem.py +1 -1
  34. openrouter/components/responsesoutputitemreasoning.py +43 -3
  35. openrouter/components/responsesoutputmodality.py +14 -0
  36. openrouter/components/websearchengine.py +15 -0
  37. openrouter/embeddings.py +6 -8
  38. openrouter/operations/__init__.py +0 -33
  39. openrouter/operations/createembeddings.py +7 -258
  40. openrouter/operations/getgeneration.py +6 -0
  41. openrouter/operations/getparameters.py +5 -78
  42. openrouter/parameters.py +2 -2
  43. openrouter/responses.py +114 -14
  44. {openrouter-0.0.22.dist-info → openrouter-0.1.2.dist-info}/METADATA +1 -1
  45. {openrouter-0.0.22.dist-info → openrouter-0.1.2.dist-info}/RECORD +48 -34
  46. {openrouter-0.0.22.dist-info → openrouter-0.1.2.dist-info}/WHEEL +1 -1
  47. {openrouter-0.0.22.dist-info → openrouter-0.1.2.dist-info}/licenses/LICENSE +0 -0
  48. {openrouter-0.0.22.dist-info → openrouter-0.1.2.dist-info}/top_level.txt +0 -0
openrouter/components/openresponsesrequest.py

@@ -33,9 +33,18 @@ from .openresponseswebsearchtool import (
      OpenResponsesWebSearchTool,
      OpenResponsesWebSearchToolTypedDict,
  )
+ from .pdfparseroptions import PDFParserOptions, PDFParserOptionsTypedDict
+ from .preferredmaxlatency import PreferredMaxLatency, PreferredMaxLatencyTypedDict
+ from .preferredminthroughput import (
+     PreferredMinThroughput,
+     PreferredMinThroughputTypedDict,
+ )
  from .providername import ProviderName
  from .providersort import ProviderSort
+ from .providersortconfig import ProviderSortConfig, ProviderSortConfigTypedDict
  from .quantization import Quantization
+ from .responsesoutputmodality import ResponsesOutputModality
+ from .websearchengine import WebSearchEngine
  from openrouter.types import (
      BaseModel,
      Nullable,
@@ -136,6 +145,16 @@ OpenResponsesRequestToolUnion = Annotated[
  ]


+ OpenResponsesRequestImageConfigTypedDict = TypeAliasType(
+     "OpenResponsesRequestImageConfigTypedDict", Union[str, float]
+ )
+
+
+ OpenResponsesRequestImageConfig = TypeAliasType(
+     "OpenResponsesRequestImageConfig", Union[str, float]
+ )
+
+
  ServiceTier = Literal["auto",]


@@ -148,33 +167,57 @@ Truncation = Union[
  ]


- OrderTypedDict = TypeAliasType("OrderTypedDict", Union[ProviderName, str])
+ OpenResponsesRequestOrderTypedDict = TypeAliasType(
+     "OpenResponsesRequestOrderTypedDict", Union[ProviderName, str]
+ )


- Order = TypeAliasType(
-     "Order",
+ OpenResponsesRequestOrder = TypeAliasType(
+     "OpenResponsesRequestOrder",
      Union[Annotated[ProviderName, PlainValidator(validate_open_enum(False))], str],
  )


- OnlyTypedDict = TypeAliasType("OnlyTypedDict", Union[ProviderName, str])
+ OpenResponsesRequestOnlyTypedDict = TypeAliasType(
+     "OpenResponsesRequestOnlyTypedDict", Union[ProviderName, str]
+ )


- Only = TypeAliasType(
-     "Only",
+ OpenResponsesRequestOnly = TypeAliasType(
+     "OpenResponsesRequestOnly",
      Union[Annotated[ProviderName, PlainValidator(validate_open_enum(False))], str],
  )


- IgnoreTypedDict = TypeAliasType("IgnoreTypedDict", Union[ProviderName, str])
+ OpenResponsesRequestIgnoreTypedDict = TypeAliasType(
+     "OpenResponsesRequestIgnoreTypedDict", Union[ProviderName, str]
+ )


- Ignore = TypeAliasType(
-     "Ignore",
+ OpenResponsesRequestIgnore = TypeAliasType(
+     "OpenResponsesRequestIgnore",
      Union[Annotated[ProviderName, PlainValidator(validate_open_enum(False))], str],
  )


+ OpenResponsesRequestSortTypedDict = TypeAliasType(
+     "OpenResponsesRequestSortTypedDict",
+     Union[ProviderSortConfigTypedDict, ProviderSort, Any],
+ )
+ r"""The sorting strategy to use for this request, if \"order\" is not specified. When set, no load balancing is performed."""
+
+
+ OpenResponsesRequestSort = TypeAliasType(
+     "OpenResponsesRequestSort",
+     Union[
+         ProviderSortConfig,
+         Annotated[ProviderSort, PlainValidator(validate_open_enum(False))],
+         Any,
+     ],
+ )
+ r"""The sorting strategy to use for this request, if \"order\" is not specified. When set, no load balancing is performed."""
+
+
  class OpenResponsesRequestMaxPriceTypedDict(TypedDict):
      r"""The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion."""

@@ -230,22 +273,22 @@ class OpenResponsesRequestProviderTypedDict(TypedDict):
      r"""Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that do not retain prompts will be used."""
      enforce_distillable_text: NotRequired[Nullable[bool]]
      r"""Whether to restrict routing to only models that allow text distillation. When true, only models where the author has allowed distillation will be used."""
-     order: NotRequired[Nullable[List[OrderTypedDict]]]
+     order: NotRequired[Nullable[List[OpenResponsesRequestOrderTypedDict]]]
      r"""An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message."""
-     only: NotRequired[Nullable[List[OnlyTypedDict]]]
+     only: NotRequired[Nullable[List[OpenResponsesRequestOnlyTypedDict]]]
      r"""List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request."""
-     ignore: NotRequired[Nullable[List[IgnoreTypedDict]]]
+     ignore: NotRequired[Nullable[List[OpenResponsesRequestIgnoreTypedDict]]]
      r"""List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request."""
      quantizations: NotRequired[Nullable[List[Quantization]]]
      r"""A list of quantization levels to filter the provider by."""
-     sort: NotRequired[Nullable[ProviderSort]]
+     sort: NotRequired[Nullable[OpenResponsesRequestSortTypedDict]]
      r"""The sorting strategy to use for this request, if \"order\" is not specified. When set, no load balancing is performed."""
      max_price: NotRequired[OpenResponsesRequestMaxPriceTypedDict]
      r"""The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion."""
-     min_throughput: NotRequired[Nullable[float]]
-     r"""The minimum throughput (in tokens per second) required for this request. Only providers serving the model with at least this throughput will be used."""
-     max_latency: NotRequired[Nullable[float]]
-     r"""The maximum latency (in seconds) allowed for this request. Only providers serving the model with better than this latency will be used."""
+     preferred_min_throughput: NotRequired[Nullable[PreferredMinThroughputTypedDict]]
+     r"""Preferred minimum throughput (in tokens per second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints below the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold."""
+     preferred_max_latency: NotRequired[Nullable[PreferredMaxLatencyTypedDict]]
+     r"""Preferred maximum latency (in seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints above the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold."""


  class OpenResponsesRequestProvider(BaseModel):
@@ -276,13 +319,13 @@ class OpenResponsesRequestProvider(BaseModel):
      enforce_distillable_text: OptionalNullable[bool] = UNSET
      r"""Whether to restrict routing to only models that allow text distillation. When true, only models where the author has allowed distillation will be used."""

-     order: OptionalNullable[List[Order]] = UNSET
+     order: OptionalNullable[List[OpenResponsesRequestOrder]] = UNSET
      r"""An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message."""

-     only: OptionalNullable[List[Only]] = UNSET
+     only: OptionalNullable[List[OpenResponsesRequestOnly]] = UNSET
      r"""List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request."""

-     ignore: OptionalNullable[List[Ignore]] = UNSET
+     ignore: OptionalNullable[List[OpenResponsesRequestIgnore]] = UNSET
      r"""List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request."""

      quantizations: OptionalNullable[
@@ -290,19 +333,17 @@ class OpenResponsesRequestProvider(BaseModel):
      ] = UNSET
      r"""A list of quantization levels to filter the provider by."""

-     sort: Annotated[
-         OptionalNullable[ProviderSort], PlainValidator(validate_open_enum(False))
-     ] = UNSET
+     sort: OptionalNullable[OpenResponsesRequestSort] = UNSET
      r"""The sorting strategy to use for this request, if \"order\" is not specified. When set, no load balancing is performed."""

      max_price: Optional[OpenResponsesRequestMaxPrice] = None
      r"""The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion."""

-     min_throughput: OptionalNullable[float] = UNSET
-     r"""The minimum throughput (in tokens per second) required for this request. Only providers serving the model with at least this throughput will be used."""
+     preferred_min_throughput: OptionalNullable[PreferredMinThroughput] = UNSET
+     r"""Preferred minimum throughput (in tokens per second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints below the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold."""

-     max_latency: OptionalNullable[float] = UNSET
-     r"""The maximum latency (in seconds) allowed for this request. Only providers serving the model with better than this latency will be used."""
+     preferred_max_latency: OptionalNullable[PreferredMaxLatency] = UNSET
+     r"""Preferred maximum latency (in seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints above the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold."""

      @model_serializer(mode="wrap")
      def serialize_model(self, handler):
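
Note on the new provider routing fields: per the docstrings above, preferred_min_throughput and preferred_max_latency are soft preferences rather than hard filters. A bare number applies to the p50 percentile, an object gives percentile-specific cutoffs, and endpoints that miss the threshold are only deprioritized. A minimal sketch of the TypedDict/plain-dict form of a provider block (values and slugs are illustrative, not recommendations):

# Illustrative OpenResponsesRequestProviderTypedDict-style provider block.
provider_prefs = {
    # Bare number: applies to the p50 percentile (tokens/sec).
    "preferred_min_throughput": 80,
    # Object form: percentile-specific cutoffs in seconds (the p50/p75/p90/p99
    # keys defined in the new percentilelatencycutoffs.py shown later).
    "preferred_max_latency": {"p50": 1.0, "p90": 2.5},
    # Existing fields are unchanged; these slugs are placeholders.
    "order": ["example-provider", "another-provider"],
    "allow_fallbacks": True,
}

Because the order/only/ignore entries are typed as Union[ProviderName, str], unrecognized slugs still pass validation and are forwarded as plain strings.
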
@@ -318,8 +359,8 @@ class OpenResponsesRequestProvider(BaseModel):
              "quantizations",
              "sort",
              "max_price",
-             "min_throughput",
-             "max_latency",
+             "preferred_min_throughput",
+             "preferred_max_latency",
          ]
          nullable_fields = [
              "allow_fallbacks",
@@ -332,8 +373,8 @@ class OpenResponsesRequestProvider(BaseModel):
              "ignore",
              "quantizations",
              "sort",
-             "min_throughput",
-             "max_latency",
+             "preferred_min_throughput",
+             "preferred_max_latency",
          ]
          null_default_fields = []

@@ -381,33 +422,12 @@ class OpenResponsesRequestPluginResponseHealing(BaseModel):
  IDFileParser = Literal["file-parser",]


- OpenResponsesRequestPdfEngine = Union[
-     Literal[
-         "mistral-ocr",
-         "pdf-text",
-         "native",
-     ],
-     UnrecognizedStr,
- ]
-
-
- class OpenResponsesRequestPdfTypedDict(TypedDict):
-     engine: NotRequired[OpenResponsesRequestPdfEngine]
-
-
- class OpenResponsesRequestPdf(BaseModel):
-     engine: Annotated[
-         Optional[OpenResponsesRequestPdfEngine],
-         PlainValidator(validate_open_enum(False)),
-     ] = None
-
-
  class OpenResponsesRequestPluginFileParserTypedDict(TypedDict):
      id: IDFileParser
      enabled: NotRequired[bool]
      r"""Set to false to disable the file-parser plugin for this request. Defaults to true."""
-     max_files: NotRequired[float]
-     pdf: NotRequired[OpenResponsesRequestPdfTypedDict]
+     pdf: NotRequired[PDFParserOptionsTypedDict]
+     r"""Options for PDF parsing."""


  class OpenResponsesRequestPluginFileParser(BaseModel):
@@ -416,30 +436,21 @@ class OpenResponsesRequestPluginFileParser(BaseModel):
      enabled: Optional[bool] = None
      r"""Set to false to disable the file-parser plugin for this request. Defaults to true."""

-     max_files: Optional[float] = None
-
-     pdf: Optional[OpenResponsesRequestPdf] = None
+     pdf: Optional[PDFParserOptions] = None
+     r"""Options for PDF parsing."""


  IDWeb = Literal["web",]


- OpenResponsesRequestEngine = Union[
-     Literal[
-         "native",
-         "exa",
-     ],
-     UnrecognizedStr,
- ]
-
-
  class OpenResponsesRequestPluginWebTypedDict(TypedDict):
      id: IDWeb
      enabled: NotRequired[bool]
      r"""Set to false to disable the web-search plugin for this request. Defaults to true."""
      max_results: NotRequired[float]
      search_prompt: NotRequired[str]
-     engine: NotRequired[OpenResponsesRequestEngine]
+     engine: NotRequired[WebSearchEngine]
+     r"""The search engine to use for web search."""


  class OpenResponsesRequestPluginWeb(BaseModel):
@@ -453,8 +464,9 @@ class OpenResponsesRequestPluginWeb(BaseModel):
      search_prompt: Optional[str] = None

      engine: Annotated[
-         Optional[OpenResponsesRequestEngine], PlainValidator(validate_open_enum(False))
+         Optional[WebSearchEngine], PlainValidator(validate_open_enum(False))
      ] = None
+     r"""The search engine to use for web search."""


  IDModeration = Literal["moderation",]
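
The web plugin's engine field now reuses the shared WebSearchEngine type (defined in the new websearchengine.py, whose contents are not shown in this section); the inline union it replaces accepted "native" and "exa". A hedged sketch of the plugin entry as a plain dict, assuming those values carry over to WebSearchEngine:

# Illustrative OpenResponsesRequestPluginWebTypedDict-style entry.
# "exa" is assumed to remain a valid WebSearchEngine value.
web_plugin = {
    "id": "web",
    "enabled": True,
    "max_results": 3,
    "search_prompt": "Prefer primary sources.",
    "engine": "exa",
}

Since the field is validated with validate_open_enum(False), an unknown engine string appears to be carried through as an unrecognized value rather than rejected outright.
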
@@ -468,11 +480,33 @@ class OpenResponsesRequestPluginModeration(BaseModel):
      id: IDModeration


+ IDAutoRouter = Literal["auto-router",]
+
+
+ class OpenResponsesRequestPluginAutoRouterTypedDict(TypedDict):
+     id: IDAutoRouter
+     enabled: NotRequired[bool]
+     r"""Set to false to disable the auto-router plugin for this request. Defaults to true."""
+     allowed_models: NotRequired[List[str]]
+     r"""List of model patterns to filter which models the auto-router can route between. Supports wildcards (e.g., \"anthropic/*\" matches all Anthropic models). When not specified, uses the default supported models list."""
+
+
+ class OpenResponsesRequestPluginAutoRouter(BaseModel):
+     id: IDAutoRouter
+
+     enabled: Optional[bool] = None
+     r"""Set to false to disable the auto-router plugin for this request. Defaults to true."""
+
+     allowed_models: Optional[List[str]] = None
+     r"""List of model patterns to filter which models the auto-router can route between. Supports wildcards (e.g., \"anthropic/*\" matches all Anthropic models). When not specified, uses the default supported models list."""
+
+
  OpenResponsesRequestPluginUnionTypedDict = TypeAliasType(
      "OpenResponsesRequestPluginUnionTypedDict",
      Union[
          OpenResponsesRequestPluginModerationTypedDict,
          OpenResponsesRequestPluginResponseHealingTypedDict,
+         OpenResponsesRequestPluginAutoRouterTypedDict,
          OpenResponsesRequestPluginFileParserTypedDict,
          OpenResponsesRequestPluginWebTypedDict,
      ],
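
The auto-router plugin added here follows the same discriminated-plugin shape as the others: an id literal plus optional settings. A minimal sketch of the TypedDict form, using the wildcard example from the field's own docstring:

# Illustrative OpenResponsesRequestPluginAutoRouterTypedDict-style entry.
auto_router_plugin = {
    "id": "auto-router",
    "enabled": True,
    # Wildcard patterns restrict which models the auto-router may choose;
    # "anthropic/*" is the example given in the docstring above.
    "allowed_models": ["anthropic/*"],
}

In the tagged union that follows, entries with this id are routed to OpenResponsesRequestPluginAutoRouter via Tag("auto-router").
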
@@ -481,6 +515,7 @@ OpenResponsesRequestPluginUnionTypedDict = TypeAliasType(

  OpenResponsesRequestPluginUnion = Annotated[
      Union[
+         Annotated[OpenResponsesRequestPluginAutoRouter, Tag("auto-router")],
          Annotated[OpenResponsesRequestPluginModeration, Tag("moderation")],
          Annotated[OpenResponsesRequestPluginWeb, Tag("web")],
          Annotated[OpenResponsesRequestPluginFileParser, Tag("file-parser")],
@@ -490,16 +525,6 @@ OpenResponsesRequestPluginUnion = Annotated[
  ]


- OpenResponsesRequestRoute = Union[
-     Literal[
-         "fallback",
-         "sort",
-     ],
-     UnrecognizedStr,
- ]
- r"""Routing strategy for multiple models: \"fallback\" (default) uses secondary models as backups, \"sort\" sorts all endpoints together by routing criteria."""
-
-
  class OpenResponsesRequestTypedDict(TypedDict):
      r"""Request schema for Responses endpoint"""

@@ -520,7 +545,15 @@ class OpenResponsesRequestTypedDict(TypedDict):
      max_output_tokens: NotRequired[Nullable[float]]
      temperature: NotRequired[Nullable[float]]
      top_p: NotRequired[Nullable[float]]
+     top_logprobs: NotRequired[Nullable[int]]
+     max_tool_calls: NotRequired[Nullable[int]]
+     presence_penalty: NotRequired[Nullable[float]]
+     frequency_penalty: NotRequired[Nullable[float]]
      top_k: NotRequired[float]
+     image_config: NotRequired[Dict[str, OpenResponsesRequestImageConfigTypedDict]]
+     r"""Provider-specific image configuration options. Keys and values vary by model/provider. See https://openrouter.ai/docs/features/multimodal/image-generation for more details."""
+     modalities: NotRequired[List[ResponsesOutputModality]]
+     r"""Output modalities for the response. Supported values are \"text\" and \"image\"."""
      prompt_cache_key: NotRequired[Nullable[str]]
      previous_response_id: NotRequired[Nullable[str]]
      prompt: NotRequired[Nullable[OpenAIResponsesPromptTypedDict]]
@@ -535,8 +568,6 @@ class OpenResponsesRequestTypedDict(TypedDict):
      r"""When multiple model providers are available, optionally indicate your routing preference."""
      plugins: NotRequired[List[OpenResponsesRequestPluginUnionTypedDict]]
      r"""Plugins you want to enable for this request, including their settings."""
-     route: NotRequired[Nullable[OpenResponsesRequestRoute]]
-     r"""Routing strategy for multiple models: \"fallback\" (default) uses secondary models as backups, \"sort\" sorts all endpoints together by routing criteria."""
      user: NotRequired[str]
      r"""A unique identifier representing your end-user, which helps distinguish between different users of your app. This allows your app to identify specific users in case of abuse reports, preventing your entire app from being affected by the actions of individual users. Maximum of 128 characters."""
      session_id: NotRequired[str]
@@ -576,8 +607,28 @@ class OpenResponsesRequest(BaseModel):

      top_p: OptionalNullable[float] = UNSET

+     top_logprobs: OptionalNullable[int] = UNSET
+
+     max_tool_calls: OptionalNullable[int] = UNSET
+
+     presence_penalty: OptionalNullable[float] = UNSET
+
+     frequency_penalty: OptionalNullable[float] = UNSET
+
      top_k: Optional[float] = None

+     image_config: Optional[Dict[str, OpenResponsesRequestImageConfig]] = None
+     r"""Provider-specific image configuration options. Keys and values vary by model/provider. See https://openrouter.ai/docs/features/multimodal/image-generation for more details."""
+
+     modalities: Optional[
+         List[
+             Annotated[
+                 ResponsesOutputModality, PlainValidator(validate_open_enum(False))
+             ]
+         ]
+     ] = None
+     r"""Output modalities for the response. Supported values are \"text\" and \"image\"."""
+
      prompt_cache_key: OptionalNullable[str] = UNSET

      previous_response_id: OptionalNullable[str] = UNSET
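
image_config is a free-form mapping of provider-specific keys to string or float values (the OpenResponsesRequestImageConfig alias added earlier), and modalities selects the output types. A sketch of the new request fields in dict form; the image_config key below is a placeholder, since valid keys vary by model and provider:

# Illustrative OpenResponsesRequestTypedDict fragment (new fields only).
new_request_fields = {
    "modalities": ["text", "image"],       # ResponsesOutputModality values
    "image_config": {"quality": "high"},   # Dict[str, Union[str, float]]; key is hypothetical
    "top_logprobs": 5,
    "max_tool_calls": 10,
    "presence_penalty": 0.2,
    "frequency_penalty": 0.1,
}
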
@@ -615,12 +666,6 @@ class OpenResponsesRequest(BaseModel):
      plugins: Optional[List[OpenResponsesRequestPluginUnion]] = None
      r"""Plugins you want to enable for this request, including their settings."""

-     route: Annotated[
-         OptionalNullable[OpenResponsesRequestRoute],
-         PlainValidator(validate_open_enum(False)),
-     ] = UNSET
-     r"""Routing strategy for multiple models: \"fallback\" (default) uses secondary models as backups, \"sort\" sorts all endpoints together by routing criteria."""
-
      user: Optional[str] = None
      r"""A unique identifier representing your end-user, which helps distinguish between different users of your app. This allows your app to identify specific users in case of abuse reports, preventing your entire app from being affected by the actions of individual users. Maximum of 128 characters."""

@@ -643,7 +688,13 @@ class OpenResponsesRequest(BaseModel):
              "max_output_tokens",
              "temperature",
              "top_p",
+             "top_logprobs",
+             "max_tool_calls",
+             "presence_penalty",
+             "frequency_penalty",
              "top_k",
+             "image_config",
+             "modalities",
              "prompt_cache_key",
              "previous_response_id",
              "prompt",
@@ -656,7 +707,6 @@ class OpenResponsesRequest(BaseModel):
              "stream",
              "provider",
              "plugins",
-             "route",
              "user",
              "session_id",
          ]
@@ -668,6 +718,10 @@ class OpenResponsesRequest(BaseModel):
              "max_output_tokens",
              "temperature",
              "top_p",
+             "top_logprobs",
+             "max_tool_calls",
+             "presence_penalty",
+             "frequency_penalty",
              "prompt_cache_key",
              "previous_response_id",
              "prompt",
@@ -676,7 +730,6 @@ class OpenResponsesRequest(BaseModel):
              "safety_identifier",
              "truncation",
              "provider",
-             "route",
          ]
          null_default_fields = []

openrouter/components/parameter.py

@@ -28,6 +28,7 @@ Parameter = Union[
          "parallel_tool_calls",
          "include_reasoning",
          "reasoning",
+         "reasoning_effort",
          "web_search_options",
          "verbosity",
      ],

openrouter/components/pdfparserengine.py (new file)

@@ -0,0 +1,16 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from openrouter.types import UnrecognizedStr
+ from typing import Literal, Union
+
+
+ PDFParserEngine = Union[
+     Literal[
+         "mistral-ocr",
+         "pdf-text",
+         "native",
+     ],
+     UnrecognizedStr,
+ ]
+ r"""The engine to use for parsing PDF files."""

openrouter/components/pdfparseroptions.py (new file)

@@ -0,0 +1,25 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from .pdfparserengine import PDFParserEngine
+ from openrouter.types import BaseModel
+ from openrouter.utils import validate_open_enum
+ from pydantic.functional_validators import PlainValidator
+ from typing import Optional
+ from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+ class PDFParserOptionsTypedDict(TypedDict):
+     r"""Options for PDF parsing."""
+
+     engine: NotRequired[PDFParserEngine]
+     r"""The engine to use for parsing PDF files."""
+
+
+ class PDFParserOptions(BaseModel):
+     r"""Options for PDF parsing."""
+
+     engine: Annotated[
+         Optional[PDFParserEngine], PlainValidator(validate_open_enum(False))
+     ] = None
+     r"""The engine to use for parsing PDF files."""
openrouter/components/percentilelatencycutoffs.py (new file)

@@ -0,0 +1,71 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from openrouter.types import (
+     BaseModel,
+     Nullable,
+     OptionalNullable,
+     UNSET,
+     UNSET_SENTINEL,
+ )
+ from pydantic import model_serializer
+ from typing_extensions import NotRequired, TypedDict
+
+
+ class PercentileLatencyCutoffsTypedDict(TypedDict):
+     r"""Percentile-based latency cutoffs. All specified cutoffs must be met for an endpoint to be preferred."""
+
+     p50: NotRequired[Nullable[float]]
+     r"""Maximum p50 latency (seconds)"""
+     p75: NotRequired[Nullable[float]]
+     r"""Maximum p75 latency (seconds)"""
+     p90: NotRequired[Nullable[float]]
+     r"""Maximum p90 latency (seconds)"""
+     p99: NotRequired[Nullable[float]]
+     r"""Maximum p99 latency (seconds)"""
+
+
+ class PercentileLatencyCutoffs(BaseModel):
+     r"""Percentile-based latency cutoffs. All specified cutoffs must be met for an endpoint to be preferred."""
+
+     p50: OptionalNullable[float] = UNSET
+     r"""Maximum p50 latency (seconds)"""
+
+     p75: OptionalNullable[float] = UNSET
+     r"""Maximum p75 latency (seconds)"""
+
+     p90: OptionalNullable[float] = UNSET
+     r"""Maximum p90 latency (seconds)"""
+
+     p99: OptionalNullable[float] = UNSET
+     r"""Maximum p99 latency (seconds)"""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = ["p50", "p75", "p90", "p99"]
+         nullable_fields = ["p50", "p75", "p90", "p99"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+             serialized.pop(k, None)
+
+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (
+                 self.__pydantic_fields_set__.intersection({n})
+                 or k in null_default_fields
+             )  # pylint: disable=no-member
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields or (optional_nullable and is_set)
+             ):
+                 m[k] = val
+
+         return m
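
The wrap-mode serializer above drops percentiles that were never set while keeping an explicit None for the nullable fields, so partially specified cutoffs stay compact. A hedged construction sketch (import path taken from the file list at the top of this diff):

from openrouter.components.percentilelatencycutoffs import PercentileLatencyCutoffs

# Only the percentiles you care about need to be set; unset fields appear to be
# omitted by serialize_model, so this should dump as just {"p90": 2.5}.
latency_cutoffs = PercentileLatencyCutoffs(p90=2.5)
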
openrouter/components/percentilestats.py (new file)

@@ -0,0 +1,34 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from openrouter.types import BaseModel
+ from typing_extensions import TypedDict
+
+
+ class PercentileStatsTypedDict(TypedDict):
+     r"""Latency percentiles in milliseconds over the last 30 minutes. Latency measures time to first token. Only visible when authenticated with an API key or cookie; returns null for unauthenticated requests."""
+
+     p50: float
+     r"""Median (50th percentile)"""
+     p75: float
+     r"""75th percentile"""
+     p90: float
+     r"""90th percentile"""
+     p99: float
+     r"""99th percentile"""
+
+
+ class PercentileStats(BaseModel):
+     r"""Latency percentiles in milliseconds over the last 30 minutes. Latency measures time to first token. Only visible when authenticated with an API key or cookie; returns null for unauthenticated requests."""
+
+     p50: float
+     r"""Median (50th percentile)"""
+
+     p75: float
+     r"""75th percentile"""
+
+     p90: float
+     r"""90th percentile"""
+
+     p99: float
+     r"""99th percentile"""

openrouter/components/percentilethroughputcutoffs.py (new file)

@@ -0,0 +1,71 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from openrouter.types import (
+     BaseModel,
+     Nullable,
+     OptionalNullable,
+     UNSET,
+     UNSET_SENTINEL,
+ )
+ from pydantic import model_serializer
+ from typing_extensions import NotRequired, TypedDict
+
+
+ class PercentileThroughputCutoffsTypedDict(TypedDict):
+     r"""Percentile-based throughput cutoffs. All specified cutoffs must be met for an endpoint to be preferred."""
+
+     p50: NotRequired[Nullable[float]]
+     r"""Minimum p50 throughput (tokens/sec)"""
+     p75: NotRequired[Nullable[float]]
+     r"""Minimum p75 throughput (tokens/sec)"""
+     p90: NotRequired[Nullable[float]]
+     r"""Minimum p90 throughput (tokens/sec)"""
+     p99: NotRequired[Nullable[float]]
+     r"""Minimum p99 throughput (tokens/sec)"""
+
+
+ class PercentileThroughputCutoffs(BaseModel):
+     r"""Percentile-based throughput cutoffs. All specified cutoffs must be met for an endpoint to be preferred."""
+
+     p50: OptionalNullable[float] = UNSET
+     r"""Minimum p50 throughput (tokens/sec)"""
+
+     p75: OptionalNullable[float] = UNSET
+     r"""Minimum p75 throughput (tokens/sec)"""
+
+     p90: OptionalNullable[float] = UNSET
+     r"""Minimum p90 throughput (tokens/sec)"""
+
+     p99: OptionalNullable[float] = UNSET
+     r"""Minimum p99 throughput (tokens/sec)"""
+
+     @model_serializer(mode="wrap")
+     def serialize_model(self, handler):
+         optional_fields = ["p50", "p75", "p90", "p99"]
+         nullable_fields = ["p50", "p75", "p90", "p99"]
+         null_default_fields = []
+
+         serialized = handler(self)
+
+         m = {}
+
+         for n, f in type(self).model_fields.items():
+             k = f.alias or n
+             val = serialized.get(k)
+             serialized.pop(k, None)
+
+             optional_nullable = k in optional_fields and k in nullable_fields
+             is_set = (
+                 self.__pydantic_fields_set__.intersection({n})
+                 or k in null_default_fields
+             )  # pylint: disable=no-member
+
+             if val is not None and val != UNSET_SENTINEL:
+                 m[k] = val
+             elif val != UNSET_SENTINEL and (
+                 not k in optional_fields or (optional_nullable and is_set)
+             ):
+                 m[k] = val
+
+         return m
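
PercentileThroughputCutoffs mirrors the latency model, except each percentile is a minimum in tokens/sec rather than a maximum in seconds. A sketch of pairing the object form with the provider preferences shown earlier, assuming (as the docstrings state) that the object form is accepted wherever a bare number is:

from openrouter.components.percentilethroughputcutoffs import PercentileThroughputCutoffs

# All specified cutoffs must be met for an endpoint to be preferred.
throughput_cutoffs = PercentileThroughputCutoffs(p50=50, p90=80)

# Plain-dict form of the same preference inside a provider block.
provider_prefs = {"preferred_min_throughput": {"p50": 50, "p90": 80}}
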