orq-ai-sdk 4.2.0rc49__py3-none-any.whl → 4.2.15__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (104)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/agents.py +186 -186
  4. orq_ai_sdk/audio.py +30 -0
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +438 -0
  7. orq_ai_sdk/contacts.py +43 -886
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +364 -0
  10. orq_ai_sdk/embeddings.py +344 -0
  11. orq_ai_sdk/generations.py +370 -0
  12. orq_ai_sdk/images.py +28 -0
  13. orq_ai_sdk/models/__init__.py +3839 -424
  14. orq_ai_sdk/models/conversationresponse.py +1 -1
  15. orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
  16. orq_ai_sdk/models/createagentrequestop.py +768 -12
  17. orq_ai_sdk/models/createagentresponse.py +68 -2
  18. orq_ai_sdk/models/createchatcompletionop.py +538 -313
  19. orq_ai_sdk/models/createcompletionop.py +2078 -0
  20. orq_ai_sdk/models/createcontactop.py +5 -10
  21. orq_ai_sdk/models/createconversationop.py +1 -1
  22. orq_ai_sdk/models/createconversationresponseop.py +2 -2
  23. orq_ai_sdk/models/createdatasetitemop.py +4 -4
  24. orq_ai_sdk/models/createdatasetop.py +1 -1
  25. orq_ai_sdk/models/createdatasourceop.py +1 -1
  26. orq_ai_sdk/models/createembeddingop.py +579 -0
  27. orq_ai_sdk/models/createevalop.py +14 -14
  28. orq_ai_sdk/models/createidentityop.py +1 -1
  29. orq_ai_sdk/models/createimageeditop.py +715 -0
  30. orq_ai_sdk/models/createimageop.py +228 -82
  31. orq_ai_sdk/models/createimagevariationop.py +706 -0
  32. orq_ai_sdk/models/creatememoryop.py +4 -2
  33. orq_ai_sdk/models/createmoderationop.py +521 -0
  34. orq_ai_sdk/models/createpromptop.py +375 -6
  35. orq_ai_sdk/models/creatererankop.py +608 -0
  36. orq_ai_sdk/models/createresponseop.py +2567 -0
  37. orq_ai_sdk/models/createspeechop.py +466 -0
  38. orq_ai_sdk/models/createtoolop.py +6 -6
  39. orq_ai_sdk/models/createtranscriptionop.py +732 -0
  40. orq_ai_sdk/models/createtranslationop.py +702 -0
  41. orq_ai_sdk/models/deploymentgetconfigop.py +17 -7
  42. orq_ai_sdk/models/deploymentsop.py +1 -0
  43. orq_ai_sdk/models/deploymentstreamop.py +7 -0
  44. orq_ai_sdk/models/filegetop.py +1 -1
  45. orq_ai_sdk/models/filelistop.py +1 -1
  46. orq_ai_sdk/models/fileuploadop.py +1 -1
  47. orq_ai_sdk/models/generateconversationnameop.py +1 -1
  48. orq_ai_sdk/models/getallmemoriesop.py +4 -2
  49. orq_ai_sdk/models/getallpromptsop.py +188 -3
  50. orq_ai_sdk/models/getalltoolsop.py +6 -6
  51. orq_ai_sdk/models/getevalsop.py +17 -17
  52. orq_ai_sdk/models/getonepromptop.py +188 -3
  53. orq_ai_sdk/models/getpromptversionop.py +188 -3
  54. orq_ai_sdk/models/invokedeploymentrequest.py +11 -4
  55. orq_ai_sdk/models/listagentsop.py +372 -0
  56. orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
  57. orq_ai_sdk/models/listdatasetsop.py +1 -1
  58. orq_ai_sdk/models/listdatasourcesop.py +1 -1
  59. orq_ai_sdk/models/listidentitiesop.py +1 -1
  60. orq_ai_sdk/models/listmodelsop.py +1 -0
  61. orq_ai_sdk/models/listpromptversionsop.py +188 -3
  62. orq_ai_sdk/models/partdoneevent.py +1 -1
  63. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  64. orq_ai_sdk/models/publiccontact.py +9 -3
  65. orq_ai_sdk/models/publicidentity.py +62 -0
  66. orq_ai_sdk/models/reasoningpart.py +1 -1
  67. orq_ai_sdk/models/responsedoneevent.py +14 -11
  68. orq_ai_sdk/models/retrieveagentrequestop.py +382 -0
  69. orq_ai_sdk/models/retrievedatapointop.py +4 -4
  70. orq_ai_sdk/models/retrievedatasetop.py +1 -1
  71. orq_ai_sdk/models/retrievedatasourceop.py +1 -1
  72. orq_ai_sdk/models/retrieveidentityop.py +1 -1
  73. orq_ai_sdk/models/retrievememoryop.py +4 -2
  74. orq_ai_sdk/models/retrievetoolop.py +6 -6
  75. orq_ai_sdk/models/runagentop.py +379 -9
  76. orq_ai_sdk/models/streamrunagentop.py +385 -9
  77. orq_ai_sdk/models/updateagentop.py +770 -12
  78. orq_ai_sdk/models/updateconversationop.py +1 -1
  79. orq_ai_sdk/models/updatedatapointop.py +4 -4
  80. orq_ai_sdk/models/updatedatasetop.py +1 -1
  81. orq_ai_sdk/models/updatedatasourceop.py +1 -1
  82. orq_ai_sdk/models/updateevalop.py +14 -14
  83. orq_ai_sdk/models/updateidentityop.py +1 -1
  84. orq_ai_sdk/models/updatememoryop.py +4 -2
  85. orq_ai_sdk/models/updatepromptop.py +375 -6
  86. orq_ai_sdk/models/updatetoolop.py +7 -7
  87. orq_ai_sdk/moderations.py +218 -0
  88. orq_ai_sdk/orq_completions.py +666 -0
  89. orq_ai_sdk/orq_responses.py +398 -0
  90. orq_ai_sdk/rerank.py +330 -0
  91. orq_ai_sdk/router.py +89 -641
  92. orq_ai_sdk/speech.py +333 -0
  93. orq_ai_sdk/transcriptions.py +416 -0
  94. orq_ai_sdk/translations.py +384 -0
  95. orq_ai_sdk/variations.py +364 -0
  96. orq_ai_sdk-4.2.15.dist-info/METADATA +888 -0
  97. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/RECORD +99 -76
  98. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/WHEEL +1 -1
  99. orq_ai_sdk/models/deletecontactop.py +0 -44
  100. orq_ai_sdk/models/listcontactsop.py +0 -265
  101. orq_ai_sdk/models/retrievecontactop.py +0 -142
  102. orq_ai_sdk/models/updatecontactop.py +0 -233
  103. orq_ai_sdk-4.2.0rc49.dist-info/METADATA +0 -788
  104. {orq_ai_sdk-4.2.0rc49.dist-info → orq_ai_sdk-4.2.15.dist-info}/top_level.txt +0 -0
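The headline change is visible in the file list: `router.py` loses roughly 550 lines while new per-capability modules appear (`completions.py`, `embeddings.py`, `rerank.py`, `moderations.py`, `speech.py`, `transcriptions.py`, `translations.py`, and friends), and a new `/v2/router/ocr` operation lands. Based only on the attribute names declared in the `router.py` diff below, the new surface looks roughly like this sketch; the top-level client class name and constructor arguments are assumptions not shown in this diff, and the methods on each sub-client are likewise not shown here:

```python
# Sketch of the restructured Router surface in 4.2.15.
# The sub-client attribute names are confirmed by the Router class body in
# the diff below; `Orq` and `api_key` are assumed and not part of this diff.
from orq_ai_sdk import Orq

client = Orq(api_key="<ORQ_API_KEY>")

client.router.chat         # Chat
client.router.completions  # Completions
client.router.embeddings   # Embeddings
client.router.moderations  # Moderations
client.router.rerank       # Rerank
client.router.audio        # Audio (likely the new speech/transcription/translation modules)
client.router.images       # Images (likely the new generations/edits/variations modules)
client.router.responses    # OrqResponses
client.router.ocr(...)     # the one operation still defined on Router itself
```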
orq_ai_sdk/router.py CHANGED
@@ -1,430 +1,86 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from .basesdk import BaseSDK
-from enum import Enum
+from .sdkconfiguration import SDKConfiguration
 from orq_ai_sdk import models, utils
 from orq_ai_sdk._hooks import HookContext
-from orq_ai_sdk.models import (
-    createchatcompletionop as models_createchatcompletionop,
-    createimageop as models_createimageop,
-)
+from orq_ai_sdk.audio import Audio
+from orq_ai_sdk.chat import Chat
+from orq_ai_sdk.completions import Completions
+from orq_ai_sdk.embeddings import Embeddings
+from orq_ai_sdk.images import Images
+from orq_ai_sdk.models import post_v2_router_ocrop as models_post_v2_router_ocrop
+from orq_ai_sdk.moderations import Moderations
+from orq_ai_sdk.orq_responses import OrqResponses
+from orq_ai_sdk.rerank import Rerank
 from orq_ai_sdk.types import OptionalNullable, UNSET
-from orq_ai_sdk.utils import eventstreaming, get_security_from_env
+from orq_ai_sdk.utils import get_security_from_env
 from orq_ai_sdk.utils.unmarshal_json_response import unmarshal_json_response
-from typing import Dict, List, Mapping, Optional, Union
-
-
-class ChatCompletionsAcceptEnum(str, Enum):
-    APPLICATION_JSON = "application/json"
-    TEXT_EVENT_STREAM = "text/event-stream"
+from typing import List, Mapping, Optional, Union


 class Router(BaseSDK):
-    def chat_completions(
-        self,
-        *,
-        messages: Union[
-            List[models_createchatcompletionop.CreateChatCompletionMessages],
-            List[models_createchatcompletionop.CreateChatCompletionMessagesTypedDict],
-        ],
-        model: str,
-        metadata: Optional[Dict[str, str]] = None,
-        audio: OptionalNullable[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionAudio,
-                models_createchatcompletionop.CreateChatCompletionAudioTypedDict,
-            ]
-        ] = UNSET,
-        frequency_penalty: OptionalNullable[float] = UNSET,
-        max_tokens: OptionalNullable[int] = UNSET,
-        max_completion_tokens: OptionalNullable[int] = UNSET,
-        logprobs: OptionalNullable[bool] = UNSET,
-        top_logprobs: OptionalNullable[int] = UNSET,
-        n: OptionalNullable[int] = UNSET,
-        presence_penalty: OptionalNullable[float] = UNSET,
-        response_format: Optional[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionResponseFormat,
-                models_createchatcompletionop.CreateChatCompletionResponseFormatTypedDict,
-            ]
-        ] = None,
-        reasoning_effort: Optional[
-            models_createchatcompletionop.CreateChatCompletionReasoningEffort
-        ] = None,
-        verbosity: Optional[str] = None,
-        seed: OptionalNullable[float] = UNSET,
-        stop: OptionalNullable[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionStop,
-                models_createchatcompletionop.CreateChatCompletionStopTypedDict,
-            ]
-        ] = UNSET,
-        stream_options: OptionalNullable[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionStreamOptions,
-                models_createchatcompletionop.CreateChatCompletionStreamOptionsTypedDict,
-            ]
-        ] = UNSET,
-        thinking: Optional[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionThinking,
-                models_createchatcompletionop.CreateChatCompletionThinkingTypedDict,
-            ]
-        ] = None,
-        temperature: OptionalNullable[float] = UNSET,
-        top_p: OptionalNullable[float] = UNSET,
-        top_k: OptionalNullable[float] = UNSET,
-        tools: Optional[
-            Union[
-                List[models_createchatcompletionop.CreateChatCompletionTools],
-                List[models_createchatcompletionop.CreateChatCompletionToolsTypedDict],
-            ]
-        ] = None,
-        tool_choice: Optional[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionToolChoice,
-                models_createchatcompletionop.CreateChatCompletionToolChoiceTypedDict,
-            ]
-        ] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        modalities: OptionalNullable[
-            List[models_createchatcompletionop.CreateChatCompletionModalities]
-        ] = UNSET,
-        guardrails: Optional[
-            Union[
-                List[models_createchatcompletionop.CreateChatCompletionGuardrails],
-                List[
-                    models_createchatcompletionop.CreateChatCompletionGuardrailsTypedDict
-                ],
-            ]
-        ] = None,
-        orq: Optional[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionOrq,
-                models_createchatcompletionop.CreateChatCompletionOrqTypedDict,
-            ]
-        ] = None,
-        stream: Optional[bool] = False,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        accept_header_override: Optional[ChatCompletionsAcceptEnum] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.CreateChatCompletionResponse:
-        r"""Create chat completion
-
-        Creates a model response for the given chat conversation with support for retries, fallbacks, prompts, and variables.
-
-        :param messages: A list of messages comprising the conversation so far.
-        :param model: Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`. The AI Gateway offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the (Supported models)[/docs/proxy/supported-models] to browse available models.
-        :param metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can have a maximum length of 64 characters and values can have a maximum length of 512 characters.
-        :param audio: Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more.
-        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
-        :param max_tokens: `[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
-
-        This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
-        :param max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens
-        :param logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.
-        :param top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
-        :param n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
-        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
-        :param response_format: An object specifying the format that the model must output
-        :param reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
-
-        - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
-        - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
-        - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-        - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
-
-        Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
-        :param verbosity: Adjusts response verbosity. Lower levels yield shorter answers.
-        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
-        :param stop: Up to 4 sequences where the API will stop generating further tokens.
-        :param stream_options: Options for streaming response. Only set this when you set stream: true.
-        :param thinking:
-        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
-        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
-        :param top_k: Limits the model to consider only the top k most likely tokens at each step.
-        :param tools: A list of tools the model may call.
-        :param tool_choice: Controls which (if any) tool is called by the model.
-        :param parallel_tool_calls: Whether to enable parallel function calling during tool use.
-        :param modalities: Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"].
-        :param guardrails: A list of guardrails to apply to the request.
-        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
-        :param stream:
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param accept_header_override: Override the default accept header for this method
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if timeout_ms is None:
-            timeout_ms = 600000
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-
-        request = models.CreateChatCompletionRequestBody(
-            messages=utils.get_pydantic_model(
-                messages, List[models.CreateChatCompletionMessages]
-            ),
-            model=model,
-            metadata=metadata,
-            audio=utils.get_pydantic_model(
-                audio, OptionalNullable[models.CreateChatCompletionAudio]
-            ),
-            frequency_penalty=frequency_penalty,
-            max_tokens=max_tokens,
-            max_completion_tokens=max_completion_tokens,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            n=n,
-            presence_penalty=presence_penalty,
-            response_format=utils.get_pydantic_model(
-                response_format, Optional[models.CreateChatCompletionResponseFormat]
-            ),
-            reasoning_effort=reasoning_effort,
-            verbosity=verbosity,
-            seed=seed,
-            stop=stop,
-            stream_options=utils.get_pydantic_model(
-                stream_options,
-                OptionalNullable[models.CreateChatCompletionStreamOptions],
-            ),
-            thinking=utils.get_pydantic_model(
-                thinking, Optional[models.CreateChatCompletionThinking]
-            ),
-            temperature=temperature,
-            top_p=top_p,
-            top_k=top_k,
-            tools=utils.get_pydantic_model(
-                tools, Optional[List[models.CreateChatCompletionTools]]
-            ),
-            tool_choice=utils.get_pydantic_model(
-                tool_choice, Optional[models.CreateChatCompletionToolChoice]
-            ),
-            parallel_tool_calls=parallel_tool_calls,
-            modalities=modalities,
-            guardrails=utils.get_pydantic_model(
-                guardrails, Optional[List[models.CreateChatCompletionGuardrails]]
-            ),
-            orq=utils.get_pydantic_model(orq, Optional[models.CreateChatCompletionOrq]),
-            stream=stream,
+    chat: Chat
+    moderations: Moderations
+    embeddings: Embeddings
+    rerank: Rerank
+    audio: Audio
+    completions: Completions
+    images: Images
+    responses: OrqResponses
+
+    def __init__(
+        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
+    ) -> None:
+        BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
+        self.sdk_configuration = sdk_config
+        self._init_sdks()
+
+    def _init_sdks(self):
+        self.chat = Chat(self.sdk_configuration, parent_ref=self.parent_ref)
+        self.moderations = Moderations(
+            self.sdk_configuration, parent_ref=self.parent_ref
         )
-
-        req = self._build_request(
-            method="POST",
-            path="/v2/gateway/chat/completions",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=request,
-            request_body_required=True,
-            request_has_path_params=False,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value=accept_header_override.value
-            if accept_header_override is not None
-            else "application/json;q=1, text/event-stream;q=0",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(
-                request, False, False, "json", models.CreateChatCompletionRequestBody
-            ),
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
+        self.embeddings = Embeddings(self.sdk_configuration, parent_ref=self.parent_ref)
+        self.rerank = Rerank(self.sdk_configuration, parent_ref=self.parent_ref)
+        self.audio = Audio(self.sdk_configuration, parent_ref=self.parent_ref)
+        self.completions = Completions(
+            self.sdk_configuration, parent_ref=self.parent_ref
        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = self.do_request(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="createChatCompletion",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
-            ),
-            request=req,
-            error_status_codes=["4XX", "5XX"],
-            stream=True,
-            retry_config=retry_config,
+        self.images = Images(self.sdk_configuration, parent_ref=self.parent_ref)
+        self.responses = OrqResponses(
+            self.sdk_configuration, parent_ref=self.parent_ref
         )

-        if utils.match_response(http_res, "200", "application/json"):
-            http_res_text = utils.stream_to_text(http_res)
-            return unmarshal_json_response(
-                models.CreateChatCompletionResponseBody, http_res, http_res_text
-            )
-        if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.EventStream(
-                http_res,
-                lambda raw: utils.unmarshal_json(
-                    raw, models.CreateChatCompletionRouterResponseBody
-                ),
-                sentinel="[DONE]",
-                client_ref=self,
-            )
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.APIError("API error occurred", http_res, http_res_text)
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.APIError("API error occurred", http_res, http_res_text)
-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.APIError("Unexpected response received", http_res, http_res_text)
-
-    async def chat_completions_async(
+    def ocr(
         self,
         *,
-        messages: Union[
-            List[models_createchatcompletionop.CreateChatCompletionMessages],
-            List[models_createchatcompletionop.CreateChatCompletionMessagesTypedDict],
-        ],
         model: str,
-        metadata: Optional[Dict[str, str]] = None,
-        audio: OptionalNullable[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionAudio,
-                models_createchatcompletionop.CreateChatCompletionAudioTypedDict,
-            ]
-        ] = UNSET,
-        frequency_penalty: OptionalNullable[float] = UNSET,
-        max_tokens: OptionalNullable[int] = UNSET,
-        max_completion_tokens: OptionalNullable[int] = UNSET,
-        logprobs: OptionalNullable[bool] = UNSET,
-        top_logprobs: OptionalNullable[int] = UNSET,
-        n: OptionalNullable[int] = UNSET,
-        presence_penalty: OptionalNullable[float] = UNSET,
-        response_format: Optional[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionResponseFormat,
-                models_createchatcompletionop.CreateChatCompletionResponseFormatTypedDict,
-            ]
-        ] = None,
-        reasoning_effort: Optional[
-            models_createchatcompletionop.CreateChatCompletionReasoningEffort
-        ] = None,
-        verbosity: Optional[str] = None,
-        seed: OptionalNullable[float] = UNSET,
-        stop: OptionalNullable[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionStop,
-                models_createchatcompletionop.CreateChatCompletionStopTypedDict,
-            ]
-        ] = UNSET,
-        stream_options: OptionalNullable[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionStreamOptions,
-                models_createchatcompletionop.CreateChatCompletionStreamOptionsTypedDict,
-            ]
-        ] = UNSET,
-        thinking: Optional[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionThinking,
-                models_createchatcompletionop.CreateChatCompletionThinkingTypedDict,
-            ]
-        ] = None,
-        temperature: OptionalNullable[float] = UNSET,
-        top_p: OptionalNullable[float] = UNSET,
-        top_k: OptionalNullable[float] = UNSET,
-        tools: Optional[
-            Union[
-                List[models_createchatcompletionop.CreateChatCompletionTools],
-                List[models_createchatcompletionop.CreateChatCompletionToolsTypedDict],
-            ]
-        ] = None,
-        tool_choice: Optional[
-            Union[
-                models_createchatcompletionop.CreateChatCompletionToolChoice,
-                models_createchatcompletionop.CreateChatCompletionToolChoiceTypedDict,
-            ]
-        ] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        modalities: OptionalNullable[
-            List[models_createchatcompletionop.CreateChatCompletionModalities]
-        ] = UNSET,
-        guardrails: Optional[
-            Union[
-                List[models_createchatcompletionop.CreateChatCompletionGuardrails],
-                List[
-                    models_createchatcompletionop.CreateChatCompletionGuardrailsTypedDict
-                ],
-            ]
-        ] = None,
-        orq: Optional[
+        document: Union[
+            models_post_v2_router_ocrop.Document,
+            models_post_v2_router_ocrop.DocumentTypedDict,
+        ],
+        pages: OptionalNullable[List[int]] = UNSET,
+        ocr_settings: Optional[
             Union[
-                models_createchatcompletionop.CreateChatCompletionOrq,
-                models_createchatcompletionop.CreateChatCompletionOrqTypedDict,
+                models_post_v2_router_ocrop.OcrSettings,
+                models_post_v2_router_ocrop.OcrSettingsTypedDict,
             ]
         ] = None,
-        stream: Optional[bool] = False,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-        accept_header_override: Optional[ChatCompletionsAcceptEnum] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.CreateChatCompletionResponse:
-        r"""Create chat completion
-
-        Creates a model response for the given chat conversation with support for retries, fallbacks, prompts, and variables.
-
-        :param messages: A list of messages comprising the conversation so far.
-        :param model: Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`. The AI Gateway offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the (Supported models)[/docs/proxy/supported-models] to browse available models.
-        :param metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can have a maximum length of 64 characters and values can have a maximum length of 512 characters.
-        :param audio: Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more.
-        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
-        :param max_tokens: `[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+    ) -> models.PostV2RouterOcrResponseBody:
+        r"""Extracts text content while maintaining document structure and hierarchy

-        This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
-        :param max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens
-        :param logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.
-        :param top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
-        :param n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
-        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
-        :param response_format: An object specifying the format that the model must output
-        :param reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
-
-        - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
-        - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
-        - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
-        - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
-
-        Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
-        :param verbosity: Adjusts response verbosity. Lower levels yield shorter answers.
-        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
-        :param stop: Up to 4 sequences where the API will stop generating further tokens.
-        :param stream_options: Options for streaming response. Only set this when you set stream: true.
-        :param thinking:
-        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
-        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
-        :param top_k: Limits the model to consider only the top k most likely tokens at each step.
-        :param tools: A list of tools the model may call.
-        :param tool_choice: Controls which (if any) tool is called by the model.
-        :param parallel_tool_calls: Whether to enable parallel function calling during tool use.
-        :param modalities: Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"].
-        :param guardrails: A list of guardrails to apply to the request.
-        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
-        :param stream:
+        :param model: ID of the model to use for OCR.
+        :param document: Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk.
+        :param pages: Specific pages to process. Can be a single number, range, or list. Starts from 0. Null for all pages.
+        :param ocr_settings: Optional settings for the OCR run
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param accept_header_override: Override the default accept header for this method
         :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
@@ -440,203 +96,18 @@ class Router(BaseSDK):
         else:
             base_url = self._get_url(base_url, url_variables)

-        request = models.CreateChatCompletionRequestBody(
-            messages=utils.get_pydantic_model(
-                messages, List[models.CreateChatCompletionMessages]
-            ),
+        request = models.PostV2RouterOcrRequestBody(
             model=model,
-            metadata=metadata,
-            audio=utils.get_pydantic_model(
-                audio, OptionalNullable[models.CreateChatCompletionAudio]
-            ),
-            frequency_penalty=frequency_penalty,
-            max_tokens=max_tokens,
-            max_completion_tokens=max_completion_tokens,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            n=n,
-            presence_penalty=presence_penalty,
-            response_format=utils.get_pydantic_model(
-                response_format, Optional[models.CreateChatCompletionResponseFormat]
-            ),
-            reasoning_effort=reasoning_effort,
-            verbosity=verbosity,
-            seed=seed,
-            stop=stop,
-            stream_options=utils.get_pydantic_model(
-                stream_options,
-                OptionalNullable[models.CreateChatCompletionStreamOptions],
-            ),
-            thinking=utils.get_pydantic_model(
-                thinking, Optional[models.CreateChatCompletionThinking]
-            ),
-            temperature=temperature,
-            top_p=top_p,
-            top_k=top_k,
-            tools=utils.get_pydantic_model(
-                tools, Optional[List[models.CreateChatCompletionTools]]
-            ),
-            tool_choice=utils.get_pydantic_model(
-                tool_choice, Optional[models.CreateChatCompletionToolChoice]
-            ),
-            parallel_tool_calls=parallel_tool_calls,
-            modalities=modalities,
-            guardrails=utils.get_pydantic_model(
-                guardrails, Optional[List[models.CreateChatCompletionGuardrails]]
-            ),
-            orq=utils.get_pydantic_model(orq, Optional[models.CreateChatCompletionOrq]),
-            stream=stream,
-        )
-
-        req = self._build_request_async(
-            method="POST",
-            path="/v2/gateway/chat/completions",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=request,
-            request_body_required=True,
-            request_has_path_params=False,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value=accept_header_override.value
-            if accept_header_override is not None
-            else "application/json;q=1, text/event-stream;q=0",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(
-                request, False, False, "json", models.CreateChatCompletionRequestBody
-            ),
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
-        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = await self.do_request_async(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="createChatCompletion",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
+            document=utils.get_pydantic_model(document, models.Document),
+            pages=pages,
+            ocr_settings=utils.get_pydantic_model(
+                ocr_settings, Optional[models.OcrSettings]
             ),
-            request=req,
-            error_status_codes=["4XX", "5XX"],
-            stream=True,
-            retry_config=retry_config,
-        )
-
-        if utils.match_response(http_res, "200", "application/json"):
-            http_res_text = await utils.stream_to_text_async(http_res)
-            return unmarshal_json_response(
-                models.CreateChatCompletionResponseBody, http_res, http_res_text
-            )
-        if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.EventStreamAsync(
-                http_res,
-                lambda raw: utils.unmarshal_json(
-                    raw, models.CreateChatCompletionRouterResponseBody
-                ),
-                sentinel="[DONE]",
-                client_ref=self,
-            )
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.APIError("API error occurred", http_res, http_res_text)
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.APIError("API error occurred", http_res, http_res_text)
-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.APIError("Unexpected response received", http_res, http_res_text)
-
-    def images_generate(
-        self,
-        *,
-        prompt: str,
-        model: str,
-        background: OptionalNullable[models_createimageop.Background] = UNSET,
-        moderation: OptionalNullable[models_createimageop.Moderation] = UNSET,
-        n: OptionalNullable[int] = 1,
-        output_compression: OptionalNullable[int] = UNSET,
-        output_format: OptionalNullable[models_createimageop.OutputFormat] = UNSET,
-        quality: OptionalNullable[models_createimageop.Quality] = UNSET,
-        response_format: OptionalNullable[
-            models_createimageop.CreateImageResponseFormat
-        ] = UNSET,
-        size: OptionalNullable[str] = UNSET,
-        style: OptionalNullable[models_createimageop.Style] = UNSET,
-        orq: Optional[
-            Union[
-                models_createimageop.CreateImageOrq,
-                models_createimageop.CreateImageOrqTypedDict,
-            ]
-        ] = None,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.CreateImageResponseBody:
-        r"""Create image
-
-        Create an Image
-
-        :param prompt: A text description of the desired image(s).
-        :param model: The model to use for image generation. One of `openai/dall-e-2`, `openai/dall-e-3`, or `openai/gpt-image-1`.
-        :param background: Allows to set transparency for the background of the generated image(s). This parameter is only supported for `openai/gpt-image-1`.
-        :param moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must be either `low` or `auto`.
-        :param n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
-        :param output_compression: The compression level (0-100%) for the generated images. This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` output formats.
-        :param output_format: The format in which the generated images are returned. This parameter is only supported for `openai/gpt-image-1`.
-        :param quality: The quality of the image that will be generated. `auto` will automatically select the best quality for the given model.
-        :param response_format: The format in which generated images are returned. Must be one of `url` or `b64_json`. This parameter isn't supported for `gpt-image-1` which will always return base64-encoded images.
-        :param size: The size of the generated images. Must be one of the specified sizes for each model.
-        :param style: The style of the generated images. This parameter is only supported for `openai/dall-e-3`. Must be one of `vivid` or `natural`.
-        :param orq:
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if timeout_ms is None:
-            timeout_ms = 600000
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-
-        request = models.CreateImageRequestBody(
-            prompt=prompt,
-            background=background,
-            model=model,
-            moderation=moderation,
-            n=n,
-            output_compression=output_compression,
-            output_format=output_format,
-            quality=quality,
-            response_format=response_format,
-            size=size,
-            style=style,
-            orq=utils.get_pydantic_model(orq, Optional[models.CreateImageOrq]),
         )

         req = self._build_request(
             method="POST",
-            path="/v2/gateway/images/generations",
+            path="/v2/router/ocr",
             base_url=base_url,
             url_variables=url_variables,
             request=request,
@@ -648,7 +119,7 @@ class Router(BaseSDK):
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request, False, False, "json", models.CreateImageRequestBody
+                request, False, False, "json", models.PostV2RouterOcrRequestBody
             ),
             allow_empty_value=None,
             timeout_ms=timeout_ms,
@@ -666,7 +137,7 @@ class Router(BaseSDK):
             hook_ctx=HookContext(
                 config=self.sdk_configuration,
                 base_url=base_url or "",
-                operation_id="createImage",
+                operation_id="post_/v2/router/ocr",
                 oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
@@ -678,7 +149,7 @@ class Router(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.CreateImageResponseBody, http_res)
+            return unmarshal_json_response(models.PostV2RouterOcrResponseBody, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.APIError("API error occurred", http_res, http_res_text)
@@ -688,49 +159,32 @@ class Router(BaseSDK):

         raise models.APIError("Unexpected response received", http_res)

-    async def images_generate_async(
+    async def ocr_async(
         self,
         *,
-        prompt: str,
         model: str,
-        background: OptionalNullable[models_createimageop.Background] = UNSET,
-        moderation: OptionalNullable[models_createimageop.Moderation] = UNSET,
-        n: OptionalNullable[int] = 1,
-        output_compression: OptionalNullable[int] = UNSET,
-        output_format: OptionalNullable[models_createimageop.OutputFormat] = UNSET,
-        quality: OptionalNullable[models_createimageop.Quality] = UNSET,
-        response_format: OptionalNullable[
-            models_createimageop.CreateImageResponseFormat
-        ] = UNSET,
-        size: OptionalNullable[str] = UNSET,
-        style: OptionalNullable[models_createimageop.Style] = UNSET,
-        orq: Optional[
+        document: Union[
+            models_post_v2_router_ocrop.Document,
+            models_post_v2_router_ocrop.DocumentTypedDict,
+        ],
+        pages: OptionalNullable[List[int]] = UNSET,
+        ocr_settings: Optional[
             Union[
-                models_createimageop.CreateImageOrq,
-                models_createimageop.CreateImageOrqTypedDict,
+                models_post_v2_router_ocrop.OcrSettings,
+                models_post_v2_router_ocrop.OcrSettingsTypedDict,
             ]
         ] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.CreateImageResponseBody:
-        r"""Create image
-
-        Create an Image
+    ) -> models.PostV2RouterOcrResponseBody:
+        r"""Extracts text content while maintaining document structure and hierarchy

-        :param prompt: A text description of the desired image(s).
-        :param model: The model to use for image generation. One of `openai/dall-e-2`, `openai/dall-e-3`, or `openai/gpt-image-1`.
-        :param background: Allows to set transparency for the background of the generated image(s). This parameter is only supported for `openai/gpt-image-1`.
-        :param moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must be either `low` or `auto`.
-        :param n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
-        :param output_compression: The compression level (0-100%) for the generated images. This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` output formats.
-        :param output_format: The format in which the generated images are returned. This parameter is only supported for `openai/gpt-image-1`.
-        :param quality: The quality of the image that will be generated. `auto` will automatically select the best quality for the given model.
-        :param response_format: The format in which generated images are returned. Must be one of `url` or `b64_json`. This parameter isn't supported for `gpt-image-1` which will always return base64-encoded images.
-        :param size: The size of the generated images. Must be one of the specified sizes for each model.
-        :param style: The style of the generated images. This parameter is only supported for `openai/dall-e-3`. Must be one of `vivid` or `natural`.
-        :param orq:
+        :param model: ID of the model to use for OCR.
+        :param document: Document to run OCR on. Can be a DocumentURLChunk or ImageURLChunk.
+        :param pages: Specific pages to process. Can be a single number, range, or list. Starts from 0. Null for all pages.
+        :param ocr_settings: Optional settings for the OCR run
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -749,24 +203,18 @@ class Router(BaseSDK):
         else:
             base_url = self._get_url(base_url, url_variables)

-        request = models.CreateImageRequestBody(
-            prompt=prompt,
-            background=background,
+        request = models.PostV2RouterOcrRequestBody(
             model=model,
-            moderation=moderation,
-            n=n,
-            output_compression=output_compression,
-            output_format=output_format,
-            quality=quality,
-            response_format=response_format,
-            size=size,
-            style=style,
-            orq=utils.get_pydantic_model(orq, Optional[models.CreateImageOrq]),
+            document=utils.get_pydantic_model(document, models.Document),
+            pages=pages,
+            ocr_settings=utils.get_pydantic_model(
+                ocr_settings, Optional[models.OcrSettings]
+            ),
         )

         req = self._build_request_async(
             method="POST",
-            path="/v2/gateway/images/generations",
+            path="/v2/router/ocr",
             base_url=base_url,
             url_variables=url_variables,
             request=request,
@@ -778,7 +226,7 @@ class Router(BaseSDK):
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request, False, False, "json", models.CreateImageRequestBody
+                request, False, False, "json", models.PostV2RouterOcrRequestBody
             ),
             allow_empty_value=None,
             timeout_ms=timeout_ms,
@@ -796,7 +244,7 @@ class Router(BaseSDK):
             hook_ctx=HookContext(
                 config=self.sdk_configuration,
                 base_url=base_url or "",
-                operation_id="createImage",
+                operation_id="post_/v2/router/ocr",
                 oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
@@ -808,7 +256,7 @@ class Router(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.CreateImageResponseBody, http_res)
+            return unmarshal_json_response(models.PostV2RouterOcrResponseBody, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.APIError("API error occurred", http_res, http_res_text)
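For reference, a minimal sketch of calling the new OCR operation that replaces `images_generate` in this file. The keyword names `model`, `document`, `pages`, and `ocr_settings` come straight from the `ocr` signature above; the model ID and the document payload's field names are assumptions, since the exact shape of the `Document` union (a DocumentURLChunk or ImageURLChunk per the docstring) lives in `post_v2_router_ocrop.py`, which is not part of this file's diff:

```python
# Hypothetical usage of Router.ocr (client constructed as in the earlier sketch).
result = client.router.ocr(
    model="mistral/mistral-ocr-latest",  # assumed model ID
    document={
        "type": "document_url",          # assumed DocumentURLChunk field names
        "document_url": "https://example.com/sample.pdf",
    },
    pages=[0, 1],  # zero-based; omit to process all pages
)

# The async variant mirrors the sync one:
# result = await client.router.ocr_async(model=..., document=...)
```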