orq-ai-sdk 4.2.0rc28-py3-none-any.whl → 4.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +332 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +258 -0
  10. orq_ai_sdk/embeddings.py +238 -0
  11. orq_ai_sdk/generations.py +272 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5341 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1922 -384
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1375 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +1890 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +384 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +504 -0
  41. orq_ai_sdk/models/createimageop.py +208 -117
  42. orq_ai_sdk/models/createimagevariationop.py +486 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2748 -1252
  49. orq_ai_sdk/models/creatererankop.py +416 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +316 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +562 -0
  54. orq_ai_sdk/models/createtranslationop.py +540 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1690 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1673 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1670 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +793 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1684 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1451 -197
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1428 -195
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1951 -404
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2844 -1450
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +660 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +232 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +251 -0
  158. orq_ai_sdk/transcriptions.py +326 -0
  159. orq_ai_sdk/translations.py +298 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +254 -0
  162. orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
  163. orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
  167. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/orq_completions.py (new file)
@@ -0,0 +1,660 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from enum import Enum
+from orq_ai_sdk import models, utils
+from orq_ai_sdk._hooks import HookContext
+from orq_ai_sdk.models import createchatcompletionop as models_createchatcompletionop
+from orq_ai_sdk.types import OptionalNullable, UNSET
+from orq_ai_sdk.utils import eventstreaming, get_security_from_env
+from orq_ai_sdk.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Dict, List, Mapping, Optional, Union
+
+
+class CreateAcceptEnum(str, Enum):
+    APPLICATION_JSON = "application/json"
+    TEXT_EVENT_STREAM = "text/event-stream"
+
+
+class OrqCompletions(BaseSDK):
+    def create(
+        self,
+        *,
+        messages: Union[
+            List[models_createchatcompletionop.CreateChatCompletionMessages],
+            List[models_createchatcompletionop.CreateChatCompletionMessagesTypedDict],
+        ],
+        model: str,
+        metadata: Optional[Dict[str, str]] = None,
+        audio: OptionalNullable[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionAudio,
+                models_createchatcompletionop.CreateChatCompletionAudioTypedDict,
+            ]
+        ] = UNSET,
+        frequency_penalty: OptionalNullable[float] = UNSET,
+        max_tokens: OptionalNullable[int] = UNSET,
+        max_completion_tokens: OptionalNullable[int] = UNSET,
+        logprobs: OptionalNullable[bool] = UNSET,
+        top_logprobs: OptionalNullable[int] = UNSET,
+        n: OptionalNullable[int] = UNSET,
+        presence_penalty: OptionalNullable[float] = UNSET,
+        response_format: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionResponseFormat,
+                models_createchatcompletionop.CreateChatCompletionResponseFormatTypedDict,
+            ]
+        ] = None,
+        reasoning_effort: Optional[
+            models_createchatcompletionop.CreateChatCompletionReasoningEffort
+        ] = None,
+        verbosity: Optional[str] = None,
+        seed: OptionalNullable[float] = UNSET,
+        stop: OptionalNullable[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionStop,
+                models_createchatcompletionop.CreateChatCompletionStopTypedDict,
+            ]
+        ] = UNSET,
+        stream_options: OptionalNullable[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionStreamOptions,
+                models_createchatcompletionop.CreateChatCompletionStreamOptionsTypedDict,
+            ]
+        ] = UNSET,
+        thinking: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionThinking,
+                models_createchatcompletionop.CreateChatCompletionThinkingTypedDict,
+            ]
+        ] = None,
+        temperature: OptionalNullable[float] = UNSET,
+        top_p: OptionalNullable[float] = UNSET,
+        top_k: OptionalNullable[float] = UNSET,
+        tools: Optional[
+            Union[
+                List[models_createchatcompletionop.CreateChatCompletionTools],
+                List[models_createchatcompletionop.CreateChatCompletionToolsTypedDict],
+            ]
+        ] = None,
+        tool_choice: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionToolChoice,
+                models_createchatcompletionop.CreateChatCompletionToolChoiceTypedDict,
+            ]
+        ] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        modalities: OptionalNullable[
+            List[models_createchatcompletionop.CreateChatCompletionModalities]
+        ] = UNSET,
+        guardrails: Optional[
+            Union[
+                List[models_createchatcompletionop.CreateChatCompletionGuardrails],
+                List[
+                    models_createchatcompletionop.CreateChatCompletionGuardrailsTypedDict
+                ],
+            ]
+        ] = None,
+        fallbacks: Optional[
+            Union[
+                List[models_createchatcompletionop.CreateChatCompletionFallbacks],
+                List[
+                    models_createchatcompletionop.CreateChatCompletionFallbacksTypedDict
+                ],
+            ]
+        ] = None,
+        retry: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionRetry,
+                models_createchatcompletionop.CreateChatCompletionRetryTypedDict,
+            ]
+        ] = None,
+        cache: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionCache,
+                models_createchatcompletionop.CreateChatCompletionCacheTypedDict,
+            ]
+        ] = None,
+        load_balancer: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionLoadBalancer,
+                models_createchatcompletionop.CreateChatCompletionLoadBalancerTypedDict,
+            ]
+        ] = None,
+        timeout: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionTimeout,
+                models_createchatcompletionop.CreateChatCompletionTimeoutTypedDict,
+            ]
+        ] = None,
+        orq: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionOrq,
+                models_createchatcompletionop.CreateChatCompletionOrqTypedDict,
+            ]
+        ] = None,
+        stream: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        accept_header_override: Optional[CreateAcceptEnum] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.CreateChatCompletionResponse:
+        r"""Create chat completion
+
+        Creates a model response for the given chat conversation with support for retries, fallbacks, prompts, and variables.
+
+        :param messages: A list of messages comprising the conversation so far.
+        :param model: Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`. The AI Gateway offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the [Supported models](/docs/proxy/supported-models) to browse available models.
+        :param metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can have a maximum length of 64 characters and values can have a maximum length of 512 characters.
+        :param audio: Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more.
+        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        :param max_tokens: `[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+        This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+        :param max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens
+        :param logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.
+        :param top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
+        :param n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
+        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        :param response_format: An object specifying the format that the model must output
+        :param reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+        - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+        - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+        - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+        - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+        Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+        :param verbosity: Adjusts response verbosity. Lower levels yield shorter answers.
+        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
+        :param stop: Up to 4 sequences where the API will stop generating further tokens.
+        :param stream_options: Options for streaming response. Only set this when you set stream: true.
+        :param thinking:
+        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+        :param top_k: Limits the model to consider only the top k most likely tokens at each step.
+        :param tools: A list of tools the model may call.
+        :param tool_choice: Controls which (if any) tool is called by the model.
+        :param parallel_tool_calls: Whether to enable parallel function calling during tool use.
+        :param modalities: Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"].
+        :param guardrails: A list of guardrails to apply to the request.
+        :param fallbacks: Array of fallback models to use if primary model fails
+        :param retry: Retry configuration for the request
+        :param cache: Cache configuration for the request.
+        :param load_balancer: Load balancer configuration for the request.
+        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
+        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
+        :param stream:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param accept_header_override: Override the default accept header for this method
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if timeout_ms is None:
+            timeout_ms = 600000
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.CreateChatCompletionRequestBody(
+            messages=utils.get_pydantic_model(
+                messages, List[models.CreateChatCompletionMessages]
+            ),
+            model=model,
+            metadata=metadata,
+            audio=utils.get_pydantic_model(
+                audio, OptionalNullable[models.CreateChatCompletionAudio]
+            ),
+            frequency_penalty=frequency_penalty,
+            max_tokens=max_tokens,
+            max_completion_tokens=max_completion_tokens,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            n=n,
+            presence_penalty=presence_penalty,
+            response_format=utils.get_pydantic_model(
+                response_format, Optional[models.CreateChatCompletionResponseFormat]
+            ),
+            reasoning_effort=reasoning_effort,
+            verbosity=verbosity,
+            seed=seed,
+            stop=stop,
+            stream_options=utils.get_pydantic_model(
+                stream_options,
+                OptionalNullable[models.CreateChatCompletionStreamOptions],
+            ),
+            thinking=utils.get_pydantic_model(
+                thinking, Optional[models.CreateChatCompletionThinking]
+            ),
+            temperature=temperature,
+            top_p=top_p,
+            top_k=top_k,
+            tools=utils.get_pydantic_model(
+                tools, Optional[List[models.CreateChatCompletionTools]]
+            ),
+            tool_choice=utils.get_pydantic_model(
+                tool_choice, Optional[models.CreateChatCompletionToolChoice]
+            ),
+            parallel_tool_calls=parallel_tool_calls,
+            modalities=modalities,
+            guardrails=utils.get_pydantic_model(
+                guardrails, Optional[List[models.CreateChatCompletionGuardrails]]
+            ),
+            fallbacks=utils.get_pydantic_model(
+                fallbacks, Optional[List[models.CreateChatCompletionFallbacks]]
+            ),
+            retry=utils.get_pydantic_model(
+                retry, Optional[models.CreateChatCompletionRetry]
+            ),
+            cache=utils.get_pydantic_model(
+                cache, Optional[models.CreateChatCompletionCache]
+            ),
+            load_balancer=utils.get_pydantic_model(
+                load_balancer, Optional[models.CreateChatCompletionLoadBalancer]
+            ),
+            timeout=utils.get_pydantic_model(
+                timeout, Optional[models.CreateChatCompletionTimeout]
+            ),
+            orq=utils.get_pydantic_model(orq, Optional[models.CreateChatCompletionOrq]),
+            stream=stream,
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v2/router/chat/completions",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value=accept_header_override.value
+            if accept_header_override is not None
+            else "application/json;q=1, text/event-stream;q=0",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.CreateChatCompletionRequestBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="createChatCompletion",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            http_res_text = utils.stream_to_text(http_res)
+            return unmarshal_json_response(
+                models.CreateChatCompletionResponseBody, http_res, http_res_text
+            )
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStream(
+                http_res,
+                lambda raw: utils.unmarshal_json(
+                    raw, models.CreateChatCompletionRouterChatCompletionsResponseBody
+                ),
+                sentinel="[DONE]",
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+
+        http_res_text = utils.stream_to_text(http_res)
+        raise models.APIError("Unexpected response received", http_res, http_res_text)
+
+    async def create_async(
+        self,
+        *,
+        messages: Union[
+            List[models_createchatcompletionop.CreateChatCompletionMessages],
+            List[models_createchatcompletionop.CreateChatCompletionMessagesTypedDict],
+        ],
+        model: str,
+        metadata: Optional[Dict[str, str]] = None,
+        audio: OptionalNullable[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionAudio,
+                models_createchatcompletionop.CreateChatCompletionAudioTypedDict,
+            ]
+        ] = UNSET,
+        frequency_penalty: OptionalNullable[float] = UNSET,
+        max_tokens: OptionalNullable[int] = UNSET,
+        max_completion_tokens: OptionalNullable[int] = UNSET,
+        logprobs: OptionalNullable[bool] = UNSET,
+        top_logprobs: OptionalNullable[int] = UNSET,
+        n: OptionalNullable[int] = UNSET,
+        presence_penalty: OptionalNullable[float] = UNSET,
+        response_format: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionResponseFormat,
+                models_createchatcompletionop.CreateChatCompletionResponseFormatTypedDict,
+            ]
+        ] = None,
+        reasoning_effort: Optional[
+            models_createchatcompletionop.CreateChatCompletionReasoningEffort
+        ] = None,
+        verbosity: Optional[str] = None,
+        seed: OptionalNullable[float] = UNSET,
+        stop: OptionalNullable[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionStop,
+                models_createchatcompletionop.CreateChatCompletionStopTypedDict,
+            ]
+        ] = UNSET,
+        stream_options: OptionalNullable[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionStreamOptions,
+                models_createchatcompletionop.CreateChatCompletionStreamOptionsTypedDict,
+            ]
+        ] = UNSET,
+        thinking: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionThinking,
+                models_createchatcompletionop.CreateChatCompletionThinkingTypedDict,
+            ]
+        ] = None,
+        temperature: OptionalNullable[float] = UNSET,
+        top_p: OptionalNullable[float] = UNSET,
+        top_k: OptionalNullable[float] = UNSET,
+        tools: Optional[
+            Union[
+                List[models_createchatcompletionop.CreateChatCompletionTools],
+                List[models_createchatcompletionop.CreateChatCompletionToolsTypedDict],
+            ]
+        ] = None,
+        tool_choice: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionToolChoice,
+                models_createchatcompletionop.CreateChatCompletionToolChoiceTypedDict,
+            ]
+        ] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        modalities: OptionalNullable[
+            List[models_createchatcompletionop.CreateChatCompletionModalities]
+        ] = UNSET,
+        guardrails: Optional[
+            Union[
+                List[models_createchatcompletionop.CreateChatCompletionGuardrails],
+                List[
+                    models_createchatcompletionop.CreateChatCompletionGuardrailsTypedDict
+                ],
+            ]
+        ] = None,
+        fallbacks: Optional[
+            Union[
+                List[models_createchatcompletionop.CreateChatCompletionFallbacks],
+                List[
+                    models_createchatcompletionop.CreateChatCompletionFallbacksTypedDict
+                ],
+            ]
+        ] = None,
+        retry: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionRetry,
+                models_createchatcompletionop.CreateChatCompletionRetryTypedDict,
+            ]
+        ] = None,
+        cache: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionCache,
+                models_createchatcompletionop.CreateChatCompletionCacheTypedDict,
+            ]
+        ] = None,
+        load_balancer: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionLoadBalancer,
+                models_createchatcompletionop.CreateChatCompletionLoadBalancerTypedDict,
+            ]
+        ] = None,
+        timeout: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionTimeout,
+                models_createchatcompletionop.CreateChatCompletionTimeoutTypedDict,
+            ]
+        ] = None,
+        orq: Optional[
+            Union[
+                models_createchatcompletionop.CreateChatCompletionOrq,
+                models_createchatcompletionop.CreateChatCompletionOrqTypedDict,
+            ]
+        ] = None,
+        stream: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        accept_header_override: Optional[CreateAcceptEnum] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.CreateChatCompletionResponse:
+        r"""Create chat completion
+
+        Creates a model response for the given chat conversation with support for retries, fallbacks, prompts, and variables.
+
+        :param messages: A list of messages comprising the conversation so far.
+        :param model: Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`. The AI Gateway offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the [Supported models](/docs/proxy/supported-models) to browse available models.
+        :param metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can have a maximum length of 64 characters and values can have a maximum length of 512 characters.
+        :param audio: Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more.
+        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        :param max_tokens: `[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
+
+        This value is now `deprecated` in favor of `max_completion_tokens`, and is not compatible with o1 series models.
+        :param max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens
+        :param logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.
+        :param top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
+        :param n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
+        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        :param response_format: An object specifying the format that the model must output
+        :param reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+
+        - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+        - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+        - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+        - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+
+        Any of \"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\".
+        :param verbosity: Adjusts response verbosity. Lower levels yield shorter answers.
+        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
+        :param stop: Up to 4 sequences where the API will stop generating further tokens.
+        :param stream_options: Options for streaming response. Only set this when you set stream: true.
+        :param thinking:
+        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+        :param top_k: Limits the model to consider only the top k most likely tokens at each step.
+        :param tools: A list of tools the model may call.
+        :param tool_choice: Controls which (if any) tool is called by the model.
+        :param parallel_tool_calls: Whether to enable parallel function calling during tool use.
+        :param modalities: Output types that you would like the model to generate. Most models are capable of generating text, which is the default: [\"text\"]. The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use: [\"text\", \"audio\"].
+        :param guardrails: A list of guardrails to apply to the request.
+        :param fallbacks: Array of fallback models to use if primary model fails
+        :param retry: Retry configuration for the request
+        :param cache: Cache configuration for the request.
+        :param load_balancer: Load balancer configuration for the request.
+        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
+        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
+        :param stream:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param accept_header_override: Override the default accept header for this method
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if timeout_ms is None:
+            timeout_ms = 600000
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.CreateChatCompletionRequestBody(
+            messages=utils.get_pydantic_model(
+                messages, List[models.CreateChatCompletionMessages]
+            ),
+            model=model,
+            metadata=metadata,
+            audio=utils.get_pydantic_model(
+                audio, OptionalNullable[models.CreateChatCompletionAudio]
+            ),
+            frequency_penalty=frequency_penalty,
+            max_tokens=max_tokens,
+            max_completion_tokens=max_completion_tokens,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            n=n,
+            presence_penalty=presence_penalty,
+            response_format=utils.get_pydantic_model(
+                response_format, Optional[models.CreateChatCompletionResponseFormat]
+            ),
+            reasoning_effort=reasoning_effort,
+            verbosity=verbosity,
+            seed=seed,
+            stop=stop,
+            stream_options=utils.get_pydantic_model(
+                stream_options,
+                OptionalNullable[models.CreateChatCompletionStreamOptions],
+            ),
+            thinking=utils.get_pydantic_model(
+                thinking, Optional[models.CreateChatCompletionThinking]
+            ),
+            temperature=temperature,
+            top_p=top_p,
+            top_k=top_k,
+            tools=utils.get_pydantic_model(
+                tools, Optional[List[models.CreateChatCompletionTools]]
+            ),
+            tool_choice=utils.get_pydantic_model(
+                tool_choice, Optional[models.CreateChatCompletionToolChoice]
+            ),
+            parallel_tool_calls=parallel_tool_calls,
+            modalities=modalities,
+            guardrails=utils.get_pydantic_model(
+                guardrails, Optional[List[models.CreateChatCompletionGuardrails]]
+            ),
+            fallbacks=utils.get_pydantic_model(
+                fallbacks, Optional[List[models.CreateChatCompletionFallbacks]]
+            ),
+            retry=utils.get_pydantic_model(
+                retry, Optional[models.CreateChatCompletionRetry]
+            ),
+            cache=utils.get_pydantic_model(
+                cache, Optional[models.CreateChatCompletionCache]
+            ),
+            load_balancer=utils.get_pydantic_model(
+                load_balancer, Optional[models.CreateChatCompletionLoadBalancer]
+            ),
+            timeout=utils.get_pydantic_model(
+                timeout, Optional[models.CreateChatCompletionTimeout]
+            ),
+            orq=utils.get_pydantic_model(orq, Optional[models.CreateChatCompletionOrq]),
+            stream=stream,
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v2/router/chat/completions",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value=accept_header_override.value
+            if accept_header_override is not None
+            else "application/json;q=1, text/event-stream;q=0",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.CreateChatCompletionRequestBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="createChatCompletion",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            return unmarshal_json_response(
+                models.CreateChatCompletionResponseBody, http_res, http_res_text
+            )
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStreamAsync(
+                http_res,
+                lambda raw: utils.unmarshal_json(
+                    raw, models.CreateChatCompletionRouterChatCompletionsResponseBody
+                ),
+                sentinel="[DONE]",
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+
+        http_res_text = await utils.stream_to_text_async(http_res)
+        raise models.APIError("Unexpected response received", http_res, http_res_text)