orq-ai-sdk 4.2.0rc48__py3-none-any.whl → 4.2.12__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (103)
  1. orq_ai_sdk/_hooks/globalhook.py +0 -1
  2. orq_ai_sdk/_version.py +3 -3
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/chat.py +22 -0
  5. orq_ai_sdk/completions.py +438 -0
  6. orq_ai_sdk/contacts.py +43 -886
  7. orq_ai_sdk/deployments.py +61 -0
  8. orq_ai_sdk/edits.py +364 -0
  9. orq_ai_sdk/embeddings.py +344 -0
  10. orq_ai_sdk/generations.py +370 -0
  11. orq_ai_sdk/images.py +28 -0
  12. orq_ai_sdk/models/__init__.py +3839 -424
  13. orq_ai_sdk/models/conversationresponse.py +1 -1
  14. orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
  15. orq_ai_sdk/models/createagentrequestop.py +768 -12
  16. orq_ai_sdk/models/createagentresponse.py +68 -2
  17. orq_ai_sdk/models/createchatcompletionop.py +538 -313
  18. orq_ai_sdk/models/createcompletionop.py +2078 -0
  19. orq_ai_sdk/models/createcontactop.py +5 -10
  20. orq_ai_sdk/models/createconversationop.py +1 -1
  21. orq_ai_sdk/models/createconversationresponseop.py +2 -2
  22. orq_ai_sdk/models/createdatasetitemop.py +4 -4
  23. orq_ai_sdk/models/createdatasetop.py +1 -1
  24. orq_ai_sdk/models/createdatasourceop.py +1 -1
  25. orq_ai_sdk/models/createembeddingop.py +579 -0
  26. orq_ai_sdk/models/createevalop.py +14 -14
  27. orq_ai_sdk/models/createidentityop.py +1 -1
  28. orq_ai_sdk/models/createimageeditop.py +715 -0
  29. orq_ai_sdk/models/createimageop.py +228 -82
  30. orq_ai_sdk/models/createimagevariationop.py +706 -0
  31. orq_ai_sdk/models/creatememoryop.py +4 -2
  32. orq_ai_sdk/models/createmoderationop.py +521 -0
  33. orq_ai_sdk/models/createpromptop.py +375 -6
  34. orq_ai_sdk/models/creatererankop.py +608 -0
  35. orq_ai_sdk/models/createresponseop.py +2567 -0
  36. orq_ai_sdk/models/createspeechop.py +466 -0
  37. orq_ai_sdk/models/createtoolop.py +6 -6
  38. orq_ai_sdk/models/createtranscriptionop.py +732 -0
  39. orq_ai_sdk/models/createtranslationop.py +702 -0
  40. orq_ai_sdk/models/deploymentgetconfigop.py +17 -7
  41. orq_ai_sdk/models/deploymentsop.py +1 -0
  42. orq_ai_sdk/models/deploymentstreamop.py +7 -0
  43. orq_ai_sdk/models/filegetop.py +1 -1
  44. orq_ai_sdk/models/filelistop.py +1 -1
  45. orq_ai_sdk/models/fileuploadop.py +1 -1
  46. orq_ai_sdk/models/generateconversationnameop.py +1 -1
  47. orq_ai_sdk/models/getallmemoriesop.py +4 -2
  48. orq_ai_sdk/models/getallpromptsop.py +188 -3
  49. orq_ai_sdk/models/getalltoolsop.py +6 -6
  50. orq_ai_sdk/models/getevalsop.py +17 -17
  51. orq_ai_sdk/models/getonepromptop.py +188 -3
  52. orq_ai_sdk/models/getpromptversionop.py +188 -3
  53. orq_ai_sdk/models/invokedeploymentrequest.py +11 -4
  54. orq_ai_sdk/models/listagentsop.py +372 -0
  55. orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
  56. orq_ai_sdk/models/listdatasetsop.py +1 -1
  57. orq_ai_sdk/models/listdatasourcesop.py +1 -1
  58. orq_ai_sdk/models/listidentitiesop.py +1 -1
  59. orq_ai_sdk/models/listmodelsop.py +1 -0
  60. orq_ai_sdk/models/listpromptversionsop.py +188 -3
  61. orq_ai_sdk/models/partdoneevent.py +1 -1
  62. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  63. orq_ai_sdk/models/publiccontact.py +9 -3
  64. orq_ai_sdk/models/publicidentity.py +62 -0
  65. orq_ai_sdk/models/reasoningpart.py +1 -1
  66. orq_ai_sdk/models/responsedoneevent.py +14 -11
  67. orq_ai_sdk/models/retrieveagentrequestop.py +382 -0
  68. orq_ai_sdk/models/retrievedatapointop.py +4 -4
  69. orq_ai_sdk/models/retrievedatasetop.py +1 -1
  70. orq_ai_sdk/models/retrievedatasourceop.py +1 -1
  71. orq_ai_sdk/models/retrieveidentityop.py +1 -1
  72. orq_ai_sdk/models/retrievememoryop.py +4 -2
  73. orq_ai_sdk/models/retrievetoolop.py +6 -6
  74. orq_ai_sdk/models/runagentop.py +379 -9
  75. orq_ai_sdk/models/streamrunagentop.py +385 -9
  76. orq_ai_sdk/models/updateagentop.py +770 -12
  77. orq_ai_sdk/models/updateconversationop.py +1 -1
  78. orq_ai_sdk/models/updatedatapointop.py +4 -4
  79. orq_ai_sdk/models/updatedatasetop.py +1 -1
  80. orq_ai_sdk/models/updatedatasourceop.py +1 -1
  81. orq_ai_sdk/models/updateevalop.py +14 -14
  82. orq_ai_sdk/models/updateidentityop.py +1 -1
  83. orq_ai_sdk/models/updatememoryop.py +4 -2
  84. orq_ai_sdk/models/updatepromptop.py +375 -6
  85. orq_ai_sdk/models/updatetoolop.py +7 -7
  86. orq_ai_sdk/moderations.py +218 -0
  87. orq_ai_sdk/orq_completions.py +666 -0
  88. orq_ai_sdk/orq_responses.py +398 -0
  89. orq_ai_sdk/rerank.py +330 -0
  90. orq_ai_sdk/router.py +89 -641
  91. orq_ai_sdk/speech.py +333 -0
  92. orq_ai_sdk/transcriptions.py +416 -0
  93. orq_ai_sdk/translations.py +384 -0
  94. orq_ai_sdk/variations.py +364 -0
  95. orq_ai_sdk-4.2.12.dist-info/METADATA +888 -0
  96. {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/RECORD +98 -75
  97. {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/WHEEL +1 -1
  98. orq_ai_sdk/models/deletecontactop.py +0 -44
  99. orq_ai_sdk/models/listcontactsop.py +0 -265
  100. orq_ai_sdk/models/retrievecontactop.py +0 -142
  101. orq_ai_sdk/models/updatecontactop.py +0 -233
  102. orq_ai_sdk-4.2.0rc48.dist-info/METADATA +0 -788
  103. {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/top_level.txt +0 -0
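Taken together, the file list shows this release graduating the release candidate to stable while adding an OpenAI-style router surface (audio, chat, completions, embeddings, edits, generations, variations, moderations, rerank, speech, transcriptions, translations) and removing the standalone contacts CRUD operations. A minimal sketch of how the new namespaces hang off the client, assuming the usual Orq entry point and an ORQ_API_KEY environment variable; the attribute names follow the files added in this diff, but the client constructor and the exact mounting of each sub-SDK are assumptions, not confirmed by the diff itself:

# Hedged sketch of the new 4.2.12 router namespaces; sub-SDK attribute
# names are inferred from the files added in this diff (audio.py,
# chat.py, completions.py), not from documented API.
import os
from orq_ai_sdk import Orq

client = Orq(api_key=os.getenv("ORQ_API_KEY", ""))

client.audio        # Speech / Transcriptions / Translations (audio.py)
client.chat         # OrqCompletions under chat.completions (chat.py)
client.completions  # legacy completions endpoint (completions.py)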
orq_ai_sdk/_hooks/globalhook.py CHANGED
@@ -42,4 +42,3 @@ class GlobalHook(BeforeRequestHook):
             return httpx.Request(method=request.method, url=request.url, extensions=request.extensions, headers=request.headers, content=data)
 
         return request
-
orq_ai_sdk/_version.py CHANGED
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "orq-ai-sdk"
-__version__: str = "4.2.0-rc.48"
+__version__: str = "4.2.12"
 __openapi_doc_version__: str = "2.0"
-__gen_version__: str = "2.797.1"
-__user_agent__: str = "speakeasy-sdk/python 4.2.0-rc.48 2.797.1 2.0 orq-ai-sdk"
+__gen_version__: str = "2.801.0"
+__user_agent__: str = "speakeasy-sdk/python 4.2.12 2.801.0 2.0 orq-ai-sdk"
 
 try:
     if __package__ is not None:
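The user-agent string is composed by the Speakeasy generator as "speakeasy-sdk/<language> <sdk_version> <gen_version> <openapi_doc_version> <package>", so all three bumped values above must stay in sync. A quick sanity check (plain Python, not part of the package):

# Sanity check that the bumped fields compose into the user-agent string
# exactly as written in the diff above.
parts = ("speakeasy-sdk/python", "4.2.12", "2.801.0", "2.0", "orq-ai-sdk")
assert " ".join(parts) == "speakeasy-sdk/python 4.2.12 2.801.0 2.0 orq-ai-sdk"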
orq_ai_sdk/audio.py ADDED
@@ -0,0 +1,30 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from .sdkconfiguration import SDKConfiguration
+from orq_ai_sdk.speech import Speech
+from orq_ai_sdk.transcriptions import Transcriptions
+from orq_ai_sdk.translations import Translations
+from typing import Optional
+
+
+class Audio(BaseSDK):
+    speech: Speech
+    transcriptions: Transcriptions
+    translations: Translations
+
+    def __init__(
+        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
+    ) -> None:
+        BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
+        self.sdk_configuration = sdk_config
+        self._init_sdks()
+
+    def _init_sdks(self):
+        self.speech = Speech(self.sdk_configuration, parent_ref=self.parent_ref)
+        self.transcriptions = Transcriptions(
+            self.sdk_configuration, parent_ref=self.parent_ref
+        )
+        self.translations = Translations(
+            self.sdk_configuration, parent_ref=self.parent_ref
+        )
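Audio is a thin namespace object: it instantiates the three sub-SDKs against the same SDKConfiguration and shares parent_ref, so auth, retries, and hooks behave identically across them. Reaching them from a client would look like this sketch; the audio attribute on the root client is inferred from this file and is not itself shown in the diff:

# Hedged sketch: the Audio namespace fans out to three sub-SDKs that all
# share one SDKConfiguration.
import os
from orq_ai_sdk import Orq

client = Orq(api_key=os.getenv("ORQ_API_KEY", ""))
print(type(client.audio.speech))          # orq_ai_sdk.speech.Speech
print(type(client.audio.transcriptions))  # orq_ai_sdk.transcriptions.Transcriptions
print(type(client.audio.translations))    # orq_ai_sdk.translations.Translations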
orq_ai_sdk/chat.py ADDED
@@ -0,0 +1,22 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from .sdkconfiguration import SDKConfiguration
+from orq_ai_sdk.orq_completions import OrqCompletions
+from typing import Optional
+
+
+class Chat(BaseSDK):
+    completions: OrqCompletions
+
+    def __init__(
+        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
+    ) -> None:
+        BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
+        self.sdk_configuration = sdk_config
+        self._init_sdks()
+
+    def _init_sdks(self):
+        self.completions = OrqCompletions(
+            self.sdk_configuration, parent_ref=self.parent_ref
+        )
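Chat mirrors the OpenAI client shape: its only child is completions, backed by the new OrqCompletions class (orq_completions.py, +666 lines in this release). A hedged call sketch; OrqCompletions.create's signature is not shown in this diff, so the model ID and message shape below are assumptions based on the chat-completions convention:

# Hedged sketch of the chat.completions namespace added by chat.py.
import os
from orq_ai_sdk import Orq

client = Orq(api_key=os.getenv("ORQ_API_KEY", ""))
res = client.chat.completions.create(
    model="openai/gpt-4o",  # placeholder model ID, an assumption
    messages=[{"role": "user", "content": "Hello"}],
)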
orq_ai_sdk/completions.py ADDED
@@ -0,0 +1,438 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from enum import Enum
+from orq_ai_sdk import models, utils
+from orq_ai_sdk._hooks import HookContext
+from orq_ai_sdk.models import createcompletionop as models_createcompletionop
+from orq_ai_sdk.types import OptionalNullable, UNSET
+from orq_ai_sdk.utils import eventstreaming, get_security_from_env
+from orq_ai_sdk.utils.unmarshal_json_response import unmarshal_json_response
+from typing import List, Mapping, Optional, Union
+
+
+class CreateAcceptEnum(str, Enum):
+    APPLICATION_JSON = "application/json"
+    TEXT_EVENT_STREAM = "text/event-stream"
+
+
+class Completions(BaseSDK):
+    def create(
+        self,
+        *,
+        model: str,
+        prompt: str,
+        echo: OptionalNullable[bool] = False,
+        frequency_penalty: OptionalNullable[float] = 0,
+        max_tokens: OptionalNullable[float] = 16,
+        presence_penalty: OptionalNullable[float] = 0,
+        seed: OptionalNullable[float] = UNSET,
+        stop: OptionalNullable[
+            Union[
+                models_createcompletionop.CreateCompletionStop,
+                models_createcompletionop.CreateCompletionStopTypedDict,
+            ]
+        ] = UNSET,
+        temperature: OptionalNullable[float] = 1,
+        top_p: OptionalNullable[float] = 1,
+        n: OptionalNullable[float] = 1,
+        user: Optional[str] = None,
+        name: Optional[str] = None,
+        fallbacks: Optional[
+            Union[
+                List[models_createcompletionop.CreateCompletionFallbacks],
+                List[models_createcompletionop.CreateCompletionFallbacksTypedDict],
+            ]
+        ] = None,
+        retry: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionRetry,
+                models_createcompletionop.CreateCompletionRetryTypedDict,
+            ]
+        ] = None,
+        cache: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionCache,
+                models_createcompletionop.CreateCompletionCacheTypedDict,
+            ]
+        ] = None,
+        load_balancer: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionLoadBalancer,
+                models_createcompletionop.CreateCompletionLoadBalancerTypedDict,
+            ]
+        ] = None,
+        timeout: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionTimeout,
+                models_createcompletionop.CreateCompletionTimeoutTypedDict,
+            ]
+        ] = None,
+        orq: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionOrq,
+                models_createcompletionop.CreateCompletionOrqTypedDict,
+            ]
+        ] = None,
+        stream: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        accept_header_override: Optional[CreateAcceptEnum] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.CreateCompletionResponse:
+        r"""Create completion
+
+        For sending requests to legacy completion models
+
+        :param model: ID of the model to use
+        :param prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+        :param echo: Echo back the prompt in addition to the completion
+        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        :param max_tokens: The maximum number of tokens that can be generated in the completion.
+        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
+        :param stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+        :param n: How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota.
+        :param user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+        :param name: The name to display on the trace. If not specified, the default system name will be used.
+        :param fallbacks: Array of fallback models to use if primary model fails
+        :param retry: Retry configuration for the request
+        :param cache: Cache configuration for the request.
+        :param load_balancer: Load balancer configuration for the request.
+        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
+        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
+        :param stream:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param accept_header_override: Override the default accept header for this method
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if timeout_ms is None:
+            timeout_ms = 600000
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.CreateCompletionRequestBody(
+            model=model,
+            prompt=prompt,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            user=user,
+            name=name,
+            fallbacks=utils.get_pydantic_model(
+                fallbacks, Optional[List[models.CreateCompletionFallbacks]]
+            ),
+            retry=utils.get_pydantic_model(
+                retry, Optional[models.CreateCompletionRetry]
+            ),
+            cache=utils.get_pydantic_model(
+                cache, Optional[models.CreateCompletionCache]
+            ),
+            load_balancer=utils.get_pydantic_model(
+                load_balancer, Optional[models.CreateCompletionLoadBalancer]
+            ),
+            timeout=utils.get_pydantic_model(
+                timeout, Optional[models.CreateCompletionTimeout]
+            ),
+            orq=utils.get_pydantic_model(orq, Optional[models.CreateCompletionOrq]),
+            stream=stream,
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v2/router/completions",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value=accept_header_override.value
+            if accept_header_override is not None
+            else "application/json;q=1, text/event-stream;q=0",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.CreateCompletionRequestBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="createCompletion",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            http_res_text = utils.stream_to_text(http_res)
+            return unmarshal_json_response(
+                models.CreateCompletionResponseBody, http_res, http_res_text
+            )
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStream(
+                http_res,
+                lambda raw: utils.unmarshal_json(
+                    raw, models.CreateCompletionRouterCompletionsResponseBody
+                ),
+                sentinel="[DONE]",
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+
+        http_res_text = utils.stream_to_text(http_res)
+        raise models.APIError("Unexpected response received", http_res, http_res_text)
+
+    async def create_async(
+        self,
+        *,
+        model: str,
+        prompt: str,
+        echo: OptionalNullable[bool] = False,
+        frequency_penalty: OptionalNullable[float] = 0,
+        max_tokens: OptionalNullable[float] = 16,
+        presence_penalty: OptionalNullable[float] = 0,
+        seed: OptionalNullable[float] = UNSET,
+        stop: OptionalNullable[
+            Union[
+                models_createcompletionop.CreateCompletionStop,
+                models_createcompletionop.CreateCompletionStopTypedDict,
+            ]
+        ] = UNSET,
+        temperature: OptionalNullable[float] = 1,
+        top_p: OptionalNullable[float] = 1,
+        n: OptionalNullable[float] = 1,
+        user: Optional[str] = None,
+        name: Optional[str] = None,
+        fallbacks: Optional[
+            Union[
+                List[models_createcompletionop.CreateCompletionFallbacks],
+                List[models_createcompletionop.CreateCompletionFallbacksTypedDict],
+            ]
+        ] = None,
+        retry: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionRetry,
+                models_createcompletionop.CreateCompletionRetryTypedDict,
+            ]
+        ] = None,
+        cache: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionCache,
+                models_createcompletionop.CreateCompletionCacheTypedDict,
+            ]
+        ] = None,
+        load_balancer: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionLoadBalancer,
+                models_createcompletionop.CreateCompletionLoadBalancerTypedDict,
+            ]
+        ] = None,
+        timeout: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionTimeout,
+                models_createcompletionop.CreateCompletionTimeoutTypedDict,
+            ]
+        ] = None,
+        orq: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionOrq,
+                models_createcompletionop.CreateCompletionOrqTypedDict,
+            ]
+        ] = None,
+        stream: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        accept_header_override: Optional[CreateAcceptEnum] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.CreateCompletionResponse:
+        r"""Create completion
+
+        For sending requests to legacy completion models
+
+        :param model: ID of the model to use
+        :param prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+        :param echo: Echo back the prompt in addition to the completion
+        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        :param max_tokens: The maximum number of tokens that can be generated in the completion.
+        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
+        :param stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+        :param n: How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota.
+        :param user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+        :param name: The name to display on the trace. If not specified, the default system name will be used.
+        :param fallbacks: Array of fallback models to use if primary model fails
+        :param retry: Retry configuration for the request
+        :param cache: Cache configuration for the request.
+        :param load_balancer: Load balancer configuration for the request.
+        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
+        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
+        :param stream:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param accept_header_override: Override the default accept header for this method
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if timeout_ms is None:
+            timeout_ms = 600000
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.CreateCompletionRequestBody(
+            model=model,
+            prompt=prompt,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            user=user,
+            name=name,
+            fallbacks=utils.get_pydantic_model(
+                fallbacks, Optional[List[models.CreateCompletionFallbacks]]
+            ),
+            retry=utils.get_pydantic_model(
+                retry, Optional[models.CreateCompletionRetry]
+            ),
+            cache=utils.get_pydantic_model(
+                cache, Optional[models.CreateCompletionCache]
+            ),
+            load_balancer=utils.get_pydantic_model(
+                load_balancer, Optional[models.CreateCompletionLoadBalancer]
+            ),
+            timeout=utils.get_pydantic_model(
+                timeout, Optional[models.CreateCompletionTimeout]
+            ),
+            orq=utils.get_pydantic_model(orq, Optional[models.CreateCompletionOrq]),
+            stream=stream,
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v2/router/completions",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value=accept_header_override.value
+            if accept_header_override is not None
+            else "application/json;q=1, text/event-stream;q=0",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.CreateCompletionRequestBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="createCompletion",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            return unmarshal_json_response(
+                models.CreateCompletionResponseBody, http_res, http_res_text
+            )
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStreamAsync(
+                http_res,
+                lambda raw: utils.unmarshal_json(
+                    raw, models.CreateCompletionRouterCompletionsResponseBody
+                ),
+                sentinel="[DONE]",
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+
+        http_res_text = await utils.stream_to_text_async(http_res)
+        raise models.APIError("Unexpected response received", http_res, http_res_text)
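create negotiates the response shape through the Accept header: the default "application/json;q=1, text/event-stream;q=0" prefers a buffered JSON body, while a streaming response yields an eventstreaming.EventStream consumed until the "[DONE]" sentinel. A usage sketch, under the assumptions that Completions is mounted at client.completions (the mounting point is not part of this diff) and that the model ID is a placeholder:

# Hedged usage sketch for the new /v2/router/completions endpoint.
import os
from orq_ai_sdk import Orq

with Orq(api_key=os.getenv("ORQ_API_KEY", "")) as client:
    # Buffered JSON response (the default Accept preference).
    res = client.completions.create(
        model="openai/gpt-3.5-turbo-instruct",  # placeholder model ID
        prompt="Write a haiku about retries.",
        max_tokens=64,
    )
    print(res)

    # Server-sent events: iterate the returned EventStream; the SDK stops
    # at the "[DONE]" sentinel shown in the code above.
    event_stream = client.completions.create(
        model="openai/gpt-3.5-turbo-instruct",  # placeholder model ID
        prompt="Write a haiku about retries.",
        stream=True,
    )
    for chunk in event_stream:
        print(chunk)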