orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.3.0rc7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (166)
  1. orq_ai_sdk/_version.py +3 -3
  2. orq_ai_sdk/agents.py +186 -186
  3. orq_ai_sdk/audio.py +30 -0
  4. orq_ai_sdk/basesdk.py +20 -6
  5. orq_ai_sdk/chat.py +22 -0
  6. orq_ai_sdk/completions.py +438 -0
  7. orq_ai_sdk/contacts.py +43 -855
  8. orq_ai_sdk/deployments.py +61 -0
  9. orq_ai_sdk/edits.py +364 -0
  10. orq_ai_sdk/embeddings.py +344 -0
  11. orq_ai_sdk/generations.py +370 -0
  12. orq_ai_sdk/identities.py +1037 -0
  13. orq_ai_sdk/images.py +28 -0
  14. orq_ai_sdk/models/__init__.py +5746 -737
  15. orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
  16. orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
  17. orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
  18. orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
  19. orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
  20. orq_ai_sdk/models/agentresponsemessage.py +18 -2
  21. orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
  22. orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
  23. orq_ai_sdk/models/conversationresponse.py +31 -20
  24. orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
  25. orq_ai_sdk/models/createagentrequestop.py +1945 -383
  26. orq_ai_sdk/models/createagentresponse.py +147 -91
  27. orq_ai_sdk/models/createagentresponserequestop.py +111 -2
  28. orq_ai_sdk/models/createchatcompletionop.py +1381 -861
  29. orq_ai_sdk/models/createchunkop.py +46 -19
  30. orq_ai_sdk/models/createcompletionop.py +2078 -0
  31. orq_ai_sdk/models/createcontactop.py +45 -56
  32. orq_ai_sdk/models/createconversationop.py +61 -39
  33. orq_ai_sdk/models/createconversationresponseop.py +68 -4
  34. orq_ai_sdk/models/createdatasetitemop.py +424 -80
  35. orq_ai_sdk/models/createdatasetop.py +19 -2
  36. orq_ai_sdk/models/createdatasourceop.py +92 -26
  37. orq_ai_sdk/models/createembeddingop.py +579 -0
  38. orq_ai_sdk/models/createevalop.py +552 -24
  39. orq_ai_sdk/models/createidentityop.py +176 -0
  40. orq_ai_sdk/models/createimageeditop.py +715 -0
  41. orq_ai_sdk/models/createimageop.py +407 -128
  42. orq_ai_sdk/models/createimagevariationop.py +706 -0
  43. orq_ai_sdk/models/createknowledgeop.py +186 -121
  44. orq_ai_sdk/models/creatememorydocumentop.py +50 -1
  45. orq_ai_sdk/models/creatememoryop.py +34 -21
  46. orq_ai_sdk/models/creatememorystoreop.py +34 -1
  47. orq_ai_sdk/models/createmoderationop.py +521 -0
  48. orq_ai_sdk/models/createpromptop.py +2759 -1251
  49. orq_ai_sdk/models/creatererankop.py +608 -0
  50. orq_ai_sdk/models/createresponseop.py +2567 -0
  51. orq_ai_sdk/models/createspeechop.py +466 -0
  52. orq_ai_sdk/models/createtoolop.py +537 -12
  53. orq_ai_sdk/models/createtranscriptionop.py +732 -0
  54. orq_ai_sdk/models/createtranslationop.py +702 -0
  55. orq_ai_sdk/models/datapart.py +18 -1
  56. orq_ai_sdk/models/deletechunksop.py +34 -1
  57. orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
  58. orq_ai_sdk/models/deletepromptop.py +26 -0
  59. orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
  60. orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
  61. orq_ai_sdk/models/deploymentinvokeop.py +168 -173
  62. orq_ai_sdk/models/deploymentsop.py +195 -58
  63. orq_ai_sdk/models/deploymentstreamop.py +652 -304
  64. orq_ai_sdk/models/errorpart.py +18 -1
  65. orq_ai_sdk/models/filecontentpartschema.py +18 -1
  66. orq_ai_sdk/models/filegetop.py +19 -2
  67. orq_ai_sdk/models/filelistop.py +35 -2
  68. orq_ai_sdk/models/filepart.py +50 -1
  69. orq_ai_sdk/models/fileuploadop.py +51 -2
  70. orq_ai_sdk/models/generateconversationnameop.py +31 -20
  71. orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
  72. orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
  73. orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
  74. orq_ai_sdk/models/getallmemoriesop.py +34 -21
  75. orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
  76. orq_ai_sdk/models/getallmemorystoresop.py +34 -1
  77. orq_ai_sdk/models/getallpromptsop.py +1696 -230
  78. orq_ai_sdk/models/getalltoolsop.py +325 -8
  79. orq_ai_sdk/models/getchunkscountop.py +34 -1
  80. orq_ai_sdk/models/getevalsop.py +395 -43
  81. orq_ai_sdk/models/getonechunkop.py +14 -19
  82. orq_ai_sdk/models/getoneknowledgeop.py +116 -96
  83. orq_ai_sdk/models/getonepromptop.py +1679 -230
  84. orq_ai_sdk/models/getpromptversionop.py +1676 -216
  85. orq_ai_sdk/models/imagecontentpartschema.py +50 -1
  86. orq_ai_sdk/models/internal/globals.py +18 -1
  87. orq_ai_sdk/models/invokeagentop.py +140 -2
  88. orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
  89. orq_ai_sdk/models/invokeevalop.py +160 -131
  90. orq_ai_sdk/models/listagentsop.py +805 -166
  91. orq_ai_sdk/models/listchunksop.py +32 -19
  92. orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
  93. orq_ai_sdk/models/listconversationsop.py +18 -1
  94. orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
  95. orq_ai_sdk/models/listdatasetsop.py +35 -2
  96. orq_ai_sdk/models/listdatasourcesop.py +35 -26
  97. orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
  98. orq_ai_sdk/models/listknowledgebasesop.py +132 -96
  99. orq_ai_sdk/models/listmodelsop.py +1 -0
  100. orq_ai_sdk/models/listpromptversionsop.py +1690 -216
  101. orq_ai_sdk/models/parseop.py +161 -17
  102. orq_ai_sdk/models/partdoneevent.py +19 -2
  103. orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
  104. orq_ai_sdk/models/publiccontact.py +27 -4
  105. orq_ai_sdk/models/publicidentity.py +62 -0
  106. orq_ai_sdk/models/reasoningpart.py +19 -2
  107. orq_ai_sdk/models/refusalpartschema.py +18 -1
  108. orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
  109. orq_ai_sdk/models/responsedoneevent.py +114 -84
  110. orq_ai_sdk/models/responsestartedevent.py +18 -1
  111. orq_ai_sdk/models/retrieveagentrequestop.py +799 -166
  112. orq_ai_sdk/models/retrievedatapointop.py +236 -42
  113. orq_ai_sdk/models/retrievedatasetop.py +19 -2
  114. orq_ai_sdk/models/retrievedatasourceop.py +17 -26
  115. orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
  116. orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
  117. orq_ai_sdk/models/retrievememoryop.py +18 -21
  118. orq_ai_sdk/models/retrievememorystoreop.py +18 -1
  119. orq_ai_sdk/models/retrievetoolop.py +309 -8
  120. orq_ai_sdk/models/runagentop.py +1462 -196
  121. orq_ai_sdk/models/searchknowledgeop.py +108 -1
  122. orq_ai_sdk/models/security.py +18 -1
  123. orq_ai_sdk/models/streamagentop.py +93 -2
  124. orq_ai_sdk/models/streamrunagentop.py +1439 -194
  125. orq_ai_sdk/models/textcontentpartschema.py +34 -1
  126. orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
  127. orq_ai_sdk/models/toolcallpart.py +18 -1
  128. orq_ai_sdk/models/tooldoneevent.py +18 -1
  129. orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
  130. orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
  131. orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
  132. orq_ai_sdk/models/toolresultpart.py +18 -1
  133. orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
  134. orq_ai_sdk/models/toolstartedevent.py +18 -1
  135. orq_ai_sdk/models/updateagentop.py +1968 -397
  136. orq_ai_sdk/models/updatechunkop.py +46 -19
  137. orq_ai_sdk/models/updateconversationop.py +61 -39
  138. orq_ai_sdk/models/updatedatapointop.py +424 -80
  139. orq_ai_sdk/models/updatedatasetop.py +51 -2
  140. orq_ai_sdk/models/updatedatasourceop.py +17 -26
  141. orq_ai_sdk/models/updateevalop.py +577 -16
  142. orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
  143. orq_ai_sdk/models/updateknowledgeop.py +234 -190
  144. orq_ai_sdk/models/updatememorydocumentop.py +50 -1
  145. orq_ai_sdk/models/updatememoryop.py +50 -21
  146. orq_ai_sdk/models/updatememorystoreop.py +66 -1
  147. orq_ai_sdk/models/updatepromptop.py +2854 -1448
  148. orq_ai_sdk/models/updatetoolop.py +592 -9
  149. orq_ai_sdk/models/usermessagerequest.py +18 -2
  150. orq_ai_sdk/moderations.py +218 -0
  151. orq_ai_sdk/orq_completions.py +666 -0
  152. orq_ai_sdk/orq_responses.py +398 -0
  153. orq_ai_sdk/prompts.py +28 -36
  154. orq_ai_sdk/rerank.py +330 -0
  155. orq_ai_sdk/router.py +89 -641
  156. orq_ai_sdk/sdk.py +3 -0
  157. orq_ai_sdk/speech.py +333 -0
  158. orq_ai_sdk/transcriptions.py +416 -0
  159. orq_ai_sdk/translations.py +384 -0
  160. orq_ai_sdk/utils/__init__.py +13 -1
  161. orq_ai_sdk/variations.py +364 -0
  162. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/METADATA +169 -148
  163. orq_ai_sdk-4.3.0rc7.dist-info/RECORD +263 -0
  164. {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.3.0rc7.dist-info}/WHEEL +2 -1
  165. orq_ai_sdk-4.3.0rc7.dist-info/top_level.txt +1 -0
  166. orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/completions.py (new file)
@@ -0,0 +1,438 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from enum import Enum
+from orq_ai_sdk import models, utils
+from orq_ai_sdk._hooks import HookContext
+from orq_ai_sdk.models import createcompletionop as models_createcompletionop
+from orq_ai_sdk.types import OptionalNullable, UNSET
+from orq_ai_sdk.utils import eventstreaming, get_security_from_env
+from orq_ai_sdk.utils.unmarshal_json_response import unmarshal_json_response
+from typing import List, Mapping, Optional, Union
+
+
+class CreateAcceptEnum(str, Enum):
+    APPLICATION_JSON = "application/json"
+    TEXT_EVENT_STREAM = "text/event-stream"
+
+
+class Completions(BaseSDK):
+    def create(
+        self,
+        *,
+        model: str,
+        prompt: str,
+        echo: OptionalNullable[bool] = False,
+        frequency_penalty: OptionalNullable[float] = 0,
+        max_tokens: OptionalNullable[float] = 16,
+        presence_penalty: OptionalNullable[float] = 0,
+        seed: OptionalNullable[float] = UNSET,
+        stop: OptionalNullable[
+            Union[
+                models_createcompletionop.CreateCompletionStop,
+                models_createcompletionop.CreateCompletionStopTypedDict,
+            ]
+        ] = UNSET,
+        temperature: OptionalNullable[float] = 1,
+        top_p: OptionalNullable[float] = 1,
+        n: OptionalNullable[float] = 1,
+        user: Optional[str] = None,
+        name: Optional[str] = None,
+        fallbacks: Optional[
+            Union[
+                List[models_createcompletionop.CreateCompletionFallbacks],
+                List[models_createcompletionop.CreateCompletionFallbacksTypedDict],
+            ]
+        ] = None,
+        retry: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionRetry,
+                models_createcompletionop.CreateCompletionRetryTypedDict,
+            ]
+        ] = None,
+        cache: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionCache,
+                models_createcompletionop.CreateCompletionCacheTypedDict,
+            ]
+        ] = None,
+        load_balancer: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionLoadBalancer,
+                models_createcompletionop.CreateCompletionLoadBalancerTypedDict,
+            ]
+        ] = None,
+        timeout: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionTimeout,
+                models_createcompletionop.CreateCompletionTimeoutTypedDict,
+            ]
+        ] = None,
+        orq: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionOrq,
+                models_createcompletionop.CreateCompletionOrqTypedDict,
+            ]
+        ] = None,
+        stream: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        accept_header_override: Optional[CreateAcceptEnum] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.CreateCompletionResponse:
+        r"""Create completion
+
+        For sending requests to legacy completion models
+
+        :param model: ID of the model to use
+        :param prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+        :param echo: Echo back the prompt in addition to the completion
+        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        :param max_tokens: The maximum number of tokens that can be generated in the completion.
+        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
+        :param stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+        :param n: How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota.
+        :param user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+        :param name: The name to display on the trace. If not specified, the default system name will be used.
+        :param fallbacks: Array of fallback models to use if primary model fails
+        :param retry: Retry configuration for the request
+        :param cache: Cache configuration for the request.
+        :param load_balancer: Load balancer configuration for the request.
+        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
+        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
+        :param stream:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param accept_header_override: Override the default accept header for this method
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if timeout_ms is None:
+            timeout_ms = 600000
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.CreateCompletionRequestBody(
+            model=model,
+            prompt=prompt,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            user=user,
+            name=name,
+            fallbacks=utils.get_pydantic_model(
+                fallbacks, Optional[List[models.CreateCompletionFallbacks]]
+            ),
+            retry=utils.get_pydantic_model(
+                retry, Optional[models.CreateCompletionRetry]
+            ),
+            cache=utils.get_pydantic_model(
+                cache, Optional[models.CreateCompletionCache]
+            ),
+            load_balancer=utils.get_pydantic_model(
+                load_balancer, Optional[models.CreateCompletionLoadBalancer]
+            ),
+            timeout=utils.get_pydantic_model(
+                timeout, Optional[models.CreateCompletionTimeout]
+            ),
+            orq=utils.get_pydantic_model(orq, Optional[models.CreateCompletionOrq]),
+            stream=stream,
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v2/router/completions",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value=accept_header_override.value
+            if accept_header_override is not None
+            else "application/json;q=1, text/event-stream;q=0",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.CreateCompletionRequestBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="createCompletion",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            http_res_text = utils.stream_to_text(http_res)
+            return unmarshal_json_response(
+                models.CreateCompletionResponseBody, http_res, http_res_text
+            )
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStream(
+                http_res,
+                lambda raw: utils.unmarshal_json(
+                    raw, models.CreateCompletionRouterCompletionsResponseBody
+                ),
+                sentinel="[DONE]",
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+
+        http_res_text = utils.stream_to_text(http_res)
+        raise models.APIError("Unexpected response received", http_res, http_res_text)
+
+    async def create_async(
+        self,
+        *,
+        model: str,
+        prompt: str,
+        echo: OptionalNullable[bool] = False,
+        frequency_penalty: OptionalNullable[float] = 0,
+        max_tokens: OptionalNullable[float] = 16,
+        presence_penalty: OptionalNullable[float] = 0,
+        seed: OptionalNullable[float] = UNSET,
+        stop: OptionalNullable[
+            Union[
+                models_createcompletionop.CreateCompletionStop,
+                models_createcompletionop.CreateCompletionStopTypedDict,
+            ]
+        ] = UNSET,
+        temperature: OptionalNullable[float] = 1,
+        top_p: OptionalNullable[float] = 1,
+        n: OptionalNullable[float] = 1,
+        user: Optional[str] = None,
+        name: Optional[str] = None,
+        fallbacks: Optional[
+            Union[
+                List[models_createcompletionop.CreateCompletionFallbacks],
+                List[models_createcompletionop.CreateCompletionFallbacksTypedDict],
+            ]
+        ] = None,
+        retry: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionRetry,
+                models_createcompletionop.CreateCompletionRetryTypedDict,
+            ]
+        ] = None,
+        cache: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionCache,
+                models_createcompletionop.CreateCompletionCacheTypedDict,
+            ]
+        ] = None,
+        load_balancer: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionLoadBalancer,
+                models_createcompletionop.CreateCompletionLoadBalancerTypedDict,
+            ]
+        ] = None,
+        timeout: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionTimeout,
+                models_createcompletionop.CreateCompletionTimeoutTypedDict,
+            ]
+        ] = None,
+        orq: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionOrq,
+                models_createcompletionop.CreateCompletionOrqTypedDict,
+            ]
+        ] = None,
+        stream: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        accept_header_override: Optional[CreateAcceptEnum] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.CreateCompletionResponse:
+        r"""Create completion
+
+        For sending requests to legacy completion models
+
+        :param model: ID of the model to use
+        :param prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+        :param echo: Echo back the prompt in addition to the completion
+        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        :param max_tokens: The maximum number of tokens that can be generated in the completion.
+        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
+        :param stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+        :param n: How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota.
+        :param user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+        :param name: The name to display on the trace. If not specified, the default system name will be used.
+        :param fallbacks: Array of fallback models to use if primary model fails
+        :param retry: Retry configuration for the request
+        :param cache: Cache configuration for the request.
+        :param load_balancer: Load balancer configuration for the request.
+        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
+        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
+        :param stream:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param accept_header_override: Override the default accept header for this method
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if timeout_ms is None:
+            timeout_ms = 600000
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.CreateCompletionRequestBody(
+            model=model,
+            prompt=prompt,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            user=user,
+            name=name,
+            fallbacks=utils.get_pydantic_model(
+                fallbacks, Optional[List[models.CreateCompletionFallbacks]]
+            ),
+            retry=utils.get_pydantic_model(
+                retry, Optional[models.CreateCompletionRetry]
+            ),
+            cache=utils.get_pydantic_model(
+                cache, Optional[models.CreateCompletionCache]
+            ),
+            load_balancer=utils.get_pydantic_model(
+                load_balancer, Optional[models.CreateCompletionLoadBalancer]
+            ),
+            timeout=utils.get_pydantic_model(
+                timeout, Optional[models.CreateCompletionTimeout]
+            ),
+            orq=utils.get_pydantic_model(orq, Optional[models.CreateCompletionOrq]),
+            stream=stream,
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v2/router/completions",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value=accept_header_override.value
+            if accept_header_override is not None
+            else "application/json;q=1, text/event-stream;q=0",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.CreateCompletionRequestBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="createCompletion",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            return unmarshal_json_response(
+                models.CreateCompletionResponseBody, http_res, http_res_text
+            )
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStreamAsync(
+                http_res,
+                lambda raw: utils.unmarshal_json(
+                    raw, models.CreateCompletionRouterCompletionsResponseBody
+                ),
+                sentinel="[DONE]",
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+
+        http_res_text = await utils.stream_to_text_async(http_res)
+        raise models.APIError("Unexpected response received", http_res, http_res_text)
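For orientation, here is a minimal usage sketch of the new completions surface, assembled from the `create` signature in the hunk above. It assumes the SDK's top-level client is `Orq`, that this resource is mounted on the client as `completions`, and that `ORQ_API_KEY` holds a valid key; the model ID is a placeholder. None of those names are confirmed by this diff, so treat this as a sketch rather than documented usage.

import os

from orq_ai_sdk import Orq  # assumed entry point; not shown in this diff

with Orq(api_key=os.environ["ORQ_API_KEY"]) as orq:
    # Non-streaming call: per the "200 application/json" branch above,
    # this returns the unmarshalled CreateCompletionResponseBody.
    res = orq.completions.create(
        model="openai/gpt-3.5-turbo-instruct",  # placeholder model ID
        prompt="Write a haiku about diffs.",
        max_tokens=64,
        temperature=0.7,
    )
    print(res)

    # Streaming call: with stream=True the server can answer with
    # text/event-stream, and create() returns an EventStream that yields
    # parsed events until the "[DONE]" sentinel.
    for event in orq.completions.create(
        model="openai/gpt-3.5-turbo-instruct",
        prompt="Write a haiku about diffs.",
        stream=True,
    ):
        print(event)

The async twin, `create_async`, mirrors this shape: `await orq.completions.create_async(...)` for a JSON response, or iterate the returned `EventStreamAsync` with `async for` when streaming.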