orq-ai-sdk 4.2.0rc28__py3-none-any.whl → 4.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/basesdk.py +20 -6
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +332 -0
- orq_ai_sdk/contacts.py +43 -855
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +258 -0
- orq_ai_sdk/embeddings.py +238 -0
- orq_ai_sdk/generations.py +272 -0
- orq_ai_sdk/identities.py +1037 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +5341 -737
- orq_ai_sdk/models/actionreviewedstreamingevent.py +18 -1
- orq_ai_sdk/models/actionreviewrequestedstreamingevent.py +44 -1
- orq_ai_sdk/models/agenterroredstreamingevent.py +18 -1
- orq_ai_sdk/models/agentinactivestreamingevent.py +168 -70
- orq_ai_sdk/models/agentmessagecreatedstreamingevent.py +18 -2
- orq_ai_sdk/models/agentresponsemessage.py +18 -2
- orq_ai_sdk/models/agentstartedstreamingevent.py +127 -2
- orq_ai_sdk/models/agentthoughtstreamingevent.py +178 -211
- orq_ai_sdk/models/conversationresponse.py +31 -20
- orq_ai_sdk/models/conversationwithmessagesresponse.py +31 -20
- orq_ai_sdk/models/createagentrequestop.py +1922 -384
- orq_ai_sdk/models/createagentresponse.py +147 -91
- orq_ai_sdk/models/createagentresponserequestop.py +111 -2
- orq_ai_sdk/models/createchatcompletionop.py +1375 -861
- orq_ai_sdk/models/createchunkop.py +46 -19
- orq_ai_sdk/models/createcompletionop.py +1890 -0
- orq_ai_sdk/models/createcontactop.py +45 -56
- orq_ai_sdk/models/createconversationop.py +61 -39
- orq_ai_sdk/models/createconversationresponseop.py +68 -4
- orq_ai_sdk/models/createdatasetitemop.py +424 -80
- orq_ai_sdk/models/createdatasetop.py +19 -2
- orq_ai_sdk/models/createdatasourceop.py +92 -26
- orq_ai_sdk/models/createembeddingop.py +384 -0
- orq_ai_sdk/models/createevalop.py +552 -24
- orq_ai_sdk/models/createidentityop.py +176 -0
- orq_ai_sdk/models/createimageeditop.py +504 -0
- orq_ai_sdk/models/createimageop.py +208 -117
- orq_ai_sdk/models/createimagevariationop.py +486 -0
- orq_ai_sdk/models/createknowledgeop.py +186 -121
- orq_ai_sdk/models/creatememorydocumentop.py +50 -1
- orq_ai_sdk/models/creatememoryop.py +34 -21
- orq_ai_sdk/models/creatememorystoreop.py +34 -1
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +2748 -1252
- orq_ai_sdk/models/creatererankop.py +416 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +316 -0
- orq_ai_sdk/models/createtoolop.py +537 -12
- orq_ai_sdk/models/createtranscriptionop.py +562 -0
- orq_ai_sdk/models/createtranslationop.py +540 -0
- orq_ai_sdk/models/datapart.py +18 -1
- orq_ai_sdk/models/deletechunksop.py +34 -1
- orq_ai_sdk/models/{deletecontactop.py → deleteidentityop.py} +9 -9
- orq_ai_sdk/models/deletepromptop.py +26 -0
- orq_ai_sdk/models/deploymentcreatemetricop.py +362 -76
- orq_ai_sdk/models/deploymentgetconfigop.py +635 -194
- orq_ai_sdk/models/deploymentinvokeop.py +168 -173
- orq_ai_sdk/models/deploymentsop.py +195 -58
- orq_ai_sdk/models/deploymentstreamop.py +652 -304
- orq_ai_sdk/models/errorpart.py +18 -1
- orq_ai_sdk/models/filecontentpartschema.py +18 -1
- orq_ai_sdk/models/filegetop.py +19 -2
- orq_ai_sdk/models/filelistop.py +35 -2
- orq_ai_sdk/models/filepart.py +50 -1
- orq_ai_sdk/models/fileuploadop.py +51 -2
- orq_ai_sdk/models/generateconversationnameop.py +31 -20
- orq_ai_sdk/models/get_v2_evaluators_id_versionsop.py +34 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versions_version_id_op.py +18 -1
- orq_ai_sdk/models/get_v2_tools_tool_id_versionsop.py +34 -1
- orq_ai_sdk/models/getallmemoriesop.py +34 -21
- orq_ai_sdk/models/getallmemorydocumentsop.py +42 -1
- orq_ai_sdk/models/getallmemorystoresop.py +34 -1
- orq_ai_sdk/models/getallpromptsop.py +1690 -230
- orq_ai_sdk/models/getalltoolsop.py +325 -8
- orq_ai_sdk/models/getchunkscountop.py +34 -1
- orq_ai_sdk/models/getevalsop.py +395 -43
- orq_ai_sdk/models/getonechunkop.py +14 -19
- orq_ai_sdk/models/getoneknowledgeop.py +116 -96
- orq_ai_sdk/models/getonepromptop.py +1673 -230
- orq_ai_sdk/models/getpromptversionop.py +1670 -216
- orq_ai_sdk/models/imagecontentpartschema.py +50 -1
- orq_ai_sdk/models/internal/globals.py +18 -1
- orq_ai_sdk/models/invokeagentop.py +140 -2
- orq_ai_sdk/models/invokedeploymentrequest.py +418 -80
- orq_ai_sdk/models/invokeevalop.py +160 -131
- orq_ai_sdk/models/listagentsop.py +793 -166
- orq_ai_sdk/models/listchunksop.py +32 -19
- orq_ai_sdk/models/listchunkspaginatedop.py +46 -19
- orq_ai_sdk/models/listconversationsop.py +18 -1
- orq_ai_sdk/models/listdatasetdatapointsop.py +252 -42
- orq_ai_sdk/models/listdatasetsop.py +35 -2
- orq_ai_sdk/models/listdatasourcesop.py +35 -26
- orq_ai_sdk/models/{listcontactsop.py → listidentitiesop.py} +89 -79
- orq_ai_sdk/models/listknowledgebasesop.py +132 -96
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +1684 -216
- orq_ai_sdk/models/parseop.py +161 -17
- orq_ai_sdk/models/partdoneevent.py +19 -2
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +27 -4
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +19 -2
- orq_ai_sdk/models/refusalpartschema.py +18 -1
- orq_ai_sdk/models/remoteconfigsgetconfigop.py +34 -1
- orq_ai_sdk/models/responsedoneevent.py +114 -84
- orq_ai_sdk/models/responsestartedevent.py +18 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +787 -166
- orq_ai_sdk/models/retrievedatapointop.py +236 -42
- orq_ai_sdk/models/retrievedatasetop.py +19 -2
- orq_ai_sdk/models/retrievedatasourceop.py +17 -26
- orq_ai_sdk/models/{retrievecontactop.py → retrieveidentityop.py} +38 -41
- orq_ai_sdk/models/retrievememorydocumentop.py +18 -1
- orq_ai_sdk/models/retrievememoryop.py +18 -21
- orq_ai_sdk/models/retrievememorystoreop.py +18 -1
- orq_ai_sdk/models/retrievetoolop.py +309 -8
- orq_ai_sdk/models/runagentop.py +1451 -197
- orq_ai_sdk/models/searchknowledgeop.py +108 -1
- orq_ai_sdk/models/security.py +18 -1
- orq_ai_sdk/models/streamagentop.py +93 -2
- orq_ai_sdk/models/streamrunagentop.py +1428 -195
- orq_ai_sdk/models/textcontentpartschema.py +34 -1
- orq_ai_sdk/models/thinkingconfigenabledschema.py +18 -1
- orq_ai_sdk/models/toolcallpart.py +18 -1
- orq_ai_sdk/models/tooldoneevent.py +18 -1
- orq_ai_sdk/models/toolexecutionfailedstreamingevent.py +50 -1
- orq_ai_sdk/models/toolexecutionfinishedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolexecutionstartedstreamingevent.py +34 -1
- orq_ai_sdk/models/toolresultpart.py +18 -1
- orq_ai_sdk/models/toolreviewrequestedevent.py +18 -1
- orq_ai_sdk/models/toolstartedevent.py +18 -1
- orq_ai_sdk/models/updateagentop.py +1951 -404
- orq_ai_sdk/models/updatechunkop.py +46 -19
- orq_ai_sdk/models/updateconversationop.py +61 -39
- orq_ai_sdk/models/updatedatapointop.py +424 -80
- orq_ai_sdk/models/updatedatasetop.py +51 -2
- orq_ai_sdk/models/updatedatasourceop.py +17 -26
- orq_ai_sdk/models/updateevalop.py +577 -16
- orq_ai_sdk/models/{updatecontactop.py → updateidentityop.py} +78 -68
- orq_ai_sdk/models/updateknowledgeop.py +234 -190
- orq_ai_sdk/models/updatememorydocumentop.py +50 -1
- orq_ai_sdk/models/updatememoryop.py +50 -21
- orq_ai_sdk/models/updatememorystoreop.py +66 -1
- orq_ai_sdk/models/updatepromptop.py +2844 -1450
- orq_ai_sdk/models/updatetoolop.py +592 -9
- orq_ai_sdk/models/usermessagerequest.py +18 -2
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +660 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/prompts.py +28 -36
- orq_ai_sdk/rerank.py +232 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/sdk.py +3 -0
- orq_ai_sdk/speech.py +251 -0
- orq_ai_sdk/transcriptions.py +326 -0
- orq_ai_sdk/translations.py +298 -0
- orq_ai_sdk/utils/__init__.py +13 -1
- orq_ai_sdk/variations.py +254 -0
- orq_ai_sdk-4.2.6.dist-info/METADATA +888 -0
- orq_ai_sdk-4.2.6.dist-info/RECORD +263 -0
- {orq_ai_sdk-4.2.0rc28.dist-info → orq_ai_sdk-4.2.6.dist-info}/WHEEL +2 -1
- orq_ai_sdk-4.2.6.dist-info/top_level.txt +1 -0
- orq_ai_sdk-4.2.0rc28.dist-info/METADATA +0 -867
- orq_ai_sdk-4.2.0rc28.dist-info/RECORD +0 -233
orq_ai_sdk/_hooks/globalhook.py
CHANGED
orq_ai_sdk/_version.py
CHANGED
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "orq-ai-sdk"
-__version__: str = "4.2.0rc28"
+__version__: str = "4.2.6"
 __openapi_doc_version__: str = "2.0"
-__gen_version__: str = "2.
-__user_agent__: str = "speakeasy-sdk/python 4.2.
+__gen_version__: str = "2.799.0"
+__user_agent__: str = "speakeasy-sdk/python 4.2.6 2.799.0 2.0 orq-ai-sdk"
 
 try:
     if __package__ is not None:
orq_ai_sdk/audio.py
ADDED
@@ -0,0 +1,30 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from .sdkconfiguration import SDKConfiguration
+from orq_ai_sdk.speech import Speech
+from orq_ai_sdk.transcriptions import Transcriptions
+from orq_ai_sdk.translations import Translations
+from typing import Optional
+
+
+class Audio(BaseSDK):
+    speech: Speech
+    transcriptions: Transcriptions
+    translations: Translations
+
+    def __init__(
+        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
+    ) -> None:
+        BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
+        self.sdk_configuration = sdk_config
+        self._init_sdks()
+
+    def _init_sdks(self):
+        self.speech = Speech(self.sdk_configuration, parent_ref=self.parent_ref)
+        self.transcriptions = Transcriptions(
+            self.sdk_configuration, parent_ref=self.parent_ref
+        )
+        self.translations = Translations(
+            self.sdk_configuration, parent_ref=self.parent_ref
+        )
orq_ai_sdk/basesdk.py
CHANGED
@@ -8,7 +8,12 @@ from orq_ai_sdk._hooks import (
     AfterSuccessContext,
     BeforeRequestContext,
 )
-from orq_ai_sdk.utils import
+from orq_ai_sdk.utils import (
+    RetryConfig,
+    SerializedRequestBody,
+    get_body_content,
+    run_sync_in_thread,
+)
 from typing import Callable, List, Mapping, Optional, Tuple
 from urllib.parse import parse_qs, urlparse
 
@@ -311,7 +316,10 @@ class BaseSDK:
         async def do():
            http_res = None
            try:
-                req =
+                req = await run_sync_in_thread(
+                    hooks.before_request, BeforeRequestContext(hook_ctx), request
+                )
+
                logger.debug(
                    "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
                    req.method,
@@ -325,7 +333,10 @@ class BaseSDK:
 
                http_res = await client.send(req, stream=stream)
            except Exception as e:
-                _, e =
+                _, e = await run_sync_in_thread(
+                    hooks.after_error, AfterErrorContext(hook_ctx), None, e
+                )
+
                if e is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise e
@@ -343,9 +354,10 @@ class BaseSDK:
            )
 
            if utils.match_status_codes(error_status_codes, http_res.status_code):
-                result, err =
-                    AfterErrorContext(hook_ctx), http_res, None
+                result, err = await run_sync_in_thread(
+                    hooks.after_error, AfterErrorContext(hook_ctx), http_res, None
                )
+
                if err is not None:
                    logger.debug("Request Exception", exc_info=True)
                    raise err
@@ -365,6 +377,8 @@ class BaseSDK:
        http_res = await do()
 
        if not utils.match_status_codes(error_status_codes, http_res.status_code):
-            http_res =
+            http_res = await run_sync_in_thread(
+                hooks.after_success, AfterSuccessContext(hook_ctx), http_res
+            )
 
        return http_res
orq_ai_sdk/chat.py
ADDED
@@ -0,0 +1,22 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from .sdkconfiguration import SDKConfiguration
+from orq_ai_sdk.orq_completions import OrqCompletions
+from typing import Optional
+
+
+class Chat(BaseSDK):
+    completions: OrqCompletions
+
+    def __init__(
+        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
+    ) -> None:
+        BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
+        self.sdk_configuration = sdk_config
+        self._init_sdks()
+
+    def _init_sdks(self):
+        self.completions = OrqCompletions(
+            self.sdk_configuration, parent_ref=self.parent_ref
+        )
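
The new audio.py and chat.py are thin namespace containers: each is a BaseSDK subclass that instantiates its child SDKs (Speech, Transcriptions, Translations; OrqCompletions) against the one shared SDKConfiguration. Assuming the root client wires these up as audio and chat attributes, as is conventional for Speakeasy-generated SDKs (the wiring lives in sdk.py, which this diff touches but does not show), access would look like the following sketch; the constructor argument is illustrative:

from orq_ai_sdk import Orq

# api_key is an assumed parameter name; consult the package README
# for the exact authentication options.
with Orq(api_key="YOUR_API_KEY") as client:
    chat_completions = client.chat.completions    # OrqCompletions
    speech = client.audio.speech                  # Speech
    transcriptions = client.audio.transcriptions  # Transcriptions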
orq_ai_sdk/completions.py
ADDED
@@ -0,0 +1,332 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from enum import Enum
+from orq_ai_sdk import models, utils
+from orq_ai_sdk._hooks import HookContext
+from orq_ai_sdk.models import createcompletionop as models_createcompletionop
+from orq_ai_sdk.types import OptionalNullable, UNSET
+from orq_ai_sdk.utils import eventstreaming, get_security_from_env
+from orq_ai_sdk.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Mapping, Optional, Union
+
+
+class CreateAcceptEnum(str, Enum):
+    APPLICATION_JSON = "application/json"
+    TEXT_EVENT_STREAM = "text/event-stream"
+
+
+class Completions(BaseSDK):
+    def create(
+        self,
+        *,
+        model: str,
+        prompt: str,
+        echo: OptionalNullable[bool] = False,
+        frequency_penalty: OptionalNullable[float] = 0,
+        max_tokens: OptionalNullable[float] = 16,
+        presence_penalty: OptionalNullable[float] = 0,
+        seed: OptionalNullable[float] = UNSET,
+        stop: OptionalNullable[
+            Union[
+                models_createcompletionop.CreateCompletionStop,
+                models_createcompletionop.CreateCompletionStopTypedDict,
+            ]
+        ] = UNSET,
+        temperature: OptionalNullable[float] = 1,
+        top_p: OptionalNullable[float] = 1,
+        n: OptionalNullable[float] = 1,
+        user: Optional[str] = None,
+        orq: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionOrq,
+                models_createcompletionop.CreateCompletionOrqTypedDict,
+            ]
+        ] = None,
+        stream: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        accept_header_override: Optional[CreateAcceptEnum] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.CreateCompletionResponse:
+        r"""Create completion
+
+        For sending requests to legacy completion models
+
+        :param model: ID of the model to use
+        :param prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+        :param echo: Echo back the prompt in addition to the completion
+        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        :param max_tokens: The maximum number of tokens that can be generated in the completion.
+        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
+        :param stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+        :param n: How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota.
+        :param user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
+        :param stream:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param accept_header_override: Override the default accept header for this method
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if timeout_ms is None:
+            timeout_ms = 600000
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.CreateCompletionRequestBody(
+            model=model,
+            prompt=prompt,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            user=user,
+            orq=utils.get_pydantic_model(orq, Optional[models.CreateCompletionOrq]),
+            stream=stream,
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v2/router/completions",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value=accept_header_override.value
+            if accept_header_override is not None
+            else "application/json;q=1, text/event-stream;q=0",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.CreateCompletionRequestBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="createCompletion",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            http_res_text = utils.stream_to_text(http_res)
+            return unmarshal_json_response(
+                models.CreateCompletionResponseBody, http_res, http_res_text
+            )
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStream(
+                http_res,
+                lambda raw: utils.unmarshal_json(
+                    raw, models.CreateCompletionRouterCompletionsResponseBody
+                ),
+                sentinel="[DONE]",
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+
+        http_res_text = utils.stream_to_text(http_res)
+        raise models.APIError("Unexpected response received", http_res, http_res_text)
+
+    async def create_async(
+        self,
+        *,
+        model: str,
+        prompt: str,
+        echo: OptionalNullable[bool] = False,
+        frequency_penalty: OptionalNullable[float] = 0,
+        max_tokens: OptionalNullable[float] = 16,
+        presence_penalty: OptionalNullable[float] = 0,
+        seed: OptionalNullable[float] = UNSET,
+        stop: OptionalNullable[
+            Union[
+                models_createcompletionop.CreateCompletionStop,
+                models_createcompletionop.CreateCompletionStopTypedDict,
+            ]
+        ] = UNSET,
+        temperature: OptionalNullable[float] = 1,
+        top_p: OptionalNullable[float] = 1,
+        n: OptionalNullable[float] = 1,
+        user: Optional[str] = None,
+        orq: Optional[
+            Union[
+                models_createcompletionop.CreateCompletionOrq,
+                models_createcompletionop.CreateCompletionOrqTypedDict,
+            ]
+        ] = None,
+        stream: Optional[bool] = False,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        accept_header_override: Optional[CreateAcceptEnum] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.CreateCompletionResponse:
+        r"""Create completion
+
+        For sending requests to legacy completion models
+
+        :param model: ID of the model to use
+        :param prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+        :param echo: Echo back the prompt in addition to the completion
+        :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        :param max_tokens: The maximum number of tokens that can be generated in the completion.
+        :param presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        :param seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
+        :param stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+        :param temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        :param top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+        :param n: How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota.
+        :param user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+        :param orq: Leverage Orq's intelligent routing capabilities to enhance your AI application with enterprise-grade reliability and observability. Orq provides automatic request management including retries on failures, model fallbacks for high availability, identity-level analytics tracking, conversation threading, and dynamic prompt templating with variable substitution.
+        :param stream:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param accept_header_override: Override the default accept header for this method
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if timeout_ms is None:
+            timeout_ms = 600000
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.CreateCompletionRequestBody(
+            model=model,
+            prompt=prompt,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            user=user,
+            orq=utils.get_pydantic_model(orq, Optional[models.CreateCompletionOrq]),
+            stream=stream,
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v2/router/completions",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value=accept_header_override.value
+            if accept_header_override is not None
+            else "application/json;q=1, text/event-stream;q=0",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.CreateCompletionRequestBody
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="createCompletion",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            return unmarshal_json_response(
+                models.CreateCompletionResponseBody, http_res, http_res_text
+            )
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStreamAsync(
+                http_res,
+                lambda raw: utils.unmarshal_json(
+                    raw, models.CreateCompletionRouterCompletionsResponseBody
+                ),
+                sentinel="[DONE]",
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.APIError("API error occurred", http_res, http_res_text)
+
+        http_res_text = await utils.stream_to_text_async(http_res)
+        raise models.APIError("Unexpected response received", http_res, http_res_text)