orq-ai-sdk 4.2.8-py3-none-any.whl → 4.2.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_version.py +2 -2
- orq_ai_sdk/completions.py +107 -1
- orq_ai_sdk/edits.py +107 -1
- orq_ai_sdk/embeddings.py +107 -1
- orq_ai_sdk/generations.py +99 -1
- orq_ai_sdk/models/__init__.py +405 -0
- orq_ai_sdk/models/conversationresponse.py +1 -1
- orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
- orq_ai_sdk/models/createagentrequestop.py +24 -0
- orq_ai_sdk/models/createchatcompletionop.py +6 -0
- orq_ai_sdk/models/createcompletionop.py +218 -30
- orq_ai_sdk/models/createcontactop.py +1 -1
- orq_ai_sdk/models/createconversationop.py +1 -1
- orq_ai_sdk/models/createconversationresponseop.py +2 -2
- orq_ai_sdk/models/createdatasetitemop.py +4 -4
- orq_ai_sdk/models/createdatasetop.py +1 -1
- orq_ai_sdk/models/createdatasourceop.py +1 -1
- orq_ai_sdk/models/createembeddingop.py +221 -26
- orq_ai_sdk/models/createevalop.py +14 -14
- orq_ai_sdk/models/createidentityop.py +1 -1
- orq_ai_sdk/models/createimageeditop.py +242 -31
- orq_ai_sdk/models/createimageop.py +210 -22
- orq_ai_sdk/models/createimagevariationop.py +251 -31
- orq_ai_sdk/models/createpromptop.py +12 -0
- orq_ai_sdk/models/creatererankop.py +218 -26
- orq_ai_sdk/models/createspeechop.py +174 -24
- orq_ai_sdk/models/createtoolop.py +6 -6
- orq_ai_sdk/models/createtranscriptionop.py +180 -10
- orq_ai_sdk/models/createtranslationop.py +172 -10
- orq_ai_sdk/models/filegetop.py +1 -1
- orq_ai_sdk/models/filelistop.py +1 -1
- orq_ai_sdk/models/fileuploadop.py +1 -1
- orq_ai_sdk/models/generateconversationnameop.py +1 -1
- orq_ai_sdk/models/getallpromptsop.py +6 -0
- orq_ai_sdk/models/getalltoolsop.py +6 -6
- orq_ai_sdk/models/getevalsop.py +14 -14
- orq_ai_sdk/models/getonepromptop.py +6 -0
- orq_ai_sdk/models/getpromptversionop.py +6 -0
- orq_ai_sdk/models/listagentsop.py +12 -0
- orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
- orq_ai_sdk/models/listdatasetsop.py +1 -1
- orq_ai_sdk/models/listdatasourcesop.py +1 -1
- orq_ai_sdk/models/listidentitiesop.py +1 -1
- orq_ai_sdk/models/listpromptversionsop.py +6 -0
- orq_ai_sdk/models/partdoneevent.py +1 -1
- orq_ai_sdk/models/reasoningpart.py +1 -1
- orq_ai_sdk/models/retrieveagentrequestop.py +12 -0
- orq_ai_sdk/models/retrievedatapointop.py +4 -4
- orq_ai_sdk/models/retrievedatasetop.py +1 -1
- orq_ai_sdk/models/retrievedatasourceop.py +1 -1
- orq_ai_sdk/models/retrieveidentityop.py +1 -1
- orq_ai_sdk/models/retrievetoolop.py +6 -6
- orq_ai_sdk/models/runagentop.py +13 -1
- orq_ai_sdk/models/streamrunagentop.py +13 -1
- orq_ai_sdk/models/updateagentop.py +24 -0
- orq_ai_sdk/models/updateconversationop.py +1 -1
- orq_ai_sdk/models/updatedatapointop.py +4 -4
- orq_ai_sdk/models/updatedatasetop.py +1 -1
- orq_ai_sdk/models/updatedatasourceop.py +1 -1
- orq_ai_sdk/models/updateevalop.py +14 -14
- orq_ai_sdk/models/updateidentityop.py +1 -1
- orq_ai_sdk/models/updatepromptop.py +12 -0
- orq_ai_sdk/models/updatetoolop.py +7 -7
- orq_ai_sdk/orq_completions.py +6 -0
- orq_ai_sdk/rerank.py +98 -0
- orq_ai_sdk/speech.py +83 -1
- orq_ai_sdk/transcriptions.py +90 -0
- orq_ai_sdk/translations.py +87 -1
- orq_ai_sdk/variations.py +111 -1
- {orq_ai_sdk-4.2.8.dist-info → orq_ai_sdk-4.2.9.dist-info}/METADATA +1 -1
- {orq_ai_sdk-4.2.8.dist-info → orq_ai_sdk-4.2.9.dist-info}/RECORD +73 -73
- {orq_ai_sdk-4.2.8.dist-info → orq_ai_sdk-4.2.9.dist-info}/WHEEL +1 -1
- {orq_ai_sdk-4.2.8.dist-info → orq_ai_sdk-4.2.9.dist-info}/top_level.txt +0 -0
@@ -533,6 +533,8 @@ class ModelConfigurationTimeout(BaseModel):
 class ModelConfigurationParametersTypedDict(TypedDict):
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""

+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[UpdateAgentModelConfigurationAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -602,6 +604,9 @@ class ModelConfigurationParametersTypedDict(TypedDict):
 class ModelConfigurationParameters(BaseModel):
     r"""Model behavior parameters that control how the model generates responses. Common parameters: `temperature` (0-1, randomness), `max_completion_tokens` (max output length), `top_p` (sampling diversity). Advanced: `frequency_penalty`, `presence_penalty`, `response_format` (JSON/structured), `reasoning_effort`, `seed` (reproducibility). Support varies by model - consult AI Gateway documentation."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[UpdateAgentModelConfigurationAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -697,6 +702,7 @@ class ModelConfigurationParameters(BaseModel):
     def serialize_model(self, handler):
         optional_fields = set(
             [
+                "name",
                 "audio",
                 "frequency_penalty",
                 "max_tokens",
@@ -1373,6 +1379,8 @@ class UpdateAgentFallbackModelConfigurationTimeout(BaseModel):
 class UpdateAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""

+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[UpdateAgentFallbackModelConfigurationAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -1454,6 +1462,9 @@ class UpdateAgentFallbackModelConfigurationParametersTypedDict(TypedDict):
 class UpdateAgentFallbackModelConfigurationParameters(BaseModel):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[UpdateAgentFallbackModelConfigurationAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -1557,6 +1568,7 @@ class UpdateAgentFallbackModelConfigurationParameters(BaseModel):
     def serialize_model(self, handler):
         optional_fields = set(
             [
+                "name",
                 "audio",
                 "frequency_penalty",
                 "max_tokens",
@@ -3561,6 +3573,8 @@ class UpdateAgentTimeout(BaseModel):
 class UpdateAgentParametersTypedDict(TypedDict):
     r"""Model behavior parameters (snake_case) stored as part of the agent configuration. These become the default parameters used when the agent is executed. Commonly used: temperature (0-1, controls randomness), max_completion_tokens (response length), top_p (nucleus sampling). Advanced: frequency_penalty, presence_penalty, response_format (JSON/structured output), reasoning_effort (for o1/thinking models), seed (reproducibility), stop sequences. Model-specific support varies. Runtime parameters in agent execution requests can override these defaults."""

+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[UpdateAgentAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -3630,6 +3644,9 @@ class UpdateAgentParametersTypedDict(TypedDict):
 class UpdateAgentParameters(BaseModel):
     r"""Model behavior parameters (snake_case) stored as part of the agent configuration. These become the default parameters used when the agent is executed. Commonly used: temperature (0-1, controls randomness), max_completion_tokens (response length), top_p (nucleus sampling). Advanced: frequency_penalty, presence_penalty, response_format (JSON/structured output), reasoning_effort (for o1/thinking models), seed (reproducibility), stop sequences. Model-specific support varies. Runtime parameters in agent execution requests can override these defaults."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[UpdateAgentAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -3725,6 +3742,7 @@ class UpdateAgentParameters(BaseModel):
     def serialize_model(self, handler):
         optional_fields = set(
             [
+                "name",
                 "audio",
                 "frequency_penalty",
                 "max_tokens",
@@ -4367,6 +4385,8 @@ class UpdateAgentFallbackModelConfigurationAgentsTimeout(BaseModel):
 class UpdateAgentFallbackModelConfigurationAgentsParametersTypedDict(TypedDict):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""

+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[
         Nullable[UpdateAgentFallbackModelConfigurationAgentsAudioTypedDict]
     ]
@@ -4458,6 +4478,9 @@ class UpdateAgentFallbackModelConfigurationAgentsParametersTypedDict(TypedDict):
 class UpdateAgentFallbackModelConfigurationAgentsParameters(BaseModel):
     r"""Optional model parameters specific to this fallback model. Overrides primary model parameters if this fallback is used."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[UpdateAgentFallbackModelConfigurationAgentsAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -4567,6 +4590,7 @@ class UpdateAgentFallbackModelConfigurationAgentsParameters(BaseModel):
     def serialize_model(self, handler):
         optional_fields = set(
             [
+                "name",
                 "audio",
                 "frequency_penalty",
                 "max_tokens",
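Every hunk above adds the same optional `name` field, documented as "The name to display on the trace", to the agent model-parameter models in updateagentop.py (the primary parameters, both fallback variants, and the stored agent defaults), plus a matching "name" entry in each serialize_model optional-fields list. A minimal sketch of constructing one of these models with the new field follows; the values are invented for illustration, and only the field names come from the diff.

from orq_ai_sdk.models.updateagentop import UpdateAgentParameters

# Hypothetical values; `name` is the field introduced in 4.2.9, the other two
# fields already existed (they appear in the serialize_model optional-fields list).
params = UpdateAgentParameters(
    name="billing-support-agent",  # label shown on the trace instead of the default system name
    max_tokens=512,
    frequency_penalty=0.2,
)

# exclude_unset keeps the payload to just the fields set above (pydantic v2).
print(params.model_dump(exclude_unset=True))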
orq_ai_sdk/models/updateconversationop.py
CHANGED
@@ -231,7 +231,7 @@ class UpdateConversationResponseBody(BaseModel):
     r"""Unix timestamp in milliseconds when the conversation was last modified."""

     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "conv_01kfvn6e02bgrcrkhxtn1kb9g3"
     )
     r"""Unique conversation identifier with `conv_` prefix."""

orq_ai_sdk/models/updatedatapointop.py
CHANGED
@@ -1445,7 +1445,7 @@ class UpdateDatapointEvaluations3(BaseModel):

     source: Optional[UpdateDatapointEvaluationsDatasetsResponseSource] = "orq"

-    reviewed_at: Optional[datetime] = parse_datetime("2026-01-
+    reviewed_at: Optional[datetime] = parse_datetime("2026-01-25T22:43:05.508Z")
     r"""The date and time the item was reviewed"""

     @model_serializer(mode="wrap")
@@ -1513,7 +1513,7 @@ class UpdateDatapointEvaluations2(BaseModel):

     source: Optional[UpdateDatapointEvaluationsDatasetsSource] = "orq"

-    reviewed_at: Optional[datetime] = parse_datetime("2026-01-
+    reviewed_at: Optional[datetime] = parse_datetime("2026-01-25T22:43:05.507Z")
     r"""The date and time the item was reviewed"""

     @model_serializer(mode="wrap")
@@ -1581,7 +1581,7 @@ class UpdateDatapointEvaluations1(BaseModel):

     source: Optional[UpdateDatapointEvaluationsSource] = "orq"

-    reviewed_at: Optional[datetime] = parse_datetime("2026-01-
+    reviewed_at: Optional[datetime] = parse_datetime("2026-01-25T22:43:05.507Z")
     r"""The date and time the item was reviewed"""

     @model_serializer(mode="wrap")
@@ -1684,7 +1684,7 @@ class UpdateDatapointResponseBody(BaseModel):
     created: Optional[datetime] = None
     r"""The date and time the resource was created"""

-    updated: Optional[datetime] = parse_datetime("2026-01-
+    updated: Optional[datetime] = parse_datetime("2026-01-25T22:42:51.693Z")
     r"""The date and time the resource was last updated"""

     @model_serializer(mode="wrap")
orq_ai_sdk/models/updatedatasetop.py
CHANGED
@@ -154,7 +154,7 @@ class UpdateDatasetResponseBody(BaseModel):
     created: Optional[datetime] = None
     r"""The date and time the resource was created"""

-    updated: Optional[datetime] = parse_datetime("2026-01-
+    updated: Optional[datetime] = parse_datetime("2026-01-25T22:42:51.693Z")
     r"""The date and time the resource was last updated"""

     @model_serializer(mode="wrap")
orq_ai_sdk/models/updatedatasourceop.py
CHANGED
@@ -104,7 +104,7 @@ class UpdateDatasourceResponseBody(BaseModel):
     r"""The number of chunks in the datasource"""

     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "01KFVN6EH2ANE9D6GBKQQW2JCN"
     )
     r"""The unique identifier of the data source"""

orq_ai_sdk/models/updateevalop.py
CHANGED
@@ -959,9 +959,9 @@ class ResponseBodyTypescript(BaseModel):

     key: str

-    created: Optional[str] = "2026-01-
+    created: Optional[str] = "2026-01-25T22:42:53.396Z"

-    updated: Optional[str] = "2026-01-
+    updated: Optional[str] = "2026-01-25T22:42:53.396Z"

     guardrail_config: Optional[
         UpdateEvalResponseBodyEvalsResponse200ApplicationJSON7GuardrailConfig
@@ -1155,9 +1155,9 @@ class ResponseBodyRagas(BaseModel):

     model: str

-    created: Optional[str] = "2026-01-
+    created: Optional[str] = "2026-01-25T22:42:53.396Z"

-    updated: Optional[str] = "2026-01-
+    updated: Optional[str] = "2026-01-25T22:42:53.396Z"

     guardrail_config: Optional[
         UpdateEvalResponseBodyEvalsResponse200ApplicationJSON6GuardrailConfig
@@ -1852,9 +1852,9 @@ class ResponseBodyFunction(BaseModel):

     key: str

-    created: Optional[str] = "2026-01-
+    created: Optional[str] = "2026-01-25T22:42:53.396Z"

-    updated: Optional[str] = "2026-01-
+    updated: Optional[str] = "2026-01-25T22:42:53.396Z"

     guardrail_config: Optional[
         UpdateEvalResponseBodyEvalsResponse200ApplicationJSONGuardrailConfig
@@ -2029,9 +2029,9 @@ class UpdateEvalResponseBodyPython(BaseModel):

     key: str

-    created: Optional[str] = "2026-01-
+    created: Optional[str] = "2026-01-25T22:42:53.396Z"

-    updated: Optional[str] = "2026-01-
+    updated: Optional[str] = "2026-01-25T22:42:53.396Z"

     guardrail_config: Optional[
         UpdateEvalResponseBodyEvalsResponse200GuardrailConfig
@@ -2213,9 +2213,9 @@ class UpdateEvalResponseBodyHTTP(BaseModel):

     key: str

-    created: Optional[str] = "2026-01-
+    created: Optional[str] = "2026-01-25T22:42:53.396Z"

-    updated: Optional[str] = "2026-01-
+    updated: Optional[str] = "2026-01-25T22:42:53.396Z"

     guardrail_config: Optional[UpdateEvalResponseBodyEvalsResponseGuardrailConfig] = (
         None
@@ -2368,9 +2368,9 @@ class UpdateEvalResponseBodyJSON(BaseModel):

     key: str

-    created: Optional[str] = "2026-01-
+    created: Optional[str] = "2026-01-25T22:42:53.396Z"

-    updated: Optional[str] = "2026-01-
+    updated: Optional[str] = "2026-01-25T22:42:53.396Z"

     guardrail_config: Optional[UpdateEvalResponseBodyEvalsGuardrailConfig] = None

@@ -2522,9 +2522,9 @@ class UpdateEvalResponseBodyLLM(BaseModel):

     model: str

-    created: Optional[str] = "2026-01-
+    created: Optional[str] = "2026-01-25T22:42:53.396Z"

-    updated: Optional[str] = "2026-01-
+    updated: Optional[str] = "2026-01-25T22:42:53.396Z"

     guardrail_config: Optional[UpdateEvalResponseBodyGuardrailConfig] = None

orq_ai_sdk/models/updateidentityop.py
CHANGED
@@ -194,7 +194,7 @@ class UpdateIdentityResponseBody(BaseModel):
     created: Optional[datetime] = None
     r"""The date and time the resource was created"""

-    updated: Optional[datetime] = parse_datetime("2026-01-
+    updated: Optional[datetime] = parse_datetime("2026-01-25T22:42:51.693Z")
     r"""The date and time the resource was last updated"""

     @model_serializer(mode="wrap")
orq_ai_sdk/models/updatepromptop.py
CHANGED
@@ -1128,6 +1128,8 @@ class UpdatePromptPromptInputTypedDict(TypedDict):
     r"""Array of messages that make up the conversation. Each message has a role (system, user, assistant, or tool) and content."""
     model: NotRequired[Nullable[str]]
     r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`. The full list of models can be found at https://docs.orq.ai/docs/ai-gateway-supported-models. Only chat models are supported."""
+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[UpdatePromptAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -1203,6 +1205,9 @@ class UpdatePromptPromptInput(BaseModel):
     model: OptionalNullable[str] = UNSET
     r"""Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-3-5-sonnet-20241022`. For private models, use format: `{workspaceKey}@{provider}/{model}`. The full list of models can be found at https://docs.orq.ai/docs/ai-gateway-supported-models. Only chat models are supported."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[UpdatePromptAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -1300,6 +1305,7 @@ class UpdatePromptPromptInput(BaseModel):
             [
                 "messages",
                 "model",
+                "name",
                 "audio",
                 "frequency_penalty",
                 "max_tokens",
@@ -3503,6 +3509,8 @@ UpdatePromptPromptsResponseMessages = Annotated[
 class UpdatePromptPromptFieldTypedDict(TypedDict):
     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""

+    name: NotRequired[str]
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
     audio: NotRequired[Nullable[UpdatePromptPromptsAudioTypedDict]]
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""
     frequency_penalty: NotRequired[Nullable[float]]
@@ -3577,6 +3585,9 @@ class UpdatePromptPromptFieldTypedDict(TypedDict):
 class UpdatePromptPromptField(BaseModel):
     r"""Prompt configuration with model and messages. Use this instead of prompt_config."""

+    name: Optional[str] = None
+    r"""The name to display on the trace. If not specified, the default system name will be used."""
+
     audio: OptionalNullable[UpdatePromptPromptsAudio] = UNSET
     r"""Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more."""

@@ -3680,6 +3691,7 @@ class UpdatePromptPromptField(BaseModel):
     def serialize_model(self, handler):
         optional_fields = set(
             [
+                "name",
                 "audio",
                 "frequency_penalty",
                 "max_tokens",
orq_ai_sdk/models/updatetoolop.py
CHANGED
@@ -259,7 +259,7 @@ class RequestBodyTools(BaseModel):

     schema_: Annotated[UpdateToolRequestBodyToolsSchema, pydantic.Field(alias="schema")]

-    id: Optional[str] = "
+    id: Optional[str] = "01KFVN6ECAKKZWM7C9VJMS4E18"

     description: Optional[str] = None

@@ -1214,7 +1214,7 @@ class UpdateToolResponseBodyCodeExecutionTool(BaseModel):
     code_tool: UpdateToolResponseBodyCodeTool

     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KFVN6EC5MYDCAMFJM4VHKMPY"
     )

     display_name: Optional[str] = None
@@ -1345,7 +1345,7 @@ class UpdateToolResponseBodyTools(BaseModel):
         UpdateToolResponseBodyToolsSchema, pydantic.Field(alias="schema")
     ]

-    id: Optional[str] = "
+    id: Optional[str] = "01KFVN6EC497801YG8BYE696ZD"

     description: Optional[str] = None

@@ -1476,7 +1476,7 @@ class UpdateToolResponseBodyMCPTool(BaseModel):
     mcp: UpdateToolResponseBodyMcp

     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KFVN6EC35QXNK3VNN8ZREJB2"
     )

     display_name: Optional[str] = None
@@ -1777,7 +1777,7 @@ class UpdateToolResponseBodyHTTPTool(BaseModel):
     http: UpdateToolResponseBodyHTTP

     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KFVN6EC13XW1297BB2G6QVF5"
     )

     display_name: Optional[str] = None
@@ -1972,7 +1972,7 @@ class UpdateToolResponseBodyJSONSchemaTool(BaseModel):
     json_schema: UpdateToolResponseBodyJSONSchema

     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KFVN6EC06J7W0KTB5SH0JBPX"
     )

     display_name: Optional[str] = None
@@ -2171,7 +2171,7 @@ class UpdateToolResponseBodyFunctionTool(BaseModel):
     function: UpdateToolResponseBodyFunction

     id: Annotated[Optional[str], pydantic.Field(alias="_id")] = (
-        "
+        "tool_01KFVN6EBQNA6CKEPNV8334XCJ"
     )

     display_name: Optional[str] = None
orq_ai_sdk/orq_completions.py
CHANGED
@@ -26,6 +26,7 @@ class OrqCompletions(BaseSDK):
         ],
         model: str,
         metadata: Optional[Dict[str, str]] = None,
+        name: Optional[str] = None,
         audio: OptionalNullable[
             Union[
                 models_createchatcompletionop.CreateChatCompletionAudio,
@@ -147,6 +148,7 @@ class OrqCompletions(BaseSDK):
         :param messages: A list of messages comprising the conversation so far.
         :param model: Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`. The AI Gateway offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the (Supported models)[/docs/proxy/supported-models] to browse available models.
         :param metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can have a maximum length of 64 characters and values can have a maximum length of 512 characters.
+        :param name: The name to display on the trace. If not specified, the default system name will be used.
         :param audio: Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more.
         :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
         :param max_tokens: `[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
@@ -211,6 +213,7 @@ class OrqCompletions(BaseSDK):
             ),
             model=model,
             metadata=metadata,
+            name=name,
             audio=utils.get_pydantic_model(
                 audio, OptionalNullable[models.CreateChatCompletionAudio]
             ),
@@ -347,6 +350,7 @@ class OrqCompletions(BaseSDK):
         ],
         model: str,
         metadata: Optional[Dict[str, str]] = None,
+        name: Optional[str] = None,
         audio: OptionalNullable[
             Union[
                 models_createchatcompletionop.CreateChatCompletionAudio,
@@ -468,6 +472,7 @@ class OrqCompletions(BaseSDK):
         :param messages: A list of messages comprising the conversation so far.
         :param model: Model ID used to generate the response, like `openai/gpt-4o` or `anthropic/claude-haiku-4-5-20251001`. The AI Gateway offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the (Supported models)[/docs/proxy/supported-models] to browse available models.
         :param metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can have a maximum length of 64 characters and values can have a maximum length of 512 characters.
+        :param name: The name to display on the trace. If not specified, the default system name will be used.
         :param audio: Parameters for audio output. Required when audio output is requested with modalities: [\"audio\"]. Learn more.
         :param frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
         :param max_tokens: `[Deprecated]`. The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.
@@ -532,6 +537,7 @@ class OrqCompletions(BaseSDK):
             ),
             model=model,
             metadata=metadata,
+            name=name,
             audio=utils.get_pydantic_model(
                 audio, OptionalNullable[models.CreateChatCompletionAudio]
             ),
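orq_completions.py threads the same `name` argument through both the synchronous and asynchronous create paths and forwards it into the chat-completion request body. A hedged usage sketch follows, assuming the client class is `Orq` and that OrqCompletions is mounted at `client.completions`; neither the constructor nor the attribute path is shown in this diff, only the `name`, `model`, `messages`, and `metadata` keywords are.

import os

from orq_ai_sdk import Orq  # assumed client entry point

client = Orq(api_key=os.environ["ORQ_API_KEY"])  # assumed constructor keyword

completion = client.completions.create(  # assumed mount point for OrqCompletions
    model="openai/gpt-4o",
    messages=[{"role": "user", "content": "Summarize our refund policy in one sentence."}],
    name="refund-policy-summarizer",  # new in 4.2.9: display name for the trace
    metadata={"team": "support"},
)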
orq_ai_sdk/rerank.py
CHANGED
@@ -19,6 +19,37 @@ class Rerank(BaseSDK):
         model: str,
         top_n: Optional[float] = None,
         filename: OptionalNullable[str] = UNSET,
+        name: Optional[str] = None,
+        fallbacks: Optional[
+            Union[
+                List[models_creatererankop.CreateRerankFallbacks],
+                List[models_creatererankop.CreateRerankFallbacksTypedDict],
+            ]
+        ] = None,
+        retry: Optional[
+            Union[
+                models_creatererankop.CreateRerankRetry,
+                models_creatererankop.CreateRerankRetryTypedDict,
+            ]
+        ] = None,
+        cache: Optional[
+            Union[
+                models_creatererankop.CreateRerankCache,
+                models_creatererankop.CreateRerankCacheTypedDict,
+            ]
+        ] = None,
+        load_balancer: Optional[
+            Union[
+                models_creatererankop.CreateRerankLoadBalancer,
+                models_creatererankop.CreateRerankLoadBalancerTypedDict,
+            ]
+        ] = None,
+        timeout: Optional[
+            Union[
+                models_creatererankop.CreateRerankTimeout,
+                models_creatererankop.CreateRerankTimeoutTypedDict,
+            ]
+        ] = None,
         orq: Optional[
             Union[
                 models_creatererankop.CreateRerankOrq,
@@ -39,6 +70,12 @@ class Rerank(BaseSDK):
         :param model: The identifier of the model to use
         :param top_n: The number of most relevant documents or indices to return, defaults to the length of the documents
         :param filename: The filename of the document to rerank
+        :param name: The name to display on the trace. If not specified, the default system name will be used.
+        :param fallbacks: Array of fallback models to use if primary model fails
+        :param retry: Retry configuration for the request
+        :param cache: Cache configuration for the request.
+        :param load_balancer: Load balancer configuration for the request.
+        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
         :param orq:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -64,6 +101,18 @@ class Rerank(BaseSDK):
             model=model,
             top_n=top_n,
             filename=filename,
+            name=name,
+            fallbacks=utils.get_pydantic_model(
+                fallbacks, Optional[List[models.CreateRerankFallbacks]]
+            ),
+            retry=utils.get_pydantic_model(retry, Optional[models.CreateRerankRetry]),
+            cache=utils.get_pydantic_model(cache, Optional[models.CreateRerankCache]),
+            load_balancer=utils.get_pydantic_model(
+                load_balancer, Optional[models.CreateRerankLoadBalancer]
+            ),
+            timeout=utils.get_pydantic_model(
+                timeout, Optional[models.CreateRerankTimeout]
+            ),
             orq=utils.get_pydantic_model(orq, Optional[models.CreateRerankOrq]),
         )

@@ -129,6 +178,37 @@ class Rerank(BaseSDK):
         model: str,
         top_n: Optional[float] = None,
         filename: OptionalNullable[str] = UNSET,
+        name: Optional[str] = None,
+        fallbacks: Optional[
+            Union[
+                List[models_creatererankop.CreateRerankFallbacks],
+                List[models_creatererankop.CreateRerankFallbacksTypedDict],
+            ]
+        ] = None,
+        retry: Optional[
+            Union[
+                models_creatererankop.CreateRerankRetry,
+                models_creatererankop.CreateRerankRetryTypedDict,
+            ]
+        ] = None,
+        cache: Optional[
+            Union[
+                models_creatererankop.CreateRerankCache,
+                models_creatererankop.CreateRerankCacheTypedDict,
+            ]
+        ] = None,
+        load_balancer: Optional[
+            Union[
+                models_creatererankop.CreateRerankLoadBalancer,
+                models_creatererankop.CreateRerankLoadBalancerTypedDict,
+            ]
+        ] = None,
+        timeout: Optional[
+            Union[
+                models_creatererankop.CreateRerankTimeout,
+                models_creatererankop.CreateRerankTimeoutTypedDict,
+            ]
+        ] = None,
         orq: Optional[
             Union[
                 models_creatererankop.CreateRerankOrq,
@@ -149,6 +229,12 @@ class Rerank(BaseSDK):
         :param model: The identifier of the model to use
         :param top_n: The number of most relevant documents or indices to return, defaults to the length of the documents
         :param filename: The filename of the document to rerank
+        :param name: The name to display on the trace. If not specified, the default system name will be used.
+        :param fallbacks: Array of fallback models to use if primary model fails
+        :param retry: Retry configuration for the request
+        :param cache: Cache configuration for the request.
+        :param load_balancer: Load balancer configuration for the request.
+        :param timeout: Timeout configuration to apply to the request. If the request exceeds the timeout, it will be retried or fallback to the next model if configured.
         :param orq:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -174,6 +260,18 @@ class Rerank(BaseSDK):
             model=model,
             top_n=top_n,
             filename=filename,
+            name=name,
+            fallbacks=utils.get_pydantic_model(
+                fallbacks, Optional[List[models.CreateRerankFallbacks]]
+            ),
+            retry=utils.get_pydantic_model(retry, Optional[models.CreateRerankRetry]),
+            cache=utils.get_pydantic_model(cache, Optional[models.CreateRerankCache]),
+            load_balancer=utils.get_pydantic_model(
+                load_balancer, Optional[models.CreateRerankLoadBalancer]
+            ),
+            timeout=utils.get_pydantic_model(
+                timeout, Optional[models.CreateRerankTimeout]
+            ),
             orq=utils.get_pydantic_model(orq, Optional[models.CreateRerankOrq]),
         )

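rerank.py gains the same `name` trace label plus request-level `fallbacks`, `retry`, `cache`, `load_balancer`, and `timeout` options, with the new config objects forwarded through utils.get_pydantic_model into the CreateRerank request models. A hedged sketch of the widened call follows: the keyword names come from the new signature above, while the method path (`client.rerank.create`), the `query`/`documents` arguments (not visible in these hunks), and the shapes of the config dicts are illustrative assumptions.

# Reuses the `client` from the completions sketch above.
results = client.rerank.create(  # assumed method path for the Rerank resource
    model="cohere/rerank-v3.5",
    query="What is the refund window?",  # assumed; not shown in these hunks
    documents=[  # assumed; not shown in these hunks
        "Refunds are accepted within 30 days of purchase.",
        "Standard shipping takes 3-5 business days.",
    ],
    top_n=1,
    name="faq-reranker",  # new in 4.2.9: display name for the trace
    fallbacks=[{"model": "cohere/rerank-english-v3.0"}],  # assumed dict shape
    retry={"count": 2},  # assumed dict shape
    timeout={"call_timeout": 30},  # assumed dict shape
)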