orq-ai-sdk: orq_ai_sdk-4.2.0rc48-py3-none-any.whl → orq_ai_sdk-4.2.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orq_ai_sdk/_hooks/globalhook.py +0 -1
- orq_ai_sdk/_version.py +3 -3
- orq_ai_sdk/audio.py +30 -0
- orq_ai_sdk/chat.py +22 -0
- orq_ai_sdk/completions.py +438 -0
- orq_ai_sdk/contacts.py +43 -886
- orq_ai_sdk/deployments.py +61 -0
- orq_ai_sdk/edits.py +364 -0
- orq_ai_sdk/embeddings.py +344 -0
- orq_ai_sdk/generations.py +370 -0
- orq_ai_sdk/images.py +28 -0
- orq_ai_sdk/models/__init__.py +3839 -424
- orq_ai_sdk/models/conversationresponse.py +1 -1
- orq_ai_sdk/models/conversationwithmessagesresponse.py +1 -1
- orq_ai_sdk/models/createagentrequestop.py +768 -12
- orq_ai_sdk/models/createagentresponse.py +68 -2
- orq_ai_sdk/models/createchatcompletionop.py +538 -313
- orq_ai_sdk/models/createcompletionop.py +2078 -0
- orq_ai_sdk/models/createcontactop.py +5 -10
- orq_ai_sdk/models/createconversationop.py +1 -1
- orq_ai_sdk/models/createconversationresponseop.py +2 -2
- orq_ai_sdk/models/createdatasetitemop.py +4 -4
- orq_ai_sdk/models/createdatasetop.py +1 -1
- orq_ai_sdk/models/createdatasourceop.py +1 -1
- orq_ai_sdk/models/createembeddingop.py +579 -0
- orq_ai_sdk/models/createevalop.py +14 -14
- orq_ai_sdk/models/createidentityop.py +1 -1
- orq_ai_sdk/models/createimageeditop.py +715 -0
- orq_ai_sdk/models/createimageop.py +228 -82
- orq_ai_sdk/models/createimagevariationop.py +706 -0
- orq_ai_sdk/models/creatememoryop.py +4 -2
- orq_ai_sdk/models/createmoderationop.py +521 -0
- orq_ai_sdk/models/createpromptop.py +375 -6
- orq_ai_sdk/models/creatererankop.py +608 -0
- orq_ai_sdk/models/createresponseop.py +2567 -0
- orq_ai_sdk/models/createspeechop.py +466 -0
- orq_ai_sdk/models/createtoolop.py +6 -6
- orq_ai_sdk/models/createtranscriptionop.py +732 -0
- orq_ai_sdk/models/createtranslationop.py +702 -0
- orq_ai_sdk/models/deploymentgetconfigop.py +17 -7
- orq_ai_sdk/models/deploymentsop.py +1 -0
- orq_ai_sdk/models/deploymentstreamop.py +7 -0
- orq_ai_sdk/models/filegetop.py +1 -1
- orq_ai_sdk/models/filelistop.py +1 -1
- orq_ai_sdk/models/fileuploadop.py +1 -1
- orq_ai_sdk/models/generateconversationnameop.py +1 -1
- orq_ai_sdk/models/getallmemoriesop.py +4 -2
- orq_ai_sdk/models/getallpromptsop.py +188 -3
- orq_ai_sdk/models/getalltoolsop.py +6 -6
- orq_ai_sdk/models/getevalsop.py +17 -17
- orq_ai_sdk/models/getonepromptop.py +188 -3
- orq_ai_sdk/models/getpromptversionop.py +188 -3
- orq_ai_sdk/models/invokedeploymentrequest.py +11 -4
- orq_ai_sdk/models/listagentsop.py +372 -0
- orq_ai_sdk/models/listdatasetdatapointsop.py +4 -4
- orq_ai_sdk/models/listdatasetsop.py +1 -1
- orq_ai_sdk/models/listdatasourcesop.py +1 -1
- orq_ai_sdk/models/listidentitiesop.py +1 -1
- orq_ai_sdk/models/listmodelsop.py +1 -0
- orq_ai_sdk/models/listpromptversionsop.py +188 -3
- orq_ai_sdk/models/partdoneevent.py +1 -1
- orq_ai_sdk/models/post_v2_router_ocrop.py +408 -0
- orq_ai_sdk/models/publiccontact.py +9 -3
- orq_ai_sdk/models/publicidentity.py +62 -0
- orq_ai_sdk/models/reasoningpart.py +1 -1
- orq_ai_sdk/models/responsedoneevent.py +14 -11
- orq_ai_sdk/models/retrieveagentrequestop.py +382 -0
- orq_ai_sdk/models/retrievedatapointop.py +4 -4
- orq_ai_sdk/models/retrievedatasetop.py +1 -1
- orq_ai_sdk/models/retrievedatasourceop.py +1 -1
- orq_ai_sdk/models/retrieveidentityop.py +1 -1
- orq_ai_sdk/models/retrievememoryop.py +4 -2
- orq_ai_sdk/models/retrievetoolop.py +6 -6
- orq_ai_sdk/models/runagentop.py +379 -9
- orq_ai_sdk/models/streamrunagentop.py +385 -9
- orq_ai_sdk/models/updateagentop.py +770 -12
- orq_ai_sdk/models/updateconversationop.py +1 -1
- orq_ai_sdk/models/updatedatapointop.py +4 -4
- orq_ai_sdk/models/updatedatasetop.py +1 -1
- orq_ai_sdk/models/updatedatasourceop.py +1 -1
- orq_ai_sdk/models/updateevalop.py +14 -14
- orq_ai_sdk/models/updateidentityop.py +1 -1
- orq_ai_sdk/models/updatememoryop.py +4 -2
- orq_ai_sdk/models/updatepromptop.py +375 -6
- orq_ai_sdk/models/updatetoolop.py +7 -7
- orq_ai_sdk/moderations.py +218 -0
- orq_ai_sdk/orq_completions.py +666 -0
- orq_ai_sdk/orq_responses.py +398 -0
- orq_ai_sdk/rerank.py +330 -0
- orq_ai_sdk/router.py +89 -641
- orq_ai_sdk/speech.py +333 -0
- orq_ai_sdk/transcriptions.py +416 -0
- orq_ai_sdk/translations.py +384 -0
- orq_ai_sdk/variations.py +364 -0
- orq_ai_sdk-4.2.12.dist-info/METADATA +888 -0
- {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/RECORD +98 -75
- {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/WHEEL +1 -1
- orq_ai_sdk/models/deletecontactop.py +0 -44
- orq_ai_sdk/models/listcontactsop.py +0 -265
- orq_ai_sdk/models/retrievecontactop.py +0 -142
- orq_ai_sdk/models/updatecontactop.py +0 -233
- orq_ai_sdk-4.2.0rc48.dist-info/METADATA +0 -788
- {orq_ai_sdk-4.2.0rc48.dist-info → orq_ai_sdk-4.2.12.dist-info}/top_level.txt +0 -0
orq_ai_sdk/models/creatememoryop.py

@@ -63,8 +63,9 @@ class CreateMemoryResponseBodyTypedDict(TypedDict):
     r"""Memory successfully created."""
 
     id: str
+    r"""Unique identifier for the memory. This is automatically generated by the system."""
     entity_id: str
-    r"""This
+    r"""Customer provided entity ID for the memory. This is used to link the memory to a specific user/company/session/etc. Has to be unique within the memory store."""
     created: str
     updated: str
     store_id: str

@@ -79,6 +80,7 @@ class CreateMemoryResponseBody(BaseModel):
     r"""Memory successfully created."""
 
     id: Annotated[str, pydantic.Field(alias="_id")]
+    r"""Unique identifier for the memory. This is automatically generated by the system."""
 
     entity_id: Annotated[
         str,

@@ -86,7 +88,7 @@ class CreateMemoryResponseBody(BaseModel):
             deprecated="warning: ** DEPRECATED ** - This will be removed in a future release, please migrate away from it as soon as possible."
         ),
     ]
-    r"""This
+    r"""Customer provided entity ID for the memory. This is used to link the memory to a specific user/company/session/etc. Has to be unique within the memory store."""
 
     created: str
 
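The hunks above only add docstrings, but they highlight the `_id` alias on `id`: the API returns the key `_id`, and pydantic maps it onto the Python-friendly attribute name. A self-contained illustration of that alias pattern (not taken from the diff; the `Memory` class below is hypothetical and reduced to two fields):

import pydantic
from typing_extensions import Annotated


class Memory(pydantic.BaseModel):
    # Same alias pattern as the generated CreateMemoryResponseBody:
    # the JSON key is "_id", the attribute is `id`.
    id: Annotated[str, pydantic.Field(alias="_id")]
    entity_id: str


m = Memory.model_validate({"_id": "mem_01", "entity_id": "user_42"})
print(m.id)                         # mem_01
print(m.model_dump(by_alias=True))  # {'_id': 'mem_01', 'entity_id': 'user_42'}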
orq_ai_sdk/models/createmoderationop.py (new file)

@@ -0,0 +1,521 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from dataclasses import dataclass, field
+import httpx
+from orq_ai_sdk.models import OrqError
+from orq_ai_sdk.types import BaseModel, Nullable, UNSET_SENTINEL
+import pydantic
+from pydantic import model_serializer
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+
+
+InputTypedDict = TypeAliasType("InputTypedDict", Union[str, List[str]])
+r"""Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models."""
+
+
+Input = TypeAliasType("Input", Union[str, List[str]])
+r"""Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models."""
+
+
+class CreateModerationRequestBodyTypedDict(TypedDict):
+    r"""Classifies if text violates content policy"""
+
+    input: InputTypedDict
+    r"""Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models."""
+    model: str
+    r"""The content moderation model you would like to use. Defaults to omni-moderation-latest"""
+
+
+class CreateModerationRequestBody(BaseModel):
+    r"""Classifies if text violates content policy"""
+
+    input: Input
+    r"""Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models."""
+
+    model: str
+    r"""The content moderation model you would like to use. Defaults to omni-moderation-latest"""
+
+
+class CreateModerationErrorTypedDict(TypedDict):
+    message: str
+    type: str
+    param: Nullable[str]
+    code: str
+
+
+class CreateModerationError(BaseModel):
+    message: str
+
+    type: str
+
+    param: Nullable[str]
+
+    code: str
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                m[k] = val
+
+        return m
+
+
+class CreateModerationRouterModerationsResponseBodyData(BaseModel):
+    error: CreateModerationError
+
+
+@dataclass(unsafe_hash=True)
+class CreateModerationRouterModerationsResponseBody(OrqError):
+    r"""Returns validation error"""
+
+    data: CreateModerationRouterModerationsResponseBodyData = field(hash=False)
+
+    def __init__(
+        self,
+        data: CreateModerationRouterModerationsResponseBodyData,
+        raw_response: httpx.Response,
+        body: Optional[str] = None,
+    ):
+        fallback = body or raw_response.text
+        message = str(data.error.message) or fallback
+        super().__init__(message, raw_response, body)
+        object.__setattr__(self, "data", data)
+
+
+class ResultsCategoriesTypedDict(TypedDict):
+    r"""A list of the categories, and whether they are flagged or not"""
+
+    sexual: bool
+    r"""Sexual content detected"""
+    hate_and_discrimination: bool
+    r"""Hate and discrimination content detected"""
+    violence_and_threats: bool
+    r"""Violence and threats content detected"""
+    dangerous_and_criminal_content: bool
+    r"""Dangerous and criminal content detected"""
+    selfharm: bool
+    r"""Self-harm content detected"""
+    health: bool
+    r"""Unqualified health advice detected"""
+    financial: bool
+    r"""Unqualified financial advice detected"""
+    law: bool
+    r"""Unqualified legal advice detected"""
+    pii: bool
+    r"""Personally identifiable information detected"""
+
+
+class ResultsCategories(BaseModel):
+    r"""A list of the categories, and whether they are flagged or not"""
+
+    sexual: bool
+    r"""Sexual content detected"""
+
+    hate_and_discrimination: bool
+    r"""Hate and discrimination content detected"""
+
+    violence_and_threats: bool
+    r"""Violence and threats content detected"""
+
+    dangerous_and_criminal_content: bool
+    r"""Dangerous and criminal content detected"""
+
+    selfharm: bool
+    r"""Self-harm content detected"""
+
+    health: bool
+    r"""Unqualified health advice detected"""
+
+    financial: bool
+    r"""Unqualified financial advice detected"""
+
+    law: bool
+    r"""Unqualified legal advice detected"""
+
+    pii: bool
+    r"""Personally identifiable information detected"""
+
+
+class ResultsCategoryScoresTypedDict(TypedDict):
+    r"""A list of the categories along with their scores as predicted by model"""
+
+    sexual: float
+    r"""The score for sexual content"""
+    hate_and_discrimination: float
+    r"""The score for hate and discrimination content"""
+    violence_and_threats: float
+    r"""The score for violence and threats content"""
+    dangerous_and_criminal_content: float
+    r"""The score for dangerous and criminal content"""
+    selfharm: float
+    r"""The score for self-harm content"""
+    health: float
+    r"""The score for unqualified health advice"""
+    financial: float
+    r"""The score for unqualified financial advice"""
+    law: float
+    r"""The score for unqualified legal advice"""
+    pii: float
+    r"""The score for personally identifiable information"""
+
+
+class ResultsCategoryScores(BaseModel):
+    r"""A list of the categories along with their scores as predicted by model"""
+
+    sexual: float
+    r"""The score for sexual content"""
+
+    hate_and_discrimination: float
+    r"""The score for hate and discrimination content"""
+
+    violence_and_threats: float
+    r"""The score for violence and threats content"""
+
+    dangerous_and_criminal_content: float
+    r"""The score for dangerous and criminal content"""
+
+    selfharm: float
+    r"""The score for self-harm content"""
+
+    health: float
+    r"""The score for unqualified health advice"""
+
+    financial: float
+    r"""The score for unqualified financial advice"""
+
+    law: float
+    r"""The score for unqualified legal advice"""
+
+    pii: float
+    r"""The score for personally identifiable information"""
+
+
+class Results2TypedDict(TypedDict):
+    categories: ResultsCategoriesTypedDict
+    r"""A list of the categories, and whether they are flagged or not"""
+    category_scores: ResultsCategoryScoresTypedDict
+    r"""A list of the categories along with their scores as predicted by model"""
+
+
+class Results2(BaseModel):
+    categories: ResultsCategories
+    r"""A list of the categories, and whether they are flagged or not"""
+
+    category_scores: ResultsCategoryScores
+    r"""A list of the categories along with their scores as predicted by model"""
+
+
+class CategoriesTypedDict(TypedDict):
+    r"""A list of the categories, and whether they are flagged or not"""
+
+    hate: bool
+    r"""Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste."""
+    hate_threatening: bool
+    r"""Hateful content that also includes violence or serious harm towards the targeted group."""
+    harassment: bool
+    r"""Content that expresses, incites, or promotes harassing language towards any target."""
+    harassment_threatening: bool
+    r"""Harassment content that also includes violence or serious harm towards any target."""
+    illicit: bool
+    r"""Content that includes instructions or advice that facilitate the planning or execution of wrongdoing."""
+    illicit_violent: bool
+    r"""Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence."""
+    self_harm: bool
+    r"""Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders."""
+    self_harm_intent: bool
+    r"""Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm."""
+    self_harm_instructions: bool
+    r"""Content that encourages performing acts of self-harm, or that gives instructions or advice on how to commit such acts."""
+    sexual: bool
+    r"""Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services."""
+    sexual_minors: bool
+    r"""Sexual content that includes an individual who is under 18 years old."""
+    violence: bool
+    r"""Content that depicts death, violence, or physical injury."""
+    violence_graphic: bool
+    r"""Content that depicts death, violence, or physical injury in graphic detail."""
+
+
+class Categories(BaseModel):
+    r"""A list of the categories, and whether they are flagged or not"""
+
+    hate: bool
+    r"""Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste."""
+
+    hate_threatening: Annotated[bool, pydantic.Field(alias="hate/threatening")]
+    r"""Hateful content that also includes violence or serious harm towards the targeted group."""
+
+    harassment: bool
+    r"""Content that expresses, incites, or promotes harassing language towards any target."""
+
+    harassment_threatening: Annotated[
+        bool, pydantic.Field(alias="harassment/threatening")
+    ]
+    r"""Harassment content that also includes violence or serious harm towards any target."""
+
+    illicit: bool
+    r"""Content that includes instructions or advice that facilitate the planning or execution of wrongdoing."""
+
+    illicit_violent: Annotated[bool, pydantic.Field(alias="illicit/violent")]
+    r"""Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence."""
+
+    self_harm: Annotated[bool, pydantic.Field(alias="self-harm")]
+    r"""Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders."""
+
+    self_harm_intent: Annotated[bool, pydantic.Field(alias="self-harm/intent")]
+    r"""Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm."""
+
+    self_harm_instructions: Annotated[
+        bool, pydantic.Field(alias="self-harm/instructions")
+    ]
+    r"""Content that encourages performing acts of self-harm, or that gives instructions or advice on how to commit such acts."""
+
+    sexual: bool
+    r"""Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services."""
+
+    sexual_minors: Annotated[bool, pydantic.Field(alias="sexual/minors")]
+    r"""Sexual content that includes an individual who is under 18 years old."""
+
+    violence: bool
+    r"""Content that depicts death, violence, or physical injury."""
+
+    violence_graphic: Annotated[bool, pydantic.Field(alias="violence/graphic")]
+    r"""Content that depicts death, violence, or physical injury in graphic detail."""
+
+
+class CategoryScoresTypedDict(TypedDict):
+    r"""A list of the categories along with their scores as predicted by model"""
+
+    hate: float
+    r"""The score for the category hate"""
+    hate_threatening: float
+    r"""The score for the category hate/threatening"""
+    harassment: float
+    r"""The score for the category harassment"""
+    harassment_threatening: float
+    r"""The score for the category harassment/threatening"""
+    illicit: float
+    r"""The score for the category illicit"""
+    illicit_violent: float
+    r"""The score for the category illicit/violent"""
+    self_harm: float
+    r"""The score for the category self-harm"""
+    self_harm_intent: float
+    r"""The score for the category self-harm/intent"""
+    self_harm_instructions: float
+    r"""The score for the category self-harm/instructions"""
+    sexual: float
+    r"""The score for the category sexual"""
+    sexual_minors: float
+    r"""The score for the category sexual/minors"""
+    violence: float
+    r"""The score for the category violence"""
+    violence_graphic: float
+    r"""The score for the category violence/graphic"""
+
+
+class CategoryScores(BaseModel):
+    r"""A list of the categories along with their scores as predicted by model"""
+
+    hate: float
+    r"""The score for the category hate"""
+
+    hate_threatening: Annotated[float, pydantic.Field(alias="hate/threatening")]
+    r"""The score for the category hate/threatening"""
+
+    harassment: float
+    r"""The score for the category harassment"""
+
+    harassment_threatening: Annotated[
+        float, pydantic.Field(alias="harassment/threatening")
+    ]
+    r"""The score for the category harassment/threatening"""
+
+    illicit: float
+    r"""The score for the category illicit"""
+
+    illicit_violent: Annotated[float, pydantic.Field(alias="illicit/violent")]
+    r"""The score for the category illicit/violent"""
+
+    self_harm: Annotated[float, pydantic.Field(alias="self-harm")]
+    r"""The score for the category self-harm"""
+
+    self_harm_intent: Annotated[float, pydantic.Field(alias="self-harm/intent")]
+    r"""The score for the category self-harm/intent"""
+
+    self_harm_instructions: Annotated[
+        float, pydantic.Field(alias="self-harm/instructions")
+    ]
+    r"""The score for the category self-harm/instructions"""
+
+    sexual: float
+    r"""The score for the category sexual"""
+
+    sexual_minors: Annotated[float, pydantic.Field(alias="sexual/minors")]
+    r"""The score for the category sexual/minors"""
+
+    violence: float
+    r"""The score for the category violence"""
+
+    violence_graphic: Annotated[float, pydantic.Field(alias="violence/graphic")]
+    r"""The score for the category violence/graphic"""
+
+
+class CategoryAppliedInputTypesTypedDict(TypedDict):
+    r"""A list of the categories along with the input type(s) that the score applies to"""
+
+    hate: List[str]
+    r"""The applied input type(s) for the category hate"""
+    hate_threatening: List[str]
+    r"""The applied input type(s) for the category hate/threatening"""
+    harassment: List[str]
+    r"""The applied input type(s) for the category harassment"""
+    harassment_threatening: List[str]
+    r"""The applied input type(s) for the category harassment/threatening"""
+    illicit: List[str]
+    r"""The applied input type(s) for the category illicit"""
+    illicit_violent: List[str]
+    r"""The applied input type(s) for the category illicit/violent"""
+    self_harm: List[str]
+    r"""The applied input type(s) for the category self-harm"""
+    self_harm_intent: List[str]
+    r"""The applied input type(s) for the category self-harm/intent"""
+    self_harm_instructions: List[str]
+    r"""The applied input type(s) for the category self-harm/instructions"""
+    sexual: List[str]
+    r"""The applied input type(s) for the category sexual"""
+    sexual_minors: List[str]
+    r"""The applied input type(s) for the category sexual/minors"""
+    violence: List[str]
+    r"""The applied input type(s) for the category violence"""
+    violence_graphic: List[str]
+    r"""The applied input type(s) for the category violence/graphic"""
+
+
+class CategoryAppliedInputTypes(BaseModel):
+    r"""A list of the categories along with the input type(s) that the score applies to"""
+
+    hate: List[str]
+    r"""The applied input type(s) for the category hate"""
+
+    hate_threatening: Annotated[List[str], pydantic.Field(alias="hate/threatening")]
+    r"""The applied input type(s) for the category hate/threatening"""
+
+    harassment: List[str]
+    r"""The applied input type(s) for the category harassment"""
+
+    harassment_threatening: Annotated[
+        List[str], pydantic.Field(alias="harassment/threatening")
+    ]
+    r"""The applied input type(s) for the category harassment/threatening"""
+
+    illicit: List[str]
+    r"""The applied input type(s) for the category illicit"""
+
+    illicit_violent: Annotated[List[str], pydantic.Field(alias="illicit/violent")]
+    r"""The applied input type(s) for the category illicit/violent"""
+
+    self_harm: Annotated[List[str], pydantic.Field(alias="self-harm")]
+    r"""The applied input type(s) for the category self-harm"""
+
+    self_harm_intent: Annotated[List[str], pydantic.Field(alias="self-harm/intent")]
+    r"""The applied input type(s) for the category self-harm/intent"""
+
+    self_harm_instructions: Annotated[
+        List[str], pydantic.Field(alias="self-harm/instructions")
+    ]
+    r"""The applied input type(s) for the category self-harm/instructions"""
+
+    sexual: List[str]
+    r"""The applied input type(s) for the category sexual"""
+
+    sexual_minors: Annotated[List[str], pydantic.Field(alias="sexual/minors")]
+    r"""The applied input type(s) for the category sexual/minors"""
+
+    violence: List[str]
+    r"""The applied input type(s) for the category violence"""
+
+    violence_graphic: Annotated[List[str], pydantic.Field(alias="violence/graphic")]
+    r"""The applied input type(s) for the category violence/graphic"""
+
+
+class Results1TypedDict(TypedDict):
+    flagged: bool
+    r"""Whether any of the categories are flagged"""
+    categories: CategoriesTypedDict
+    r"""A list of the categories, and whether they are flagged or not"""
+    category_scores: CategoryScoresTypedDict
+    r"""A list of the categories along with their scores as predicted by model"""
+    category_applied_input_types: NotRequired[CategoryAppliedInputTypesTypedDict]
+    r"""A list of the categories along with the input type(s) that the score applies to"""
+
+
+class Results1(BaseModel):
+    flagged: bool
+    r"""Whether any of the categories are flagged"""
+
+    categories: Categories
+    r"""A list of the categories, and whether they are flagged or not"""
+
+    category_scores: CategoryScores
+    r"""A list of the categories along with their scores as predicted by model"""
+
+    category_applied_input_types: Optional[CategoryAppliedInputTypes] = None
+    r"""A list of the categories along with the input type(s) that the score applies to"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["category_applied_input_types"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+ResultsTypedDict = TypeAliasType(
+    "ResultsTypedDict", Union[Results2TypedDict, Results1TypedDict]
+)
+
+
+Results = TypeAliasType("Results", Union[Results2, Results1])
+
+
+class CreateModerationResponseBodyTypedDict(TypedDict):
+    r"""Returns moderation classification results"""
+
+    id: str
+    r"""The unique identifier for the moderation request"""
+    model: str
+    r"""The model used to generate the moderation results"""
+    results: List[ResultsTypedDict]
+    r"""A list of moderation objects"""
+
+
+class CreateModerationResponseBody(BaseModel):
+    r"""Returns moderation classification results"""
+
+    id: str
+    r"""The unique identifier for the moderation request"""
+
+    model: str
+    r"""The model used to generate the moderation results"""
+
+    results: List[Results]
+    r"""A list of moderation objects"""