hume 0.6.0 → 0.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mock/definition/api.yml +12 -0
- package/.mock/definition/custom-models/__package__.yml +1392 -0
- package/.mock/definition/custom-models/datasets.yml +406 -0
- package/.mock/definition/custom-models/files.yml +263 -0
- package/.mock/definition/custom-models/jobs.yml +44 -0
- package/.mock/definition/custom-models/models.yml +303 -0
- package/.mock/definition/empathic-voice/__package__.yml +896 -0
- package/.mock/definition/empathic-voice/chat.yml +59 -0
- package/.mock/definition/empathic-voice/chatGroups.yml +80 -0
- package/.mock/definition/empathic-voice/chats.yml +86 -0
- package/.mock/definition/empathic-voice/configs.yml +413 -0
- package/.mock/definition/empathic-voice/prompts.yml +265 -0
- package/.mock/definition/empathic-voice/tools.yml +398 -0
- package/.mock/definition/expression-measurement/__package__.yml +1122 -0
- package/.mock/definition/expression-measurement/batch.yml +352 -0
- package/.mock/definition/expression-measurement/stream.yml +362 -0
- package/.mock/fern.config.json +4 -0
- package/Client.d.ts +3 -3
- package/Client.js +6 -6
- package/api/resources/customModels/resources/datasets/client/Client.js +9 -9
- package/api/resources/customModels/resources/files/client/Client.js +7 -7
- package/api/resources/customModels/resources/jobs/client/Client.js +2 -2
- package/api/resources/customModels/resources/models/client/Client.js +6 -6
- package/api/resources/empathicVoice/client/Client.d.ts +3 -0
- package/api/resources/empathicVoice/client/Client.js +5 -0
- package/api/resources/empathicVoice/client/StreamSocket.d.ts +9 -1
- package/api/resources/empathicVoice/client/StreamSocket.js +17 -1
- package/api/resources/empathicVoice/resources/chat/types/SubscribeEvent.d.ts +1 -1
- package/api/resources/empathicVoice/resources/chatGroups/client/Client.d.ts +42 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/Client.js +178 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/index.js +17 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupEventsRequest.d.ts +21 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupEventsRequest.js +5 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupsRequest.d.ts +17 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupsRequest.js +5 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/requests/index.d.ts +2 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/requests/index.js +2 -0
- package/api/resources/empathicVoice/resources/chatGroups/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/chatGroups/index.js +17 -0
- package/api/resources/empathicVoice/resources/chats/client/Client.js +2 -2
- package/api/resources/empathicVoice/resources/configs/client/Client.js +9 -9
- package/api/resources/empathicVoice/resources/index.d.ts +2 -0
- package/api/resources/empathicVoice/resources/index.js +3 -1
- package/api/resources/empathicVoice/resources/prompts/client/Client.js +9 -9
- package/api/resources/empathicVoice/resources/tools/client/Client.js +9 -9
- package/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
- package/api/resources/empathicVoice/types/ChatMetadata.d.ts +15 -0
- package/api/resources/empathicVoice/types/ChatMetadata.js +5 -0
- package/api/resources/empathicVoice/types/ReturnChat.d.ts +2 -0
- package/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +2 -0
- package/api/resources/empathicVoice/types/ReturnChatGroup.d.ts +19 -0
- package/api/resources/empathicVoice/types/ReturnChatGroup.js +5 -0
- package/api/resources/empathicVoice/types/ReturnChatGroupPagedChats.d.ts +24 -0
- package/api/resources/empathicVoice/types/ReturnChatGroupPagedChats.js +5 -0
- package/api/resources/empathicVoice/types/ReturnChatGroupPagedEvents.d.ts +19 -0
- package/api/resources/empathicVoice/types/ReturnChatGroupPagedEvents.js +5 -0
- package/api/resources/empathicVoice/types/ReturnChatPagedEvents.d.ts +2 -0
- package/api/resources/empathicVoice/types/ReturnPagedChatGroups.d.ts +15 -0
- package/api/resources/empathicVoice/types/ReturnPagedChatGroups.js +5 -0
- package/api/resources/empathicVoice/types/index.d.ts +5 -0
- package/api/resources/empathicVoice/types/index.js +5 -0
- package/api/resources/expressionMeasurement/resources/batch/client/Client.js +6 -6
- package/api/resources/index.d.ts +1 -1
- package/api/resources/index.js +2 -2
- package/core/form-data-utils/FormDataWrapper.js +2 -1
- package/dist/Client.d.ts +3 -3
- package/dist/Client.js +6 -6
- package/dist/api/resources/customModels/resources/datasets/client/Client.js +9 -9
- package/dist/api/resources/customModels/resources/files/client/Client.js +7 -7
- package/dist/api/resources/customModels/resources/jobs/client/Client.js +2 -2
- package/dist/api/resources/customModels/resources/models/client/Client.js +6 -6
- package/dist/api/resources/empathicVoice/client/Client.d.ts +3 -0
- package/dist/api/resources/empathicVoice/client/Client.js +5 -0
- package/dist/api/resources/empathicVoice/client/StreamSocket.d.ts +9 -1
- package/dist/api/resources/empathicVoice/client/StreamSocket.js +17 -1
- package/dist/api/resources/empathicVoice/resources/chat/types/SubscribeEvent.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/Client.d.ts +42 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/Client.js +178 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/index.js +17 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupEventsRequest.d.ts +21 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupEventsRequest.js +5 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupsRequest.d.ts +17 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsListChatGroupsRequest.js +5 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/requests/index.d.ts +2 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/requests/index.js +2 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/index.js +17 -0
- package/dist/api/resources/empathicVoice/resources/chats/client/Client.js +2 -2
- package/dist/api/resources/empathicVoice/resources/configs/client/Client.js +9 -9
- package/dist/api/resources/empathicVoice/resources/index.d.ts +2 -0
- package/dist/api/resources/empathicVoice/resources/index.js +3 -1
- package/dist/api/resources/empathicVoice/resources/prompts/client/Client.js +9 -9
- package/dist/api/resources/empathicVoice/resources/tools/client/Client.js +9 -9
- package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +15 -0
- package/dist/api/resources/empathicVoice/types/ChatMetadata.js +5 -0
- package/dist/api/resources/empathicVoice/types/ReturnChat.d.ts +2 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +2 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatGroup.d.ts +19 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatGroup.js +5 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatGroupPagedChats.d.ts +24 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatGroupPagedChats.js +5 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatGroupPagedEvents.d.ts +19 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatGroupPagedEvents.js +5 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatPagedEvents.d.ts +2 -0
- package/dist/api/resources/empathicVoice/types/ReturnPagedChatGroups.d.ts +15 -0
- package/dist/api/resources/empathicVoice/types/ReturnPagedChatGroups.js +5 -0
- package/dist/api/resources/empathicVoice/types/index.d.ts +5 -0
- package/dist/api/resources/empathicVoice/types/index.js +5 -0
- package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.js +6 -6
- package/dist/api/resources/index.d.ts +1 -1
- package/dist/api/resources/index.js +2 -2
- package/dist/core/form-data-utils/FormDataWrapper.js +2 -1
- package/dist/serialization/resources/empathicVoice/resources/chat/types/SubscribeEvent.d.ts +2 -1
- package/dist/serialization/resources/empathicVoice/resources/chat/types/SubscribeEvent.js +2 -0
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +15 -0
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +36 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChat.d.ts +1 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChat.js +1 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatEvent.d.ts +1 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatEvent.js +1 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatGroup.d.ts +17 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatGroup.js +38 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatGroupPagedChats.d.ts +20 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatGroupPagedChats.js +41 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatGroupPagedEvents.d.ts +17 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatGroupPagedEvents.js +38 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatPagedEvents.d.ts +1 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatPagedEvents.js +1 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnPagedChatGroups.d.ts +15 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnPagedChatGroups.js +36 -0
- package/dist/serialization/resources/empathicVoice/types/index.d.ts +5 -0
- package/dist/serialization/resources/empathicVoice/types/index.js +5 -0
- package/dist/serialization/resources/index.d.ts +1 -1
- package/dist/serialization/resources/index.js +2 -2
- package/dist/wrapper/empathicVoice/chat/ChatClient.d.ts +2 -0
- package/dist/wrapper/empathicVoice/chat/ChatClient.js +3 -0
- package/package.json +1 -1
- package/serialization/resources/empathicVoice/resources/chat/types/SubscribeEvent.d.ts +2 -1
- package/serialization/resources/empathicVoice/resources/chat/types/SubscribeEvent.js +2 -0
- package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +15 -0
- package/serialization/resources/empathicVoice/types/ChatMetadata.js +36 -0
- package/serialization/resources/empathicVoice/types/ReturnChat.d.ts +1 -0
- package/serialization/resources/empathicVoice/types/ReturnChat.js +1 -0
- package/serialization/resources/empathicVoice/types/ReturnChatEvent.d.ts +1 -0
- package/serialization/resources/empathicVoice/types/ReturnChatEvent.js +1 -0
- package/serialization/resources/empathicVoice/types/ReturnChatGroup.d.ts +17 -0
- package/serialization/resources/empathicVoice/types/ReturnChatGroup.js +38 -0
- package/serialization/resources/empathicVoice/types/ReturnChatGroupPagedChats.d.ts +20 -0
- package/serialization/resources/empathicVoice/types/ReturnChatGroupPagedChats.js +41 -0
- package/serialization/resources/empathicVoice/types/ReturnChatGroupPagedEvents.d.ts +17 -0
- package/serialization/resources/empathicVoice/types/ReturnChatGroupPagedEvents.js +38 -0
- package/serialization/resources/empathicVoice/types/ReturnChatPagedEvents.d.ts +1 -0
- package/serialization/resources/empathicVoice/types/ReturnChatPagedEvents.js +1 -0
- package/serialization/resources/empathicVoice/types/ReturnPagedChatGroups.d.ts +15 -0
- package/serialization/resources/empathicVoice/types/ReturnPagedChatGroups.js +36 -0
- package/serialization/resources/empathicVoice/types/index.d.ts +5 -0
- package/serialization/resources/empathicVoice/types/index.js +5 -0
- package/serialization/resources/index.d.ts +1 -1
- package/serialization/resources/index.js +2 -2
- package/wrapper/empathicVoice/chat/ChatClient.d.ts +2 -0
- package/wrapper/empathicVoice/chat/ChatClient.js +3 -0
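The empathicVoice additions above introduce a `chatGroups` resource together with `ChatGroupsListChatGroupsRequest` and `ChatGroupsListChatGroupEventsRequest` request types. A minimal TypeScript sketch of how the generated client might be called follows; the `listChatGroups`/`listChatGroupEvents` method names, the chat-group ID argument, and the paging fields are assumptions inferred from the file names in this diff, not an API confirmed by it.

```typescript
import { HumeClient } from "hume";

// Hypothetical usage of the chatGroups resource added in 0.6.2.
// Method names and paging fields are inferred from the generated request type
// names (ChatGroupsListChatGroupsRequest, ChatGroupsListChatGroupEventsRequest).
const client = new HumeClient({ apiKey: "<your-api-key>" });

async function main(): Promise<void> {
  // Assumed: lists chat groups with optional paging parameters.
  const groups = await client.empathicVoice.chatGroups.listChatGroups({
    pageNumber: 0, // hypothetical field names; see the request .d.ts files for the real shape
    pageSize: 10,
  });
  console.log(groups);

  // Assumed: lists events for a single chat group; "<chat-group-id>" is a placeholder.
  const events = await client.empathicVoice.chatGroups.listChatGroupEvents(
    "<chat-group-id>",
    { pageSize: 10 },
  );
  console.log(events);
}

main().catch(console.error);
```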

package/.mock/definition/custom-models/__package__.yml
@@ -0,0 +1,1392 @@
+types:
+  AttributeFilter:
+    docs: List of Attributes
+    properties:
+      name:
+        type: string
+        docs: Attribute name (key)
+      value:
+        type: string
+        docs: Attribute description (value)
+  FileInput:
+    docs: File details
+    properties:
+      name:
+        type: string
+        docs: File name
+      uri:
+        type: optional<string>
+        docs: File URI
+      hume_storage:
+        type: boolean
+        docs: Flag which denotes whether the file is stored with Hume
+      data_type:
+        type: string
+        docs: >-
+          File type: video, audio, video_no_audio, image, text, or
+          mediapipe_facemesh
+  FileWithAttributesInput:
+    properties:
+      file: FileInput
+      attributes:
+        type: optional<list<AttributeFilter>>
+        docs: List of Attributes
+  AuthorizedFile:
+    docs: File details
+    properties:
+      id:
+        type: string
+        docs: Hume-generated File ID
+      name:
+        type: optional<string>
+        docs: File name
+      uri:
+        type: optional<string>
+        docs: File URI
+      upload_uri:
+        type: optional<string>
+        docs: File upload URI
+      thumbnail_uri:
+        type: optional<string>
+        docs: File thumbnail URI
+      user_id:
+        type: string
+        docs: Hume-generated User ID
+      data_type:
+        type: string
+        docs: >-
+          File type: video, audio, video_no_audio, image, text, or
+          mediapipe_facemesh
+      created_on:
+        type: optional<integer>
+        docs: Created date and time
+      modified_on:
+        type: optional<integer>
+        docs: Updated date and time
+      metadata:
+        type: optional<map<string, map<string, unknown>>>
+        docs: Additional details as key, value pairs
+      hume_storage:
+        type: optional<boolean>
+        docs: Flag which denotes whether the file is stored with Hume
+      hume_storage_upload_timestamp:
+        type: optional<integer>
+        docs: Timestamp denoting when the file was uploaded to Hume
+      is_sanitized:
+        type: boolean
+        docs: Indicates whether this file has been sanitized for sharing
+      is_owned_by_reader:
+        type: boolean
+        docs: Indicates whether this file is owned by the current file reader
+      is_linked_to_publicly_shared:
+        type: optional<boolean>
+        docs: >-
+          Indicates whether this file is linked to a model that is publicly
+          shared
+      is_linked_to_hume_model:
+        type: optional<boolean>
+        docs: >-
+          Indicates whether this file is linked to a Hume-owned model that is
+          publicly shared
+  FileWithAttributes:
+    properties:
+      file: AuthorizedFile
+      attributes:
+        type: optional<list<AttributeFilter>>
+        docs: List of Attributes
+  Unit: map<string, unknown>
+  DatasetVersionFeatureTypesValue:
+    enum:
+      - CATEGORICAL
+      - NUMERIC
+      - TEXT
+      - DATETIME
+      - UNDEFINED
+    docs: Feature types of label mapped to feature type
+  DatasetVersion:
+    properties:
+      id:
+        type: string
+        docs: Hume-generated Dataset version ID
+      user_id:
+        type: string
+        docs: Hume-generated User ID
+      labels_file_uri:
+        type: string
+        docs: Dataset Labels file URI
+      feature_types:
+        docs: Feature types of label mapped to feature type
+        type: map<string, DatasetVersionFeatureTypesValue>
+      dataset_id:
+        type: string
+        docs: Hume-generated Dataset ID of the parent Dataset
+      dataset_version:
+        type: integer
+        docs: Dataset version number
+      created_on:
+        type: integer
+        docs: Created date and time
+  ReturnDataset:
+    properties:
+      id:
+        type: optional<string>
+        docs: Hume-generated Dataset ID
+      name:
+        type: string
+        docs: Dataset name
+      latest_version: optional<DatasetVersion>
+      modified_on:
+        type: optional<integer>
+        docs: Updated date and time
+      metadata:
+        type: optional<map<string, map<string, unknown>>>
+        docs: Additional details as key, value pairs
+  DatasetLabelsFileUriInputFeatureTypesValue:
+    enum:
+      - CATEGORICAL
+      - NUMERIC
+      - TEXT
+      - DATETIME
+      - UNDEFINED
+    docs: Feature types as key, value pairs
+  DatasetLabelsFileUriInput:
+    properties:
+      name:
+        type: string
+        docs: Dataset name
+      labels_file_uri:
+        type: string
+        docs: URI of a Labels File
+      feature_types:
+        docs: Feature types as key, value pairs
+        type: map<string, DatasetLabelsFileUriInputFeatureTypesValue>
+  ExternalModel:
+    properties:
+      id:
+        type: string
+        docs: Hume-generated Model ID
+      name:
+        type: string
+        docs: Model name
+      created_on:
+        type: integer
+        docs: Created date and time
+      modified_on:
+        type: integer
+        docs: Updated date and time
+      total_stars:
+        type: integer
+        docs: Total stars on this model
+      model_is_starred_by_user:
+        type: boolean
+        docs: Model is starred by this user
+      archived:
+        type: boolean
+        docs: Model is archived.
+      latest_version: optional<ExternalModelVersion>
+      is_publicly_shared:
+        type: boolean
+        docs: Model is shared publicly
+  ExternalModelVersionFileType:
+    enum:
+      - video
+      - audio
+      - video_no_audio
+      - image
+      - text
+      - mediapipe_facemesh
+    docs: >-
+      File type: video, audio, video_no_audio, image, text, or
+      mediapipe_facemesh
+  ExternalModelVersion:
+    docs: Latest Model version number
+    properties:
+      id:
+        type: string
+        docs: Hume-generated Model version ID
+      model_id:
+        type: string
+        docs: Hume-generated Model ID of the parent Model
+      user_id:
+        type: string
+        docs: Hume-generated User ID
+      version:
+        type: string
+        docs: Model version number
+      source_uri:
+        type: string
+        docs: Model version's source file URI
+      dataset_version_id:
+        type: string
+        docs: >-
+          Hume-generated Dataset version ID for the Dataset version the Model
+          version was trained on
+      created_on:
+        type: integer
+        docs: Created date and time
+      metadata:
+        type: optional<map<string, map<string, unknown>>>
+        docs: Additional details as key, value pairs
+      description:
+        type: optional<string>
+        docs: Model version description
+      tags:
+        type: optional<list<ExternalModelVersionTag>>
+        docs: List of Tags associated with the Model version
+      file_type:
+        type: optional<ExternalModelVersionFileType>
+        docs: >-
+          File type: video, audio, video_no_audio, image, text, or
+          mediapipe_facemesh
+      target_feature:
+        type: optional<string>
+        docs: Target feature, the feature the model was trained against
+      task_type:
+        type: optional<string>
+        docs: Type of the task used to train
+      training_job_id:
+        type: optional<string>
+        docs: ID of the batch training job
+  ExternalModelVersionTag:
+    docs: List of Tags associated with the Model version
+    properties:
+      key:
+        type: string
+        docs: Tag name (key)
+      value:
+        type: string
+        docs: Tag description (value)
+  ModelPage:
+    properties:
+      content: optional<list<ExternalModel>>
+      pageable: optional<PageableObject>
+      total: optional<integer>
+      last: optional<boolean>
+      total_elements: optional<integer>
+      total_pages: optional<integer>
+      size: optional<integer>
+      number: optional<integer>
+      sort: optional<SortObject>
+      first: optional<boolean>
+      number_of_elements: optional<integer>
+      empty: optional<boolean>
+  PageableObject:
+    properties:
+      offset: optional<integer>
+      sort: optional<SortObject>
+      paged: optional<boolean>
+      unpaged: optional<boolean>
+      page_number: optional<integer>
+      page_size: optional<integer>
+  SortObject:
+    properties:
+      empty: optional<boolean>
+      sorted: optional<boolean>
+      unsorted: optional<boolean>
+  JsonObject:
+    properties:
+      empty: optional<boolean>
+  FilePage:
+    properties:
+      content: optional<list<FileWithAttributes>>
+      pageable: optional<PageableObject>
+      total: optional<integer>
+      last: optional<boolean>
+      total_elements: optional<integer>
+      total_pages: optional<integer>
+      size: optional<integer>
+      number: optional<integer>
+      sort: optional<SortObject>
+      first: optional<boolean>
+      number_of_elements: optional<integer>
+      empty: optional<boolean>
+  DatasetPage:
+    properties:
+      content: optional<list<ReturnDataset>>
+      pageable: optional<PageableObject>
+      total: optional<integer>
+      last: optional<boolean>
+      total_elements: optional<integer>
+      total_pages: optional<integer>
+      size: optional<integer>
+      number: optional<integer>
+      sort: optional<SortObject>
+      first: optional<boolean>
+      number_of_elements: optional<integer>
+      empty: optional<boolean>
+  DatasetVersionPage:
+    properties:
+      content: optional<list<DatasetVersion>>
+      pageable: optional<PageableObject>
+      total: optional<integer>
+      last: optional<boolean>
+      total_elements: optional<integer>
+      total_pages: optional<integer>
+      size: optional<integer>
+      number: optional<integer>
+      sort: optional<SortObject>
+      first: optional<boolean>
+      number_of_elements: optional<integer>
+      empty: optional<boolean>
+  DatasetLabels:
+    properties:
+      id:
+        type: string
+        docs: Hume-generated Dataset version ID
+      user_id:
+        type: string
+        docs: Hume-generated User ID
+      labels_file_uri:
+        type: string
+        docs: Dataset Labels file URI
+      feature_type_json_uri:
+        type: string
+        docs: Feature types json file URI
+      dataset_id:
+        type: string
+        docs: Hume-generated Dataset ID
+      dataset_version:
+        type: integer
+        docs: Dataset version number
+      created_on:
+        type: integer
+        docs: Created date and time
+      is_most_recent_version:
+        type: boolean
+        docs: Boolean indicating that this is the most recent version
+  Alternative: literal<"language_only">
+  Bcp47Tag:
+    enum:
+      - zh
+      - da
+      - nl
+      - en
+      - value: en-AU
+        name: EnAu
+      - value: en-IN
+        name: EnIn
+      - value: en-NZ
+        name: EnNz
+      - value: en-GB
+        name: EnGb
+      - fr
+      - value: fr-CA
+        name: FrCa
+      - de
+      - hi
+      - value: hi-Latn
+        name: HiLatn
+      - id
+      - it
+      - ja
+      - ko
+      - 'no'
+      - pl
+      - pt
+      - value: pt-BR
+        name: PtBr
+      - value: pt-PT
+        name: PtPt
+      - ru
+      - es
+      - value: es-419
+        name: Es419
+      - sv
+      - ta
+      - tr
+      - uk
+  BoundingBox:
+    docs: A bounding box around a face.
+    properties:
+      x:
+        type: double
+        docs: x-coordinate of bounding box top left corner.
+      'y':
+        type: double
+        docs: y-coordinate of bounding box top left corner.
+      w:
+        type: double
+        docs: Bounding box width.
+      h:
+        type: double
+        docs: Bounding box height.
+  BurstPrediction:
+    properties:
+      time: TimeInterval
+      emotions:
+        docs: A high-dimensional embedding in emotion space.
+        type: list<EmotionScore>
+      descriptions:
+        docs: Modality-specific descriptive features and their scores.
+        type: list<DescriptionsScore>
+  Classification: map<string, unknown>
+  CompletedEmbeddingGeneration:
+    properties:
+      created_timestamp_ms:
+        type: integer
+        docs: When this job was created (Unix timestamp in milliseconds).
+      started_timestamp_ms:
+        type: integer
+        docs: When this job started (Unix timestamp in milliseconds).
+      ended_timestamp_ms:
+        type: integer
+        docs: When this job ended (Unix timestamp in milliseconds).
+  CompletedInference:
+    properties:
+      created_timestamp_ms:
+        type: integer
+        docs: When this job was created (Unix timestamp in milliseconds).
+      started_timestamp_ms:
+        type: integer
+        docs: When this job started (Unix timestamp in milliseconds).
+      ended_timestamp_ms:
+        type: integer
+        docs: When this job ended (Unix timestamp in milliseconds).
+      num_predictions:
+        type: integer
+        docs: The number of predictions that were generated by this job.
+      num_errors:
+        type: integer
+        docs: The number of errors that occurred while running this job.
+  CompletedTlInference:
+    properties:
+      created_timestamp_ms:
+        type: integer
+        docs: When this job was created (Unix timestamp in milliseconds).
+      started_timestamp_ms:
+        type: integer
+        docs: When this job started (Unix timestamp in milliseconds).
+      ended_timestamp_ms:
+        type: integer
+        docs: When this job ended (Unix timestamp in milliseconds).
+      num_predictions:
+        type: integer
+        docs: The number of predictions that were generated by this job.
+      num_errors:
+        type: integer
+        docs: The number of errors that occurred while running this job.
+  CompletedTraining:
+    properties:
+      created_timestamp_ms:
+        type: integer
+        docs: When this job was created (Unix timestamp in milliseconds).
+      started_timestamp_ms:
+        type: integer
+        docs: When this job started (Unix timestamp in milliseconds).
+      ended_timestamp_ms:
+        type: integer
+        docs: When this job ended (Unix timestamp in milliseconds).
+      custom_model: TrainingCustomModel
+      alternatives: optional<map<string, TrainingCustomModel>>
+  CustomModelPrediction:
+    properties:
+      output: map<string, double>
+      error: string
+      task_type: string
+  CustomModelRequest:
+    properties:
+      name: string
+      description: optional<string>
+      tags: optional<list<Tag>>
+  Dataset:
+    discriminated: false
+    union:
+      - DatasetId
+      - DatasetVersionId
+  DatasetId:
+    properties:
+      id:
+        type: string
+        validation:
+          format: uuid
+  DatasetVersionId:
+    properties:
+      version_id:
+        type: string
+        validation:
+          format: uuid
+  DescriptionsScore:
+    properties:
+      name:
+        type: string
+        docs: Name of the descriptive feature being expressed.
+      score:
+        type: string
+        docs: Embedding value for the descriptive feature being expressed.
+  Direction:
+    enum:
+      - asc
+      - desc
+  EmbeddingGenerationBaseRequest:
+    properties:
+      registry_file_details:
+        type: optional<list<RegistryFileDetail>>
+        docs: File ID and File URL pairs for an asset registry file
+  EmotionScore:
+    properties:
+      name:
+        type: string
+        docs: Name of the emotion being expressed.
+      score:
+        type: double
+        docs: Embedding value for the emotion being expressed.
+  Error:
+    properties:
+      message:
+        type: string
+        docs: An error message.
+      file:
+        type: string
+        docs: A file path relative to the top level source URL or file.
+  EvaluationArgs:
+    properties:
+      validation: optional<ValidationArgs>
+  Face:
+    properties:
+      fps_pred:
+        type: optional<double>
+        docs: >-
+          Number of frames per second to process. Other frames will be omitted
+          from the response. Set to `0` to process every frame.
+        default: 3
+      prob_threshold:
+        type: optional<double>
+        docs: >-
+          Face detection probability threshold. Faces detected with a
+          probability less than this threshold will be omitted from the
+          response.
+        default: 0.99
+        validation:
+          min: 0
+          max: 1
+      identify_faces:
+        type: optional<boolean>
+        docs: >-
+          Whether to return identifiers for faces across frames. If `true`,
+          unique identifiers will be assigned to face bounding boxes to
+          differentiate different faces. If `false`, all faces will be tagged
+          with an `unknown` ID.
+      min_face_size:
+        type: optional<integer>
+        docs: >-
+          Minimum bounding box side length in pixels to treat as a face. Faces
+          detected with a bounding box side length in pixels less than this
+          threshold will be omitted from the response.
+        default: 60
+        validation:
+          min: 10
+      facs: optional<Unconfigurable>
+      descriptions: optional<Unconfigurable>
+      save_faces:
+        type: optional<boolean>
+        docs: >-
+          Whether to extract and save the detected faces in the artifacts zip
+          created by each job.
+  FacePrediction:
+    properties:
+      frame:
+        type: integer
+        docs: Frame number
+      time:
+        type: double
+        docs: Time in seconds when face detection occurred.
+      prob:
+        type: double
+        docs: The predicted probability that a detected face was actually a face.
+      box: BoundingBox
+      emotions:
+        docs: A high-dimensional embedding in emotion space.
+        type: list<EmotionScore>
+      facs:
+        type: optional<list<FacsScore>>
+        docs: FACS 2.0 features and their scores.
+      descriptions:
+        type: optional<list<DescriptionsScore>>
+        docs: Modality-specific descriptive features and their scores.
+  FacemeshPrediction:
+    properties:
+      emotions:
+        docs: A high-dimensional embedding in emotion space.
+        type: list<EmotionScore>
+  FacsScore:
+    properties:
+      name:
+        type: string
+        docs: Name of the FACS 2.0 feature being expressed.
+      score:
+        type: string
+        docs: Embedding value for the FACS 2.0 feature being expressed.
+  Failed:
+    properties:
+      created_timestamp_ms:
+        type: integer
+        docs: When this job was created (Unix timestamp in milliseconds).
+      started_timestamp_ms:
+        type: integer
+        docs: When this job started (Unix timestamp in milliseconds).
+      ended_timestamp_ms:
+        type: integer
+        docs: When this job ended (Unix timestamp in milliseconds).
+      message:
+        type: string
+        docs: An error message.
+  File:
+    properties:
+      filename:
+        type: optional<string>
+        docs: The name of the file.
+      content_type:
+        type: optional<string>
+        docs: The content type of the file.
+      md5sum:
+        type: string
+        docs: The MD5 checksum of the file.
+  Granularity:
+    enum:
+      - word
+      - sentence
+      - utterance
+      - conversational_turn
+    docs: >-
+      The granularity at which to generate predictions. `utterance` corresponds
+      to a natural pause or break in conversation, while `conversational_turn`
+      corresponds to a change in speaker.
+  GroupedPredictionsBurstPrediction:
+    properties:
+      id:
+        type: string
+        docs: >-
+          An automatically generated label to identify individuals in your media
+          file. Will be `unknown` if you have chosen to disable identification,
+          or if the model is unable to distinguish between individuals.
+      predictions: list<BurstPrediction>
+  GroupedPredictionsFacePrediction:
+    properties:
+      id:
+        type: string
+        docs: >-
+          An automatically generated label to identify individuals in your media
+          file. Will be `unknown` if you have chosen to disable identification,
+          or if the model is unable to distinguish between individuals.
+      predictions: list<FacePrediction>
+  GroupedPredictionsFacemeshPrediction:
+    properties:
+      id:
+        type: string
+        docs: >-
+          An automatically generated label to identify individuals in your media
+          file. Will be `unknown` if you have chosen to disable identification,
+          or if the model is unable to distinguish between individuals.
+      predictions: list<FacemeshPrediction>
+  GroupedPredictionsLanguagePrediction:
+    properties:
+      id:
+        type: string
+        docs: >-
+          An automatically generated label to identify individuals in your media
+          file. Will be `unknown` if you have chosen to disable identification,
+          or if the model is unable to distinguish between individuals.
+      predictions: list<LanguagePrediction>
+  GroupedPredictionsNerPrediction:
+    properties:
+      id:
+        type: string
+        docs: >-
+          An automatically generated label to identify individuals in your media
+          file. Will be `unknown` if you have chosen to disable identification,
+          or if the model is unable to distinguish between individuals.
+      predictions: list<NerPrediction>
+  GroupedPredictionsProsodyPrediction:
+    properties:
+      id:
+        type: string
+        docs: >-
+          An automatically generated label to identify individuals in your media
+          file. Will be `unknown` if you have chosen to disable identification,
+          or if the model is unable to distinguish between individuals.
+      predictions: list<ProsodyPrediction>
+  InProgress:
+    properties:
+      created_timestamp_ms:
+        type: integer
+        docs: When this job was created (Unix timestamp in milliseconds).
+      started_timestamp_ms:
+        type: integer
+        docs: When this job started (Unix timestamp in milliseconds).
+  InferenceBaseRequest:
+    properties:
+      models: optional<Models>
+      transcription: optional<Transcription>
+      urls:
+        type: optional<list<string>>
+        docs: >-
+          URLs to the media files to be processed. Each must be a valid public
+          URL to a media file (see recommended input filetypes) or an archive
+          (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files.
+
+
+          If you wish to supply more than 100 URLs, consider providing them as
+          an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`).
+      registry_files:
+        type: optional<list<string>>
+        docs: List of File IDs corresponding to the files in the asset registry.
+      text:
+        type: optional<list<string>>
+        docs: Text to supply directly to our language and NER models.
+      callback_url:
+        type: optional<string>
+        docs: >-
+          If provided, a `POST` request will be made to the URL with the
+          generated predictions on completion or the error message on failure.
+        validation:
+          format: url
+      notify:
+        type: optional<boolean>
+        docs: >-
+          Whether to send an email notification to the user upon job
+          completion/failure.
+  InferencePrediction:
+    properties:
+      file:
+        type: string
+        docs: A file path relative to the top level source URL or file.
+      models: ModelsPredictions
+  InferenceRequest:
+    properties:
+      models: optional<Models>
+      transcription: optional<Transcription>
+      urls:
+        type: optional<list<string>>
+        docs: >-
+          URLs to the media files to be processed. Each must be a valid public
+          URL to a media file (see recommended input filetypes) or an archive
+          (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files.
+
+
+          If you wish to supply more than 100 URLs, consider providing them as
+          an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`).
+      registry_files:
+        type: optional<list<string>>
+        docs: List of File IDs corresponding to the files in the asset registry.
+      text:
+        type: optional<list<string>>
+        docs: Text to supply directly to our language and NER models.
+      callback_url:
+        type: optional<string>
+        docs: >-
+          If provided, a `POST` request will be made to the URL with the
+          generated predictions on completion or the error message on failure.
+        validation:
+          format: url
+      notify:
+        type: optional<boolean>
+        docs: >-
+          Whether to send an email notification to the user upon job
+          completion/failure.
+      files: list<File>
+  InferenceResults:
+    properties:
+      predictions: list<InferencePrediction>
+      errors: list<Error>
+  InferenceSourcePredictResult:
+    properties:
+      source: Source
+      results: optional<InferenceResults>
+      error:
+        type: optional<string>
+        docs: An error message.
+  JobEmbeddingGeneration:
+    properties:
+      job_id:
+        type: string
+        docs: The ID associated with this job.
+        validation:
+          format: uuid
+      user_id:
+        type: string
+        validation:
+          format: uuid
+      request: EmbeddingGenerationBaseRequest
+      state: StateEmbeddingGeneration
+  JobInference:
+    properties:
+      job_id:
+        type: string
+        docs: The ID associated with this job.
+        validation:
+          format: uuid
+      user_id:
+        type: string
+        validation:
+          format: uuid
+      request: InferenceRequest
+      state: StateInference
+  JobTlInference:
+    properties:
+      job_id:
+        type: string
+        docs: The ID associated with this job.
+        validation:
+          format: uuid
+      user_id:
+        type: string
+        validation:
+          format: uuid
+      request: TlInferenceBaseRequest
+      state: StateTlInference
+  JobTraining:
+    properties:
+      job_id:
+        type: string
+        docs: The ID associated with this job.
+        validation:
+          format: uuid
+      user_id:
+        type: string
+        validation:
+          format: uuid
+      request: TrainingBaseRequest
+      state: StateTraining
+  JobId:
+    properties:
+      job_id:
+        type: string
+        docs: The ID of the started job.
+        validation:
+          format: uuid
+  Language:
+    properties:
+      granularity: optional<Granularity>
+      sentiment: optional<Unconfigurable>
+      toxicity: optional<Unconfigurable>
+      identify_speakers:
+        type: optional<boolean>
+        docs: >-
+          Whether to return identifiers for speakers over time. If `true`,
+          unique identifiers will be assigned to spoken words to differentiate
+          different speakers. If `false`, all speakers will be tagged with an
+          `unknown` ID.
+  LanguagePrediction:
+    properties:
+      text:
+        type: string
+        docs: A segment of text (like a word or a sentence).
+      position: PositionInterval
+      time: optional<TimeInterval>
+      confidence:
+        type: optional<double>
+        docs: >-
+          Value between `0.0` and `1.0` that indicates our transcription model's
+          relative confidence in this text.
+      speaker_confidence:
+        type: optional<double>
+        docs: >-
+          Value between `0.0` and `1.0` that indicates our transcription model's
+          relative confidence that this text was spoken by this speaker.
+      emotions:
+        docs: A high-dimensional embedding in emotion space.
+        type: list<EmotionScore>
+      sentiment:
+        type: optional<list<SentimentScore>>
+        docs: >-
+          Sentiment predictions returned as a distribution. This model predicts
+          the probability that a given text could be interpreted as having each
+          sentiment level from `1` (negative) to `9` (positive).
+
+
+          Compared to returning one estimate of sentiment, this enables a more
+          nuanced analysis of a text's meaning. For example, a text with very
+          neutral sentiment would have an average rating of `5`. But also a text
+          that could be interpreted as having very positive sentiment or very
+          negative sentiment would also have an average rating of `5`. The
+          average sentiment is less informative than the distribution over
+          sentiment, so this API returns a value for each sentiment level.
+      toxicity:
+        type: optional<list<ToxicityScore>>
+        docs: >-
+          Toxicity predictions returned as probabilities that the text can be
+          classified into the following categories: `toxic`, `severe_toxic`,
+          `obscene`, `threat`, `insult`, and `identity_hate`.
+  Models:
+    properties:
+      face: optional<Face>
+      burst: optional<Unconfigurable>
+      prosody: optional<Prosody>
+      language: optional<Language>
+      ner: optional<Ner>
+      facemesh: optional<Unconfigurable>
+  ModelsPredictions:
+    properties:
+      face: optional<PredictionsOptionalNullFacePrediction>
+      burst: optional<PredictionsOptionalNullBurstPrediction>
+      prosody: optional<PredictionsOptionalTranscriptionMetadataProsodyPrediction>
+      language: optional<PredictionsOptionalTranscriptionMetadataLanguagePrediction>
+      ner: optional<PredictionsOptionalTranscriptionMetadataNerPrediction>
+      facemesh: optional<PredictionsOptionalNullFacemeshPrediction>
+  Ner:
+    properties:
+      identify_speakers:
+        type: optional<boolean>
+        docs: >-
+          Whether to return identifiers for speakers over time. If `true`,
+          unique identifiers will be assigned to spoken words to differentiate
+          different speakers. If `false`, all speakers will be tagged with an
+          `unknown` ID.
+  NerPrediction:
+    properties:
+      entity:
+        type: string
+        docs: The recognized topic or entity.
+      position: PositionInterval
+      entity_confidence:
+        type: double
+        docs: Our NER model's relative confidence in the recognized topic or entity.
+      support:
+        type: double
+        docs: A measure of how often the entity is linked to by other entities.
+      uri:
+        type: string
+        docs: >-
+          A URL which provides more information about the recognized topic or
+          entity.
+      link_word:
+        type: string
+        docs: The specific word to which the emotion predictions are linked.
+      time: optional<TimeInterval>
+      confidence:
+        type: optional<double>
+        docs: >-
+          Value between `0.0` and `1.0` that indicates our transcription model's
+          relative confidence in this text.
+      speaker_confidence:
+        type: optional<double>
+        docs: >-
+          Value between `0.0` and `1.0` that indicates our transcription model's
+          relative confidence that this text was spoken by this speaker.
+      emotions:
+        docs: A high-dimensional embedding in emotion space.
+        type: list<EmotionScore>
+  'Null':
+    docs: No associated metadata for this model. Value will be `null`.
+    type: map<string, unknown>
+  PositionInterval:
+    docs: >-
+      Position of a segment of text within a larger document, measured in
+      characters. Uses zero-based indexing. The beginning index is inclusive and
+      the end index is exclusive.
+    properties:
+      begin:
+        type: integer
+        docs: The index of the first character in the text segment, inclusive.
+      end:
+        type: integer
+        docs: The index of the last character in the text segment, exclusive.
+  PredictionsOptionalNullBurstPrediction:
+    properties:
+      metadata: optional<Null>
+      grouped_predictions: list<GroupedPredictionsBurstPrediction>
+  PredictionsOptionalNullFacePrediction:
+    properties:
+      metadata: optional<Null>
+      grouped_predictions: list<GroupedPredictionsFacePrediction>
+  PredictionsOptionalNullFacemeshPrediction:
+    properties:
+      metadata: optional<Null>
+      grouped_predictions: list<GroupedPredictionsFacemeshPrediction>
+  PredictionsOptionalTranscriptionMetadataLanguagePrediction:
+    properties:
+      metadata: optional<TranscriptionMetadata>
+      grouped_predictions: list<GroupedPredictionsLanguagePrediction>
+  PredictionsOptionalTranscriptionMetadataNerPrediction:
+    properties:
+      metadata: optional<TranscriptionMetadata>
+      grouped_predictions: list<GroupedPredictionsNerPrediction>
+  PredictionsOptionalTranscriptionMetadataProsodyPrediction:
+    properties:
+      metadata: optional<TranscriptionMetadata>
+      grouped_predictions: list<GroupedPredictionsProsodyPrediction>
+  Prosody:
+    properties:
+      granularity: optional<Granularity>
+      window: optional<Window>
+      identify_speakers:
+        type: optional<boolean>
+        docs: >-
+          Whether to return identifiers for speakers over time. If `true`,
+          unique identifiers will be assigned to spoken words to differentiate
+          different speakers. If `false`, all speakers will be tagged with an
+          `unknown` ID.
+  ProsodyPrediction:
+    properties:
+      text:
+        type: optional<string>
+        docs: A segment of text (like a word or a sentence).
+      time: TimeInterval
+      confidence:
+        type: optional<double>
+        docs: >-
+          Value between `0.0` and `1.0` that indicates our transcription model's
+          relative confidence in this text.
+      speaker_confidence:
+        type: optional<double>
+        docs: >-
+          Value between `0.0` and `1.0` that indicates our transcription model's
+          relative confidence that this text was spoken by this speaker.
+      emotions:
+        docs: A high-dimensional embedding in emotion space.
+        type: list<EmotionScore>
+  Queued:
+    properties:
+      created_timestamp_ms:
+        type: integer
+        docs: When this job was created (Unix timestamp in milliseconds).
+  RegistryFileDetail:
+    properties:
+      file_id:
+        type: string
+        docs: File ID in the Asset Registry
+      file_url:
+        type: string
+        docs: URL to the file in the Asset Registry
+  Regression: map<string, unknown>
+  SentimentScore:
+    properties:
+      name:
+        type: string
+        docs: Level of sentiment, ranging from `1` (negative) to `9` (positive)
+      score:
+        type: string
+        docs: Prediction for this level of sentiment
+  SortBy:
+    enum:
+      - created
+      - started
+      - ended
+  Source:
+    discriminant: type
+    base-properties: {}
+    union:
+      url: SourceUrl
+      file: SourceFile
+      text: SourceTextSource
+  SourceFile:
+    properties: {}
+    extends:
+      - File
+  SourceTextSource:
+    properties: {}
+  SourceUrl:
+    properties: {}
+    extends:
+      - Url
+  Url:
+    properties:
+      url:
+        type: string
+        docs: The URL of the source media file.
+  StateEmbeddingGeneration:
+    discriminant: status
+    base-properties: {}
+    union:
+      QUEUED: StateEmbeddingGenerationQueued
+      IN_PROGRESS: StateEmbeddingGenerationInProgress
+      COMPLETED: StateEmbeddingGenerationCompletedEmbeddingGeneration
+      FAILED: StateEmbeddingGenerationFailed
+  StateEmbeddingGenerationCompletedEmbeddingGeneration:
+    properties: {}
+    extends:
+      - CompletedEmbeddingGeneration
+  StateEmbeddingGenerationFailed:
+    properties: {}
+    extends:
+      - Failed
+  StateEmbeddingGenerationInProgress:
+    properties: {}
+    extends:
+      - InProgress
+  StateEmbeddingGenerationQueued:
+    properties: {}
+    extends:
+      - Queued
+  StateInference:
+    discriminant: status
+    base-properties: {}
+    union:
+      QUEUED: StateInferenceQueued
+      IN_PROGRESS: StateInferenceInProgress
+      COMPLETED: StateInferenceCompletedInference
+      FAILED: StateInferenceFailed
+  StateInferenceCompletedInference:
+    properties: {}
+    extends:
+      - CompletedInference
+  StateInferenceFailed:
+    properties: {}
+    extends:
+      - Failed
+  StateInferenceInProgress:
+    properties: {}
+    extends:
+      - InProgress
+  StateInferenceQueued:
+    properties: {}
+    extends:
+      - Queued
+  StateTlInference:
+    discriminant: status
+    base-properties: {}
+    union:
+      QUEUED: StateTlInferenceQueued
+      IN_PROGRESS: StateTlInferenceInProgress
+      COMPLETED: StateTlInferenceCompletedTlInference
+      FAILED: StateTlInferenceFailed
+  StateTlInferenceCompletedTlInference:
+    properties: {}
+    extends:
+      - CompletedTlInference
+  StateTlInferenceFailed:
+    properties: {}
+    extends:
+      - Failed
+  StateTlInferenceInProgress:
+    properties: {}
+    extends:
+      - InProgress
+  StateTlInferenceQueued:
+    properties: {}
+    extends:
+      - Queued
+  StateTraining:
+    discriminant: status
+    base-properties: {}
+    union:
+      QUEUED: StateTrainingQueued
+      IN_PROGRESS: StateTrainingInProgress
+      COMPLETED: StateTrainingCompletedTraining
+      FAILED: StateTrainingFailed
+  StateTrainingCompletedTraining:
+    properties: {}
+    extends:
+      - CompletedTraining
+  StateTrainingFailed:
+    properties: {}
+    extends:
+      - Failed
+  StateTrainingInProgress:
+    properties: {}
+    extends:
+      - InProgress
+  StateTrainingQueued:
+    properties: {}
+    extends:
+      - Queued
+  Status:
+    enum:
+      - QUEUED
+      - IN_PROGRESS
+      - COMPLETED
+      - FAILED
+  TlInferencePrediction:
+    properties:
+      file:
+        type: string
+        docs: A file path relative to the top level source URL or file.
+      file_type: string
+      custom_models: map<string, CustomModelPrediction>
+  TlInferenceResults:
+    properties:
+      predictions: list<TlInferencePrediction>
+      errors: list<Error>
+  TlInferenceSourcePredictResult:
+    properties:
+      source: Source
+      results: optional<TlInferenceResults>
+      error:
+        type: optional<string>
+        docs: An error message.
+  Tag:
+    properties:
+      key: string
+      value: string
+  Target:
+    discriminated: false
+    union:
+      - integer
+      - double
+      - string
+  Task:
+    discriminant: type
+    base-properties: {}
+    union:
+      classification: TaskClassification
+      regression: TaskRegression
+  TaskClassification:
+    properties: {}
+  TaskRegression:
+    properties: {}
+  TextSource: map<string, unknown>
+  TimeInterval:
+    docs: A time range with a beginning and end, measured in seconds.
+    properties:
+      begin:
+        type: double
+        docs: Beginning of time range in seconds.
+      end:
+        type: double
+        docs: End of time range in seconds.
+  TlInferenceBaseRequest:
+    properties:
+      custom_model: CustomModel
+      urls:
+        type: optional<list<string>>
+        docs: >-
+          URLs to the media files to be processed. Each must be a valid public
+          URL to a media file (see recommended input filetypes) or an archive
+          (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files.
+
+
+          If you wish to supply more than 100 URLs, consider providing them as
+          an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`).
+      registry_files:
+        type: optional<list<string>>
+        docs: List of File IDs corresponding to the files in the asset registry.
+      callback_url:
+        type: optional<string>
+        docs: >-
+          If provided, a `POST` request will be made to the URL with the
+          generated predictions on completion or the error message on failure.
+        validation:
+          format: url
+      notify:
+        type: optional<boolean>
+        docs: >-
+          Whether to send an email notification to the user upon job
+          completion/failure.
+  CustomModel:
+    discriminated: false
+    union:
+      - CustomModelId
+      - CustomModelVersionId
+  CustomModelId:
+    properties:
+      id: string
+  CustomModelVersionId:
+    properties:
+      version_id: string
+  ToxicityScore:
+    properties:
+      name:
+        type: string
+        docs: Category of toxicity.
+      score:
+        type: string
+        docs: Prediction for this category of toxicity
+  TrainingBaseRequest:
+    properties:
+      custom_model: CustomModelRequest
+      dataset: Dataset
+      target_feature:
+        type: optional<string>
+        default: label
+      task: optional<Task>
+      evaluation: optional<EvaluationArgs>
+      alternatives: optional<list<Alternative>>
+      callback_url:
+        type: optional<string>
+        validation:
+          format: url
+      notify: optional<boolean>
+  TrainingCustomModel:
+    properties:
+      id: string
+      version_id: optional<string>
+  Transcription:
+    properties:
+      language: optional<Bcp47Tag>
+      identify_speakers:
+        type: optional<boolean>
+        docs: >-
+          Whether to return identifiers for speakers over time. If `true`,
+          unique identifiers will be assigned to spoken words to differentiate
+          different speakers. If `false`, all speakers will be tagged with an
+          `unknown` ID.
+      confidence_threshold:
+        type: optional<double>
+        docs: >-
+          Transcript confidence threshold. Transcripts generated with a
+          confidence less than this threshold will be considered invalid and not
+          used as an input for model inference.
+        default: 0.5
+        validation:
+          min: 0
+          max: 1
+  TranscriptionMetadata:
+    docs: Transcription metadata for your media file.
+    properties:
+      confidence:
+        type: double
+        docs: >-
+          Value between `0.0` and `1.0` indicating our transcription model's
+          relative confidence in the transcription of your media file.
+      detected_language: optional<Bcp47Tag>
+  Type:
+    enum:
+      - EMBEDDING_GENERATION
+      - INFERENCE
+      - TL_INFERENCE
+      - TRAINING
+  Unconfigurable:
+    docs: >-
+      To include predictions for this model type, set this field to `{}`. It is
+      currently not configurable further.
+    type: map<string, unknown>
+  UnionJob:
+    discriminant: type
+    base-properties: {}
+    union:
+      EMBEDDING_GENERATION: UnionJobJobEmbeddingGeneration
+      INFERENCE: UnionJobJobInference
+      TL_INFERENCE: UnionJobJobTlInference
+      TRAINING: UnionJobJobTraining
+  UnionJobJobEmbeddingGeneration:
+    properties: {}
+    extends:
+      - JobEmbeddingGeneration
+  UnionJobJobInference:
+    properties: {}
+    extends:
+      - JobInference
+  UnionJobJobTlInference:
+    properties: {}
+    extends:
+      - JobTlInference
+  UnionJobJobTraining:
+    properties: {}
+    extends:
+      - JobTraining
+  UnionPredictResult:
+    discriminated: false
+    union:
+      - InferenceSourcePredictResult
+      - TlInferenceSourcePredictResult
+  ValidationArgs:
+    properties:
+      positive_label: optional<Target>
+  When:
+    enum:
+      - created_before
+      - created_after
+  Window:
+    properties:
+      length:
+        type: optional<double>
+        docs: The length of the sliding window.
+        default: 4
+        validation:
+          min: 0.5
+      step:
+        type: optional<double>
+        docs: The step size of the sliding window.
+        default: 1
+        validation:
+          min: 0.5