hume 0.13.5 → 0.13.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. package/.mock/definition/api.yml +12 -0
  2. package/.mock/definition/empathic-voice/__package__.yml +2976 -0
  3. package/.mock/definition/empathic-voice/chat.yml +175 -0
  4. package/.mock/definition/empathic-voice/chatGroups.yml +627 -0
  5. package/.mock/definition/empathic-voice/chatWebhooks.yml +30 -0
  6. package/.mock/definition/empathic-voice/chats.yml +506 -0
  7. package/.mock/definition/empathic-voice/configs.yml +842 -0
  8. package/.mock/definition/empathic-voice/prompts.yml +558 -0
  9. package/.mock/definition/empathic-voice/tools.yml +626 -0
  10. package/.mock/definition/expression-measurement/__package__.yml +1 -0
  11. package/.mock/definition/expression-measurement/batch/__package__.yml +1803 -0
  12. package/.mock/definition/expression-measurement/stream/__package__.yml +113 -0
  13. package/.mock/definition/expression-measurement/stream/stream.yml +438 -0
  14. package/.mock/definition/tts/__package__.yml +713 -0
  15. package/.mock/definition/tts/streamInput.yml +84 -0
  16. package/.mock/definition/tts/voices.yml +143 -0
  17. package/.mock/fern.config.json +4 -0
  18. package/Client.js +10 -3
  19. package/api/resources/empathicVoice/types/ReturnConfig.d.ts +2 -2
  20. package/api/resources/index.d.ts +1 -1
  21. package/api/resources/index.js +2 -2
  22. package/dist/Client.js +10 -3
  23. package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +2 -2
  24. package/dist/api/resources/index.d.ts +1 -1
  25. package/dist/api/resources/index.js +2 -2
  26. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +1 -1
  27. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +1 -1
  28. package/dist/serialization/resources/index.d.ts +1 -1
  29. package/dist/serialization/resources/index.js +2 -2
  30. package/dist/version.d.ts +1 -1
  31. package/dist/version.js +1 -1
  32. package/package.json +1 -1
  33. package/reference.md +702 -702
  34. package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +1 -1
  35. package/serialization/resources/empathicVoice/types/ReturnConfig.js +1 -1
  36. package/serialization/resources/index.d.ts +1 -1
  37. package/serialization/resources/index.js +2 -2
  38. package/version.d.ts +1 -1
  39. package/version.js +1 -1
@@ -0,0 +1,113 @@
1
+ types:
2
+ EmotionEmbeddingItem:
3
+ properties:
4
+ name:
5
+ type: optional<string>
6
+ docs: Name of the emotion being expressed.
7
+ score:
8
+ type: optional<double>
9
+ docs: Embedding value for the emotion being expressed.
10
+ source:
11
+ openapi: streaming-asyncapi.yml
12
+ EmotionEmbedding:
13
+ docs: A high-dimensional embedding in emotion space.
14
+ type: list<EmotionEmbeddingItem>
15
+ StreamBoundingBox:
16
+ docs: A bounding box around a face.
17
+ properties:
18
+ x:
19
+ type: optional<double>
20
+ docs: x-coordinate of bounding box top left corner.
21
+ validation:
22
+ min: 0
23
+ 'y':
24
+ type: optional<double>
25
+ docs: y-coordinate of bounding box top left corner.
26
+ validation:
27
+ min: 0
28
+ w:
29
+ type: optional<double>
30
+ docs: Bounding box width.
31
+ validation:
32
+ min: 0
33
+ h:
34
+ type: optional<double>
35
+ docs: Bounding box height.
36
+ validation:
37
+ min: 0
38
+ source:
39
+ openapi: streaming-asyncapi.yml
40
+ TimeRange:
41
+ docs: A time range with a beginning and end, measured in seconds.
42
+ properties:
43
+ begin:
44
+ type: optional<double>
45
+ docs: Beginning of time range in seconds.
46
+ validation:
47
+ min: 0
48
+ end:
49
+ type: optional<double>
50
+ docs: End of time range in seconds.
51
+ validation:
52
+ min: 0
53
+ source:
54
+ openapi: streaming-asyncapi.yml
55
+ TextPosition:
56
+ docs: >
57
+ Position of a segment of text within a larger document, measured in
58
+ characters. Uses zero-based indexing. The beginning index is inclusive and
59
+ the end index is exclusive.
60
+ properties:
61
+ begin:
62
+ type: optional<double>
63
+ docs: The index of the first character in the text segment, inclusive.
64
+ validation:
65
+ min: 0
66
+ end:
67
+ type: optional<double>
68
+ docs: The index of the last character in the text segment, exclusive.
69
+ validation:
70
+ min: 0
71
+ source:
72
+ openapi: streaming-asyncapi.yml
73
+ SentimentItem:
74
+ properties:
75
+ name:
76
+ type: optional<string>
77
+ docs: Level of sentiment, ranging from 1 (negative) to 9 (positive)
78
+ score:
79
+ type: optional<double>
80
+ docs: Prediction for this level of sentiment
81
+ source:
82
+ openapi: streaming-asyncapi.yml
83
+ Sentiment:
84
+ docs: >-
85
+ Sentiment predictions returned as a distribution. This model predicts the
86
+ probability that a given text could be interpreted as having each
87
+ sentiment level from 1 (negative) to 9 (positive).
88
+
89
+
90
+ Compared to returning one estimate of sentiment, this enables a more
91
+ nuanced analysis of a text's meaning. For example, a text with very
92
+ neutral sentiment would have an average rating of 5. But also a text that
93
+ could be interpreted as having very positive sentiment or very negative
94
+ sentiment would also have an average rating of 5. The average sentiment is
95
+ less informative than the distribution over sentiment, so this API returns
96
+ a value for each sentiment level.
97
+ type: list<SentimentItem>
98
+ ToxicityItem:
99
+ properties:
100
+ name:
101
+ type: optional<string>
102
+ docs: Category of toxicity.
103
+ score:
104
+ type: optional<double>
105
+ docs: Prediction for this category of toxicity
106
+ source:
107
+ openapi: streaming-asyncapi.yml
108
+ Toxicity:
109
+ docs: >-
110
+ Toxicity predictions returned as probabilities that the text can be
111
+ classified into the following categories: toxic, severe_toxic, obscene,
112
+ threat, insult, and identity_hate.
113
+ type: list<ToxicityItem>
@@ -0,0 +1,438 @@
1
+ channel:
2
+ path: /models
3
+ url: prod
4
+ auth: false
5
+ headers:
6
+ X-Hume-Api-Key:
7
+ type: string
8
+ name: humeApiKey
9
+ messages:
10
+ publish:
11
+ origin: client
12
+ body:
13
+ type: StreamModelsEndpointPayload
14
+ docs: Models endpoint payload
15
+ subscribe:
16
+ origin: server
17
+ body: SubscribeEvent
18
+ examples:
19
+ - messages:
20
+ - type: publish
21
+ body: {}
22
+ - type: subscribe
23
+ body: {}
24
+ types:
25
+ StreamFace:
26
+ docs: >
27
+ Configuration for the facial expression emotion model.
28
+
29
+
30
+ Note: Using the `reset_stream` parameter does not have any effect on face
31
+ identification. A single face identifier cache is maintained over a full
32
+ session whether `reset_stream` is used or not.
33
+ properties:
34
+ facs:
35
+ type: optional<map<string, unknown>>
36
+ docs: >-
37
+ Configuration for FACS predictions. If missing or null, no FACS
38
+ predictions will be generated.
39
+ descriptions:
40
+ type: optional<map<string, unknown>>
41
+ docs: >-
42
+ Configuration for Descriptions predictions. If missing or null, no
43
+ Descriptions predictions will be generated.
44
+ identify_faces:
45
+ type: optional<boolean>
46
+ docs: >
47
+ Whether to return identifiers for faces across frames. If true, unique
48
+ identifiers will be assigned to face bounding boxes to differentiate
49
+ different faces. If false, all faces will be tagged with an "unknown"
50
+ ID.
51
+ default: false
52
+ fps_pred:
53
+ type: optional<double>
54
+ docs: >
55
+ Number of frames per second to process. Other frames will be omitted
56
+ from the response.
57
+ default: 3
58
+ prob_threshold:
59
+ type: optional<double>
60
+ docs: >
61
+ Face detection probability threshold. Faces detected with a
62
+ probability less than this threshold will be omitted from the
63
+ response.
64
+ default: 3
65
+ min_face_size:
66
+ type: optional<double>
67
+ docs: >
68
+ Minimum bounding box side length in pixels to treat as a face. Faces
69
+ detected with a bounding box side length in pixels less than this
70
+ threshold will be omitted from the response.
71
+ default: 3
72
+ source:
73
+ openapi: streaming-asyncapi.yml
74
+ inline: true
75
+ StreamLanguage:
76
+ docs: Configuration for the language emotion model.
77
+ properties:
78
+ sentiment:
79
+ type: optional<map<string, unknown>>
80
+ docs: >-
81
+ Configuration for sentiment predictions. If missing or null, no
82
+ sentiment predictions will be generated.
83
+ toxicity:
84
+ type: optional<map<string, unknown>>
85
+ docs: >-
86
+ Configuration for toxicity predictions. If missing or null, no
87
+ toxicity predictions will be generated.
88
+ granularity:
89
+ type: optional<string>
90
+ docs: >-
91
+ The granularity at which to generate predictions. Values are `word`,
92
+ `sentence`, `utterance`, or `passage`. To get a single prediction for
93
+ the entire text of your streaming payload use `passage`. Default value
94
+ is `word`.
95
+ source:
96
+ openapi: streaming-asyncapi.yml
97
+ inline: true
98
+ Config:
99
+ docs: >
100
+ Configuration used to specify which models should be used and with what
101
+ settings.
102
+ properties:
103
+ burst:
104
+ type: optional<map<string, unknown>>
105
+ docs: |
106
+ Configuration for the vocal burst emotion model.
107
+
108
+ Note: Model configuration is not currently available in streaming.
109
+
110
+ Please use the default configuration by passing an empty object `{}`.
111
+ face:
112
+ type: optional<StreamFace>
113
+ docs: >
114
+ Configuration for the facial expression emotion model.
115
+
116
+
117
+ Note: Using the `reset_stream` parameter does not have any effect on
118
+ face identification. A single face identifier cache is maintained over
119
+ a full session whether `reset_stream` is used or not.
120
+ facemesh:
121
+ type: optional<map<string, unknown>>
122
+ docs: |
123
+ Configuration for the facemesh emotion model.
124
+
125
+ Note: Model configuration is not currently available in streaming.
126
+
127
+ Please use the default configuration by passing an empty object `{}`.
128
+ language:
129
+ type: optional<StreamLanguage>
130
+ docs: Configuration for the language emotion model.
131
+ prosody:
132
+ type: optional<map<string, unknown>>
133
+ docs: |
134
+ Configuration for the speech prosody emotion model.
135
+
136
+ Note: Model configuration is not currently available in streaming.
137
+
138
+ Please use the default configuration by passing an empty object `{}`.
139
+ source:
140
+ openapi: streaming-asyncapi.yml
141
+ inline: true
142
+ StreamModelsEndpointPayload:
143
+ docs: Models endpoint payload
144
+ properties:
145
+ data:
146
+ type: optional<string>
147
+ models:
148
+ type: optional<Config>
149
+ docs: >
150
+ Configuration used to specify which models should be used and with
151
+ what settings.
152
+ stream_window_ms:
153
+ type: optional<double>
154
+ docs: >
155
+ Length in milliseconds of streaming sliding window.
156
+
157
+
158
+ Extending the length of this window will prepend media context from
159
+ past payloads into the current payload.
160
+
161
+
162
+ For example, if on the first payload you send 500ms of data and on the
163
+ second payload you send an additional 500ms of data, a window of at
164
+ least 1000ms will allow the model to process all 1000ms of stream
165
+ data.
166
+
167
+
168
+ A window of 600ms would append the full 500ms of the second payload to
169
+ the last 100ms of the first payload.
170
+
171
+
172
+ Note: This feature is currently only supported for audio data and
173
+ audio models. For other file types and models this parameter will be
174
+ ignored.
175
+ default: 5000
176
+ validation:
177
+ min: 500
178
+ max: 10000
179
+ reset_stream:
180
+ type: optional<boolean>
181
+ docs: >
182
+ Whether to reset the streaming sliding window before processing the
183
+ current payload.
184
+
185
+
186
+ If this parameter is set to `true` then past context will be deleted
187
+ before processing the current payload.
188
+
189
+
190
+ Use reset_stream when one audio file is done being processed and you
191
+ do not want context to leak across files.
192
+ default: false
193
+ raw_text:
194
+ type: optional<boolean>
195
+ docs: >
196
+ Set to `true` to enable the data parameter to be parsed as raw text
197
+ rather than base64 encoded bytes.
198
+
199
+ This parameter is useful if you want to send text to be processed by
200
+ the language model, but it cannot be used with other file types like
201
+ audio, image, or video.
202
+ default: false
203
+ job_details:
204
+ type: optional<boolean>
205
+ docs: >
206
+ Set to `true` to get details about the job.
207
+
208
+
209
+ This parameter can be set in the same payload as data or it can be set
210
+ without data and models configuration to get the job details between
211
+ payloads.
212
+
213
+
214
+ This parameter is useful to get the unique job ID.
215
+ default: false
216
+ payload_id:
217
+ type: optional<string>
218
+ docs: >
219
+ Pass an arbitrary string as the payload ID and get it back at the top
220
+ level of the socket response.
221
+
222
+
223
+ This can be useful if you have multiple requests running
224
+ asynchronously and want to disambiguate responses as they are
225
+ received.
226
+ source:
227
+ openapi: streaming-asyncapi.yml
228
+ StreamModelPredictionsJobDetails:
229
+ docs: >
230
+ If the job_details flag was set in the request, details about the current
231
+ streaming job will be returned in the response body.
232
+ properties:
233
+ job_id:
234
+ type: optional<string>
235
+ docs: ID of the current streaming job.
236
+ source:
237
+ openapi: streaming-asyncapi.yml
238
+ inline: true
239
+ StreamModelPredictionsBurstPredictionsItem:
240
+ properties:
241
+ time: optional<streamRoot.TimeRange>
242
+ emotions: optional<streamRoot.EmotionEmbedding>
243
+ source:
244
+ openapi: streaming-asyncapi.yml
245
+ inline: true
246
+ StreamModelPredictionsBurst:
247
+ docs: Response for the vocal burst emotion model.
248
+ properties:
249
+ predictions: optional<list<StreamModelPredictionsBurstPredictionsItem>>
250
+ source:
251
+ openapi: streaming-asyncapi.yml
252
+ inline: true
253
+ StreamModelPredictionsFacePredictionsItem:
254
+ properties:
255
+ frame:
256
+ type: optional<double>
257
+ docs: Frame number
258
+ time:
259
+ type: optional<double>
260
+ docs: Time in seconds when face detection occurred.
261
+ bbox: optional<streamRoot.StreamBoundingBox>
262
+ prob:
263
+ type: optional<double>
264
+ docs: The predicted probability that a detected face was actually a face.
265
+ face_id:
266
+ type: optional<string>
267
+ docs: >-
268
+ Identifier for a face. Note that this defaults to `unknown` unless face
269
+ identification is enabled in the face model configuration.
270
+ emotions: optional<streamRoot.EmotionEmbedding>
271
+ facs: optional<streamRoot.EmotionEmbedding>
272
+ descriptions: optional<streamRoot.EmotionEmbedding>
273
+ source:
274
+ openapi: streaming-asyncapi.yml
275
+ inline: true
276
+ StreamModelPredictionsFace:
277
+ docs: Response for the facial expression emotion model.
278
+ properties:
279
+ predictions: optional<list<StreamModelPredictionsFacePredictionsItem>>
280
+ source:
281
+ openapi: streaming-asyncapi.yml
282
+ inline: true
283
+ StreamModelPredictionsFacemeshPredictionsItem:
284
+ properties:
285
+ emotions: optional<streamRoot.EmotionEmbedding>
286
+ source:
287
+ openapi: streaming-asyncapi.yml
288
+ inline: true
289
+ StreamModelPredictionsFacemesh:
290
+ docs: Response for the facemesh emotion model.
291
+ properties:
292
+ predictions: optional<list<StreamModelPredictionsFacemeshPredictionsItem>>
293
+ source:
294
+ openapi: streaming-asyncapi.yml
295
+ inline: true
296
+ StreamModelPredictionsLanguagePredictionsItem:
297
+ properties:
298
+ text:
299
+ type: optional<string>
300
+ docs: A segment of text (like a word or a sentence).
301
+ position: optional<streamRoot.TextPosition>
302
+ emotions: optional<streamRoot.EmotionEmbedding>
303
+ sentiment: optional<streamRoot.Sentiment>
304
+ toxicity: optional<streamRoot.Toxicity>
305
+ source:
306
+ openapi: streaming-asyncapi.yml
307
+ inline: true
308
+ StreamModelPredictionsLanguage:
309
+ docs: Response for the language emotion model.
310
+ properties:
311
+ predictions: optional<list<StreamModelPredictionsLanguagePredictionsItem>>
312
+ source:
313
+ openapi: streaming-asyncapi.yml
314
+ inline: true
315
+ StreamModelPredictionsProsodyPredictionsItem:
316
+ properties:
317
+ time: optional<streamRoot.TimeRange>
318
+ emotions: optional<streamRoot.EmotionEmbedding>
319
+ source:
320
+ openapi: streaming-asyncapi.yml
321
+ inline: true
322
+ StreamModelPredictionsProsody:
323
+ docs: Response for the speech prosody emotion model.
324
+ properties:
325
+ predictions: optional<list<StreamModelPredictionsProsodyPredictionsItem>>
326
+ source:
327
+ openapi: streaming-asyncapi.yml
328
+ inline: true
329
+ StreamModelPredictions:
330
+ docs: Model predictions
331
+ properties:
332
+ payload_id:
333
+ type: optional<string>
334
+ docs: >
335
+ If a payload ID was passed in the request, the same payload ID will be
336
+ sent back in the response body.
337
+ job_details:
338
+ type: optional<StreamModelPredictionsJobDetails>
339
+ docs: >
340
+ If the job_details flag was set in the request, details about the
341
+ current streaming job will be returned in the response body.
342
+ burst:
343
+ type: optional<StreamModelPredictionsBurst>
344
+ docs: Response for the vocal burst emotion model.
345
+ face:
346
+ type: optional<StreamModelPredictionsFace>
347
+ docs: Response for the facial expression emotion model.
348
+ facemesh:
349
+ type: optional<StreamModelPredictionsFacemesh>
350
+ docs: Response for the facemesh emotion model.
351
+ language:
352
+ type: optional<StreamModelPredictionsLanguage>
353
+ docs: Response for the language emotion model.
354
+ prosody:
355
+ type: optional<StreamModelPredictionsProsody>
356
+ docs: Response for the speech prosody emotion model.
357
+ source:
358
+ openapi: streaming-asyncapi.yml
359
+ inline: true
360
+ JobDetails:
361
+ docs: >
362
+ If the job_details flag was set in the request, details about the current
363
+ streaming job will be returned in the response body.
364
+ properties:
365
+ job_id:
366
+ type: optional<string>
367
+ docs: ID of the current streaming job.
368
+ source:
369
+ openapi: streaming-asyncapi.yml
370
+ inline: true
371
+ StreamErrorMessage:
372
+ docs: Error message
373
+ properties:
374
+ error:
375
+ type: optional<string>
376
+ docs: Error message text.
377
+ code:
378
+ type: optional<string>
379
+ docs: Unique identifier for the error.
380
+ payload_id:
381
+ type: optional<string>
382
+ docs: >
383
+ If a payload ID was passed in the request, the same payload ID will be
384
+ sent back in the response body.
385
+ job_details:
386
+ type: optional<JobDetails>
387
+ docs: >
388
+ If the job_details flag was set in the request, details about the
389
+ current streaming job will be returned in the response body.
390
+ source:
391
+ openapi: streaming-asyncapi.yml
392
+ inline: true
393
+ StreamWarningMessageJobDetails:
394
+ docs: >
395
+ If the job_details flag was set in the request, details about the current
396
+ streaming job will be returned in the response body.
397
+ properties:
398
+ job_id:
399
+ type: optional<string>
400
+ docs: ID of the current streaming job.
401
+ source:
402
+ openapi: streaming-asyncapi.yml
403
+ inline: true
404
+ StreamWarningMessage:
405
+ docs: Warning message
406
+ properties:
407
+ warning:
408
+ type: optional<string>
409
+ docs: Warning message text.
410
+ code:
411
+ type: optional<string>
412
+ docs: Unique identifier for the warning.
413
+ payload_id:
414
+ type: optional<string>
415
+ docs: >
416
+ If a payload ID was passed in the request, the same payload ID will be
417
+ sent back in the response body.
418
+ job_details:
419
+ type: optional<StreamWarningMessageJobDetails>
420
+ docs: >
421
+ If the job_details flag was set in the request, details about the
422
+ current streaming job will be returned in the response body.
423
+ source:
424
+ openapi: streaming-asyncapi.yml
425
+ inline: true
426
+ SubscribeEvent:
427
+ discriminated: false
428
+ union:
429
+ - type: StreamModelPredictions
430
+ docs: Model predictions
431
+ - type: StreamErrorMessage
432
+ docs: Error message
433
+ - type: StreamWarningMessage
434
+ docs: Warning message
435
+ source:
436
+ openapi: streaming-asyncapi.yml
437
+ imports:
438
+ streamRoot: __package__.yml