hume 0.9.18 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/.mock/definition/empathic-voice/chat.yml +16 -16
  2. package/.mock/definition/empathic-voice/chatGroups.yml +6 -0
  3. package/.mock/definition/empathic-voice/configs.yml +6 -0
  4. package/.mock/definition/empathic-voice/customVoices.yml +3 -0
  5. package/.mock/definition/empathic-voice/tools.yml +3 -0
  6. package/.mock/definition/expression-measurement/stream/stream.yml +206 -206
  7. package/.mock/definition/tts/voices.yml +4 -2
  8. package/.mock/fern.config.json +1 -1
  9. package/api/resources/empathicVoice/resources/chat/types/index.d.ts +1 -1
  10. package/api/resources/empathicVoice/resources/chat/types/index.js +1 -1
  11. package/api/resources/empathicVoice/resources/chatGroups/client/Client.d.ts +2 -2
  12. package/api/resources/empathicVoice/resources/chatGroups/client/Client.js +139 -115
  13. package/api/resources/empathicVoice/resources/chats/client/Client.js +3 -3
  14. package/api/resources/empathicVoice/resources/configs/client/Client.d.ts +2 -2
  15. package/api/resources/empathicVoice/resources/configs/client/Client.js +144 -120
  16. package/api/resources/empathicVoice/resources/customVoices/client/Client.d.ts +1 -1
  17. package/api/resources/empathicVoice/resources/customVoices/client/Client.js +72 -60
  18. package/api/resources/empathicVoice/resources/prompts/client/Client.js +9 -9
  19. package/api/resources/empathicVoice/resources/tools/client/Client.d.ts +1 -1
  20. package/api/resources/empathicVoice/resources/tools/client/Client.js +75 -63
  21. package/api/resources/expressionMeasurement/resources/batch/client/Client.js +6 -6
  22. package/api/resources/expressionMeasurement/resources/stream/resources/stream/types/index.d.ts +4 -4
  23. package/api/resources/expressionMeasurement/resources/stream/resources/stream/types/index.js +4 -4
  24. package/api/resources/tts/client/Client.js +4 -4
  25. package/api/resources/tts/resources/voices/client/Client.d.ts +1 -1
  26. package/api/resources/tts/resources/voices/client/Client.js +72 -60
  27. package/dist/api/resources/empathicVoice/resources/chat/types/index.d.ts +1 -1
  28. package/dist/api/resources/empathicVoice/resources/chat/types/index.js +1 -1
  29. package/dist/api/resources/empathicVoice/resources/chatGroups/client/Client.d.ts +2 -2
  30. package/dist/api/resources/empathicVoice/resources/chatGroups/client/Client.js +139 -115
  31. package/dist/api/resources/empathicVoice/resources/chats/client/Client.js +3 -3
  32. package/dist/api/resources/empathicVoice/resources/configs/client/Client.d.ts +2 -2
  33. package/dist/api/resources/empathicVoice/resources/configs/client/Client.js +144 -120
  34. package/dist/api/resources/empathicVoice/resources/customVoices/client/Client.d.ts +1 -1
  35. package/dist/api/resources/empathicVoice/resources/customVoices/client/Client.js +72 -60
  36. package/dist/api/resources/empathicVoice/resources/prompts/client/Client.js +9 -9
  37. package/dist/api/resources/empathicVoice/resources/tools/client/Client.d.ts +1 -1
  38. package/dist/api/resources/empathicVoice/resources/tools/client/Client.js +75 -63
  39. package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.js +6 -6
  40. package/dist/api/resources/expressionMeasurement/resources/stream/resources/stream/types/index.d.ts +4 -4
  41. package/dist/api/resources/expressionMeasurement/resources/stream/resources/stream/types/index.js +4 -4
  42. package/dist/api/resources/tts/client/Client.js +4 -4
  43. package/dist/api/resources/tts/resources/voices/client/Client.d.ts +1 -1
  44. package/dist/api/resources/tts/resources/voices/client/Client.js +72 -60
  45. package/dist/serialization/resources/empathicVoice/resources/chat/types/index.d.ts +1 -1
  46. package/dist/serialization/resources/empathicVoice/resources/chat/types/index.js +1 -1
  47. package/dist/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/index.d.ts +4 -4
  48. package/dist/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/index.js +4 -4
  49. package/dist/version.d.ts +1 -1
  50. package/dist/version.js +1 -1
  51. package/package.json +1 -1
  52. package/reference.md +91 -14
  53. package/serialization/resources/empathicVoice/resources/chat/types/index.d.ts +1 -1
  54. package/serialization/resources/empathicVoice/resources/chat/types/index.js +1 -1
  55. package/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/index.d.ts +4 -4
  56. package/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/index.js +4 -4
  57. package/version.d.ts +1 -1
  58. package/version.js +1 -1
package/.mock/definition/empathic-voice/chat.yml
@@ -113,12 +113,12 @@ channel:
     For more details, refer to the [Authentication Strategies
     Guide](/docs/introduction/api-key#authentication-strategies).
   messages:
-    subscribe:
-      origin: server
-      body: SubscribeEvent
     publish:
       origin: client
       body: PublishEvent
+    subscribe:
+      origin: server
+      body: SubscribeEvent
   examples:
     - messages:
         - type: publish
@@ -131,6 +131,19 @@ channel:
 imports:
   root: __package__.yml
 types:
+  PublishEvent:
+    discriminated: false
+    union:
+      - type: root.AudioInput
+      - type: root.SessionSettings
+      - type: root.UserInput
+      - type: root.AssistantInput
+      - type: root.ToolResponseMessage
+      - type: root.ToolErrorMessage
+      - type: root.PauseAssistantMessage
+      - type: root.ResumeAssistantMessage
+    source:
+      openapi: evi-asyncapi.json
   SubscribeEvent:
     discriminated: false
     union:
@@ -146,16 +159,3 @@ types:
       - type: root.ToolErrorMessage
     source:
       openapi: evi-asyncapi.json
-  PublishEvent:
-    discriminated: false
-    union:
-      - type: root.AudioInput
-      - type: root.SessionSettings
-      - type: root.UserInput
-      - type: root.AssistantInput
-      - type: root.ToolResponseMessage
-      - type: root.ToolErrorMessage
-      - type: root.PauseAssistantMessage
-      - type: root.ResumeAssistantMessage
-    source:
-      openapi: evi-asyncapi.json
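
Net effect of the three chat.yml hunks: nothing is added or removed; the `publish` message and the `PublishEvent` union are simply declared ahead of their `subscribe` counterparts. A minimal TypeScript sketch of how an undiscriminated union like `PublishEvent` behaves in practice; the variant shapes below are illustrative stand-ins, not the SDK's exact fields:

    // Illustrative stand-ins for two of the eight variants (the real ones are
    // root.AudioInput, root.UserInput, etc. in the generated SDK).
    interface UserInput {
        type: "user_input";
        text: string;
    }
    interface PauseAssistantMessage {
        type: "pause_assistant_message";
    }
    type PublishEvent = UserInput | PauseAssistantMessage; // plus six more variants

    // `discriminated: false` means no extra envelope is wrapped around the
    // event: each variant already carries its own `type` tag, so it is sent as-is.
    function serializePublishEvent(event: PublishEvent): string {
        return JSON.stringify(event);
    }

    console.log(serializePublishEvent({ type: "user_input", text: "Hello, EVI" }));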
package/.mock/definition/empathic-voice/chatGroups.yml
@@ -9,6 +9,9 @@ service:
       method: GET
       auth: false
       docs: Fetches a paginated list of **Chat Groups**.
+      pagination:
+        offset: $request.page_number
+        results: $response.chat_groups_page
       source:
         openapi: evi-openapi.json
       display-name: List chat_groups
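
This `pagination` stanza is the source of the new pager behavior in the generated clients: `$request.page_number` names the offset parameter to increment between calls, and `$response.chat_groups_page` names the field holding each page's results. The same stanza, pointing at a different results field, is added to each list endpoint below. A rough sketch of the loop a generated offset pager runs, under those assumptions (`fetchPage` is a hypothetical stand-in for the HTTP call, not SDK code):

    // Hypothetical sketch of an offset pager driven by the two hooks above.
    async function* paginateChatGroups(
        fetchPage: (pageNumber: number) => Promise<{ chat_groups_page: unknown[] }>,
    ): AsyncGenerator<unknown> {
        for (let pageNumber = 0; ; pageNumber++) {
            const response = await fetchPage(pageNumber); // offset: $request.page_number
            const items = response.chat_groups_page;      // results: $response.chat_groups_page
            if (items.length === 0) return;               // stop on an empty page
            yield* items;
        }
    }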
@@ -165,6 +168,9 @@ service:
       docs: >-
         Fetches a paginated list of **Chat** events associated with a **Chat
         Group**.
+      pagination:
+        offset: $request.page_number
+        results: $response.events_page
       source:
         openapi: evi-openapi.json
       path-parameters:
package/.mock/definition/empathic-voice/configs.yml
@@ -15,6 +15,9 @@ service:
         For more details on configuration options and how to configure EVI, see
         our [configuration
         guide](/docs/empathic-voice-interface-evi/configuration).
+      pagination:
+        offset: $request.page_number
+        results: $response.configs_page
       source:
         openapi: evi-openapi.json
       display-name: List configs
@@ -315,6 +318,9 @@ service:
         For more details on configuration options and how to configure EVI, see
         our [configuration
         guide](/docs/empathic-voice-interface-evi/configuration).
+      pagination:
+        offset: $request.page_number
+        results: $response.configs_page
       source:
         openapi: evi-openapi.json
       path-parameters:
package/.mock/definition/empathic-voice/customVoices.yml
@@ -15,6 +15,9 @@ service:
         Refer to our [voices
         guide](/docs/empathic-voice-interface-evi/configuration/voices) for
         details on creating a custom voice.
+      pagination:
+        offset: $request.page_number
+        results: $response.custom_voices_page
       source:
         openapi: evi-openapi.json
       display-name: List custom voices
package/.mock/definition/empathic-voice/tools.yml
@@ -210,6 +210,9 @@ service:
         use](/docs/empathic-voice-interface-evi/features/tool-use#function-calling)
         guide for comprehensive instructions on defining and integrating tools
         into EVI.
+      pagination:
+        offset: $request.page_number
+        results: $response.tools_page
       source:
         openapi: evi-openapi.json
       path-parameters:
package/.mock/definition/expression-measurement/stream/stream.yml
@@ -7,14 +7,14 @@ channel:
       type: string
       name: humeApiKey
   messages:
-    subscribe:
-      origin: server
-      body: SubscribeEvent
     publish:
       origin: client
       body:
         type: StreamModelsEndpointPayload
         docs: Models endpoint payload
+    subscribe:
+      origin: server
+      body: SubscribeEvent
   examples:
     - messages:
         - type: publish
@@ -22,6 +22,209 @@ channel:
         - type: subscribe
           body: {}
 types:
+  StreamFace:
+    docs: >
+      Configuration for the facial expression emotion model.
+
+
+      Note: Using the `reset_stream` parameter does not have any effect on face
+      identification. A single face identifier cache is maintained over a full
+      session whether `reset_stream` is used or not.
+    properties:
+      facs:
+        type: optional<map<string, unknown>>
+        docs: >-
+          Configuration for FACS predictions. If missing or null, no FACS
+          predictions will be generated.
+      descriptions:
+        type: optional<map<string, unknown>>
+        docs: >-
+          Configuration for Descriptions predictions. If missing or null, no
+          Descriptions predictions will be generated.
+      identify_faces:
+        type: optional<boolean>
+        docs: >
+          Whether to return identifiers for faces across frames. If true, unique
+          identifiers will be assigned to face bounding boxes to differentiate
+          different faces. If false, all faces will be tagged with an "unknown"
+          ID.
+        default: false
+      fps_pred:
+        type: optional<double>
+        docs: >
+          Number of frames per second to process. Other frames will be omitted
+          from the response.
+        default: 3
+      prob_threshold:
+        type: optional<double>
+        docs: >
+          Face detection probability threshold. Faces detected with a
+          probability less than this threshold will be omitted from the
+          response.
+        default: 3
+      min_face_size:
+        type: optional<double>
+        docs: >
+          Minimum bounding box side length in pixels to treat as a face. Faces
+          detected with a bounding box side length in pixels less than this
+          threshold will be omitted from the response.
+        default: 3
+    source:
+      openapi: streaming-asyncapi.yml
+    inline: true
+  StreamLanguage:
+    docs: Configuration for the language emotion model.
+    properties:
+      sentiment:
+        type: optional<map<string, unknown>>
+        docs: >-
+          Configuration for sentiment predictions. If missing or null, no
+          sentiment predictions will be generated.
+      toxicity:
+        type: optional<map<string, unknown>>
+        docs: >-
+          Configuration for toxicity predictions. If missing or null, no
+          toxicity predictions will be generated.
+      granularity:
+        type: optional<string>
+        docs: >-
+          The granularity at which to generate predictions. Values are `word`,
+          `sentence`, `utterance`, or `passage`. To get a single prediction for
+          the entire text of your streaming payload use `passage`. Default value
+          is `word`.
+    source:
+      openapi: streaming-asyncapi.yml
+    inline: true
+  Config:
+    docs: >
+      Configuration used to specify which models should be used and with what
+      settings.
+    properties:
+      burst:
+        type: optional<map<string, unknown>>
+        docs: |
+          Configuration for the vocal burst emotion model.
+
+          Note: Model configuration is not currently available in streaming.
+
+          Please use the default configuration by passing an empty object `{}`.
+      face:
+        type: optional<StreamFace>
+        docs: >
+          Configuration for the facial expression emotion model.
+
+
+          Note: Using the `reset_stream` parameter does not have any effect on
+          face identification. A single face identifier cache is maintained over
+          a full session whether `reset_stream` is used or not.
+      facemesh:
+        type: optional<map<string, unknown>>
+        docs: |
+          Configuration for the facemesh emotion model.
+
+          Note: Model configuration is not currently available in streaming.
+
+          Please use the default configuration by passing an empty object `{}`.
+      language:
+        type: optional<StreamLanguage>
+        docs: Configuration for the language emotion model.
+      prosody:
+        type: optional<map<string, unknown>>
+        docs: |
+          Configuration for the speech prosody emotion model.
+
+          Note: Model configuration is not currently available in streaming.
+
+          Please use the default configuration by passing an empty object `{}`.
+    source:
+      openapi: streaming-asyncapi.yml
+    inline: true
+  StreamModelsEndpointPayload:
+    docs: Models endpoint payload
+    properties:
+      data:
+        type: optional<string>
+      models:
+        type: optional<Config>
+        docs: >
+          Configuration used to specify which models should be used and with
+          what settings.
+      stream_window_ms:
+        type: optional<double>
+        docs: >
+          Length in milliseconds of streaming sliding window.
+
+
+          Extending the length of this window will prepend media context from
+          past payloads into the current payload.
+
+
+          For example, if on the first payload you send 500ms of data and on the
+          second payload you send an additional 500ms of data, a window of at
+          least 1000ms will allow the model to process all 1000ms of stream
+          data.
+
+
+          A window of 600ms would append the full 500ms of the second payload to
+          the last 100ms of the first payload.
+
+
+          Note: This feature is currently only supported for audio data and
+          audio models. For other file types and models this parameter will be
+          ignored.
+        default: 5000
+        validation:
+          min: 500
+          max: 10000
+      reset_stream:
+        type: optional<boolean>
+        docs: >
+          Whether to reset the streaming sliding window before processing the
+          current payload.
+
+
+          If this parameter is set to `true` then past context will be deleted
+          before processing the current payload.
+
+
+          Use reset_stream when one audio file is done being processed and you
+          do not want context to leak across files.
+        default: false
+      raw_text:
+        type: optional<boolean>
+        docs: >
+          Set to `true` to enable the data parameter to be parsed as raw text
+          rather than base64 encoded bytes.
+
+          This parameter is useful if you want to send text to be processed by
+          the language model, but it cannot be used with other file types like
+          audio, image, or video.
+        default: false
+      job_details:
+        type: optional<boolean>
+        docs: >
+          Set to `true` to get details about the job.
+
+
+          This parameter can be set in the same payload as data or it can be set
+          without data and models configuration to get the job details between
+          payloads.
+
+
+          This parameter is useful to get the unique job ID.
+        default: false
+      payload_id:
+        type: optional<string>
+        docs: >
+          Pass an arbitrary string as the payload ID and get it back at the top
+          level of the socket response.
+
+
+          This can be useful if you have multiple requests running
+          asynchronously and want to disambiguate responses as they are
+          received.
+    source:
+      openapi: streaming-asyncapi.yml
   StreamModelPredictionsJobDetails:
     docs: >
       If the job_details flag was set in the request, details about the current
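
This hunk and the matching removal that follows are a pure move: the streaming request types are relocated ahead of the response types. For reference, a hedged sketch of a payload exercising the documented `StreamModelsEndpointPayload` fields; the field names come from the schema above, while the values and the socket wiring are illustrative:

    // Field names follow StreamModelsEndpointPayload above; values are
    // illustrative. With raw_text, `data` is plain text rather than base64.
    const payload = {
        data: "I am so excited about this new feature!",
        raw_text: true,
        models: {
            // `passage` yields a single prediction over the whole text
            language: { granularity: "passage", sentiment: {} },
        },
        payload_id: "payload-001", // echoed back, useful with concurrent requests
    };

    // Assuming `socket` is an open WebSocket to the streaming models endpoint:
    // socket.send(JSON.stringify(payload));
    console.log(JSON.stringify(payload));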
@@ -231,208 +434,5 @@ types:
         docs: Warning message
     source:
       openapi: streaming-asyncapi.yml
-  StreamFace:
-    docs: >
-      Configuration for the facial expression emotion model.
-
-
-      Note: Using the `reset_stream` parameter does not have any effect on face
-      identification. A single face identifier cache is maintained over a full
-      session whether `reset_stream` is used or not.
-    properties:
-      facs:
-        type: optional<map<string, unknown>>
-        docs: >-
-          Configuration for FACS predictions. If missing or null, no FACS
-          predictions will be generated.
-      descriptions:
-        type: optional<map<string, unknown>>
-        docs: >-
-          Configuration for Descriptions predictions. If missing or null, no
-          Descriptions predictions will be generated.
-      identify_faces:
-        type: optional<boolean>
-        docs: >
-          Whether to return identifiers for faces across frames. If true, unique
-          identifiers will be assigned to face bounding boxes to differentiate
-          different faces. If false, all faces will be tagged with an "unknown"
-          ID.
-        default: false
-      fps_pred:
-        type: optional<double>
-        docs: >
-          Number of frames per second to process. Other frames will be omitted
-          from the response.
-        default: 3
-      prob_threshold:
-        type: optional<double>
-        docs: >
-          Face detection probability threshold. Faces detected with a
-          probability less than this threshold will be omitted from the
-          response.
-        default: 3
-      min_face_size:
-        type: optional<double>
-        docs: >
-          Minimum bounding box side length in pixels to treat as a face. Faces
-          detected with a bounding box side length in pixels less than this
-          threshold will be omitted from the response.
-        default: 3
-    source:
-      openapi: streaming-asyncapi.yml
-    inline: true
-  StreamLanguage:
-    docs: Configuration for the language emotion model.
-    properties:
-      sentiment:
-        type: optional<map<string, unknown>>
-        docs: >-
-          Configuration for sentiment predictions. If missing or null, no
-          sentiment predictions will be generated.
-      toxicity:
-        type: optional<map<string, unknown>>
-        docs: >-
-          Configuration for toxicity predictions. If missing or null, no
-          toxicity predictions will be generated.
-      granularity:
-        type: optional<string>
-        docs: >-
-          The granularity at which to generate predictions. Values are `word`,
-          `sentence`, `utterance`, or `passage`. To get a single prediction for
-          the entire text of your streaming payload use `passage`. Default value
-          is `word`.
-    source:
-      openapi: streaming-asyncapi.yml
-    inline: true
-  Config:
-    docs: >
-      Configuration used to specify which models should be used and with what
-      settings.
-    properties:
-      burst:
-        type: optional<map<string, unknown>>
-        docs: |
-          Configuration for the vocal burst emotion model.
-
-          Note: Model configuration is not currently available in streaming.
-
-          Please use the default configuration by passing an empty object `{}`.
-      face:
-        type: optional<StreamFace>
-        docs: >
-          Configuration for the facial expression emotion model.
-
-
-          Note: Using the `reset_stream` parameter does not have any effect on
-          face identification. A single face identifier cache is maintained over
-          a full session whether `reset_stream` is used or not.
-      facemesh:
-        type: optional<map<string, unknown>>
-        docs: |
-          Configuration for the facemesh emotion model.
-
-          Note: Model configuration is not currently available in streaming.
-
-          Please use the default configuration by passing an empty object `{}`.
-      language:
-        type: optional<StreamLanguage>
-        docs: Configuration for the language emotion model.
-      prosody:
-        type: optional<map<string, unknown>>
-        docs: |
-          Configuration for the speech prosody emotion model.
-
-          Note: Model configuration is not currently available in streaming.
-
-          Please use the default configuration by passing an empty object `{}`.
-    source:
-      openapi: streaming-asyncapi.yml
-    inline: true
-  StreamModelsEndpointPayload:
-    docs: Models endpoint payload
-    properties:
-      data:
-        type: optional<string>
-      models:
-        type: optional<Config>
-        docs: >
-          Configuration used to specify which models should be used and with
-          what settings.
-      stream_window_ms:
-        type: optional<double>
-        docs: >
-          Length in milliseconds of streaming sliding window.
-
-
-          Extending the length of this window will prepend media context from
-          past payloads into the current payload.
-
-
-          For example, if on the first payload you send 500ms of data and on the
-          second payload you send an additional 500ms of data, a window of at
-          least 1000ms will allow the model to process all 1000ms of stream
-          data.
-
-
-          A window of 600ms would append the full 500ms of the second payload to
-          the last 100ms of the first payload.
-
-
-          Note: This feature is currently only supported for audio data and
-          audio models. For other file types and models this parameter will be
-          ignored.
-        default: 5000
-        validation:
-          min: 500
-          max: 10000
-      reset_stream:
-        type: optional<boolean>
-        docs: >
-          Whether to reset the streaming sliding window before processing the
-          current payload.
-
-
-          If this parameter is set to `true` then past context will be deleted
-          before processing the current payload.
-
-
-          Use reset_stream when one audio file is done being processed and you
-          do not want context to leak across files.
-        default: false
-      raw_text:
-        type: optional<boolean>
-        docs: >
-          Set to `true` to enable the data parameter to be parsed as raw text
-          rather than base64 encoded bytes.
-
-          This parameter is useful if you want to send text to be processed by
-          the language model, but it cannot be used with other file types like
-          audio, image, or video.
-        default: false
-      job_details:
-        type: optional<boolean>
-        docs: >
-          Set to `true` to get details about the job.
-
-
-          This parameter can be set in the same payload as data or it can be set
-          without data and models configuration to get the job details between
-          payloads.
-
-
-          This parameter is useful to get the unique job ID.
-        default: false
-      payload_id:
-        type: optional<string>
-        docs: >
-          Pass an arbitrary string as the payload ID and get it back at the top
-          level of the socket response.
-
-
-          This can be useful if you have multiple requests running
-          asynchronously and want to disambiguate responses as they are
-          received.
-    source:
-      openapi: streaming-asyncapi.yml
 imports:
   streamRoot: __package__.yml
package/.mock/definition/tts/voices.yml
@@ -12,6 +12,9 @@ service:
         Lists voices in your **Voice Library**. Set provider to `HUME_AI` to
         list Hume's preset voices, or to `CUSTOM_VOICE` to a custom voice
         created in your account.
+      pagination:
+        offset: $request.page_number
+        results: $response.voices_page
       source:
         openapi: tts-openapi.yml
       display-name: List voices
@@ -44,8 +47,7 @@ service:
 
           For example, if `page_size` is set to 10, each page will include
           up to 10 items. Defaults to 10.
-        ascending_order:
-          type: optional<boolean>
+        ascending_order: optional<boolean>
       response:
         docs: Success
         type: root.ReturnPagedVoices
package/.mock/fern.config.json
@@ -1,4 +1,4 @@
 {
   "organization" : "hume",
-  "version" : "0.56.23"
+  "version" : "0.57.5"
 }
package/api/resources/empathicVoice/resources/chat/types/index.d.ts
@@ -1,2 +1,2 @@
-export * from "./SubscribeEvent";
 export * from "./PublishEvent";
+export * from "./SubscribeEvent";
package/api/resources/empathicVoice/resources/chat/types/index.js
@@ -14,5 +14,5 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./SubscribeEvent"), exports);
 __exportStar(require("./PublishEvent"), exports);
+__exportStar(require("./SubscribeEvent"), exports);
package/api/resources/empathicVoice/resources/chatGroups/client/Client.d.ts
@@ -42,7 +42,7 @@ export declare class ChatGroups {
      *         configId: "1b60e1a0-cc59-424a-8d2c-189d354db3f3"
      *     })
      */
-    listChatGroups(request?: Hume.empathicVoice.ChatGroupsListChatGroupsRequest, requestOptions?: ChatGroups.RequestOptions): Promise<Hume.empathicVoice.ReturnPagedChatGroups>;
+    listChatGroups(request?: Hume.empathicVoice.ChatGroupsListChatGroupsRequest, requestOptions?: ChatGroups.RequestOptions): Promise<core.Page<Hume.empathicVoice.ReturnChatGroup>>;
     /**
      * Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associated with the **ChatGroup**.
     *
@@ -76,7 +76,7 @@ export declare class ChatGroups {
      *         ascendingOrder: true
      *     })
      */
-    listChatGroupEvents(id: string, request?: Hume.empathicVoice.ChatGroupsListChatGroupEventsRequest, requestOptions?: ChatGroups.RequestOptions): Promise<Hume.empathicVoice.ReturnChatGroupPagedEvents>;
+    listChatGroupEvents(id: string, request?: Hume.empathicVoice.ChatGroupsListChatGroupEventsRequest, requestOptions?: ChatGroups.RequestOptions): Promise<core.Page<Hume.empathicVoice.ReturnChatEvent>>;
     /**
      * Fetches a paginated list of audio for each **Chat** within the specified **Chat Group**. For more details, see our guide on audio reconstruction [here](/docs/empathic-voice-interface-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi).
     *
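
These signature changes are the breaking, user-visible half of the pagination work: list methods now resolve to core.Page<T> instead of the raw ReturnPaged* envelope, so callers that previously destructured fields like chatGroupsPage off the response need to iterate the pager instead. A sketch of the new call pattern, assuming core.Page supports async iteration the way Fern-generated pagers typically do:

    import { HumeClient } from "hume";

    const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

    async function listAllChatGroups(): Promise<void> {
        // As of 0.10.0 this resolves to core.Page<ReturnChatGroup> rather
        // than the ReturnPagedChatGroups envelope returned by 0.9.x.
        const page = await hume.empathicVoice.chatGroups.listChatGroups({
            pageSize: 10,
        });
        for await (const chatGroup of page) {
            console.log(chatGroup.id);
        }
    }

    void listAllChatGroups();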