hume 0.13.0 → 0.13.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/.mock/definition/empathic-voice/__package__.yml +68 -61
  2. package/.mock/definition/empathic-voice/chatWebhooks.yml +8 -12
  3. package/.mock/definition/empathic-voice/prompts.yml +2 -2
  4. package/.mock/definition/empathic-voice/tools.yml +2 -2
  5. package/.mock/definition/tts/__package__.yml +85 -47
  6. package/.mock/definition/tts/voices.yml +9 -9
  7. package/api/resources/empathicVoice/resources/chat/client/Client.d.ts +2 -0
  8. package/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.d.ts +1 -1
  9. package/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.d.ts +1 -1
  10. package/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.d.ts +1 -1
  11. package/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.d.ts +1 -1
  12. package/api/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  13. package/api/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  14. package/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
  15. package/api/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  16. package/api/resources/empathicVoice/types/ContextType.d.ts +2 -2
  17. package/api/resources/empathicVoice/types/ContextType.js +1 -1
  18. package/api/resources/empathicVoice/types/JsonMessage.d.ts +1 -1
  19. package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  20. package/api/resources/empathicVoice/types/PostedLanguageModel.d.ts +1 -1
  21. package/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +2 -2
  22. package/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +2 -2
  23. package/api/resources/empathicVoice/types/ReturnConfig.d.ts +2 -1
  24. package/api/resources/empathicVoice/types/ReturnLanguageModel.d.ts +1 -1
  25. package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -4
  26. package/api/resources/empathicVoice/types/ReturnUserDefinedTool.d.ts +1 -1
  27. package/api/resources/empathicVoice/types/ReturnVoice.d.ts +12 -0
  28. package/api/resources/empathicVoice/types/ReturnVoice.js +5 -0
  29. package/api/resources/empathicVoice/types/SessionSettings.d.ts +2 -2
  30. package/api/resources/empathicVoice/types/Tool.d.ts +1 -1
  31. package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +1 -1
  32. package/api/resources/empathicVoice/types/UserInput.d.ts +1 -1
  33. package/api/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  34. package/api/resources/empathicVoice/types/UserMessage.d.ts +1 -1
  35. package/api/resources/empathicVoice/types/VoiceProvider.d.ts +1 -2
  36. package/api/resources/empathicVoice/types/VoiceProvider.js +0 -1
  37. package/api/resources/empathicVoice/types/index.d.ts +4 -3
  38. package/api/resources/empathicVoice/types/index.js +4 -3
  39. package/api/resources/tts/client/Client.d.ts +5 -5
  40. package/api/resources/tts/client/Client.js +5 -5
  41. package/api/resources/tts/types/SnippetAudioChunk.d.ts +20 -0
  42. package/dist/api/resources/empathicVoice/resources/chat/client/Client.d.ts +2 -0
  43. package/dist/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.d.ts +1 -1
  44. package/dist/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.d.ts +1 -1
  45. package/dist/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.d.ts +1 -1
  46. package/dist/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.d.ts +1 -1
  47. package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  48. package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  49. package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
  50. package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  51. package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -2
  52. package/dist/api/resources/empathicVoice/types/ContextType.js +1 -1
  53. package/dist/api/resources/empathicVoice/types/JsonMessage.d.ts +1 -1
  54. package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  55. package/dist/api/resources/empathicVoice/types/PostedLanguageModel.d.ts +1 -1
  56. package/dist/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +2 -2
  57. package/dist/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +2 -2
  58. package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +2 -1
  59. package/dist/api/resources/empathicVoice/types/ReturnLanguageModel.d.ts +1 -1
  60. package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -4
  61. package/dist/api/resources/empathicVoice/types/ReturnUserDefinedTool.d.ts +1 -1
  62. package/dist/api/resources/empathicVoice/types/ReturnVoice.d.ts +12 -0
  63. package/dist/api/resources/empathicVoice/types/ReturnVoice.js +5 -0
  64. package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +2 -2
  65. package/dist/api/resources/empathicVoice/types/Tool.d.ts +1 -1
  66. package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +1 -1
  67. package/dist/api/resources/empathicVoice/types/UserInput.d.ts +1 -1
  68. package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  69. package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +1 -1
  70. package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +1 -2
  71. package/dist/api/resources/empathicVoice/types/VoiceProvider.js +0 -1
  72. package/dist/api/resources/empathicVoice/types/index.d.ts +4 -3
  73. package/dist/api/resources/empathicVoice/types/index.js +4 -3
  74. package/dist/api/resources/tts/client/Client.d.ts +5 -5
  75. package/dist/api/resources/tts/client/Client.js +5 -5
  76. package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +20 -0
  77. package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  78. package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  79. package/dist/serialization/resources/empathicVoice/types/JsonMessage.d.ts +2 -2
  80. package/dist/serialization/resources/empathicVoice/types/JsonMessage.js +2 -2
  81. package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +1 -1
  82. package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.js +1 -1
  83. package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +1 -1
  84. package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.js +1 -1
  85. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +2 -1
  86. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +2 -1
  87. package/dist/serialization/resources/empathicVoice/types/ReturnVoice.d.ts +15 -0
  88. package/dist/serialization/resources/empathicVoice/types/ReturnVoice.js +46 -0
  89. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  90. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  91. package/dist/serialization/resources/empathicVoice/types/index.d.ts +4 -3
  92. package/dist/serialization/resources/empathicVoice/types/index.js +4 -3
  93. package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +12 -0
  94. package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +14 -1
  95. package/dist/version.d.ts +1 -1
  96. package/dist/version.js +1 -1
  97. package/package.json +1 -1
  98. package/reference.md +14 -14
  99. package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  100. package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  101. package/serialization/resources/empathicVoice/types/JsonMessage.d.ts +2 -2
  102. package/serialization/resources/empathicVoice/types/JsonMessage.js +2 -2
  103. package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +1 -1
  104. package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.js +1 -1
  105. package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +1 -1
  106. package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.js +1 -1
  107. package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +2 -1
  108. package/serialization/resources/empathicVoice/types/ReturnConfig.js +2 -1
  109. package/serialization/resources/empathicVoice/types/ReturnVoice.d.ts +15 -0
  110. package/serialization/resources/empathicVoice/types/ReturnVoice.js +46 -0
  111. package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  112. package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  113. package/serialization/resources/empathicVoice/types/index.d.ts +4 -3
  114. package/serialization/resources/empathicVoice/types/index.js +4 -3
  115. package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +12 -0
  116. package/serialization/resources/tts/types/SnippetAudioChunk.js +14 -1
  117. package/version.d.ts +1 -1
  118. package/version.js +1 -1
package/.mock/definition/empathic-voice/__package__.yml

@@ -29,7 +29,7 @@ types:

  EVI uses this text to generate spoken audio using our proprietary
  expressive text-to-speech model. Our model adds appropriate emotional
- inflections and tones to the text based on the users expressions and
+ inflections and tones to the text based on the user's expressions and
  the context of the conversation. The synthesized audio is streamed
  back to the user as an [Assistant
  Message](/reference/empathic-voice-interface-evi/chat/chat#receive.AssistantMessage.type).
@@ -75,7 +75,7 @@ types:
  Base64 encoded audio input to insert into the conversation.


- The content of an Audio Input message is treated as the users speech
+ The content of an Audio Input message is treated as the user's speech
  to EVI and must be streamed continuously. Pre-recorded audio files are
  not supported.

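The hunks above describe the Audio Input message: base64-encoded audio that must be streamed continuously rather than uploaded as a pre-recorded file. As a rough sketch only (the `audio_input` type string and the already-connected `socket` are assumptions drawn from these docs, not something this diff defines), a sender might encode and forward capture chunks like this:

    import type WebSocket from "ws";
    import { Buffer } from "node:buffer";

    // Forwards one captured audio chunk as a base64-encoded Audio Input message.
    // "audio_input" is an assumed discriminator value; chunks should come from a
    // live capture loop, since pre-recorded audio files are not supported.
    function sendAudioChunk(socket: WebSocket, chunk: Uint8Array): void {
      const data = Buffer.from(chunk).toString("base64");
      socket.send(JSON.stringify({ type: "audio_input", data }));
    }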
@@ -142,8 +142,8 @@ types:
  openapi: evi-asyncapi.json
  ContextType:
  enum:
- - temporary
  - persistent
+ - temporary
  source:
  openapi: evi-asyncapi.json
  Encoding:
@@ -166,7 +166,7 @@ types:
  Once this message is sent, EVI will not respond until a [Resume
  Assistant
  message](/reference/empathic-voice-interface-evi/chat/chat#send.ResumeAssistantMessage.type)
- is sent. When paused, EVI wont respond, but transcriptions of your
+ is sent. When paused, EVI won't respond, but transcriptions of your
  audio inputs will still be recorded.
  custom_session_id:
  type: optional<string>
@@ -250,7 +250,7 @@ types:
  system_prompt:
  type: optional<string>
  docs: >-
- Instructions used to shape EVIs behavior, responses, and style for
+ Instructions used to shape EVI's behavior, responses, and style for
  the session.


@@ -301,7 +301,7 @@ types:
  Third party API key for the supplemental language model.


- When provided, EVI will use this key instead of Humes API key for the
+ When provided, EVI will use this key instead of Hume's API key for the
  supplemental LLM. This allows you to bypass rate limits and utilize
  your own API key as needed.
  tools:
@@ -367,7 +367,7 @@ types:
  Parameters of the tool. Is a stringified JSON schema.


- These parameters define the inputs needed for the tools execution,
+ These parameters define the inputs needed for the tool's execution,
  including the expected data type and description for each input field.
  Structured as a JSON schema, this format ensures the tool receives
  data in the expected format.
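Several hunks in this file describe tool parameters as a stringified JSON schema. Purely as an illustration of that format (the tool and its fields below are hypothetical, not part of this diff), such a string could be built like this:

    // A hypothetical "get_weather" tool's parameters, serialized exactly as the
    // docs describe: a JSON schema giving a data type and description per field.
    const parameters: string = JSON.stringify({
      type: "object",
      properties: {
        location: { type: "string", description: "City to fetch weather for" },
        unit: { type: "string", enum: ["celsius", "fahrenheit"] },
      },
      required: ["location"],
    });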
@@ -521,7 +521,7 @@ types:
  type: string
  docs: >-
  User text to insert into the conversation. Text sent through a User
- Input message is treated as the users speech to EVI. EVI processes
+ Input message is treated as the user's speech to EVI. EVI processes
  this input and provides a corresponding response.


@@ -540,7 +540,7 @@ types:
  message, this must be `assistant_end`.


- This message indicates the conclusion of the assistants response,
+ This message indicates the conclusion of the assistant's response,
  signaling that the assistant has finished speaking for the current
  conversational turn.
  custom_session_id:
@@ -560,8 +560,8 @@ types:
  this must be `assistant_message`.


- This message contains both a transcript of the assistants response
- and the expression measurement predictions of the assistants audio
+ This message contains both a transcript of the assistant's response
+ and the expression measurement predictions of the assistant's audio
  output.
  custom_session_id:
  type: optional<string>
@@ -860,7 +860,7 @@ types:
  Parameters of the tool.


- These parameters define the inputs needed for the tools execution,
+ These parameters define the inputs needed for the tool's execution,
  including the expected data type and description for each input field.
  Structured as a stringified JSON schema, this format ensures the tool
  receives data in the expected format.
@@ -912,7 +912,7 @@ types:
  message, this must be `user_interruption`.


- This message indicates the user has interrupted the assistants
+ This message indicates the user has interrupted the assistant's
  response. EVI detects the interruption in real-time and sends this
  message to signal the interruption event. This message allows the
  system to stop the current audio playback, clear the audio queue, and
@@ -937,7 +937,7 @@ types:
  must be `user_message`.


- This message contains both a transcript of the users input and the
+ This message contains both a transcript of the user's input and the
  expression measurement predictions if the input was sent as an [Audio
  Input
  message](/reference/empathic-voice-interface-evi/chat/chat#send.AudioInput.type).
@@ -990,6 +990,7 @@ types:
  union:
  - type: AssistantEnd
  - type: AssistantMessage
+ - type: AssistantProsody
  - type: ChatMetadata
  - type: WebSocketError
  - type: UserInterruption
@@ -997,7 +998,6 @@ types:
  - type: ToolCallMessage
  - type: ToolResponseMessage
  - type: ToolErrorMessage
- - type: AssistantProsody
  source:
  openapi: evi-asyncapi.json
  HTTPValidationError:
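The two hunks above reorder the subscribe-side union so that AssistantProsody sits next to AssistantMessage; the set of variants is unchanged. A minimal sketch of narrowing that union on its `type` field is shown below. The `assistant_message` and `user_interruption` values are quoted from the docs in this diff; `assistant_prosody` is an assumption inferred from the variant name.

    // Narrowing the subscribe-side union by its `type` discriminator.
    type SubscribeEvent =
      | { type: "assistant_message"; [key: string]: unknown }
      | { type: "assistant_prosody"; [key: string]: unknown } // assumed value
      | { type: "user_interruption"; [key: string]: unknown };

    function handleEvent(event: SubscribeEvent): void {
      switch (event.type) {
        case "assistant_message":
          // transcript plus expression measures of the assistant's audio output
          break;
        case "assistant_prosody":
          // prosody predictions for the assistant's speech
          break;
        case "user_interruption":
          // stop current playback and clear the audio queue
          break;
      }
    }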
@@ -1142,6 +1142,26 @@ types:
  type: string
  source:
  openapi: evi-openapi.json
+ VoiceId:
+ properties:
+ id:
+ type: string
+ docs: ID of the voice in the `Voice Library`.
+ provider:
+ type: optional<VoiceProvider>
+ docs: Model provider associated with this Voice ID.
+ source:
+ openapi: evi-openapi.json
+ VoiceName:
+ properties:
+ name:
+ type: string
+ docs: Name of the voice in the `Voice Library`.
+ provider:
+ type: optional<VoiceProvider>
+ docs: Model provider associated with this Voice Name.
+ source:
+ openapi: evi-openapi.json
  WebhookEventChatEnded:
  properties:
  event_name:
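This hunk moves the VoiceId and VoiceName definitions to this spot in the spec (they are deleted from their old location in a later hunk). TypeScript shapes mirroring the YAML are sketched below; field names and optionality follow the definitions above, while the interface names themselves are illustrative rather than copied from the SDK's generated declarations.

    type VoiceProvider = "HUME_AI" | "CUSTOM_VOICE";

    interface VoiceId {
      /** ID of the voice in the Voice Library. */
      id: string;
      /** Model provider associated with this Voice ID. */
      provider?: VoiceProvider;
    }

    interface VoiceName {
      /** Name of the voice in the Voice Library. */
      name: string;
      /** Model provider associated with this Voice Name. */
      provider?: VoiceProvider;
    }

    // VoiceRef remains an undiscriminated union of the two (see the later hunks).
    type VoiceRef = VoiceId | VoiceName;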
@@ -1363,7 +1383,7 @@ types:
  Tool.


- These parameters define the inputs needed for the Tools execution,
+ These parameters define the inputs needed for the Tool's execution,
  including the expected data type and description for each input field.
  Structured as a stringified JSON schema, this format ensures the tool
  receives data in the expected format.
@@ -1410,17 +1430,15 @@ types:
  text:
  type: string
  docs: >-
- Instructions used to shape EVIs behavior, responses, and style.
+ Instructions used to shape EVI's behavior, responses, and style.


  You can use the Prompt to define a specific goal or role for EVI,
  specifying how it should act or what it should focus on during the
  conversation. For example, EVI can be instructed to act as a customer
  support representative, a fitness coach, or a travel advisor, each
- with its own set of behaviors and response styles.
-
-
- For help writing a system prompt, see our [Prompting
+ with its own set of behaviors and response styles. For help writing a
+ system prompt, see our [Prompting
  Guide](/docs/speech-to-speech-evi/guides/prompting).
  version:
  type: integer
@@ -1553,7 +1571,9 @@ types:
  model that takes into account both expression measures and language.
  The eLLM generates short, empathic language responses and guides
  text-to-speech (TTS) prosody.
- voice: optional<unknown>
+ voice:
+ type: optional<ReturnVoice>
+ docs: A voice specification associated with this Config.
  prompt: optional<ReturnPrompt>
  webhooks:
  type: optional<list<optional<ReturnWebhookSpec>>>
@@ -2142,7 +2162,7 @@ types:
  The model temperature, with values between 0 to 1 (inclusive).


- Controls the randomness of the LLMs output, with values closer to 0
+ Controls the randomness of the LLM's output, with values closer to 0
  yielding focused, deterministic responses and values closer to 1
  producing more creative, diverse responses.
  source:
@@ -2293,6 +2313,11 @@ types:
  Accepts a minimum value of 30 seconds and a maximum value of 1,800
  seconds.
  properties:
+ duration_secs:
+ type: optional<integer>
+ docs: >-
+ Duration in seconds for the timeout (e.g. 600 seconds represents 10
+ minutes).
  enabled:
  type: boolean
  docs: >-
@@ -2303,11 +2328,6 @@ types:
  user inactivity being reached. However, the conversation will
  eventually disconnect after 1,800 seconds (30 minutes), which is the
  maximum WebSocket duration limit for EVI.
- duration_secs:
- type: optional<integer>
- docs: >-
- Duration in seconds for the timeout (e.g. 600 seconds represents 10
- minutes).
  source:
  openapi: evi-openapi.json
  inline: true
@@ -2321,6 +2341,11 @@ types:
  Accepts a minimum value of 30 seconds and a maximum value of 1,800
  seconds.
  properties:
+ duration_secs:
+ type: optional<integer>
+ docs: >-
+ Duration in seconds for the timeout (e.g. 600 seconds represents 10
+ minutes).
  enabled:
  type: boolean
  docs: >-
@@ -2331,11 +2356,6 @@ types:
  duration being reached. However, the conversation will eventually
  disconnect after 1,800 seconds (30 minutes), which is the maximum
  WebSocket duration limit for EVI.
- duration_secs:
- type: optional<integer>
- docs: >-
- Duration in seconds for the timeout (e.g. 600 seconds represents 10
- minutes).
  source:
  openapi: evi-openapi.json
  inline: true
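The four hunks above move `duration_secs` ahead of `enabled` in both timeout specs; the fields themselves are unchanged. As a sketch of what a request using these fields might contain (the field names come from the YAML above, but the surrounding `timeouts` wrapper and its keys are assumptions, not shown in this diff):

    // Field names taken from the spec above; wrapper keys are assumed.
    const timeouts = {
      inactivity: {
        enabled: true,
        duration_secs: 600, // e.g. 600 seconds represents 10 minutes
      },
      max_duration: {
        enabled: true,
        duration_secs: 1800, // the 1,800-second (30 minute) WebSocket limit for EVI
      },
    };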
@@ -2414,7 +2434,7 @@ types:
  The model temperature, with values between 0 to 1 (inclusive).


- Controls the randomness of the LLMs output, with values closer to 0
+ Controls the randomness of the LLM's output, with values closer to 0
  yielding focused, deterministic responses and values closer to 1
  producing more creative, diverse responses.
  source:
@@ -2930,33 +2950,6 @@ types:
  minutes).
  source:
  openapi: evi-openapi.json
- VoiceProvider:
- enum:
- - HUME_AI
- - CUSTOM_VOICE
- - OCTAVE_COMBINED
- source:
- openapi: evi-openapi.json
- VoiceId:
- properties:
- id:
- type: string
- docs: ID of the voice in the `Voice Library`.
- provider:
- type: optional<VoiceProvider>
- docs: Model provider associated with this Voice ID.
- source:
- openapi: evi-openapi.json
- VoiceName:
- properties:
- name:
- type: string
- docs: Name of the voice in the `Voice Library`.
- provider:
- type: optional<VoiceProvider>
- docs: Model provider associated with this Voice Name.
- source:
- openapi: evi-openapi.json
  VoiceRef:
  discriminated: false
  union:
@@ -2964,3 +2957,17 @@ types:
  - type: VoiceName
  source:
  openapi: evi-openapi.json
+ ReturnVoice:
+ docs: An Octave voice available for text-to-speech
+ properties:
+ id: optional<string>
+ name: optional<string>
+ provider: optional<VoiceProvider>
+ source:
+ openapi: evi-openapi.json
+ VoiceProvider:
+ enum:
+ - HUME_AI
+ - CUSTOM_VOICE
+ source:
+ openapi: evi-openapi.json
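This hunk introduces ReturnVoice and trims VoiceProvider to two members, and the earlier `voice: optional<unknown>` field on ReturnConfig now resolves to this type. A TypeScript sketch of the resulting shape (illustrative, not the SDK's generated declaration) and of reading it from a returned config:

    // All three fields are optional in the YAML above.
    interface ReturnVoice {
      id?: string;
      name?: string;
      provider?: "HUME_AI" | "CUSTOM_VOICE";
    }

    // Example consumer: ReturnConfig.voice was previously `unknown`, so callers
    // can now read it without casting.
    function describeVoice(voice?: ReturnVoice): string {
      if (!voice) return "no voice configured";
      const label = voice.name ?? voice.id ?? "unknown voice";
      return `${label} (${voice.provider ?? "unspecified provider"})`;
    }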
package/.mock/definition/empathic-voice/chatWebhooks.yml

@@ -9,13 +9,11 @@ webhooks:
  payload: root.WebhookEventChatEnded
  examples:
  - payload:
- chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f
- chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0
- config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3
- event_name: chat_ended
- end_time: 1716244958546
- duration_seconds: 180
- end_reason: USER_ENDED
+ chat_group_id: chat_group_id
+ chat_id: chat_id
+ end_time: 1
+ duration_seconds: 1
+ end_reason: ACTIVE
  docs: Sent when an EVI chat ends.
  chatStarted:
  audiences: []
@@ -25,10 +23,8 @@ webhooks:
  payload: root.WebhookEventChatStarted
  examples:
  - payload:
- chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f
- chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0
- config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3
- event_name: chat_started
- start_time: 1716244940648
+ chat_group_id: chat_group_id
+ chat_id: chat_id
+ start_time: 1
  chat_start_type: new_chat_group
  docs: Sent when an EVI chat is started.
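The webhook examples above are regenerated with placeholder values, but the field names still outline the two payloads. A sketch of the corresponding TypeScript shapes is shown below; it lists only the fields visible in these examples (the full schemas live in WebhookEventChatStarted and WebhookEventChatEnded and may carry more, such as the `config_id` and `event_name` seen in the removed examples).

    // Fields taken from the example payloads above; not exhaustive.
    interface WebhookEventChatStarted {
      chat_group_id: string;
      chat_id: string;
      start_time: number;
      chat_start_type: string; // e.g. "new_chat_group"
    }

    interface WebhookEventChatEnded {
      chat_group_id: string;
      chat_id: string;
      end_time: number;
      duration_seconds: number;
      end_reason: string; // e.g. "USER_ENDED"
    }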
package/.mock/definition/empathic-voice/prompts.yml

@@ -126,7 +126,7 @@ service:
  text:
  type: string
  docs: >-
- Instructions used to shape EVIs behavior, responses, and style.
+ Instructions used to shape EVI's behavior, responses, and style.


  You can use the Prompt to define a specific goal or role for
@@ -278,7 +278,7 @@ service:
  text:
  type: string
  docs: >-
- Instructions used to shape EVIs behavior, responses, and style
+ Instructions used to shape EVI's behavior, responses, and style
  for this version of the Prompt.

package/.mock/definition/empathic-voice/tools.yml

@@ -145,7 +145,7 @@ service:
  the Tool.


- These parameters define the inputs needed for the Tools
+ These parameters define the inputs needed for the Tool's
  execution, including the expected data type and description for
  each input field. Structured as a stringified JSON schema, this
  format ensures the Tool receives data in the expected format.
@@ -324,7 +324,7 @@ service:
  the Tool.


- These parameters define the inputs needed for the Tools
+ These parameters define the inputs needed for the Tool's
  execution, including the expected data type and description for
  each input field. Structured as a stringified JSON schema, this
  format ensures the Tool receives data in the expected format.