tencentcloud-sdk-python-intl-en 3.0.1088__py2.py3-none-any.whl → 3.0.1090__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tencentcloud-sdk-python-intl-en might be problematic.
- tencentcloud/__init__.py +1 -1
- tencentcloud/ams/v20201229/models.py +2 -2
- tencentcloud/billing/v20180709/models.py +109 -74
- tencentcloud/ccc/v20200210/ccc_client.py +25 -0
- tencentcloud/ccc/v20200210/errorcodes.py +8 -8
- tencentcloud/ccc/v20200210/models.py +756 -0
- tencentcloud/cfs/v20190719/models.py +160 -2
- tencentcloud/config/__init__.py +0 -0
- tencentcloud/config/v20220802/__init__.py +0 -0
- tencentcloud/config/v20220802/config_client.py +141 -0
- tencentcloud/config/v20220802/errorcodes.py +27 -0
- tencentcloud/config/v20220802/models.py +2362 -0
- tencentcloud/dbbrain/v20210527/models.py +2 -2
- tencentcloud/domain/v20180808/models.py +2 -2
- tencentcloud/gaap/v20180529/models.py +2 -2
- tencentcloud/mdp/v20200527/models.py +119 -4
- tencentcloud/mongodb/v20190725/models.py +2 -2
- tencentcloud/ssl/v20191205/errorcodes.py +21 -0
- tencentcloud/ssl/v20191205/models.py +62 -62
- tencentcloud/tcr/v20190924/errorcodes.py +3 -0
- {tencentcloud_sdk_python_intl_en-3.0.1088.dist-info → tencentcloud_sdk_python_intl_en-3.0.1090.dist-info}/METADATA +1 -1
- {tencentcloud_sdk_python_intl_en-3.0.1088.dist-info → tencentcloud_sdk_python_intl_en-3.0.1090.dist-info}/RECORD +24 -19
- {tencentcloud_sdk_python_intl_en-3.0.1088.dist-info → tencentcloud_sdk_python_intl_en-3.0.1090.dist-info}/WHEEL +0 -0
- {tencentcloud_sdk_python_intl_en-3.0.1088.dist-info → tencentcloud_sdk_python_intl_en-3.0.1090.dist-info}/top_level.txt +0 -0
@@ -1094,6 +1094,762 @@ class CalleeAttribute(AbstractModel):

 
 
+class CreateAICallRequest(AbstractModel):
+    """CreateAICall request structure.
+
+    """
+
+    def __init__(self):
+        r"""
+        :param _SdkAppId: Application ID (required) can be found at https://console.cloud.tencent.com/ccc.
+        :type SdkAppId: int
+        :param _Callee: Called number.
+        :type Callee: str
+        :param _SystemPrompt: ## Identity
+You are Kate from the appointment department at Retell Health calling Cindy over the phone to prepare for the annual checkup coming up. You are a pleasant and friendly receptionist caring deeply for the user. You don't provide medical advice but would use the medical knowledge to understand user responses.
+
+## Style Guardrails
+Be Concise: Respond succinctly, addressing one topic at most.
+Embrace Variety: Use diverse language and rephrasing to enhance clarity without repeating content.
+Be Conversational: Use everyday language, making the chat feel like talking to a friend.
+Be Proactive: Lead the conversation, often wrapping up with a question or next-step suggestion.
+Avoid multiple questions in a single response.
+Get clarity: If the user only partially answers a question, or if the answer is unclear, keep asking to get clarity.
+Use a colloquial way of referring to the date (like Friday, January 14th, or Tuesday, January 12th, 2024 at 8am).
+
+## Response Guideline
+Adapt and Guess: Try to understand transcripts that may contain transcription errors. Avoid mentioning "transcription error" in the response.
+Stay in Character: Keep conversations within your role's scope, guiding them back creatively without repeating.
+Ensure Fluid Dialogue: Respond in a role-appropriate, direct manner to maintain a smooth conversation flow.
+
+## Task
+You will follow the steps below, do not skip steps, and only ask up to one question in response.
+If at any time the user showed anger or wanted a human agent, call transfer_call to transfer to a human representative.
+1. Begin with a self-introduction and verify if callee is Cindy.
+- if callee is not Cindy, call end_call to hang up, say sorry for the confusion when hanging up.
+- if Cindy is not available, call end_call politely to hang up, say you will call back later when hanging up.
+2. Inform Cindy she has an annual body check coming up on April 4th, 2024 at 10am PDT. Check if Cindy is available.
+- If not, tell Cindy to reschedule online and jump to step 5.
+3. Ask Cindy if there's anything that the doctor should know before the annual checkup.
+- Ask followup questions as needed to assess the severity of the issue, and understand how it has progressed.
+4. Tell Cindy to not eat or drink that day before the checkup. Also tell Cindy to give you a callback if there's any changes in health condition.
+5. Ask Cindy if she has any questions, and if so, answer them until there are no questions.
+- If user asks something you do not know, let them know you don't have the answer. Ask them if they have any other questions.
+- If user do not have any questions, call function end_call to hang up.
+        :type SystemPrompt: str
+        :param _LLMType: Model interface protocol types, currently compatible with three protocol types:
+
+- OpenAI protocol (including GPT, Hunyuan, DeepSeek, etc.):"openai"
+- Azure protocol:"azure"
+- Minimax protocol:"minimax"
+        :type LLMType: str
+        :param _Model: Model name, such as
+
+- OpenAI protocol
+"gpt-4o-mini","gpt-4o","hunyuan-standard", "hunyuan-turbo","deepseek-chat";
+
+- Azure protocol
+"gpt-4o-mini", "gpt-4o";
+
+- Minimax protocol
+"deepseek-chat".
+        :type Model: str
+        :param _APIKey: Model API key, for authentication information, please refer to the respective model's official website
+
+- OpenAI protocol: [GPT](https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key), [Hunyuan](https://intl.cloud.tencent.com/document/product/1729/111008?from_cn_redirect=1), [DeepSeek](https://api-docs.deepseek.com/zh-cn/);
+
+- Azure protocol: [Azure GPT](https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Ctypescript%2Cpython-new&pivots=programming-language-studio#key-settings);
+
+- Minimax:[Minimax](https://platform.minimaxi.com/document/Fast%20access?key=66701cf51d57f38758d581b2)
+        :type APIKey: str
+        :param _APIUrl: Model interface address
+
+- OpenAI protocol
+GPT:"https://api.openai.com/v1/"
+Hunyuan:"https://api.hunyuan.cloud.tencent.com/v1"
+Deepseek:"https://api.deepseek.com/v1"
+
+- Azure protocol
+"https://{your-resource-name}.openai.azure.com?api-version={api-version}"
+
+- Minimax protocol
+"https://api.minimax.chat/v1"
+        :type APIUrl: str
+        :param _VoiceType: The following voice parameter values are available by default. If you wish to customize the voice type, please leave VoiceType blank and configure it in the CustomTTSConfig parameter.
+
+Chinese:
+ZhiMei: Zhimei, customer service female voice
+ZhiXi: Zhixi, general female voice
+ZhiQi: Zhiqi, customer service female voice
+ZhiTian: Zhitian, female child voice
+AiXiaoJing: Ai Xiaojing, dialogue female voice
+
+English:
+WeRose:English Female Voice
+Monika:English Female Voice
+
+Japanese:
+Nanami
+
+Korean:
+SunHi
+
+Indonesian (Indonesia):
+Gadis
+
+Malay (Malaysia):
+Yasmin
+
+Tamil (Malaysia):
+Kani
+
+Thai (Thailand):
+Achara
+
+Vietnamese (Vietnam):
+HoaiMy
+
+
+        :type VoiceType: str
+        :param _Callers: List of calling numbers.
+        :type Callers: list of str
+        :param _WelcomeMessage: Used to set the AI Agent Greeting.
+        :type WelcomeMessage: str
+        :param _WelcomeType: 0: Use welcomeMessage (if empty, the callee speaks first; if not empty, the bot speaks first)
+1: Use AI to automatically generate welcomeMessage and speak first based on the prompt
+        :type WelcomeType: int
+        :param _MaxDuration: Maximum Waiting Duration (milliseconds), default is 60 seconds, if the user does not speak within this time, the call is automatically terminated
+        :type MaxDuration: int
+        :param _Languages: ASR Supported Languages, default is "zh" Chinese,
+Fill in the array with up to 4 languages, the first is the primary language for recognition, followed by optional languages,
+Note: When the primary language is a Chinese dialect, optional languages are invalid
+Currently, the supported languages are as follows. The English name of the language is on the left side of the equals sign, and the value to be filled in the Language field is on the right side, following ISO639:
+1. Chinese = "zh" # Chinese
+2. Chinese_TW = "zh-TW" # Taiwan (China)
+3. Chinese_DIALECT = "zh-dialect" # Chinese Dialect
+4. English = "en" # English
+5. Vietnamese = "vi" # Vietnamese
+6. Japanese = "ja" # Japanese
+7. Korean = "ko" # Korean
+8. Indonesia = "id" # Indonesian
+9. Thai = "th" # Thai
+10. Portuguese = "pt" # Portuguese
+11. Turkish = "tr" # Turkish
+12. Arabic = "ar" # Arabic
+13. Spanish = "es" # Spanish
+14. Hindi = "hi" # Hindi
+15. French = "fr" # French
+16. Malay = "ms" # Malay
+17. Filipino = "fil" # Filipino
+18. German = "de" # German
+19. Italian = "it" # Italian
+20. Russian = "ru" # Russian
+        :type Languages: list of str
+        :param _InterruptMode: Interrupt AI speech mode, default is 0, 0 indicates the server interrupts automatically, 1 indicates the server does not interrupt, interruption signal sent by the client side.
+        :type InterruptMode: int
+        :param _InterruptSpeechDuration: Used when InterruptMode is 0, unit in milliseconds, default is 500ms. It means that the server-side detects ongoing vocal input for the InterruptSpeechDuration milliseconds and then interrupts.
+        :type InterruptSpeechDuration: int
+        :param _EndFunctionEnable: Whether the model supports (or enables) call_end function calling
+        :type EndFunctionEnable: bool
+        :param _EndFunctionDesc: Effective when EndFunctionEnable is true; the description of call_end function calling, default is "End the call when user has to leave (like says bye) or you are instructed to do so."
+        :type EndFunctionDesc: str
+        :param _NotifyDuration: The duration after which the user hasn't spoken to trigger a notification, minimum 10 seconds, default 10 seconds
+        :type NotifyDuration: int
+        :param _NotifyMessage: The AI prompt when NotifyDuration has passed without the user speaking, default is "Sorry, I didn't hear you clearly. Can you repeat that?"
+        :type NotifyMessage: str
+        :param _CustomTTSConfig: <p>And VoiceType field needs to select one, here is to use your own custom TTS, VoiceType is some built-in sound qualities</p>
+<ul>
+<li>Tencent TTS<br>
+For configuration, please refer to <a href="https://intl.cloud.tencent.com/document/product/1073/92668?from_cn_redirect=1#55924b56-1a73-4663-a7a1-a8dd82d6e823" target="_blank">Tencent Cloud TTS documentation link</a></li>
+</ul>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "tencent", // String TTS type, currently supports "tencent" and "minixmax", other vendors support in progress
+"AppId": "Your application ID", // String required
+"SecretId": "Your Secret ID", // String Required
+"SecretKey": "Your Secret Key", // String Required
+"VoiceType": 101001, // Integer Required, Sound quality ID, includes standard and premium sound quality. Premium sound quality is more realistic and differently priced than standard sound quality. See TTS billing overview for details. For the full list of sound quality IDs, see the TTS sound quality list.
+"Speed": 1.25, // Integer Optional, speech speed, range: [-2,6], corresponding to different speeds: -2: represents 0.6x -1: represents 0.8x 0: represents 1.0x (default) 1: represents 1.2x 2: represents 1.5x 6: represents 2.5x For more precise speed control, you can retain two decimal places, such as 0.5/1.25/2.81, etc. For parameter value to actual speed conversion, refer to Speed Conversion
+"Volume": 5, // Integer Optional, Volume level, range: [0,10], corresponding to 11 levels of volume, default is 0, which represents normal volume.
+"PrimaryLanguage": 1, // Integer Optional, Primary language 1- Chinese (default) 2- English 3- Japanese
+"FastVoiceType": "xxxx" // Optional parameter, Fast VRS parameter
+}
+</code></pre>
+
+</div></div><ul>
+<li>Minimax TTS<br>
+For configuration, please refer to the <a href="https://platform.minimaxi.com/document/T2A%20V2?key=66719005a427f0c8a5701643" target="_blank">Minimax TTS documentation link</a>. Note that Minimax TTS has frequency limits, and exceeding the limit may cause response delays, <a href="https://platform.minimaxi.com/document/Rate%20limits?key=66b19417290299a26b234572" target="_blank">Minimax TTS frequency limit related documentation link</a>.</li>
+</ul>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "minimax", // String TTS type,
+"Model": "speech-01-turbo",
+"APIUrl": "https://api.minimax.chat/v1/t2a_v2",
+"APIKey": "eyxxxx",
+"GroupId": "181000000000000",
+"VoiceType":"female-tianmei-yujie",
+"Speed": 1.2
+}
+</code></pre>
+</div></div><ul>
+<li>Volcano TTS</li>
+</ul>
+<p>For type of sound quality configuration, refer to the<a href="https://www.volcengine.com/docs/6561/162929" target="_blank">Volcano TTS documentation</a><br>
+TTS Sound Quality List - Voice Technology - Volcano Engine<br>
+Large Model TTS Sound Quality List - Voice Technology - Volcano Engine</p>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "volcengine", // Required: String TTS type
+"AppId" : "xxxxxxxx", // Required: String Volcano Engine assigned AppId
+"Token" : "TY9d4sQXHxxxxxxx", // Required: String type Volcano Engine access token
+"Speed" : 1.0, // Optional parameter: Playback speed, default is 1.0
+"Volume": 1.0, // Optional parameter: Volume, default is 1.0
+"Cluster" : "volcano_tts", // Optional parameter: Business cluster, default is volcano_tts
+"VoiceType" : "zh_male_aojiaobazong_moon_bigtts" // Sound quality type, default is the sound quality of the large model TTS. If using normal TTS, fill in the corresponding sound quality type. Incorrect sound quality type will result in no sound.
+}
+</code></pre>
+
+</div></div><ul>
+<li>Azure TTS<br>
+For configuration, refer to the<a href="https://docs.azure.cn/zh-cn/ai-services/speech-service/speech-synthesis-markup-voice" target="_blank">Azure TTS documentation</a></li>
+</ul>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "azure", // Required: String TTS type
+"SubscriptionKey": "xxxxxxxx", // Required: String subscription key
+"Region": "chinanorth3", // Required: String subscription region
+"VoiceName": "zh-CN-XiaoxiaoNeural", // Required: String Timbre Name required
+"Language": "zh-CN", // Required: String Language for synthesis
+"Rate": 1 // Optional: float Playback Speed 0.5-2 default is 1
+}
+</code></pre>
+
+</div></div><ul>
+<li>Custom</li>
+</ul>
+<p>TTS<br>
+Please refer to the specific protocol standards in the <a href="https://doc.weixin.qq.com/doc/w3_ANQAiAbdAFwHILbJBmtSqSbV1WZ3L?scode=AJEAIQdfAAo5a1xajYANQAiAbdAFw" target="_blank">Tencent documentation</a></p>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "custom", // Required String
+"APIKey": "ApiKey", // Required String for Authentication
+"APIUrl": "http://0.0.0.0:8080/stream-audio" // Required String, TTS API URL
+"AudioFormat": "wav", // String, optional, expected audio format, such as mp3, ogg_opus, pcm, wav, default is wav, currently only pcm and wav are supported,
+"SampleRate": 16000, // Integer, optional, audio sample rate, default is 16000 (16k), recommended value is 16000
+"AudioChannel": 1, // Integer, optional, number of audio channels, values: 1 or 2, default is 1
+}
+</code></pre>
+
+</div></div>
+        :type CustomTTSConfig: str
+        """
+        self._SdkAppId = None
+        self._Callee = None
+        self._SystemPrompt = None
+        self._LLMType = None
+        self._Model = None
+        self._APIKey = None
+        self._APIUrl = None
+        self._VoiceType = None
+        self._Callers = None
+        self._WelcomeMessage = None
+        self._WelcomeType = None
+        self._MaxDuration = None
+        self._Languages = None
+        self._InterruptMode = None
+        self._InterruptSpeechDuration = None
+        self._EndFunctionEnable = None
+        self._EndFunctionDesc = None
+        self._NotifyDuration = None
+        self._NotifyMessage = None
+        self._CustomTTSConfig = None
+
+    @property
+    def SdkAppId(self):
+        """Application ID (required) can be found at https://console.cloud.tencent.com/ccc.
+        :rtype: int
+        """
+        return self._SdkAppId
+
+    @SdkAppId.setter
+    def SdkAppId(self, SdkAppId):
+        self._SdkAppId = SdkAppId
+
+    @property
+    def Callee(self):
+        """Called number.
+        :rtype: str
+        """
+        return self._Callee
+
+    @Callee.setter
+    def Callee(self, Callee):
+        self._Callee = Callee
+
+    @property
+    def SystemPrompt(self):
+        """## Identity
+You are Kate from the appointment department at Retell Health calling Cindy over the phone to prepare for the annual checkup coming up. You are a pleasant and friendly receptionist caring deeply for the user. You don't provide medical advice but would use the medical knowledge to understand user responses.
+
+## Style Guardrails
+Be Concise: Respond succinctly, addressing one topic at most.
+Embrace Variety: Use diverse language and rephrasing to enhance clarity without repeating content.
+Be Conversational: Use everyday language, making the chat feel like talking to a friend.
+Be Proactive: Lead the conversation, often wrapping up with a question or next-step suggestion.
+Avoid multiple questions in a single response.
+Get clarity: If the user only partially answers a question, or if the answer is unclear, keep asking to get clarity.
+Use a colloquial way of referring to the date (like Friday, January 14th, or Tuesday, January 12th, 2024 at 8am).
+
+## Response Guideline
+Adapt and Guess: Try to understand transcripts that may contain transcription errors. Avoid mentioning "transcription error" in the response.
+Stay in Character: Keep conversations within your role's scope, guiding them back creatively without repeating.
+Ensure Fluid Dialogue: Respond in a role-appropriate, direct manner to maintain a smooth conversation flow.
+
+## Task
+You will follow the steps below, do not skip steps, and only ask up to one question in response.
+If at any time the user showed anger or wanted a human agent, call transfer_call to transfer to a human representative.
+1. Begin with a self-introduction and verify if callee is Cindy.
+- if callee is not Cindy, call end_call to hang up, say sorry for the confusion when hanging up.
+- if Cindy is not available, call end_call politely to hang up, say you will call back later when hanging up.
+2. Inform Cindy she has an annual body check coming up on April 4th, 2024 at 10am PDT. Check if Cindy is available.
+- If not, tell Cindy to reschedule online and jump to step 5.
+3. Ask Cindy if there's anything that the doctor should know before the annual checkup.
+- Ask followup questions as needed to assess the severity of the issue, and understand how it has progressed.
+4. Tell Cindy to not eat or drink that day before the checkup. Also tell Cindy to give you a callback if there's any changes in health condition.
+5. Ask Cindy if she has any questions, and if so, answer them until there are no questions.
+- If user asks something you do not know, let them know you don't have the answer. Ask them if they have any other questions.
+- If user do not have any questions, call function end_call to hang up.
+        :rtype: str
+        """
+        return self._SystemPrompt
+
+    @SystemPrompt.setter
+    def SystemPrompt(self, SystemPrompt):
+        self._SystemPrompt = SystemPrompt
+
+    @property
+    def LLMType(self):
+        """Model interface protocol types, currently compatible with three protocol types:
+
+- OpenAI protocol (including GPT, Hunyuan, DeepSeek, etc.):"openai"
+- Azure protocol:"azure"
+- Minimax protocol:"minimax"
+        :rtype: str
+        """
+        return self._LLMType
+
+    @LLMType.setter
+    def LLMType(self, LLMType):
+        self._LLMType = LLMType
+
+    @property
+    def Model(self):
+        """Model name, such as
+
+- OpenAI protocol
+"gpt-4o-mini","gpt-4o","hunyuan-standard", "hunyuan-turbo","deepseek-chat";
+
+- Azure protocol
+"gpt-4o-mini", "gpt-4o";
+
+- Minimax protocol
+"deepseek-chat".
+        :rtype: str
+        """
+        return self._Model
+
+    @Model.setter
+    def Model(self, Model):
+        self._Model = Model
+
+    @property
+    def APIKey(self):
+        """Model API key, for authentication information, please refer to the respective model's official website
+
+- OpenAI protocol: [GPT](https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key), [Hunyuan](https://intl.cloud.tencent.com/document/product/1729/111008?from_cn_redirect=1), [DeepSeek](https://api-docs.deepseek.com/zh-cn/);
+
+- Azure protocol: [Azure GPT](https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Ctypescript%2Cpython-new&pivots=programming-language-studio#key-settings);
+
+- Minimax:[Minimax](https://platform.minimaxi.com/document/Fast%20access?key=66701cf51d57f38758d581b2)
+        :rtype: str
+        """
+        return self._APIKey
+
+    @APIKey.setter
+    def APIKey(self, APIKey):
+        self._APIKey = APIKey
+
+    @property
+    def APIUrl(self):
+        """Model interface address
+
+- OpenAI protocol
+GPT:"https://api.openai.com/v1/"
+Hunyuan:"https://api.hunyuan.cloud.tencent.com/v1"
+Deepseek:"https://api.deepseek.com/v1"
+
+- Azure protocol
+"https://{your-resource-name}.openai.azure.com?api-version={api-version}"
+
+- Minimax protocol
+"https://api.minimax.chat/v1"
+        :rtype: str
+        """
+        return self._APIUrl
+
+    @APIUrl.setter
+    def APIUrl(self, APIUrl):
+        self._APIUrl = APIUrl
+
+    @property
+    def VoiceType(self):
+        """The following voice parameter values are available by default. If you wish to customize the voice type, please leave VoiceType blank and configure it in the CustomTTSConfig parameter.
+
+Chinese:
+ZhiMei: Zhimei, customer service female voice
+ZhiXi: Zhixi, general female voice
+ZhiQi: Zhiqi, customer service female voice
+ZhiTian: Zhitian, female child voice
+AiXiaoJing: Ai Xiaojing, dialogue female voice
+
+English:
+WeRose:English Female Voice
+Monika:English Female Voice
+
+Japanese:
+Nanami
+
+Korean:
+SunHi
+
+Indonesian (Indonesia):
+Gadis
+
+Malay (Malaysia):
+Yasmin
+
+Tamil (Malaysia):
+Kani
+
+Thai (Thailand):
+Achara
+
+Vietnamese (Vietnam):
+HoaiMy
+
+
+        :rtype: str
+        """
+        return self._VoiceType
+
+    @VoiceType.setter
+    def VoiceType(self, VoiceType):
+        self._VoiceType = VoiceType
+
+    @property
+    def Callers(self):
+        """List of calling numbers.
+        :rtype: list of str
+        """
+        return self._Callers
+
+    @Callers.setter
+    def Callers(self, Callers):
+        self._Callers = Callers
+
+    @property
+    def WelcomeMessage(self):
+        """Used to set the AI Agent Greeting.
+        :rtype: str
+        """
+        return self._WelcomeMessage
+
+    @WelcomeMessage.setter
+    def WelcomeMessage(self, WelcomeMessage):
+        self._WelcomeMessage = WelcomeMessage
+
+    @property
+    def WelcomeType(self):
+        """0: Use welcomeMessage (if empty, the callee speaks first; if not empty, the bot speaks first)
+1: Use AI to automatically generate welcomeMessage and speak first based on the prompt
+        :rtype: int
+        """
+        return self._WelcomeType
+
+    @WelcomeType.setter
+    def WelcomeType(self, WelcomeType):
+        self._WelcomeType = WelcomeType
+
+    @property
+    def MaxDuration(self):
+        """Maximum Waiting Duration (milliseconds), default is 60 seconds, if the user does not speak within this time, the call is automatically terminated
+        :rtype: int
+        """
+        return self._MaxDuration
+
+    @MaxDuration.setter
+    def MaxDuration(self, MaxDuration):
+        self._MaxDuration = MaxDuration
+
+    @property
+    def Languages(self):
+        """ASR Supported Languages, default is "zh" Chinese,
+Fill in the array with up to 4 languages, the first is the primary language for recognition, followed by optional languages,
+Note: When the primary language is a Chinese dialect, optional languages are invalid
+Currently, the supported languages are as follows. The English name of the language is on the left side of the equals sign, and the value to be filled in the Language field is on the right side, following ISO639:
+1. Chinese = "zh" # Chinese
+2. Chinese_TW = "zh-TW" # Taiwan (China)
+3. Chinese_DIALECT = "zh-dialect" # Chinese Dialect
+4. English = "en" # English
+5. Vietnamese = "vi" # Vietnamese
+6. Japanese = "ja" # Japanese
+7. Korean = "ko" # Korean
+8. Indonesia = "id" # Indonesian
+9. Thai = "th" # Thai
+10. Portuguese = "pt" # Portuguese
+11. Turkish = "tr" # Turkish
+12. Arabic = "ar" # Arabic
+13. Spanish = "es" # Spanish
+14. Hindi = "hi" # Hindi
+15. French = "fr" # French
+16. Malay = "ms" # Malay
+17. Filipino = "fil" # Filipino
+18. German = "de" # German
+19. Italian = "it" # Italian
+20. Russian = "ru" # Russian
+        :rtype: list of str
+        """
+        return self._Languages
+
+    @Languages.setter
+    def Languages(self, Languages):
+        self._Languages = Languages
+
+    @property
+    def InterruptMode(self):
+        """Interrupt AI speech mode, default is 0, 0 indicates the server interrupts automatically, 1 indicates the server does not interrupt, interruption signal sent by the client side.
+        :rtype: int
+        """
+        return self._InterruptMode
+
+    @InterruptMode.setter
+    def InterruptMode(self, InterruptMode):
+        self._InterruptMode = InterruptMode
+
+    @property
+    def InterruptSpeechDuration(self):
+        """Used when InterruptMode is 0, unit in milliseconds, default is 500ms. It means that the server-side detects ongoing vocal input for the InterruptSpeechDuration milliseconds and then interrupts.
+        :rtype: int
+        """
+        return self._InterruptSpeechDuration
+
+    @InterruptSpeechDuration.setter
+    def InterruptSpeechDuration(self, InterruptSpeechDuration):
+        self._InterruptSpeechDuration = InterruptSpeechDuration
+
+    @property
+    def EndFunctionEnable(self):
+        """Whether the model supports (or enables) call_end function calling
+        :rtype: bool
+        """
+        return self._EndFunctionEnable
+
+    @EndFunctionEnable.setter
+    def EndFunctionEnable(self, EndFunctionEnable):
+        self._EndFunctionEnable = EndFunctionEnable
+
+    @property
+    def EndFunctionDesc(self):
+        """Effective when EndFunctionEnable is true; the description of call_end function calling, default is "End the call when user has to leave (like says bye) or you are instructed to do so."
+        :rtype: str
+        """
+        return self._EndFunctionDesc
+
+    @EndFunctionDesc.setter
+    def EndFunctionDesc(self, EndFunctionDesc):
+        self._EndFunctionDesc = EndFunctionDesc
+
+    @property
+    def NotifyDuration(self):
+        """The duration after which the user hasn't spoken to trigger a notification, minimum 10 seconds, default 10 seconds
+        :rtype: int
+        """
+        return self._NotifyDuration
+
+    @NotifyDuration.setter
+    def NotifyDuration(self, NotifyDuration):
+        self._NotifyDuration = NotifyDuration
+
+    @property
+    def NotifyMessage(self):
+        """The AI prompt when NotifyDuration has passed without the user speaking, default is "Sorry, I didn't hear you clearly. Can you repeat that?"
+        :rtype: str
+        """
+        return self._NotifyMessage
+
+    @NotifyMessage.setter
+    def NotifyMessage(self, NotifyMessage):
+        self._NotifyMessage = NotifyMessage
+
+    @property
+    def CustomTTSConfig(self):
+        """<p>And VoiceType field needs to select one, here is to use your own custom TTS, VoiceType is some built-in sound qualities</p>
+<ul>
+<li>Tencent TTS<br>
+For configuration, please refer to <a href="https://intl.cloud.tencent.com/document/product/1073/92668?from_cn_redirect=1#55924b56-1a73-4663-a7a1-a8dd82d6e823" target="_blank">Tencent Cloud TTS documentation link</a></li>
+</ul>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "tencent", // String TTS type, currently supports "tencent" and "minixmax", other vendors support in progress
+"AppId": "Your application ID", // String required
+"SecretId": "Your Secret ID", // String Required
+"SecretKey": "Your Secret Key", // String Required
+"VoiceType": 101001, // Integer Required, Sound quality ID, includes standard and premium sound quality. Premium sound quality is more realistic and differently priced than standard sound quality. See TTS billing overview for details. For the full list of sound quality IDs, see the TTS sound quality list.
+"Speed": 1.25, // Integer Optional, speech speed, range: [-2,6], corresponding to different speeds: -2: represents 0.6x -1: represents 0.8x 0: represents 1.0x (default) 1: represents 1.2x 2: represents 1.5x 6: represents 2.5x For more precise speed control, you can retain two decimal places, such as 0.5/1.25/2.81, etc. For parameter value to actual speed conversion, refer to Speed Conversion
+"Volume": 5, // Integer Optional, Volume level, range: [0,10], corresponding to 11 levels of volume, default is 0, which represents normal volume.
+"PrimaryLanguage": 1, // Integer Optional, Primary language 1- Chinese (default) 2- English 3- Japanese
+"FastVoiceType": "xxxx" // Optional parameter, Fast VRS parameter
+}
+</code></pre>
+
+</div></div><ul>
+<li>Minimax TTS<br>
+For configuration, please refer to the <a href="https://platform.minimaxi.com/document/T2A%20V2?key=66719005a427f0c8a5701643" target="_blank">Minimax TTS documentation link</a>. Note that Minimax TTS has frequency limits, and exceeding the limit may cause response delays, <a href="https://platform.minimaxi.com/document/Rate%20limits?key=66b19417290299a26b234572" target="_blank">Minimax TTS frequency limit related documentation link</a>.</li>
+</ul>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "minimax", // String TTS type,
+"Model": "speech-01-turbo",
+"APIUrl": "https://api.minimax.chat/v1/t2a_v2",
+"APIKey": "eyxxxx",
+"GroupId": "181000000000000",
+"VoiceType":"female-tianmei-yujie",
+"Speed": 1.2
+}
+</code></pre>
+</div></div><ul>
+<li>Volcano TTS</li>
+</ul>
+<p>For type of sound quality configuration, refer to the<a href="https://www.volcengine.com/docs/6561/162929" target="_blank">Volcano TTS documentation</a><br>
+TTS Sound Quality List - Voice Technology - Volcano Engine<br>
+Large Model TTS Sound Quality List - Voice Technology - Volcano Engine</p>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "volcengine", // Required: String TTS type
+"AppId" : "xxxxxxxx", // Required: String Volcano Engine assigned AppId
+"Token" : "TY9d4sQXHxxxxxxx", // Required: String type Volcano Engine access token
+"Speed" : 1.0, // Optional parameter: Playback speed, default is 1.0
+"Volume": 1.0, // Optional parameter: Volume, default is 1.0
+"Cluster" : "volcano_tts", // Optional parameter: Business cluster, default is volcano_tts
+"VoiceType" : "zh_male_aojiaobazong_moon_bigtts" // Sound quality type, default is the sound quality of the large model TTS. If using normal TTS, fill in the corresponding sound quality type. Incorrect sound quality type will result in no sound.
+}
+</code></pre>
+
+</div></div><ul>
+<li>Azure TTS<br>
+For configuration, refer to the<a href="https://docs.azure.cn/zh-cn/ai-services/speech-service/speech-synthesis-markup-voice" target="_blank">Azure TTS documentation</a></li>
+</ul>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "azure", // Required: String TTS type
+"SubscriptionKey": "xxxxxxxx", // Required: String subscription key
+"Region": "chinanorth3", // Required: String subscription region
+"VoiceName": "zh-CN-XiaoxiaoNeural", // Required: String Timbre Name required
+"Language": "zh-CN", // Required: String Language for synthesis
+"Rate": 1 // Optional: float Playback Speed 0.5-2 default is 1
+}
+</code></pre>
+
+</div></div><ul>
+<li>Custom</li>
+</ul>
+<p>TTS<br>
+Please refer to the specific protocol standards in the <a href="https://doc.weixin.qq.com/doc/w3_ANQAiAbdAFwHILbJBmtSqSbV1WZ3L?scode=AJEAIQdfAAo5a1xajYANQAiAbdAFw" target="_blank">Tencent documentation</a></p>
+<div><div class="v-md-pre-wrapper copy-code-mode v-md-pre-wrapper- extra-class"><pre class="v-md-prism-"><code>{
+"TTSType": "custom", // Required String
+"APIKey": "ApiKey", // Required String for Authentication
+"APIUrl": "http://0.0.0.0:8080/stream-audio" // Required String, TTS API URL
+"AudioFormat": "wav", // String, optional, expected audio format, such as mp3, ogg_opus, pcm, wav, default is wav, currently only pcm and wav are supported,
+"SampleRate": 16000, // Integer, optional, audio sample rate, default is 16000 (16k), recommended value is 16000
+"AudioChannel": 1, // Integer, optional, number of audio channels, values: 1 or 2, default is 1
+}
+</code></pre>
+
+</div></div>
+        :rtype: str
+        """
+        return self._CustomTTSConfig
+
+    @CustomTTSConfig.setter
+    def CustomTTSConfig(self, CustomTTSConfig):
+        self._CustomTTSConfig = CustomTTSConfig
+
+
+    def _deserialize(self, params):
+        self._SdkAppId = params.get("SdkAppId")
+        self._Callee = params.get("Callee")
+        self._SystemPrompt = params.get("SystemPrompt")
+        self._LLMType = params.get("LLMType")
+        self._Model = params.get("Model")
+        self._APIKey = params.get("APIKey")
+        self._APIUrl = params.get("APIUrl")
+        self._VoiceType = params.get("VoiceType")
+        self._Callers = params.get("Callers")
+        self._WelcomeMessage = params.get("WelcomeMessage")
+        self._WelcomeType = params.get("WelcomeType")
+        self._MaxDuration = params.get("MaxDuration")
+        self._Languages = params.get("Languages")
+        self._InterruptMode = params.get("InterruptMode")
+        self._InterruptSpeechDuration = params.get("InterruptSpeechDuration")
+        self._EndFunctionEnable = params.get("EndFunctionEnable")
+        self._EndFunctionDesc = params.get("EndFunctionDesc")
+        self._NotifyDuration = params.get("NotifyDuration")
+        self._NotifyMessage = params.get("NotifyMessage")
+        self._CustomTTSConfig = params.get("CustomTTSConfig")
+        memeber_set = set(params.keys())
+        for name, value in vars(self).items():
+            property_name = name[1:]
+            if property_name in memeber_set:
+                memeber_set.remove(property_name)
+        if len(memeber_set) > 0:
+            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
+
+
+
+class CreateAICallResponse(AbstractModel):
+    """CreateAICall response structure.
+
+    """
+
+    def __init__(self):
+        r"""
+        :param _SessionId: Newly created session ID.
+        :type SessionId: str
+        :param _RequestId: The unique request ID, generated by the server, will be returned for every request (if the request fails to reach the server for other reasons, the request will not obtain a RequestId). RequestId is required for locating a problem.
+        :type RequestId: str
+        """
+        self._SessionId = None
+        self._RequestId = None
+
+    @property
+    def SessionId(self):
+        """Newly created session ID.
+        :rtype: str
+        """
+        return self._SessionId
+
+    @SessionId.setter
+    def SessionId(self, SessionId):
+        self._SessionId = SessionId
+
+    @property
+    def RequestId(self):
+        """The unique request ID, generated by the server, will be returned for every request (if the request fails to reach the server for other reasons, the request will not obtain a RequestId). RequestId is required for locating a problem.
+        :rtype: str
+        """
+        return self._RequestId
+
+    @RequestId.setter
+    def RequestId(self, RequestId):
+        self._RequestId = RequestId
+
+
+    def _deserialize(self, params):
+        self._SessionId = params.get("SessionId")
+        self._RequestId = params.get("RequestId")
+
+
 class CreateAdminURLRequest(AbstractModel):
     """CreateAdminURL request structure.
 
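
For orientation, the request and response models above pair with a new `CreateAICall` action in the CCC module. The sketch below is not part of this diff: it assumes the 25 lines added to `ccc_client.py` expose a `CccClient.CreateAICall` method in the SDK's usual one-method-per-action style, and every parameter value is a placeholder.

```python
# Minimal usage sketch for the new CreateAICall models (assumptions noted above).
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.ccc.v20200210 import ccc_client, models

try:
    cred = credential.Credential("YourSecretId", "YourSecretKey")
    client = ccc_client.CccClient(cred, "ap-singapore")

    req = models.CreateAICallRequest()
    req.SdkAppId = 1400000000                 # placeholder CCC application ID
    req.Callee = "+6511112222"                # placeholder called number
    req.SystemPrompt = "## Identity\nYou are a polite appointment assistant."
    req.LLMType = "openai"                    # OpenAI-compatible protocol
    req.Model = "gpt-4o-mini"
    req.APIKey = "sk-xxxx"                    # placeholder model API key
    req.APIUrl = "https://api.openai.com/v1/"
    req.VoiceType = "WeRose"                  # built-in English female voice
    req.WelcomeMessage = "Hello, this is the appointment desk."
    req.WelcomeType = 0                       # bot speaks the fixed greeting first

    resp = client.CreateAICall(req)           # assumed method name, mirrors the action
    print(resp.SessionId)
except TencentCloudSDKException as err:
    print(err)
```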