alibabacloud-ice20201109 6.8.3__tar.gz → 6.8.5__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (17)
  1. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/ChangeLog.md +9 -0
  2. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/PKG-INFO +1 -1
  3. alibabacloud_ice20201109-6.8.5/alibabacloud_ice20201109/__init__.py +1 -0
  4. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/alibabacloud_ice20201109/client.py +104 -20
  5. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/alibabacloud_ice20201109/models.py +551 -6
  6. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/alibabacloud_ice20201109.egg-info/PKG-INFO +1 -1
  7. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/setup.py +1 -1
  8. alibabacloud_ice20201109-6.8.3/alibabacloud_ice20201109/__init__.py +0 -1
  9. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/LICENSE +0 -0
  10. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/MANIFEST.in +0 -0
  11. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/README-CN.md +0 -0
  12. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/README.md +0 -0
  13. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/alibabacloud_ice20201109.egg-info/SOURCES.txt +0 -0
  14. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/alibabacloud_ice20201109.egg-info/dependency_links.txt +0 -0
  15. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/alibabacloud_ice20201109.egg-info/requires.txt +0 -0
  16. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/alibabacloud_ice20201109.egg-info/top_level.txt +0 -0
  17. {alibabacloud_ice20201109-6.8.3 → alibabacloud_ice20201109-6.8.5}/setup.cfg +0 -0
@@ -1,3 +1,12 @@
+ 2025-12-01 Version: 6.8.4
+ - Update API ForwardAIAgentCall: add request parameters CallerNumber.
+
+
+ 2025-11-30 Version: 6.8.3
+ - Update API BatchGetMediaInfos: add response parameters Body.IgnoredList.
+ - Update API BatchGetMediaInfos: add response parameters Body.MediaInfos.$.MediaDynamicInfo.
+
+
  2025-11-18 Version: 6.8.2
  - Generated python 2020-11-09 for ICE.
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: alibabacloud_ice20201109
- Version: 6.8.3
+ Version: 6.8.5
  Summary: Alibaba Cloud ICE (20201109) SDK Library for Python
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
  Author: Alibaba Cloud SDK
@@ -0,0 +1 @@
+ __version__ = '6.8.5'
@@ -105,7 +105,11 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.ActiveAiRtcLicenseResponse:
  """
- @summary 激活AI实时互动授权信息
+ @summary Activates a specified license using the batch ID, authorization code, and device SN.
+
+ @description ## [](#)Usage notes
+ This API is used to activate a specific license for Real-time Conversational AI by providing a batch ID (`LicenseItemId`), authorization code (`AuthCode`), and device ID (`DeviceId`). Upon successful activation, the API returns a response containing the request ID, an error code, the request status, the HTTP status code, and the activated license information.
+ **Note**: Ensure that the provided batch ID, authorization code, and device ID are correct. Incorrect information may cause the activation to fail.
 
  @param request: ActiveAiRtcLicenseRequest
  @param runtime: runtime options for this request RuntimeOptions
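For orientation (not part of the diff): a minimal sketch of calling the newly documented activation flow from application code. The request fields mirror ActiveAiRtcLicenseRequest as declared later in models.py; the snake_case method name follows the generator's usual convention, and the endpoint, credential environment variables, and placeholder IDs are assumptions rather than values taken from this release.

    import os

    from alibabacloud_tea_openapi import models as open_api_models
    from alibabacloud_ice20201109.client import Client
    from alibabacloud_ice20201109 import models as ice20201109_models

    # Hypothetical endpoint and credentials; adjust for your own account and region.
    config = open_api_models.Config(
        access_key_id=os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'],
        access_key_secret=os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'],
        endpoint='ice.cn-shanghai.aliyuncs.com',
    )
    client = Client(config)

    request = ice20201109_models.ActiveAiRtcLicenseRequest(
        license_item_id='<batch-id>',      # batch ID
        auth_code='<authorization-code>',  # authorization code
        device_id='<device-sn>',           # device ID / SN
    )
    response = client.active_ai_rtc_license(request)
    print(response.body.success, response.body.license)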
@@ -144,7 +148,11 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.ActiveAiRtcLicenseResponse:
  """
- @summary 激活AI实时互动授权信息
+ @summary Activates a specified license using the batch ID, authorization code, and device SN.
+
+ @description ## [](#)Usage notes
+ This API is used to activate a specific license for Real-time Conversational AI by providing a batch ID (`LicenseItemId`), authorization code (`AuthCode`), and device ID (`DeviceId`). Upon successful activation, the API returns a response containing the request ID, an error code, the request status, the HTTP status code, and the activated license information.
+ **Note**: Ensure that the provided batch ID, authorization code, and device ID are correct. Incorrect information may cause the activation to fail.
 
  @param request: ActiveAiRtcLicenseRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -182,7 +190,11 @@ class Client(OpenApiClient):
  request: ice20201109_models.ActiveAiRtcLicenseRequest,
  ) -> ice20201109_models.ActiveAiRtcLicenseResponse:
  """
- @summary 激活AI实时互动授权信息
+ @summary Activates a specified license using the batch ID, authorization code, and device SN.
+
+ @description ## [](#)Usage notes
+ This API is used to activate a specific license for Real-time Conversational AI by providing a batch ID (`LicenseItemId`), authorization code (`AuthCode`), and device ID (`DeviceId`). Upon successful activation, the API returns a response containing the request ID, an error code, the request status, the HTTP status code, and the activated license information.
+ **Note**: Ensure that the provided batch ID, authorization code, and device ID are correct. Incorrect information may cause the activation to fail.
 
  @param request: ActiveAiRtcLicenseRequest
  @return: ActiveAiRtcLicenseResponse
@@ -195,7 +207,11 @@ class Client(OpenApiClient):
  request: ice20201109_models.ActiveAiRtcLicenseRequest,
  ) -> ice20201109_models.ActiveAiRtcLicenseResponse:
  """
- @summary 激活AI实时互动授权信息
+ @summary Activates a specified license using the batch ID, authorization code, and device SN.
+
+ @description ## [](#)Usage notes
+ This API is used to activate a specific license for Real-time Conversational AI by providing a batch ID (`LicenseItemId`), authorization code (`AuthCode`), and device ID (`DeviceId`). Upon successful activation, the API returns a response containing the request ID, an error code, the request status, the HTTP status code, and the activated license information.
+ **Note**: Ensure that the provided batch ID, authorization code, and device ID are correct. Incorrect information may cause the activation to fail.
 
  @param request: ActiveAiRtcLicenseRequest
  @return: ActiveAiRtcLicenseResponse
@@ -12363,6 +12379,8 @@ class Client(OpenApiClient):
  query = {}
  if not UtilClient.is_unset(request.called_number):
  query['CalledNumber'] = request.called_number
+ if not UtilClient.is_unset(request.caller_number):
+ query['CallerNumber'] = request.caller_number
  if not UtilClient.is_unset(request.error_prompt):
  query['ErrorPrompt'] = request.error_prompt
  if not UtilClient.is_unset(request.instance_id):
@@ -12404,6 +12422,8 @@ class Client(OpenApiClient):
  query = {}
  if not UtilClient.is_unset(request.called_number):
  query['CalledNumber'] = request.called_number
+ if not UtilClient.is_unset(request.caller_number):
+ query['CallerNumber'] = request.caller_number
  if not UtilClient.is_unset(request.error_prompt):
  query['ErrorPrompt'] = request.error_prompt
  if not UtilClient.is_unset(request.instance_id):
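A small sketch (not part of the diff) of the new field on the request model; the identifiers are placeholders. Serializing the request shows the value being emitted as the CallerNumber query parameter handled above.

    from alibabacloud_ice20201109 import models as ice20201109_models

    # Placeholder values for illustration only.
    request = ice20201109_models.ForwardAIAgentCallRequest(
        instance_id='<ai-agent-instance-id>',
        called_number='<destination-number>',
        caller_number='<caller-display-number>',  # new in 6.8.4
    )
    # to_map() now emits the CallerNumber key alongside CalledNumber.
    print(request.to_map())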
@@ -12991,7 +13011,13 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.GetAiRtcAuthCodeListResponse:
  """
- @summary 获取AI实时互动授权码列表
+ @summary Retrieves a list of Real-time Conversational AI authentication codes and their status for a specified batch.
+
+ @description ## [](#)Usage notes
+ This API retrieves a list of authorization codes for a specific batch ID. You can filter the results by status and type.
+ Pagination is supported via the `PageNo` and `PageSize` parameters.
+ By default, the `NeedTotalCount` parameter is set to `true`, indicating that the response includes the total count of matching records.
+ `LicenseItemId` is a required parameter that specifies the batch to query.
 
  @param request: GetAiRtcAuthCodeListRequest
  @param runtime: runtime options for this request RuntimeOptions
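A paging sketch (not part of the diff), assuming `client` is a Client configured as in the earlier sketch and that the generated method name is get_ai_rtc_auth_code_list; the batch ID is a placeholder.

    from alibabacloud_ice20201109 import models as ice20201109_models

    page_no, page_size = 1, 50
    while True:
        request = ice20201109_models.GetAiRtcAuthCodeListRequest(
            license_item_id='<batch-id>',   # required: the batch to query
            page_no=page_no,
            page_size=page_size,
            need_total_count=True,          # default: include TotalCount in the response
        )
        body = client.get_ai_rtc_auth_code_list(request).body
        for auth_code in body.auth_code_list or []:
            print(auth_code)
        if page_no * page_size >= (body.total_count or 0):
            break
        page_no += 1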
@@ -13036,7 +13062,13 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.GetAiRtcAuthCodeListResponse:
  """
- @summary 获取AI实时互动授权码列表
+ @summary Retrieves a list of Real-time Conversational AI authentication codes and their status for a specified batch.
+
+ @description ## [](#)Usage notes
+ This API retrieves a list of authorization codes for a specific batch ID. You can filter the results by status and type.
+ Pagination is supported via the `PageNo` and `PageSize` parameters.
+ By default, the `NeedTotalCount` parameter is set to `true`, indicating that the response includes the total count of matching records.
+ `LicenseItemId` is a required parameter that specifies the batch to query.
 
  @param request: GetAiRtcAuthCodeListRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -13080,7 +13112,13 @@ class Client(OpenApiClient):
  request: ice20201109_models.GetAiRtcAuthCodeListRequest,
  ) -> ice20201109_models.GetAiRtcAuthCodeListResponse:
  """
- @summary 获取AI实时互动授权码列表
+ @summary Retrieves a list of Real-time Conversational AI authentication codes and their status for a specified batch.
+
+ @description ## [](#)Usage notes
+ This API retrieves a list of authorization codes for a specific batch ID. You can filter the results by status and type.
+ Pagination is supported via the `PageNo` and `PageSize` parameters.
+ By default, the `NeedTotalCount` parameter is set to `true`, indicating that the response includes the total count of matching records.
+ `LicenseItemId` is a required parameter that specifies the batch to query.
 
  @param request: GetAiRtcAuthCodeListRequest
  @return: GetAiRtcAuthCodeListResponse
@@ -13093,7 +13131,13 @@ class Client(OpenApiClient):
  request: ice20201109_models.GetAiRtcAuthCodeListRequest,
  ) -> ice20201109_models.GetAiRtcAuthCodeListResponse:
  """
- @summary 获取AI实时互动授权码列表
+ @summary Retrieves a list of Real-time Conversational AI authentication codes and their status for a specified batch.
+
+ @description ## [](#)Usage notes
+ This API retrieves a list of authorization codes for a specific batch ID. You can filter the results by status and type.
+ Pagination is supported via the `PageNo` and `PageSize` parameters.
+ By default, the `NeedTotalCount` parameter is set to `true`, indicating that the response includes the total count of matching records.
+ `LicenseItemId` is a required parameter that specifies the batch to query.
 
  @param request: GetAiRtcAuthCodeListRequest
  @return: GetAiRtcAuthCodeListResponse
@@ -13107,7 +13151,12 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.GetAiRtcLicenseInfoListResponse:
  """
- @summary 获取AI实时互动授权批次列表
+ @summary Retrieves a list of license batches for Real-time Conversational AI based on specified filter criteria.
+
+ @description ## [](#)Usage notes
+ This API allows you to retrieve a list of license batches for Real-time Conversational AI using filters such as Batch ID, status, and type.
+ By default, the `NeedTotalCount` parameter is set to `true`, indicating that the response includes the total count of matching records. Set it to `false` if you do not need this total.
+ If no filter criteria are provided, the API returns information for all license batches.
 
  @param request: GetAiRtcLicenseInfoListRequest
  @param runtime: runtime options for this request RuntimeOptions
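A filtering sketch (not part of the diff), assuming `client` is configured as above and the generated method name is get_ai_rtc_license_info_list; the status/type codes follow the parameter documentation added later in models.py.

    from alibabacloud_ice20201109 import models as ice20201109_models

    # List active (Status=1), audio-call (Type=1) license batches.
    request = ice20201109_models.GetAiRtcLicenseInfoListRequest(
        status=1,
        type=1,
        page_no=1,
        page_size=20,
        need_total_count=False,  # skip the total count if it is not needed
    )
    for batch in client.get_ai_rtc_license_info_list(request).body.license_info_list or []:
        print(batch)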
@@ -13152,7 +13201,12 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.GetAiRtcLicenseInfoListResponse:
  """
- @summary 获取AI实时互动授权批次列表
+ @summary Retrieves a list of license batches for Real-time Conversational AI based on specified filter criteria.
+
+ @description ## [](#)Usage notes
+ This API allows you to retrieve a list of license batches for Real-time Conversational AI using filters such as Batch ID, status, and type.
+ By default, the `NeedTotalCount` parameter is set to `true`, indicating that the response includes the total count of matching records. Set it to `false` if you do not need this total.
+ If no filter criteria are provided, the API returns information for all license batches.
 
  @param request: GetAiRtcLicenseInfoListRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -13196,7 +13250,12 @@ class Client(OpenApiClient):
  request: ice20201109_models.GetAiRtcLicenseInfoListRequest,
  ) -> ice20201109_models.GetAiRtcLicenseInfoListResponse:
  """
- @summary 获取AI实时互动授权批次列表
+ @summary Retrieves a list of license batches for Real-time Conversational AI based on specified filter criteria.
+
+ @description ## [](#)Usage notes
+ This API allows you to retrieve a list of license batches for Real-time Conversational AI using filters such as Batch ID, status, and type.
+ By default, the `NeedTotalCount` parameter is set to `true`, indicating that the response includes the total count of matching records. Set it to `false` if you do not need this total.
+ If no filter criteria are provided, the API returns information for all license batches.
 
  @param request: GetAiRtcLicenseInfoListRequest
  @return: GetAiRtcLicenseInfoListResponse
@@ -13209,7 +13268,12 @@ class Client(OpenApiClient):
  request: ice20201109_models.GetAiRtcLicenseInfoListRequest,
  ) -> ice20201109_models.GetAiRtcLicenseInfoListResponse:
  """
- @summary 获取AI实时互动授权批次列表
+ @summary Retrieves a list of license batches for Real-time Conversational AI based on specified filter criteria.
+
+ @description ## [](#)Usage notes
+ This API allows you to retrieve a list of license batches for Real-time Conversational AI using filters such as Batch ID, status, and type.
+ By default, the `NeedTotalCount` parameter is set to `true`, indicating that the response includes the total count of matching records. Set it to `false` if you do not need this total.
+ If no filter criteria are provided, the API returns information for all license batches.
 
  @param request: GetAiRtcLicenseInfoListRequest
  @return: GetAiRtcLicenseInfoListResponse
@@ -36367,7 +36431,9 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.SubmitSceneBatchEditingJobResponse:
  """
- @summary 提交场景化批量合成任务
+ @summary Submits a batch job to render multiple videos by providing a list of editing project IDs.
+
+ @description After submitting a job, you can call ListBatchMediaProducingJob to retrieve all matching jobs. To get detailed information for a specific job, including its status, output media asset IDs, and URLs, call GetBatchMediaProducingJob.
 
  @param request: SubmitSceneBatchEditingJobRequest
  @param runtime: runtime options for this request RuntimeOptions
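A submission sketch (not part of the diff), assuming `client` is configured as above; the project IDs and the OutputConfig payload are illustrative placeholders, with the full OutputConfig schema documented in the batch-production help pages referenced by this release.

    import json

    from alibabacloud_ice20201109 import models as ice20201109_models

    request = ice20201109_models.SubmitSceneBatchEditingJobRequest(
        # Comma-separated editing project IDs; one video is rendered per project timeline.
        project_ids='<project-id-1>,<project-id-2>',
        # Illustrative output settings; Count and GeneratePreviewOnly are not supported here.
        output_config=json.dumps({'MediaURL': 'https://<bucket>.oss-cn-shanghai.aliyuncs.com/scene_{index}.mp4'}),
    )
    job_id = client.submit_scene_batch_editing_job(request).body.job_id
    # Track progress via ListBatchMediaProducingJob / GetBatchMediaProducingJob using job_id.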
@@ -36406,7 +36472,9 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.SubmitSceneBatchEditingJobResponse:
  """
- @summary 提交场景化批量合成任务
+ @summary Submits a batch job to render multiple videos by providing a list of editing project IDs.
+
+ @description After submitting a job, you can call ListBatchMediaProducingJob to retrieve all matching jobs. To get detailed information for a specific job, including its status, output media asset IDs, and URLs, call GetBatchMediaProducingJob.
 
  @param request: SubmitSceneBatchEditingJobRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -36444,7 +36512,9 @@ class Client(OpenApiClient):
  request: ice20201109_models.SubmitSceneBatchEditingJobRequest,
  ) -> ice20201109_models.SubmitSceneBatchEditingJobResponse:
  """
- @summary 提交场景化批量合成任务
+ @summary Submits a batch job to render multiple videos by providing a list of editing project IDs.
+
+ @description After submitting a job, you can call ListBatchMediaProducingJob to retrieve all matching jobs. To get detailed information for a specific job, including its status, output media asset IDs, and URLs, call GetBatchMediaProducingJob.
 
  @param request: SubmitSceneBatchEditingJobRequest
  @return: SubmitSceneBatchEditingJobResponse
@@ -36457,7 +36527,9 @@ class Client(OpenApiClient):
  request: ice20201109_models.SubmitSceneBatchEditingJobRequest,
  ) -> ice20201109_models.SubmitSceneBatchEditingJobResponse:
  """
- @summary 提交场景化批量合成任务
+ @summary Submits a batch job to render multiple videos by providing a list of editing project IDs.
+
+ @description After submitting a job, you can call ListBatchMediaProducingJob to retrieve all matching jobs. To get detailed information for a specific job, including its status, output media asset IDs, and URLs, call GetBatchMediaProducingJob.
 
  @param request: SubmitSceneBatchEditingJobRequest
  @return: SubmitSceneBatchEditingJobResponse
@@ -36599,7 +36671,10 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.SubmitSceneTimelineOrganizationJobResponse:
  """
- @summary 提交场景化时间线编排任务
+ @summary Arranges media assets, including videos, images, background music, and voiceovers, into a complete timeline based on media selection results, and creates an editing project for preview. Two scenarios are supported: image-text matching and highlight mashup.
+
+ @description After submitting a job, you can call [ListBatchMediaProducingJob](https://help.aliyun.com/document_detail/2803751.html) to retrieve matching jobs. To get detailed information for a specific job, including its status, output media asset IDs, and URLs, call [GetBatchMediaProducingJob](https://help.aliyun.com/document_detail/2693269.html).
+ - The feature is in public preview and does not charge fees.
 
  @param request: SubmitSceneTimelineOrganizationJobRequest
  @param runtime: runtime options for this request RuntimeOptions
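A sketch of the highlight-mashup variant (not part of the diff), assuming `client` is configured as above; the JSON payloads are placeholders whose schemas are described in the linked help pages, and MediaSelectResult comes from a prior SubmitSceneMediaSelectionJob.

    from alibabacloud_ice20201109 import models as ice20201109_models

    request = ice20201109_models.SubmitSceneTimelineOrganizationJobRequest(
        job_type='Screen_Media_Highlights_Timeline_Organize',  # or 'Smart_Mix_Timeline_Organize'
        input_config='{...}',         # inputs, per the linked guide for the chosen JobType
        output_config='{...}',        # output settings, per the linked guide
        media_select_result='{...}',  # selection result from SubmitSceneMediaSelectionJob
    )
    print(client.submit_scene_timeline_organization_job(request).body.job_id)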
@@ -36646,7 +36721,10 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> ice20201109_models.SubmitSceneTimelineOrganizationJobResponse:
  """
- @summary 提交场景化时间线编排任务
+ @summary Arranges media assets, including videos, images, background music, and voiceovers, into a complete timeline based on media selection results, and creates an editing project for preview. Two scenarios are supported: image-text matching and highlight mashup.
+
+ @description After submitting a job, you can call [ListBatchMediaProducingJob](https://help.aliyun.com/document_detail/2803751.html) to retrieve matching jobs. To get detailed information for a specific job, including its status, output media asset IDs, and URLs, call [GetBatchMediaProducingJob](https://help.aliyun.com/document_detail/2693269.html).
+ - The feature is in public preview and does not charge fees.
 
  @param request: SubmitSceneTimelineOrganizationJobRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -36692,7 +36770,10 @@ class Client(OpenApiClient):
  request: ice20201109_models.SubmitSceneTimelineOrganizationJobRequest,
  ) -> ice20201109_models.SubmitSceneTimelineOrganizationJobResponse:
  """
- @summary 提交场景化时间线编排任务
+ @summary Arranges media assets, including videos, images, background music, and voiceovers, into a complete timeline based on media selection results, and creates an editing project for preview. Two scenarios are supported: image-text matching and highlight mashup.
+
+ @description After submitting a job, you can call [ListBatchMediaProducingJob](https://help.aliyun.com/document_detail/2803751.html) to retrieve matching jobs. To get detailed information for a specific job, including its status, output media asset IDs, and URLs, call [GetBatchMediaProducingJob](https://help.aliyun.com/document_detail/2693269.html).
+ - The feature is in public preview and does not charge fees.
 
  @param request: SubmitSceneTimelineOrganizationJobRequest
  @return: SubmitSceneTimelineOrganizationJobResponse
@@ -36705,7 +36786,10 @@ class Client(OpenApiClient):
  request: ice20201109_models.SubmitSceneTimelineOrganizationJobRequest,
  ) -> ice20201109_models.SubmitSceneTimelineOrganizationJobResponse:
  """
- @summary 提交场景化时间线编排任务
+ @summary Arranges media assets, including videos, images, background music, and voiceovers, into a complete timeline based on media selection results, and creates an editing project for preview. Two scenarios are supported: image-text matching and highlight mashup.
+
+ @description After submitting a job, you can call [ListBatchMediaProducingJob](https://help.aliyun.com/document_detail/2803751.html) to retrieve matching jobs. To get detailed information for a specific job, including its status, output media asset IDs, and URLs, call [GetBatchMediaProducingJob](https://help.aliyun.com/document_detail/2693269.html).
+ - The feature is in public preview and does not charge fees.
 
  @param request: SubmitSceneTimelineOrganizationJobRequest
  @return: SubmitSceneTimelineOrganizationJobResponse
@@ -94,6 +94,198 @@ class AIAgentConfigAsrConfig(TeaModel):
94
94
  return self
95
95
 
96
96
 
97
+ class AIAgentConfigAutoSpeechConfigLlmPendingMessages(TeaModel):
98
+ def __init__(
99
+ self,
100
+ probability: float = None,
101
+ text: str = None,
102
+ ):
103
+ self.probability = probability
104
+ self.text = text
105
+
106
+ def validate(self):
107
+ pass
108
+
109
+ def to_map(self):
110
+ _map = super().to_map()
111
+ if _map is not None:
112
+ return _map
113
+
114
+ result = dict()
115
+ if self.probability is not None:
116
+ result['Probability'] = self.probability
117
+ if self.text is not None:
118
+ result['Text'] = self.text
119
+ return result
120
+
121
+ def from_map(self, m: dict = None):
122
+ m = m or dict()
123
+ if m.get('Probability') is not None:
124
+ self.probability = m.get('Probability')
125
+ if m.get('Text') is not None:
126
+ self.text = m.get('Text')
127
+ return self
128
+
129
+
130
+ class AIAgentConfigAutoSpeechConfigLlmPending(TeaModel):
131
+ def __init__(
132
+ self,
133
+ messages: List[AIAgentConfigAutoSpeechConfigLlmPendingMessages] = None,
134
+ wait_time: int = None,
135
+ ):
136
+ self.messages = messages
137
+ self.wait_time = wait_time
138
+
139
+ def validate(self):
140
+ if self.messages:
141
+ for k in self.messages:
142
+ if k:
143
+ k.validate()
144
+
145
+ def to_map(self):
146
+ _map = super().to_map()
147
+ if _map is not None:
148
+ return _map
149
+
150
+ result = dict()
151
+ result['Messages'] = []
152
+ if self.messages is not None:
153
+ for k in self.messages:
154
+ result['Messages'].append(k.to_map() if k else None)
155
+ if self.wait_time is not None:
156
+ result['WaitTime'] = self.wait_time
157
+ return result
158
+
159
+ def from_map(self, m: dict = None):
160
+ m = m or dict()
161
+ self.messages = []
162
+ if m.get('Messages') is not None:
163
+ for k in m.get('Messages'):
164
+ temp_model = AIAgentConfigAutoSpeechConfigLlmPendingMessages()
165
+ self.messages.append(temp_model.from_map(k))
166
+ if m.get('WaitTime') is not None:
167
+ self.wait_time = m.get('WaitTime')
168
+ return self
169
+
170
+
171
+ class AIAgentConfigAutoSpeechConfigUserIdleMessages(TeaModel):
172
+ def __init__(
173
+ self,
174
+ probability: float = None,
175
+ text: str = None,
176
+ ):
177
+ self.probability = probability
178
+ self.text = text
179
+
180
+ def validate(self):
181
+ pass
182
+
183
+ def to_map(self):
184
+ _map = super().to_map()
185
+ if _map is not None:
186
+ return _map
187
+
188
+ result = dict()
189
+ if self.probability is not None:
190
+ result['Probability'] = self.probability
191
+ if self.text is not None:
192
+ result['Text'] = self.text
193
+ return result
194
+
195
+ def from_map(self, m: dict = None):
196
+ m = m or dict()
197
+ if m.get('Probability') is not None:
198
+ self.probability = m.get('Probability')
199
+ if m.get('Text') is not None:
200
+ self.text = m.get('Text')
201
+ return self
202
+
203
+
204
+ class AIAgentConfigAutoSpeechConfigUserIdle(TeaModel):
205
+ def __init__(
206
+ self,
207
+ max_repeats: int = None,
208
+ messages: List[AIAgentConfigAutoSpeechConfigUserIdleMessages] = None,
209
+ wait_time: int = None,
210
+ ):
211
+ self.max_repeats = max_repeats
212
+ self.messages = messages
213
+ self.wait_time = wait_time
214
+
215
+ def validate(self):
216
+ if self.messages:
217
+ for k in self.messages:
218
+ if k:
219
+ k.validate()
220
+
221
+ def to_map(self):
222
+ _map = super().to_map()
223
+ if _map is not None:
224
+ return _map
225
+
226
+ result = dict()
227
+ if self.max_repeats is not None:
228
+ result['MaxRepeats'] = self.max_repeats
229
+ result['Messages'] = []
230
+ if self.messages is not None:
231
+ for k in self.messages:
232
+ result['Messages'].append(k.to_map() if k else None)
233
+ if self.wait_time is not None:
234
+ result['WaitTime'] = self.wait_time
235
+ return result
236
+
237
+ def from_map(self, m: dict = None):
238
+ m = m or dict()
239
+ if m.get('MaxRepeats') is not None:
240
+ self.max_repeats = m.get('MaxRepeats')
241
+ self.messages = []
242
+ if m.get('Messages') is not None:
243
+ for k in m.get('Messages'):
244
+ temp_model = AIAgentConfigAutoSpeechConfigUserIdleMessages()
245
+ self.messages.append(temp_model.from_map(k))
246
+ if m.get('WaitTime') is not None:
247
+ self.wait_time = m.get('WaitTime')
248
+ return self
249
+
250
+
251
+ class AIAgentConfigAutoSpeechConfig(TeaModel):
252
+ def __init__(
253
+ self,
254
+ llm_pending: AIAgentConfigAutoSpeechConfigLlmPending = None,
255
+ user_idle: AIAgentConfigAutoSpeechConfigUserIdle = None,
256
+ ):
257
+ self.llm_pending = llm_pending
258
+ self.user_idle = user_idle
259
+
260
+ def validate(self):
261
+ if self.llm_pending:
262
+ self.llm_pending.validate()
263
+ if self.user_idle:
264
+ self.user_idle.validate()
265
+
266
+ def to_map(self):
267
+ _map = super().to_map()
268
+ if _map is not None:
269
+ return _map
270
+
271
+ result = dict()
272
+ if self.llm_pending is not None:
273
+ result['LlmPending'] = self.llm_pending.to_map()
274
+ if self.user_idle is not None:
275
+ result['UserIdle'] = self.user_idle.to_map()
276
+ return result
277
+
278
+ def from_map(self, m: dict = None):
279
+ m = m or dict()
280
+ if m.get('LlmPending') is not None:
281
+ temp_model = AIAgentConfigAutoSpeechConfigLlmPending()
282
+ self.llm_pending = temp_model.from_map(m['LlmPending'])
283
+ if m.get('UserIdle') is not None:
284
+ temp_model = AIAgentConfigAutoSpeechConfigUserIdle()
285
+ self.user_idle = temp_model.from_map(m['UserIdle'])
286
+ return self
287
+
288
+
97
289
  class AIAgentConfigAvatarConfig(TeaModel):
98
290
  def __init__(
99
291
  self,
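For orientation (not part of the diff): a construction sketch for the new AutoSpeechConfig models defined above; the wait times, repeat count, and message texts are illustrative values only.

    from alibabacloud_ice20201109 import models as ice20201109_models

    auto_speech = ice20201109_models.AIAgentConfigAutoSpeechConfig(
        llm_pending=ice20201109_models.AIAgentConfigAutoSpeechConfigLlmPending(
            wait_time=2,  # illustrative
            messages=[
                ice20201109_models.AIAgentConfigAutoSpeechConfigLlmPendingMessages(
                    probability=1.0, text='One moment, please.'),
            ],
        ),
        user_idle=ice20201109_models.AIAgentConfigAutoSpeechConfigUserIdle(
            wait_time=10,   # illustrative
            max_repeats=2,  # illustrative
            messages=[
                ice20201109_models.AIAgentConfigAutoSpeechConfigUserIdleMessages(
                    probability=1.0, text='Are you still there?'),
            ],
        ),
    )
    agent_config = ice20201109_models.AIAgentConfig(auto_speech_config=auto_speech)
    print(agent_config.to_map())  # nests everything under the AutoSpeechConfig key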
@@ -225,6 +417,7 @@ class AIAgentConfigLlmConfig(TeaModel):
225
417
  self,
226
418
  bailian_app_params: str = None,
227
419
  function_map: List[AIAgentConfigLlmConfigFunctionMap] = None,
420
+ history_sync_with_tts: bool = None,
228
421
  llm_complete_reply: bool = None,
229
422
  llm_history: List[AIAgentConfigLlmConfigLlmHistory] = None,
230
423
  llm_history_limit: int = None,
@@ -235,6 +428,7 @@ class AIAgentConfigLlmConfig(TeaModel):
235
428
  ):
236
429
  self.bailian_app_params = bailian_app_params
237
430
  self.function_map = function_map
431
+ self.history_sync_with_tts = history_sync_with_tts
238
432
  self.llm_complete_reply = llm_complete_reply
239
433
  self.llm_history = llm_history
240
434
  self.llm_history_limit = llm_history_limit
@@ -265,6 +459,8 @@ class AIAgentConfigLlmConfig(TeaModel):
265
459
  if self.function_map is not None:
266
460
  for k in self.function_map:
267
461
  result['FunctionMap'].append(k.to_map() if k else None)
462
+ if self.history_sync_with_tts is not None:
463
+ result['HistorySyncWithTTS'] = self.history_sync_with_tts
268
464
  if self.llm_complete_reply is not None:
269
465
  result['LlmCompleteReply'] = self.llm_complete_reply
270
466
  result['LlmHistory'] = []
@@ -292,6 +488,8 @@ class AIAgentConfigLlmConfig(TeaModel):
292
488
  for k in m.get('FunctionMap'):
293
489
  temp_model = AIAgentConfigLlmConfigFunctionMap()
294
490
  self.function_map.append(temp_model.from_map(k))
491
+ if m.get('HistorySyncWithTTS') is not None:
492
+ self.history_sync_with_tts = m.get('HistorySyncWithTTS')
295
493
  if m.get('LlmCompleteReply') is not None:
296
494
  self.llm_complete_reply = m.get('LlmCompleteReply')
297
495
  self.llm_history = []
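A minimal round-trip sketch (not part of the diff) for the new HistorySyncWithTTS flag, using only the to_map()/from_map() methods shown above.

    from alibabacloud_ice20201109 import models as ice20201109_models

    llm_config = ice20201109_models.AIAgentConfigLlmConfig(history_sync_with_tts=True)
    assert llm_config.to_map()['HistorySyncWithTTS'] is True

    restored = ice20201109_models.AIAgentConfigLlmConfig().from_map({'HistorySyncWithTTS': False})
    assert restored.history_sync_with_tts is False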
@@ -747,6 +945,7 @@ class AIAgentConfig(TeaModel):
747
945
  self,
748
946
  ambient_sound_config: AIAgentConfigAmbientSoundConfig = None,
749
947
  asr_config: AIAgentConfigAsrConfig = None,
948
+ auto_speech_config: AIAgentConfigAutoSpeechConfig = None,
750
949
  avatar_config: AIAgentConfigAvatarConfig = None,
751
950
  avatar_url: str = None,
752
951
  avatar_url_type: str = None,
@@ -770,6 +969,7 @@ class AIAgentConfig(TeaModel):
770
969
  ):
771
970
  self.ambient_sound_config = ambient_sound_config
772
971
  self.asr_config = asr_config
972
+ self.auto_speech_config = auto_speech_config
773
973
  self.avatar_config = avatar_config
774
974
  self.avatar_url = avatar_url
775
975
  self.avatar_url_type = avatar_url_type
@@ -796,6 +996,8 @@ class AIAgentConfig(TeaModel):
796
996
  self.ambient_sound_config.validate()
797
997
  if self.asr_config:
798
998
  self.asr_config.validate()
999
+ if self.auto_speech_config:
1000
+ self.auto_speech_config.validate()
799
1001
  if self.avatar_config:
800
1002
  self.avatar_config.validate()
801
1003
  if self.interrupt_config:
@@ -821,6 +1023,8 @@ class AIAgentConfig(TeaModel):
821
1023
  result['AmbientSoundConfig'] = self.ambient_sound_config.to_map()
822
1024
  if self.asr_config is not None:
823
1025
  result['AsrConfig'] = self.asr_config.to_map()
1026
+ if self.auto_speech_config is not None:
1027
+ result['AutoSpeechConfig'] = self.auto_speech_config.to_map()
824
1028
  if self.avatar_config is not None:
825
1029
  result['AvatarConfig'] = self.avatar_config.to_map()
826
1030
  if self.avatar_url is not None:
@@ -871,6 +1075,9 @@ class AIAgentConfig(TeaModel):
871
1075
  if m.get('AsrConfig') is not None:
872
1076
  temp_model = AIAgentConfigAsrConfig()
873
1077
  self.asr_config = temp_model.from_map(m['AsrConfig'])
1078
+ if m.get('AutoSpeechConfig') is not None:
1079
+ temp_model = AIAgentConfigAutoSpeechConfig()
1080
+ self.auto_speech_config = temp_model.from_map(m['AutoSpeechConfig'])
874
1081
  if m.get('AvatarConfig') is not None:
875
1082
  temp_model = AIAgentConfigAvatarConfig()
876
1083
  self.avatar_config = temp_model.from_map(m['AvatarConfig'])
@@ -1011,6 +1218,198 @@ class AIAgentOutboundCallConfigAsrConfig(TeaModel):
1011
1218
  return self
1012
1219
 
1013
1220
 
1221
+ class AIAgentOutboundCallConfigAutoSpeechConfigLlmPendingMessages(TeaModel):
1222
+ def __init__(
1223
+ self,
1224
+ probability: float = None,
1225
+ text: str = None,
1226
+ ):
1227
+ self.probability = probability
1228
+ self.text = text
1229
+
1230
+ def validate(self):
1231
+ pass
1232
+
1233
+ def to_map(self):
1234
+ _map = super().to_map()
1235
+ if _map is not None:
1236
+ return _map
1237
+
1238
+ result = dict()
1239
+ if self.probability is not None:
1240
+ result['Probability'] = self.probability
1241
+ if self.text is not None:
1242
+ result['Text'] = self.text
1243
+ return result
1244
+
1245
+ def from_map(self, m: dict = None):
1246
+ m = m or dict()
1247
+ if m.get('Probability') is not None:
1248
+ self.probability = m.get('Probability')
1249
+ if m.get('Text') is not None:
1250
+ self.text = m.get('Text')
1251
+ return self
1252
+
1253
+
1254
+ class AIAgentOutboundCallConfigAutoSpeechConfigLlmPending(TeaModel):
1255
+ def __init__(
1256
+ self,
1257
+ messages: List[AIAgentOutboundCallConfigAutoSpeechConfigLlmPendingMessages] = None,
1258
+ wait_time: int = None,
1259
+ ):
1260
+ self.messages = messages
1261
+ self.wait_time = wait_time
1262
+
1263
+ def validate(self):
1264
+ if self.messages:
1265
+ for k in self.messages:
1266
+ if k:
1267
+ k.validate()
1268
+
1269
+ def to_map(self):
1270
+ _map = super().to_map()
1271
+ if _map is not None:
1272
+ return _map
1273
+
1274
+ result = dict()
1275
+ result['Messages'] = []
1276
+ if self.messages is not None:
1277
+ for k in self.messages:
1278
+ result['Messages'].append(k.to_map() if k else None)
1279
+ if self.wait_time is not None:
1280
+ result['WaitTime'] = self.wait_time
1281
+ return result
1282
+
1283
+ def from_map(self, m: dict = None):
1284
+ m = m or dict()
1285
+ self.messages = []
1286
+ if m.get('Messages') is not None:
1287
+ for k in m.get('Messages'):
1288
+ temp_model = AIAgentOutboundCallConfigAutoSpeechConfigLlmPendingMessages()
1289
+ self.messages.append(temp_model.from_map(k))
1290
+ if m.get('WaitTime') is not None:
1291
+ self.wait_time = m.get('WaitTime')
1292
+ return self
1293
+
1294
+
1295
+ class AIAgentOutboundCallConfigAutoSpeechConfigUserIdleMessages(TeaModel):
1296
+ def __init__(
1297
+ self,
1298
+ probability: float = None,
1299
+ text: str = None,
1300
+ ):
1301
+ self.probability = probability
1302
+ self.text = text
1303
+
1304
+ def validate(self):
1305
+ pass
1306
+
1307
+ def to_map(self):
1308
+ _map = super().to_map()
1309
+ if _map is not None:
1310
+ return _map
1311
+
1312
+ result = dict()
1313
+ if self.probability is not None:
1314
+ result['Probability'] = self.probability
1315
+ if self.text is not None:
1316
+ result['Text'] = self.text
1317
+ return result
1318
+
1319
+ def from_map(self, m: dict = None):
1320
+ m = m or dict()
1321
+ if m.get('Probability') is not None:
1322
+ self.probability = m.get('Probability')
1323
+ if m.get('Text') is not None:
1324
+ self.text = m.get('Text')
1325
+ return self
1326
+
1327
+
1328
+ class AIAgentOutboundCallConfigAutoSpeechConfigUserIdle(TeaModel):
1329
+ def __init__(
1330
+ self,
1331
+ max_repeats: int = None,
1332
+ messages: List[AIAgentOutboundCallConfigAutoSpeechConfigUserIdleMessages] = None,
1333
+ wait_time: int = None,
1334
+ ):
1335
+ self.max_repeats = max_repeats
1336
+ self.messages = messages
1337
+ self.wait_time = wait_time
1338
+
1339
+ def validate(self):
1340
+ if self.messages:
1341
+ for k in self.messages:
1342
+ if k:
1343
+ k.validate()
1344
+
1345
+ def to_map(self):
1346
+ _map = super().to_map()
1347
+ if _map is not None:
1348
+ return _map
1349
+
1350
+ result = dict()
1351
+ if self.max_repeats is not None:
1352
+ result['MaxRepeats'] = self.max_repeats
1353
+ result['Messages'] = []
1354
+ if self.messages is not None:
1355
+ for k in self.messages:
1356
+ result['Messages'].append(k.to_map() if k else None)
1357
+ if self.wait_time is not None:
1358
+ result['WaitTime'] = self.wait_time
1359
+ return result
1360
+
1361
+ def from_map(self, m: dict = None):
1362
+ m = m or dict()
1363
+ if m.get('MaxRepeats') is not None:
1364
+ self.max_repeats = m.get('MaxRepeats')
1365
+ self.messages = []
1366
+ if m.get('Messages') is not None:
1367
+ for k in m.get('Messages'):
1368
+ temp_model = AIAgentOutboundCallConfigAutoSpeechConfigUserIdleMessages()
1369
+ self.messages.append(temp_model.from_map(k))
1370
+ if m.get('WaitTime') is not None:
1371
+ self.wait_time = m.get('WaitTime')
1372
+ return self
1373
+
1374
+
1375
+ class AIAgentOutboundCallConfigAutoSpeechConfig(TeaModel):
1376
+ def __init__(
1377
+ self,
1378
+ llm_pending: AIAgentOutboundCallConfigAutoSpeechConfigLlmPending = None,
1379
+ user_idle: AIAgentOutboundCallConfigAutoSpeechConfigUserIdle = None,
1380
+ ):
1381
+ self.llm_pending = llm_pending
1382
+ self.user_idle = user_idle
1383
+
1384
+ def validate(self):
1385
+ if self.llm_pending:
1386
+ self.llm_pending.validate()
1387
+ if self.user_idle:
1388
+ self.user_idle.validate()
1389
+
1390
+ def to_map(self):
1391
+ _map = super().to_map()
1392
+ if _map is not None:
1393
+ return _map
1394
+
1395
+ result = dict()
1396
+ if self.llm_pending is not None:
1397
+ result['LlmPending'] = self.llm_pending.to_map()
1398
+ if self.user_idle is not None:
1399
+ result['UserIdle'] = self.user_idle.to_map()
1400
+ return result
1401
+
1402
+ def from_map(self, m: dict = None):
1403
+ m = m or dict()
1404
+ if m.get('LlmPending') is not None:
1405
+ temp_model = AIAgentOutboundCallConfigAutoSpeechConfigLlmPending()
1406
+ self.llm_pending = temp_model.from_map(m['LlmPending'])
1407
+ if m.get('UserIdle') is not None:
1408
+ temp_model = AIAgentOutboundCallConfigAutoSpeechConfigUserIdle()
1409
+ self.user_idle = temp_model.from_map(m['UserIdle'])
1410
+ return self
1411
+
1412
+
1014
1413
  class AIAgentOutboundCallConfigInterruptConfig(TeaModel):
1015
1414
  def __init__(
1016
1415
  self,
@@ -1115,6 +1514,7 @@ class AIAgentOutboundCallConfigLlmConfig(TeaModel):
1115
1514
  self,
1116
1515
  bailian_app_params: str = None,
1117
1516
  function_map: List[AIAgentOutboundCallConfigLlmConfigFunctionMap] = None,
1517
+ history_sync_with_tts: bool = None,
1118
1518
  llm_complete_reply: bool = None,
1119
1519
  llm_history: List[AIAgentOutboundCallConfigLlmConfigLlmHistory] = None,
1120
1520
  llm_history_limit: int = None,
@@ -1125,6 +1525,7 @@ class AIAgentOutboundCallConfigLlmConfig(TeaModel):
1125
1525
  ):
1126
1526
  self.bailian_app_params = bailian_app_params
1127
1527
  self.function_map = function_map
1528
+ self.history_sync_with_tts = history_sync_with_tts
1128
1529
  self.llm_complete_reply = llm_complete_reply
1129
1530
  self.llm_history = llm_history
1130
1531
  self.llm_history_limit = llm_history_limit
@@ -1155,6 +1556,8 @@ class AIAgentOutboundCallConfigLlmConfig(TeaModel):
1155
1556
  if self.function_map is not None:
1156
1557
  for k in self.function_map:
1157
1558
  result['FunctionMap'].append(k.to_map() if k else None)
1559
+ if self.history_sync_with_tts is not None:
1560
+ result['HistorySyncWithTTS'] = self.history_sync_with_tts
1158
1561
  if self.llm_complete_reply is not None:
1159
1562
  result['LlmCompleteReply'] = self.llm_complete_reply
1160
1563
  result['LlmHistory'] = []
@@ -1182,6 +1585,8 @@ class AIAgentOutboundCallConfigLlmConfig(TeaModel):
1182
1585
  for k in m.get('FunctionMap'):
1183
1586
  temp_model = AIAgentOutboundCallConfigLlmConfigFunctionMap()
1184
1587
  self.function_map.append(temp_model.from_map(k))
1588
+ if m.get('HistorySyncWithTTS') is not None:
1589
+ self.history_sync_with_tts = m.get('HistorySyncWithTTS')
1185
1590
  if m.get('LlmCompleteReply') is not None:
1186
1591
  self.llm_complete_reply = m.get('LlmCompleteReply')
1187
1592
  self.llm_history = []
@@ -1356,23 +1761,27 @@ class AIAgentOutboundCallConfig(TeaModel):
1356
1761
  self,
1357
1762
  ambient_sound_config: AIAgentOutboundCallConfigAmbientSoundConfig = None,
1358
1763
  asr_config: AIAgentOutboundCallConfigAsrConfig = None,
1764
+ auto_speech_config: AIAgentOutboundCallConfigAutoSpeechConfig = None,
1359
1765
  enable_intelligent_segment: bool = None,
1360
1766
  experimental_config: str = None,
1361
1767
  greeting: str = None,
1362
1768
  greeting_delay: int = None,
1363
1769
  interrupt_config: AIAgentOutboundCallConfigInterruptConfig = None,
1364
1770
  llm_config: AIAgentOutboundCallConfigLlmConfig = None,
1771
+ max_idle_time: int = None,
1365
1772
  tts_config: AIAgentOutboundCallConfigTtsConfig = None,
1366
1773
  turn_detection_config: AIAgentOutboundCallConfigTurnDetectionConfig = None,
1367
1774
  ):
1368
1775
  self.ambient_sound_config = ambient_sound_config
1369
1776
  self.asr_config = asr_config
1777
+ self.auto_speech_config = auto_speech_config
1370
1778
  self.enable_intelligent_segment = enable_intelligent_segment
1371
1779
  self.experimental_config = experimental_config
1372
1780
  self.greeting = greeting
1373
1781
  self.greeting_delay = greeting_delay
1374
1782
  self.interrupt_config = interrupt_config
1375
1783
  self.llm_config = llm_config
1784
+ self.max_idle_time = max_idle_time
1376
1785
  self.tts_config = tts_config
1377
1786
  self.turn_detection_config = turn_detection_config
1378
1787
 
@@ -1381,6 +1790,8 @@ class AIAgentOutboundCallConfig(TeaModel):
1381
1790
  self.ambient_sound_config.validate()
1382
1791
  if self.asr_config:
1383
1792
  self.asr_config.validate()
1793
+ if self.auto_speech_config:
1794
+ self.auto_speech_config.validate()
1384
1795
  if self.interrupt_config:
1385
1796
  self.interrupt_config.validate()
1386
1797
  if self.llm_config:
@@ -1400,6 +1811,8 @@ class AIAgentOutboundCallConfig(TeaModel):
1400
1811
  result['AmbientSoundConfig'] = self.ambient_sound_config.to_map()
1401
1812
  if self.asr_config is not None:
1402
1813
  result['AsrConfig'] = self.asr_config.to_map()
1814
+ if self.auto_speech_config is not None:
1815
+ result['AutoSpeechConfig'] = self.auto_speech_config.to_map()
1403
1816
  if self.enable_intelligent_segment is not None:
1404
1817
  result['EnableIntelligentSegment'] = self.enable_intelligent_segment
1405
1818
  if self.experimental_config is not None:
@@ -1412,6 +1825,8 @@ class AIAgentOutboundCallConfig(TeaModel):
1412
1825
  result['InterruptConfig'] = self.interrupt_config.to_map()
1413
1826
  if self.llm_config is not None:
1414
1827
  result['LlmConfig'] = self.llm_config.to_map()
1828
+ if self.max_idle_time is not None:
1829
+ result['MaxIdleTime'] = self.max_idle_time
1415
1830
  if self.tts_config is not None:
1416
1831
  result['TtsConfig'] = self.tts_config.to_map()
1417
1832
  if self.turn_detection_config is not None:
@@ -1426,6 +1841,9 @@ class AIAgentOutboundCallConfig(TeaModel):
1426
1841
  if m.get('AsrConfig') is not None:
1427
1842
  temp_model = AIAgentOutboundCallConfigAsrConfig()
1428
1843
  self.asr_config = temp_model.from_map(m['AsrConfig'])
1844
+ if m.get('AutoSpeechConfig') is not None:
1845
+ temp_model = AIAgentOutboundCallConfigAutoSpeechConfig()
1846
+ self.auto_speech_config = temp_model.from_map(m['AutoSpeechConfig'])
1429
1847
  if m.get('EnableIntelligentSegment') is not None:
1430
1848
  self.enable_intelligent_segment = m.get('EnableIntelligentSegment')
1431
1849
  if m.get('ExperimentalConfig') is not None:
@@ -1440,6 +1858,8 @@ class AIAgentOutboundCallConfig(TeaModel):
1440
1858
  if m.get('LlmConfig') is not None:
1441
1859
  temp_model = AIAgentOutboundCallConfigLlmConfig()
1442
1860
  self.llm_config = temp_model.from_map(m['LlmConfig'])
1861
+ if m.get('MaxIdleTime') is not None:
1862
+ self.max_idle_time = m.get('MaxIdleTime')
1443
1863
  if m.get('TtsConfig') is not None:
1444
1864
  temp_model = AIAgentOutboundCallConfigTtsConfig()
1445
1865
  self.tts_config = temp_model.from_map(m['TtsConfig'])
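A sketch (not part of the diff) of the two additions to AIAgentOutboundCallConfig, MaxIdleTime and the outbound-call AutoSpeechConfig variant; the timeout and message values are illustrative.

    from alibabacloud_ice20201109 import models as ice20201109_models

    outbound = ice20201109_models.AIAgentOutboundCallConfig(
        max_idle_time=60,  # illustrative idle timeout
        auto_speech_config=ice20201109_models.AIAgentOutboundCallConfigAutoSpeechConfig(
            user_idle=ice20201109_models.AIAgentOutboundCallConfigAutoSpeechConfigUserIdle(
                wait_time=10, max_repeats=1,
                messages=[
                    ice20201109_models.AIAgentOutboundCallConfigAutoSpeechConfigUserIdleMessages(
                        probability=1.0, text='Hello, are you still there?'),
                ],
            ),
        ),
    )
    print(outbound.to_map())  # includes 'MaxIdleTime' and 'AutoSpeechConfig'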
@@ -4101,13 +4521,13 @@ class MediaConvertJobConfigInputs(TeaModel):
4101
4521
  class MediaConvertJobConfigOutputGroupsGroupConfigManifestExtendExcludes(TeaModel):
4102
4522
  def __init__(
4103
4523
  self,
4524
+ language: str = None,
4104
4525
  name: str = None,
4105
4526
  type: str = None,
4106
- language: str = None,
4107
4527
  ):
4528
+ self.language = language
4108
4529
  self.name = name
4109
4530
  self.type = type
4110
- self.language = language
4111
4531
 
4112
4532
  def validate(self):
4113
4533
  pass
@@ -4118,22 +4538,22 @@ class MediaConvertJobConfigOutputGroupsGroupConfigManifestExtendExcludes(TeaMode
4118
4538
  return _map
4119
4539
 
4120
4540
  result = dict()
4541
+ if self.language is not None:
4542
+ result['Language'] = self.language
4121
4543
  if self.name is not None:
4122
4544
  result['Name'] = self.name
4123
4545
  if self.type is not None:
4124
4546
  result['Type'] = self.type
4125
- if self.language is not None:
4126
- result['language'] = self.language
4127
4547
  return result
4128
4548
 
4129
4549
  def from_map(self, m: dict = None):
4130
4550
  m = m or dict()
4551
+ if m.get('Language') is not None:
4552
+ self.language = m.get('Language')
4131
4553
  if m.get('Name') is not None:
4132
4554
  self.name = m.get('Name')
4133
4555
  if m.get('Type') is not None:
4134
4556
  self.type = m.get('Type')
4135
- if m.get('language') is not None:
4136
- self.language = m.get('language')
4137
4557
  return self
4138
4558
 
4139
4559
 
@@ -7015,8 +7435,11 @@ class ActiveAiRtcLicenseRequest(TeaModel):
7015
7435
  device_id: str = None,
7016
7436
  license_item_id: str = None,
7017
7437
  ):
7438
+ # The authorization code.
7018
7439
  self.auth_code = auth_code
7440
+ # The device ID.
7019
7441
  self.device_id = device_id
7442
+ # The batch ID.
7020
7443
  self.license_item_id = license_item_id
7021
7444
 
7022
7445
  def validate(self):
@@ -7057,11 +7480,17 @@ class ActiveAiRtcLicenseResponseBody(TeaModel):
7057
7480
  request_id: str = None,
7058
7481
  success: bool = None,
7059
7482
  ):
7483
+ # The error code returned.
7060
7484
  self.code = code
7485
+ # The HTTP status code.
7061
7486
  self.http_status_code = http_status_code
7487
+ # The license information.
7062
7488
  self.license = license
7489
+ # The error message.
7063
7490
  self.message = message
7491
+ # The request ID.
7064
7492
  self.request_id = request_id
7493
+ # Indicates whether the call was successful.
7065
7494
  self.success = success
7066
7495
 
7067
7496
  def validate(self):
@@ -26268,11 +26697,13 @@ class ForwardAIAgentCallRequest(TeaModel):
26268
26697
  def __init__(
26269
26698
  self,
26270
26699
  called_number: str = None,
26700
+ caller_number: str = None,
26271
26701
  error_prompt: str = None,
26272
26702
  instance_id: str = None,
26273
26703
  transfer_prompt: str = None,
26274
26704
  ):
26275
26705
  self.called_number = called_number
26706
+ self.caller_number = caller_number
26276
26707
  self.error_prompt = error_prompt
26277
26708
  self.instance_id = instance_id
26278
26709
  self.transfer_prompt = transfer_prompt
@@ -26288,6 +26719,8 @@ class ForwardAIAgentCallRequest(TeaModel):
26288
26719
  result = dict()
26289
26720
  if self.called_number is not None:
26290
26721
  result['CalledNumber'] = self.called_number
26722
+ if self.caller_number is not None:
26723
+ result['CallerNumber'] = self.caller_number
26291
26724
  if self.error_prompt is not None:
26292
26725
  result['ErrorPrompt'] = self.error_prompt
26293
26726
  if self.instance_id is not None:
@@ -26300,6 +26733,8 @@ class ForwardAIAgentCallRequest(TeaModel):
26300
26733
  m = m or dict()
26301
26734
  if m.get('CalledNumber') is not None:
26302
26735
  self.called_number = m.get('CalledNumber')
26736
+ if m.get('CallerNumber') is not None:
26737
+ self.caller_number = m.get('CallerNumber')
26303
26738
  if m.get('ErrorPrompt') is not None:
26304
26739
  self.error_prompt = m.get('ErrorPrompt')
26305
26740
  if m.get('InstanceId') is not None:
@@ -27542,11 +27977,23 @@ class GetAiRtcAuthCodeListRequest(TeaModel):
27542
27977
  status: int = None,
27543
27978
  type: int = None,
27544
27979
  ):
27980
+ # The ID of the batch.
27545
27981
  self.license_item_id = license_item_id
27982
+ # Specifies whether to include the total count of records in the response. Defaults to `true`.
27546
27983
  self.need_total_count = need_total_count
27984
+ # The page number.
27547
27985
  self.page_no = page_no
27986
+ # The number of entries per page.
27548
27987
  self.page_size = page_size
27988
+ # The status of the authorization code. Valid values:
27989
+ #
27990
+ # * `1`: Activated
27991
+ # * `2`: Inactive
27549
27992
  self.status = status
27993
+ # The type of license. Valid values:
27994
+ #
27995
+ # * `1`: Audio call
27996
+ # * `2`: Vision call
27550
27997
  self.type = type
27551
27998
 
27552
27999
  def validate(self):
@@ -27600,12 +28047,19 @@ class GetAiRtcAuthCodeListResponseBody(TeaModel):
27600
28047
  success: bool = None,
27601
28048
  total_count: int = None,
27602
28049
  ):
28050
+ # An array of AiRtcAuthCodeDTO objects, each representing an authorization code.
27603
28051
  self.auth_code_list = auth_code_list
28052
+ # The error code.
27604
28053
  self.code = code
28054
+ # The HTTP status code.
27605
28055
  self.http_status_code = http_status_code
28056
+ # The error message.
27606
28057
  self.message = message
28058
+ # The request ID.
27607
28059
  self.request_id = request_id
28060
+ # Indicates whether the call was successful.
27608
28061
  self.success = success
28062
+ # The total number of entries returned.
27609
28063
  self.total_count = total_count
27610
28064
 
27611
28065
  def validate(self):
@@ -27711,11 +28165,23 @@ class GetAiRtcLicenseInfoListRequest(TeaModel):
27711
28165
  status: int = None,
27712
28166
  type: int = None,
27713
28167
  ):
28168
+ # The ID of the batch.
27714
28169
  self.license_item_id = license_item_id
28170
+ # Specifies whether to include the total count of records in the response. Defaults to `true`.
27715
28171
  self.need_total_count = need_total_count
28172
+ # The page number.
27716
28173
  self.page_no = page_no
28174
+ # The number of entries per page.
27717
28175
  self.page_size = page_size
28176
+ # The status of the batch. Valid values:
28177
+ #
28178
+ # * `1`: Active
28179
+ # * `2`: Expired
27718
28180
  self.status = status
28181
+ # The type of license. Valid values:
28182
+ #
28183
+ # * `1`: Audio call
28184
+ # * `2`: Vision call
27719
28185
  self.type = type
27720
28186
 
27721
28187
  def validate(self):
@@ -27769,12 +28235,19 @@ class GetAiRtcLicenseInfoListResponseBody(TeaModel):
27769
28235
  success: bool = None,
27770
28236
  total_count: int = None,
27771
28237
  ):
28238
+ # The error code returned.
27772
28239
  self.code = code
28240
+ # The HTTP status code.
27773
28241
  self.http_status_code = http_status_code
28242
+ # An array of AiRtcLicenseInfoDTO objects, each representing a license batch.
27774
28243
  self.license_info_list = license_info_list
28244
+ # The error message.
27775
28245
  self.message = message
28246
+ # The request ID.
27776
28247
  self.request_id = request_id
28248
+ # Indicates whether the call was successful.
27777
28249
  self.success = success
28250
+ # The total number of entries returned.
27778
28251
  self.total_count = total_count
27779
28252
 
27780
28253
  def validate(self):
@@ -44284,18 +44757,27 @@ class GetTemplateParamsResponseBodyParamList(TeaModel):
44284
44757
  self,
44285
44758
  content: str = None,
44286
44759
  cover_url: str = None,
44760
+ height: int = None,
44287
44761
  key: str = None,
44288
44762
  media_url: str = None,
44763
+ timeline_in: float = None,
44764
+ timeline_out: float = None,
44289
44765
  type: str = None,
44766
+ width: int = None,
44767
+ x: int = None,
44768
+ y: int = None,
44290
44769
  ):
44291
44770
  # The original subtitle content.
44292
44771
  self.content = content
44293
44772
  # The thumbnail URL of the original material.
44294
44773
  self.cover_url = cover_url
44774
+ self.height = height
44295
44775
  # The parameter name.
44296
44776
  self.key = key
44297
44777
  # The URL of the original material.
44298
44778
  self.media_url = media_url
44779
+ self.timeline_in = timeline_in
44780
+ self.timeline_out = timeline_out
44299
44781
  # The material type.
44300
44782
  #
44301
44783
  # Valid values:
@@ -44304,6 +44786,9 @@ class GetTemplateParamsResponseBodyParamList(TeaModel):
44304
44786
  # * Text
44305
44787
  # * Image
44306
44788
  self.type = type
44789
+ self.width = width
44790
+ self.x = x
44791
+ self.y = y
44307
44792
 
44308
44793
  def validate(self):
44309
44794
  pass
@@ -44318,12 +44803,24 @@ class GetTemplateParamsResponseBodyParamList(TeaModel):
44318
44803
  result['Content'] = self.content
44319
44804
  if self.cover_url is not None:
44320
44805
  result['CoverUrl'] = self.cover_url
44806
+ if self.height is not None:
44807
+ result['Height'] = self.height
44321
44808
  if self.key is not None:
44322
44809
  result['Key'] = self.key
44323
44810
  if self.media_url is not None:
44324
44811
  result['MediaUrl'] = self.media_url
44812
+ if self.timeline_in is not None:
44813
+ result['TimelineIn'] = self.timeline_in
44814
+ if self.timeline_out is not None:
44815
+ result['TimelineOut'] = self.timeline_out
44325
44816
  if self.type is not None:
44326
44817
  result['Type'] = self.type
44818
+ if self.width is not None:
44819
+ result['Width'] = self.width
44820
+ if self.x is not None:
44821
+ result['X'] = self.x
44822
+ if self.y is not None:
44823
+ result['Y'] = self.y
44327
44824
  return result
44328
44825
 
44329
44826
  def from_map(self, m: dict = None):
@@ -44332,12 +44829,24 @@ class GetTemplateParamsResponseBodyParamList(TeaModel):
44332
44829
  self.content = m.get('Content')
44333
44830
  if m.get('CoverUrl') is not None:
44334
44831
  self.cover_url = m.get('CoverUrl')
44832
+ if m.get('Height') is not None:
44833
+ self.height = m.get('Height')
44335
44834
  if m.get('Key') is not None:
44336
44835
  self.key = m.get('Key')
44337
44836
  if m.get('MediaUrl') is not None:
44338
44837
  self.media_url = m.get('MediaUrl')
44838
+ if m.get('TimelineIn') is not None:
44839
+ self.timeline_in = m.get('TimelineIn')
44840
+ if m.get('TimelineOut') is not None:
44841
+ self.timeline_out = m.get('TimelineOut')
44339
44842
  if m.get('Type') is not None:
44340
44843
  self.type = m.get('Type')
44844
+ if m.get('Width') is not None:
44845
+ self.width = m.get('Width')
44846
+ if m.get('X') is not None:
44847
+ self.x = m.get('X')
44848
+ if m.get('Y') is not None:
44849
+ self.y = m.get('Y')
44341
44850
  return self
44342
44851
 
44343
44852
 
@@ -90314,10 +90823,15 @@ class SubmitSceneBatchEditingJobRequest(TeaModel):
90314
90823
  project_ids: str = None,
90315
90824
  user_data: str = None,
90316
90825
  ):
90826
+ # The output configuration. The structure is the same as the [OutputConfig](https://help.aliyun.com/zh/ims/use-cases/create-highlight-videos?spm=a2c4g.11186623.help-menu-193643.d_3_2_0_3.3af86997GreVu9\\&scm=20140722.H_2863940._.OR_help-T_cn~zh-V_1#4111a373d0xbz) for batch video generation, except that Count and GeneratePreviewOnly are not supported.
90827
+ #
90317
90828
  # This parameter is required.
90318
90829
  self.output_config = output_config
90830
+ # A comma-separated list of editing project IDs. The video is rendered based on the timeline from each project.
90831
+ #
90319
90832
  # This parameter is required.
90320
90833
  self.project_ids = project_ids
90834
+ # Custom user data, including callback configurations. For more information, see [UserData](~~357745#section-urj-v3f-0s1~~).
90321
90835
  self.user_data = user_data
90322
90836
 
90323
90837
  def validate(self):
@@ -90354,7 +90868,9 @@ class SubmitSceneBatchEditingJobResponseBody(TeaModel):
90354
90868
  job_id: str = None,
90355
90869
  request_id: str = None,
90356
90870
  ):
90871
+ # The job ID.
90357
90872
  self.job_id = job_id
90873
+ # The request ID.
90358
90874
  self.request_id = request_id
90359
90875
 
90360
90876
  def validate(self):
@@ -90587,15 +91103,42 @@ class SubmitSceneTimelineOrganizationJobRequest(TeaModel):
90587
91103
  output_config: str = None,
90588
91104
  user_data: str = None,
90589
91105
  ):
91106
+ # The editing configuration. Its structure depends on the value of JobType.
91107
+ #
91108
+ # * When JobType is set to Smart_Mix_Timeline_Organize, see [Image-text matching](https://help.aliyun.com/zh/ims/use-cases/intelligent-graphic-matching-into-a-piece/?spm=a2c4g.11186623.help-menu-193643.d_3_2_0_1.7c3d6997qndkZj).
91109
+ # * When JobType is set to Screen_Media_Highlights_Timeline_Organize, see [Highlight mashup](https://help.aliyun.com/zh/ims/use-cases/create-highlight-videos?spm=a2c4g.11186623.help-menu-193643.d_3_2_0_3.84b5661bIcQULE).
90590
91110
  self.editing_config = editing_config
91111
+ # The input configuration. Its structure and required fields depend on the value of JobType.
91112
+ #
91113
+ # * When JobType is set to Smart_Mix_Timeline_Organize, see [Image-text matching](https://help.aliyun.com/zh/ims/use-cases/intelligent-graphic-matching-into-a-piece/?spm=a2c4g.11186623.help-menu-193643.d_3_2_0_1.7c3d6997qndkZj).
91114
+ # * When JobType is set to Screen_Media_Highlights_Timeline_Organize, see [Highlight mashup](https://help.aliyun.com/zh/ims/use-cases/create-highlight-videos?spm=a2c4g.11186623.help-menu-193643.d_3_2_0_3.84b5661bIcQULE).
91115
+ #
90591
91116
  # This parameter is required.
90592
91117
  self.input_config = input_config
91118
+ # The job type. Valid values:
91119
+ #
91120
+ # * Smart_Mix_Timeline_Organize: Image-text matching.
91121
+ # * Screen_Media_Highlights_Timeline_Organize: Highlight mashup.
91122
+ #
91123
+ # Differences:
91124
+ #
91125
+ # * Image-text matching: Arranges a timeline based on the results of matching a voiceover script to media assets. Ideal for bulk marketing videos and general-purpose montages.
91126
+ # * Highlight mashup: Arranges a timeline based on the results of highlight clip selection. Ideal for creating action-packed highlight reels from short-form dramas.
91127
+ #
90593
91128
  # This parameter is required.
90594
91129
  self.job_type = job_type
91130
+ # The media selection results from a previously run SubmitSceneMediaSelectionJob. You can retrieve this result by calling GetBatchMediaProducingJob.
91131
+ #
90595
91132
  # This parameter is required.
90596
91133
  self.media_select_result = media_select_result
91134
+ # The output configuration. Its structure and required fields depend on the value of JobType.
91135
+ #
91136
+ # * When JobType is set to Smart_Mix_Timeline_Organize, see [Image-text matching](https://help.aliyun.com/zh/ims/use-cases/intelligent-graphic-matching-into-a-piece/?spm=a2c4g.11186623.help-menu-193643.d_3_2_0_1.7c3d6997qndkZj).
91137
+ # * When JobType is set to Screen_Media_Highlights_Timeline_Organize, see [Highlight mashup](https://help.aliyun.com/zh/ims/use-cases/create-highlight-videos?spm=a2c4g.11186623.help-menu-193643.d_3_2_0_3.84b5661bIcQULE).
91138
+ #
90597
91139
  # This parameter is required.
90598
91140
  self.output_config = output_config
91141
+ # The user-defined data, including the business and callback configurations. For more information, see [UserData](~~357745#section-urj-v3f-0s1~~).
90599
91142
  self.user_data = user_data
90600
91143
 
90601
91144
  def validate(self):
@@ -90644,7 +91187,9 @@ class SubmitSceneTimelineOrganizationJobResponseBody(TeaModel):
90644
91187
  job_id: str = None,
90645
91188
  request_id: str = None,
90646
91189
  ):
91190
+ # The job ID.
90647
91191
  self.job_id = job_id
91192
+ # The request ID.
90648
91193
  self.request_id = request_id
90649
91194
 
90650
91195
  def validate(self):
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: alibabacloud-ice20201109
- Version: 6.8.3
+ Version: 6.8.5
  Summary: Alibaba Cloud ICE (20201109) SDK Library for Python
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
  Author: Alibaba Cloud SDK
@@ -24,7 +24,7 @@ from setuptools import setup, find_packages
  """
  setup module for alibabacloud_ice20201109.
 
- Created on 30/11/2025
+ Created on 05/12/2025
 
  @author: Alibaba Cloud SDK
  """
@@ -1 +0,0 @@
- __version__ = '6.8.3'