alibabacloud-quanmiaolightapp20240801 2.7.2__tar.gz → 2.8.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/ChangeLog.md +3 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/PKG-INFO +1 -1
- alibabacloud_quanmiaolightapp20240801-2.8.0/alibabacloud_quanmiaolightapp20240801/__init__.py +1 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801/client.py +244 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801/models.py +616 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/PKG-INFO +1 -1
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/requires.txt +1 -1
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/setup.py +2 -2
- alibabacloud_quanmiaolightapp20240801-2.7.2/alibabacloud_quanmiaolightapp20240801/__init__.py +0 -1
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/MANIFEST.in +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/README-CN.md +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/README.md +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/SOURCES.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/dependency_links.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/top_level.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/setup.cfg +0 -0
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: alibabacloud_quanmiaolightapp20240801
-Version: 2.7.2
+Version: 2.8.0
 Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
 Home-page: https://github.com/aliyun/alibabacloud-python-sdk
 Author: Alibaba Cloud SDK
alibabacloud_quanmiaolightapp20240801-2.8.0/alibabacloud_quanmiaolightapp20240801/__init__.py
ADDED
@@ -0,0 +1 @@
+__version__ = '2.8.0'
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801/client.py
RENAMED
@@ -927,6 +927,122 @@ class Client(OpenApiClient):
         headers = {}
         return await self.get_video_analysis_task_with_options_async(workspace_id, request, headers, runtime)

+    def list_analysis_tag_detail_by_task_id_with_options(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdResponse:
+        """
+        @summary Get the detail list of mining analysis results
+
+        @param request: ListAnalysisTagDetailByTaskIdRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: ListAnalysisTagDetailByTaskIdResponse
+        """
+        UtilClient.validate_model(request)
+        query = {}
+        if not UtilClient.is_unset(request.max_results):
+            query['maxResults'] = request.max_results
+        if not UtilClient.is_unset(request.next_token):
+            query['nextToken'] = request.next_token
+        if not UtilClient.is_unset(request.task_id):
+            query['taskId'] = request.task_id
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            query=OpenApiUtilClient.query(query)
+        )
+        params = open_api_models.Params(
+            action='ListAnalysisTagDetailByTaskId',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/listAnalysisTagDetailByTaskId',
+            method='GET',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='json',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def list_analysis_tag_detail_by_task_id_with_options_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdResponse:
+        """
+        @summary Get the detail list of mining analysis results
+
+        @param request: ListAnalysisTagDetailByTaskIdRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: ListAnalysisTagDetailByTaskIdResponse
+        """
+        UtilClient.validate_model(request)
+        query = {}
+        if not UtilClient.is_unset(request.max_results):
+            query['maxResults'] = request.max_results
+        if not UtilClient.is_unset(request.next_token):
+            query['nextToken'] = request.next_token
+        if not UtilClient.is_unset(request.task_id):
+            query['taskId'] = request.task_id
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            query=OpenApiUtilClient.query(query)
+        )
+        params = open_api_models.Params(
+            action='ListAnalysisTagDetailByTaskId',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/listAnalysisTagDetailByTaskId',
+            method='GET',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='json',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def list_analysis_tag_detail_by_task_id(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdRequest,
+    ) -> quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdResponse:
+        """
+        @summary Get the detail list of mining analysis results
+
+        @param request: ListAnalysisTagDetailByTaskIdRequest
+        @return: ListAnalysisTagDetailByTaskIdResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.list_analysis_tag_detail_by_task_id_with_options(workspace_id, request, headers, runtime)
+
+    async def list_analysis_tag_detail_by_task_id_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdRequest,
+    ) -> quan_miao_light_app_20240801_models.ListAnalysisTagDetailByTaskIdResponse:
+        """
+        @summary Get the detail list of mining analysis results
+
+        @param request: ListAnalysisTagDetailByTaskIdRequest
+        @return: ListAnalysisTagDetailByTaskIdResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.list_analysis_tag_detail_by_task_id_with_options_async(workspace_id, request, headers, runtime)
+
     def list_hot_topic_summaries_with_options(
         self,
         workspace_id: str,
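The hunk above adds a paginated ListAnalysisTagDetailByTaskId operation (sync and async) to the client. Below is a minimal usage sketch, not code from the package: it assumes the usual alibabacloud_tea_openapi Config/Client setup, a placeholder endpoint, and that an empty nextToken marks the last page — none of which is spelled out in this diff.

# Minimal sketch (not from the package): paging through analysis tag details for a task.
# Credentials, endpoint, workspace ID and task ID below are placeholders.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_quanmiaolightapp20240801.client import Client
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    endpoint='quanmiaolightapp.cn-beijing.aliyuncs.com',  # assumed endpoint, check the product docs
)
client = Client(config)

next_token = None
while True:
    request = quanmiao_models.ListAnalysisTagDetailByTaskIdRequest(
        task_id='<task-id>',   # required by the request model
        max_results=50,
        next_token=next_token,
    )
    response = client.list_analysis_tag_detail_by_task_id('<workspace-id>', request)
    for item in response.body.data or []:
        tags = [t.tag_name for t in (item.content_tags or [])]
        print(item.content, tags)
    next_token = response.body.next_token
    if not next_token:  # assumption: an empty nextToken means no more pages
        break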
@@ -2689,6 +2805,8 @@ class Client(OpenApiClient):
             request.generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.generate_options, 'generateOptions', 'json')
         if not UtilClient.is_unset(tmp_req.text_process_tasks):
             request.text_process_tasks_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.text_process_tasks, 'textProcessTasks', 'json')
+        if not UtilClient.is_unset(tmp_req.video_caption_info):
+            request.video_caption_info_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_caption_info, 'videoCaptionInfo', 'json')
         if not UtilClient.is_unset(tmp_req.video_roles):
             request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
         body = {}
@@ -2718,6 +2836,8 @@ class Client(OpenApiClient):
             body['taskId'] = request.task_id
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
             body['textProcessTasks'] = request.text_process_tasks_shrink
+        if not UtilClient.is_unset(request.video_caption_info_shrink):
+            body['videoCaptionInfo'] = request.video_caption_info_shrink
         if not UtilClient.is_unset(request.video_extra_info):
             body['videoExtraInfo'] = request.video_extra_info
         if not UtilClient.is_unset(request.video_model_custom_prompt_template):
@@ -2776,6 +2896,8 @@ class Client(OpenApiClient):
             request.generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.generate_options, 'generateOptions', 'json')
         if not UtilClient.is_unset(tmp_req.text_process_tasks):
             request.text_process_tasks_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.text_process_tasks, 'textProcessTasks', 'json')
+        if not UtilClient.is_unset(tmp_req.video_caption_info):
+            request.video_caption_info_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_caption_info, 'videoCaptionInfo', 'json')
         if not UtilClient.is_unset(tmp_req.video_roles):
             request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
         body = {}
@@ -2805,6 +2927,8 @@ class Client(OpenApiClient):
             body['taskId'] = request.task_id
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
             body['textProcessTasks'] = request.text_process_tasks_shrink
+        if not UtilClient.is_unset(request.video_caption_info_shrink):
+            body['videoCaptionInfo'] = request.video_caption_info_shrink
         if not UtilClient.is_unset(request.video_extra_info):
             body['videoExtraInfo'] = request.video_extra_info
         if not UtilClient.is_unset(request.video_model_custom_prompt_template):
@@ -3205,6 +3329,8 @@ class Client(OpenApiClient):
             request.generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.generate_options, 'generateOptions', 'json')
         if not UtilClient.is_unset(tmp_req.text_process_tasks):
             request.text_process_tasks_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.text_process_tasks, 'textProcessTasks', 'json')
+        if not UtilClient.is_unset(tmp_req.video_caption_info):
+            request.video_caption_info_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_caption_info, 'videoCaptionInfo', 'json')
         if not UtilClient.is_unset(tmp_req.video_roles):
             request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
         body = {}
@@ -3232,6 +3358,8 @@ class Client(OpenApiClient):
             body['splitInterval'] = request.split_interval
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
             body['textProcessTasks'] = request.text_process_tasks_shrink
+        if not UtilClient.is_unset(request.video_caption_info_shrink):
+            body['videoCaptionInfo'] = request.video_caption_info_shrink
         if not UtilClient.is_unset(request.video_extra_info):
             body['videoExtraInfo'] = request.video_extra_info
         if not UtilClient.is_unset(request.video_model_custom_prompt_template):
@@ -3290,6 +3418,8 @@ class Client(OpenApiClient):
             request.generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.generate_options, 'generateOptions', 'json')
         if not UtilClient.is_unset(tmp_req.text_process_tasks):
             request.text_process_tasks_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.text_process_tasks, 'textProcessTasks', 'json')
+        if not UtilClient.is_unset(tmp_req.video_caption_info):
+            request.video_caption_info_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_caption_info, 'videoCaptionInfo', 'json')
         if not UtilClient.is_unset(tmp_req.video_roles):
             request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
         body = {}
@@ -3317,6 +3447,8 @@ class Client(OpenApiClient):
             body['splitInterval'] = request.split_interval
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
             body['textProcessTasks'] = request.text_process_tasks_shrink
+        if not UtilClient.is_unset(request.video_caption_info_shrink):
+            body['videoCaptionInfo'] = request.video_caption_info_shrink
         if not UtilClient.is_unset(request.video_extra_info):
             body['videoExtraInfo'] = request.video_extra_info
         if not UtilClient.is_unset(request.video_model_custom_prompt_template):
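The small hunks above wire the new videoCaptionInfo field through the SDK's usual "shrink" step: before the request is sent, the nested model is serialized into a single JSON string and placed in the body under videoCaptionInfo, mirroring how textProcessTasks and videoRoles are already handled. Roughly, as an illustrative sketch (not SDK code, and the time units are an assumption not stated in this diff):

# Illustrative only: what the shrink step effectively produces for videoCaptionInfo.
# The SDK does this internally via OpenApiUtilClient.array_to_string_with_specified_style(..., 'json').
import json

caption_info = {
    'videoCaptions': [
        # placeholder caption; startTime/endTime units are not stated in this diff
        {'speaker': 'host', 'startTime': 0, 'endTime': 5000, 'text': 'Welcome back.'},
    ]
}
body = {'videoCaptionInfo': json.dumps(caption_info)}  # one string-valued body field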
@@ -3486,3 +3618,115 @@ class Client(OpenApiClient):
         runtime = util_models.RuntimeOptions()
         headers = {}
         return await self.update_video_analysis_config_with_options_async(workspace_id, request, headers, runtime)
+
+    def update_video_analysis_task_with_options(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskResponse:
+        """
+        @summary Video understanding - update the task status
+
+        @param request: UpdateVideoAnalysisTaskRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: UpdateVideoAnalysisTaskResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.task_id):
+            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.task_status):
+            body['taskStatus'] = request.task_status
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='UpdateVideoAnalysisTask',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/videoAnalysis/updateVideoAnalysisTask',
+            method='PUT',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def update_video_analysis_task_with_options_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskResponse:
+        """
+        @summary Video understanding - update the task status
+
+        @param request: UpdateVideoAnalysisTaskRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: UpdateVideoAnalysisTaskResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.task_id):
+            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.task_status):
+            body['taskStatus'] = request.task_status
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='UpdateVideoAnalysisTask',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/videoAnalysis/updateVideoAnalysisTask',
+            method='PUT',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def update_video_analysis_task(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskRequest,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskResponse:
+        """
+        @summary Video understanding - update the task status
+
+        @param request: UpdateVideoAnalysisTaskRequest
+        @return: UpdateVideoAnalysisTaskResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.update_video_analysis_task_with_options(workspace_id, request, headers, runtime)
+
+    async def update_video_analysis_task_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskRequest,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoAnalysisTaskResponse:
+        """
+        @summary Video understanding - update the task status
+
+        @param request: UpdateVideoAnalysisTaskRequest
+        @return: UpdateVideoAnalysisTaskResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.update_video_analysis_task_with_options_async(workspace_id, request, headers, runtime)
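client.py also gains an UpdateVideoAnalysisTask operation (PUT .../videoAnalysis/updateVideoAnalysisTask) for changing a task's status. A minimal sketch of a call, reusing the client object from the earlier example; the set of accepted taskStatus values is not documented in this diff, so the string below is a placeholder:

# Minimal sketch (not from the package): updating the status of a video analysis task.
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

request = quanmiao_models.UpdateVideoAnalysisTaskRequest(
    task_id='<task-id>',         # required
    task_status='<new-status>',  # required; allowed values are not listed in this diff
)
response = client.update_video_analysis_task('<workspace-id>', request)
data = response.body.data
print(response.body.success,
      data.task_status if data else None,
      data.task_error_message if data else None)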
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801/models.py
RENAMED
@@ -2038,12 +2038,14 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoCaptionResultVideoCa
         self,
         end_time: int = None,
         end_time_format: str = None,
+        speaker: str = None,
         start_time: int = None,
         start_time_format: str = None,
         text: str = None,
     ):
         self.end_time = end_time
         self.end_time_format = end_time_format
+        self.speaker = speaker
         self.start_time = start_time
         self.start_time_format = start_time_format
         self.text = text
@@ -2061,6 +2063,8 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoCaptionResultVideoCa
             result['endTime'] = self.end_time
         if self.end_time_format is not None:
             result['endTimeFormat'] = self.end_time_format
+        if self.speaker is not None:
+            result['speaker'] = self.speaker
         if self.start_time is not None:
             result['startTime'] = self.start_time
         if self.start_time_format is not None:
@@ -2075,6 +2079,8 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoCaptionResultVideoCa
             self.end_time = m.get('endTime')
         if m.get('endTimeFormat') is not None:
             self.end_time_format = m.get('endTimeFormat')
+        if m.get('speaker') is not None:
+            self.speaker = m.get('speaker')
         if m.get('startTime') is not None:
             self.start_time = m.get('startTime')
         if m.get('startTimeFormat') is not None:
@@ -2973,6 +2979,246 @@ class GetVideoAnalysisTaskResponse(TeaModel):
         return self


+class ListAnalysisTagDetailByTaskIdRequest(TeaModel):
+    def __init__(
+        self,
+        max_results: int = None,
+        next_token: str = None,
+        task_id: str = None,
+    ):
+        self.max_results = max_results
+        self.next_token = next_token
+        # This parameter is required.
+        self.task_id = task_id
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.max_results is not None:
+            result['maxResults'] = self.max_results
+        if self.next_token is not None:
+            result['nextToken'] = self.next_token
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('maxResults') is not None:
+            self.max_results = m.get('maxResults')
+        if m.get('nextToken') is not None:
+            self.next_token = m.get('nextToken')
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+        return self
+
+
+class ListAnalysisTagDetailByTaskIdResponseBodyDataContentTags(TeaModel):
+    def __init__(
+        self,
+        tag_name: str = None,
+        tags: List[str] = None,
+    ):
+        self.tag_name = tag_name
+        self.tags = tags
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.tag_name is not None:
+            result['tagName'] = self.tag_name
+        if self.tags is not None:
+            result['tags'] = self.tags
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('tagName') is not None:
+            self.tag_name = m.get('tagName')
+        if m.get('tags') is not None:
+            self.tags = m.get('tags')
+        return self
+
+
+class ListAnalysisTagDetailByTaskIdResponseBodyData(TeaModel):
+    def __init__(
+        self,
+        content: str = None,
+        content_tags: List[ListAnalysisTagDetailByTaskIdResponseBodyDataContentTags] = None,
+        origin_response: str = None,
+    ):
+        self.content = content
+        self.content_tags = content_tags
+        self.origin_response = origin_response
+
+    def validate(self):
+        if self.content_tags:
+            for k in self.content_tags:
+                if k:
+                    k.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.content is not None:
+            result['content'] = self.content
+        result['contentTags'] = []
+        if self.content_tags is not None:
+            for k in self.content_tags:
+                result['contentTags'].append(k.to_map() if k else None)
+        if self.origin_response is not None:
+            result['originResponse'] = self.origin_response
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('content') is not None:
+            self.content = m.get('content')
+        self.content_tags = []
+        if m.get('contentTags') is not None:
+            for k in m.get('contentTags'):
+                temp_model = ListAnalysisTagDetailByTaskIdResponseBodyDataContentTags()
+                self.content_tags.append(temp_model.from_map(k))
+        if m.get('originResponse') is not None:
+            self.origin_response = m.get('originResponse')
+        return self
+
+
+class ListAnalysisTagDetailByTaskIdResponseBody(TeaModel):
+    def __init__(
+        self,
+        code: str = None,
+        data: List[ListAnalysisTagDetailByTaskIdResponseBodyData] = None,
+        max_results: int = None,
+        message: str = None,
+        next_token: str = None,
+        request_id: str = None,
+        success: bool = None,
+        total_count: int = None,
+    ):
+        self.code = code
+        self.data = data
+        self.max_results = max_results
+        self.message = message
+        # This parameter is required.
+        self.next_token = next_token
+        # Id of the request
+        self.request_id = request_id
+        self.success = success
+        self.total_count = total_count
+
+    def validate(self):
+        if self.data:
+            for k in self.data:
+                if k:
+                    k.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.code is not None:
+            result['code'] = self.code
+        result['data'] = []
+        if self.data is not None:
+            for k in self.data:
+                result['data'].append(k.to_map() if k else None)
+        if self.max_results is not None:
+            result['maxResults'] = self.max_results
+        if self.message is not None:
+            result['message'] = self.message
+        if self.next_token is not None:
+            result['nextToken'] = self.next_token
+        if self.request_id is not None:
+            result['requestId'] = self.request_id
+        if self.success is not None:
+            result['success'] = self.success
+        if self.total_count is not None:
+            result['totalCount'] = self.total_count
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('code') is not None:
+            self.code = m.get('code')
+        self.data = []
+        if m.get('data') is not None:
+            for k in m.get('data'):
+                temp_model = ListAnalysisTagDetailByTaskIdResponseBodyData()
+                self.data.append(temp_model.from_map(k))
+        if m.get('maxResults') is not None:
+            self.max_results = m.get('maxResults')
+        if m.get('message') is not None:
+            self.message = m.get('message')
+        if m.get('nextToken') is not None:
+            self.next_token = m.get('nextToken')
+        if m.get('requestId') is not None:
+            self.request_id = m.get('requestId')
+        if m.get('success') is not None:
+            self.success = m.get('success')
+        if m.get('totalCount') is not None:
+            self.total_count = m.get('totalCount')
+        return self
+
+
+class ListAnalysisTagDetailByTaskIdResponse(TeaModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: ListAnalysisTagDetailByTaskIdResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.headers is not None:
+            result['headers'] = self.headers
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+        if m.get('body') is not None:
+            temp_model = ListAnalysisTagDetailByTaskIdResponseBody()
+            self.body = temp_model.from_map(m['body'])
+        return self
+
+
 class ListHotTopicSummariesRequest(TeaModel):
     def __init__(
         self,
@@ -8403,6 +8649,86 @@ class RunVideoAnalysisRequestTextProcessTasks(TeaModel):
         return self


+class RunVideoAnalysisRequestVideoCaptionInfoVideoCaptions(TeaModel):
+    def __init__(
+        self,
+        end_time: int = None,
+        speaker: str = None,
+        start_time: int = None,
+        text: str = None,
+    ):
+        self.end_time = end_time
+        self.speaker = speaker
+        self.start_time = start_time
+        self.text = text
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.end_time is not None:
+            result['endTime'] = self.end_time
+        if self.speaker is not None:
+            result['speaker'] = self.speaker
+        if self.start_time is not None:
+            result['startTime'] = self.start_time
+        if self.text is not None:
+            result['text'] = self.text
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('endTime') is not None:
+            self.end_time = m.get('endTime')
+        if m.get('speaker') is not None:
+            self.speaker = m.get('speaker')
+        if m.get('startTime') is not None:
+            self.start_time = m.get('startTime')
+        if m.get('text') is not None:
+            self.text = m.get('text')
+        return self
+
+
+class RunVideoAnalysisRequestVideoCaptionInfo(TeaModel):
+    def __init__(
+        self,
+        video_captions: List[RunVideoAnalysisRequestVideoCaptionInfoVideoCaptions] = None,
+    ):
+        self.video_captions = video_captions
+
+    def validate(self):
+        if self.video_captions:
+            for k in self.video_captions:
+                if k:
+                    k.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        result['videoCaptions'] = []
+        if self.video_captions is not None:
+            for k in self.video_captions:
+                result['videoCaptions'].append(k.to_map() if k else None)
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        self.video_captions = []
+        if m.get('videoCaptions') is not None:
+            for k in m.get('videoCaptions'):
+                temp_model = RunVideoAnalysisRequestVideoCaptionInfoVideoCaptions()
+                self.video_captions.append(temp_model.from_map(k))
+        return self
+
+
 class RunVideoAnalysisRequestVideoRoles(TeaModel):
     def __init__(
         self,
@@ -8458,6 +8784,7 @@ class RunVideoAnalysisRequest(TeaModel):
         split_interval: int = None,
         task_id: str = None,
         text_process_tasks: List[RunVideoAnalysisRequestTextProcessTasks] = None,
+        video_caption_info: RunVideoAnalysisRequestVideoCaptionInfo = None,
         video_extra_info: str = None,
         video_model_custom_prompt_template: str = None,
         video_model_id: str = None,
@@ -8478,6 +8805,7 @@ class RunVideoAnalysisRequest(TeaModel):
         self.split_interval = split_interval
         self.task_id = task_id
         self.text_process_tasks = text_process_tasks
+        self.video_caption_info = video_caption_info
         self.video_extra_info = video_extra_info
         self.video_model_custom_prompt_template = video_model_custom_prompt_template
         self.video_model_id = video_model_id
@@ -8492,6 +8820,8 @@ class RunVideoAnalysisRequest(TeaModel):
             for k in self.text_process_tasks:
                 if k:
                     k.validate()
+        if self.video_caption_info:
+            self.video_caption_info.validate()
         if self.video_roles:
             for k in self.video_roles:
                 if k:
@@ -8531,6 +8861,8 @@ class RunVideoAnalysisRequest(TeaModel):
         if self.text_process_tasks is not None:
             for k in self.text_process_tasks:
                 result['textProcessTasks'].append(k.to_map() if k else None)
+        if self.video_caption_info is not None:
+            result['videoCaptionInfo'] = self.video_caption_info.to_map()
         if self.video_extra_info is not None:
             result['videoExtraInfo'] = self.video_extra_info
         if self.video_model_custom_prompt_template is not None:
@@ -8579,6 +8911,9 @@ class RunVideoAnalysisRequest(TeaModel):
             for k in m.get('textProcessTasks'):
                 temp_model = RunVideoAnalysisRequestTextProcessTasks()
                 self.text_process_tasks.append(temp_model.from_map(k))
+        if m.get('videoCaptionInfo') is not None:
+            temp_model = RunVideoAnalysisRequestVideoCaptionInfo()
+            self.video_caption_info = temp_model.from_map(m['videoCaptionInfo'])
         if m.get('videoExtraInfo') is not None:
             self.video_extra_info = m.get('videoExtraInfo')
         if m.get('videoModelCustomPromptTemplate') is not None:
@@ -8613,6 +8948,7 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
         split_interval: int = None,
         task_id: str = None,
         text_process_tasks_shrink: str = None,
+        video_caption_info_shrink: str = None,
         video_extra_info: str = None,
         video_model_custom_prompt_template: str = None,
         video_model_id: str = None,
@@ -8633,6 +8969,7 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
         self.split_interval = split_interval
         self.task_id = task_id
         self.text_process_tasks_shrink = text_process_tasks_shrink
+        self.video_caption_info_shrink = video_caption_info_shrink
         self.video_extra_info = video_extra_info
         self.video_model_custom_prompt_template = video_model_custom_prompt_template
         self.video_model_id = video_model_id
@@ -8675,6 +9012,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
             result['taskId'] = self.task_id
         if self.text_process_tasks_shrink is not None:
             result['textProcessTasks'] = self.text_process_tasks_shrink
+        if self.video_caption_info_shrink is not None:
+            result['videoCaptionInfo'] = self.video_caption_info_shrink
         if self.video_extra_info is not None:
             result['videoExtraInfo'] = self.video_extra_info
         if self.video_model_custom_prompt_template is not None:
@@ -8717,6 +9056,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
             self.task_id = m.get('taskId')
         if m.get('textProcessTasks') is not None:
             self.text_process_tasks_shrink = m.get('textProcessTasks')
+        if m.get('videoCaptionInfo') is not None:
+            self.video_caption_info_shrink = m.get('videoCaptionInfo')
         if m.get('videoExtraInfo') is not None:
             self.video_extra_info = m.get('videoExtraInfo')
         if m.get('videoModelCustomPromptTemplate') is not None:
@@ -8940,12 +9281,14 @@ class RunVideoAnalysisResponseBodyPayloadOutputVideoCaptionResultVideoCaptions(T
         self,
         end_time: int = None,
         end_time_format: str = None,
+        speaker: str = None,
         start_time: int = None,
         start_time_format: str = None,
         text: str = None,
     ):
         self.end_time = end_time
         self.end_time_format = end_time_format
+        self.speaker = speaker
         self.start_time = start_time
         self.start_time_format = start_time_format
         self.text = text
@@ -8963,6 +9306,8 @@ class RunVideoAnalysisResponseBodyPayloadOutputVideoCaptionResultVideoCaptions(T
             result['endTime'] = self.end_time
         if self.end_time_format is not None:
             result['endTimeFormat'] = self.end_time_format
+        if self.speaker is not None:
+            result['speaker'] = self.speaker
         if self.start_time is not None:
             result['startTime'] = self.start_time
         if self.start_time_format is not None:
@@ -8977,6 +9322,8 @@ class RunVideoAnalysisResponseBodyPayloadOutputVideoCaptionResultVideoCaptions(T
             self.end_time = m.get('endTime')
         if m.get('endTimeFormat') is not None:
             self.end_time_format = m.get('endTimeFormat')
+        if m.get('speaker') is not None:
+            self.speaker = m.get('speaker')
         if m.get('startTime') is not None:
             self.start_time = m.get('startTime')
         if m.get('startTimeFormat') is not None:
@@ -10727,6 +11074,86 @@ class SubmitVideoAnalysisTaskRequestTextProcessTasks(TeaModel):
         return self


+class SubmitVideoAnalysisTaskRequestVideoCaptionInfoVideoCaptions(TeaModel):
+    def __init__(
+        self,
+        end_time: int = None,
+        speaker: str = None,
+        start_time: int = None,
+        text: str = None,
+    ):
+        self.end_time = end_time
+        self.speaker = speaker
+        self.start_time = start_time
+        self.text = text
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.end_time is not None:
+            result['endTime'] = self.end_time
+        if self.speaker is not None:
+            result['speaker'] = self.speaker
+        if self.start_time is not None:
+            result['startTime'] = self.start_time
+        if self.text is not None:
+            result['text'] = self.text
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('endTime') is not None:
+            self.end_time = m.get('endTime')
+        if m.get('speaker') is not None:
+            self.speaker = m.get('speaker')
+        if m.get('startTime') is not None:
+            self.start_time = m.get('startTime')
+        if m.get('text') is not None:
+            self.text = m.get('text')
+        return self
+
+
+class SubmitVideoAnalysisTaskRequestVideoCaptionInfo(TeaModel):
+    def __init__(
+        self,
+        video_captions: List[SubmitVideoAnalysisTaskRequestVideoCaptionInfoVideoCaptions] = None,
+    ):
+        self.video_captions = video_captions
+
+    def validate(self):
+        if self.video_captions:
+            for k in self.video_captions:
+                if k:
+                    k.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        result['videoCaptions'] = []
+        if self.video_captions is not None:
+            for k in self.video_captions:
+                result['videoCaptions'].append(k.to_map() if k else None)
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        self.video_captions = []
+        if m.get('videoCaptions') is not None:
+            for k in m.get('videoCaptions'):
+                temp_model = SubmitVideoAnalysisTaskRequestVideoCaptionInfoVideoCaptions()
+                self.video_captions.append(temp_model.from_map(k))
+        return self
+
+
 class SubmitVideoAnalysisTaskRequestVideoRoles(TeaModel):
     def __init__(
         self,
@@ -10781,6 +11208,7 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
         snapshot_interval: float = None,
         split_interval: int = None,
         text_process_tasks: List[SubmitVideoAnalysisTaskRequestTextProcessTasks] = None,
+        video_caption_info: SubmitVideoAnalysisTaskRequestVideoCaptionInfo = None,
         video_extra_info: str = None,
         video_model_custom_prompt_template: str = None,
         video_model_id: str = None,
@@ -10800,6 +11228,7 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
         self.snapshot_interval = snapshot_interval
         self.split_interval = split_interval
         self.text_process_tasks = text_process_tasks
+        self.video_caption_info = video_caption_info
         self.video_extra_info = video_extra_info
         self.video_model_custom_prompt_template = video_model_custom_prompt_template
         self.video_model_id = video_model_id
@@ -10815,6 +11244,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
             for k in self.text_process_tasks:
                 if k:
                     k.validate()
+        if self.video_caption_info:
+            self.video_caption_info.validate()
         if self.video_roles:
             for k in self.video_roles:
                 if k:
@@ -10852,6 +11283,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
         if self.text_process_tasks is not None:
             for k in self.text_process_tasks:
                 result['textProcessTasks'].append(k.to_map() if k else None)
+        if self.video_caption_info is not None:
+            result['videoCaptionInfo'] = self.video_caption_info.to_map()
         if self.video_extra_info is not None:
             result['videoExtraInfo'] = self.video_extra_info
         if self.video_model_custom_prompt_template is not None:
@@ -10898,6 +11331,9 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
             for k in m.get('textProcessTasks'):
                 temp_model = SubmitVideoAnalysisTaskRequestTextProcessTasks()
                 self.text_process_tasks.append(temp_model.from_map(k))
+        if m.get('videoCaptionInfo') is not None:
+            temp_model = SubmitVideoAnalysisTaskRequestVideoCaptionInfo()
+            self.video_caption_info = temp_model.from_map(m['videoCaptionInfo'])
         if m.get('videoExtraInfo') is not None:
             self.video_extra_info = m.get('videoExtraInfo')
         if m.get('videoModelCustomPromptTemplate') is not None:
@@ -10931,6 +11367,7 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
         snapshot_interval: float = None,
         split_interval: int = None,
         text_process_tasks_shrink: str = None,
+        video_caption_info_shrink: str = None,
         video_extra_info: str = None,
         video_model_custom_prompt_template: str = None,
         video_model_id: str = None,
@@ -10950,6 +11387,7 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
         self.snapshot_interval = snapshot_interval
         self.split_interval = split_interval
         self.text_process_tasks_shrink = text_process_tasks_shrink
+        self.video_caption_info_shrink = video_caption_info_shrink
         self.video_extra_info = video_extra_info
         self.video_model_custom_prompt_template = video_model_custom_prompt_template
         self.video_model_id = video_model_id
@@ -10991,6 +11429,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
         result['splitInterval'] = self.split_interval
         if self.text_process_tasks_shrink is not None:
             result['textProcessTasks'] = self.text_process_tasks_shrink
+        if self.video_caption_info_shrink is not None:
+            result['videoCaptionInfo'] = self.video_caption_info_shrink
         if self.video_extra_info is not None:
             result['videoExtraInfo'] = self.video_extra_info
         if self.video_model_custom_prompt_template is not None:
@@ -11031,6 +11471,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
         self.split_interval = m.get('splitInterval')
         if m.get('textProcessTasks') is not None:
             self.text_process_tasks_shrink = m.get('textProcessTasks')
+        if m.get('videoCaptionInfo') is not None:
+            self.video_caption_info_shrink = m.get('videoCaptionInfo')
         if m.get('videoExtraInfo') is not None:
             self.video_extra_info = m.get('videoExtraInfo')
         if m.get('videoModelCustomPromptTemplate') is not None:
@@ -11294,3 +11736,177 @@ class UpdateVideoAnalysisConfigResponse(TeaModel):
         return self


+class UpdateVideoAnalysisTaskRequest(TeaModel):
+    def __init__(
+        self,
+        task_id: str = None,
+        task_status: str = None,
+    ):
+        # This parameter is required.
+        self.task_id = task_id
+        # This parameter is required.
+        self.task_status = task_status
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+        if self.task_status is not None:
+            result['taskStatus'] = self.task_status
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+        if m.get('taskStatus') is not None:
+            self.task_status = m.get('taskStatus')
+        return self
+
+
+class UpdateVideoAnalysisTaskResponseBodyData(TeaModel):
+    def __init__(
+        self,
+        task_error_message: str = None,
+        task_id: str = None,
+        task_status: str = None,
+    ):
+        self.task_error_message = task_error_message
+        self.task_id = task_id
+        self.task_status = task_status
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.task_error_message is not None:
+            result['taskErrorMessage'] = self.task_error_message
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+        if self.task_status is not None:
+            result['taskStatus'] = self.task_status
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('taskErrorMessage') is not None:
+            self.task_error_message = m.get('taskErrorMessage')
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+        if m.get('taskStatus') is not None:
+            self.task_status = m.get('taskStatus')
+        return self
+
+
+class UpdateVideoAnalysisTaskResponseBody(TeaModel):
+    def __init__(
+        self,
+        code: str = None,
+        data: UpdateVideoAnalysisTaskResponseBodyData = None,
+        http_status_code: int = None,
+        message: str = None,
+        request_id: str = None,
+        success: bool = None,
+    ):
+        self.code = code
+        self.data = data
+        self.http_status_code = http_status_code
+        self.message = message
+        self.request_id = request_id
+        self.success = success
+
+    def validate(self):
+        if self.data:
+            self.data.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.code is not None:
+            result['code'] = self.code
+        if self.data is not None:
+            result['data'] = self.data.to_map()
+        if self.http_status_code is not None:
+            result['httpStatusCode'] = self.http_status_code
+        if self.message is not None:
+            result['message'] = self.message
+        if self.request_id is not None:
+            result['requestId'] = self.request_id
+        if self.success is not None:
+            result['success'] = self.success
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('code') is not None:
+            self.code = m.get('code')
+        if m.get('data') is not None:
+            temp_model = UpdateVideoAnalysisTaskResponseBodyData()
+            self.data = temp_model.from_map(m['data'])
+        if m.get('httpStatusCode') is not None:
+            self.http_status_code = m.get('httpStatusCode')
+        if m.get('message') is not None:
+            self.message = m.get('message')
+        if m.get('requestId') is not None:
+            self.request_id = m.get('requestId')
+        if m.get('success') is not None:
+            self.success = m.get('success')
+        return self
+
+
+class UpdateVideoAnalysisTaskResponse(TeaModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: UpdateVideoAnalysisTaskResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.headers is not None:
+            result['headers'] = self.headers
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+        if m.get('body') is not None:
+            temp_model = UpdateVideoAnalysisTaskResponseBody()
+            self.body = temp_model.from_map(m['body'])
+        return self
+
+
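models.py adds matching VideoCaptionInfo request models for both RunVideoAnalysis and SubmitVideoAnalysisTask, plus a speaker field on the caption result models. A minimal sketch of attaching caller-supplied captions to a submit request follows; the field values are placeholders, the other request fields (unchanged in this release) are omitted, and the submit_video_analysis_task wrapper name is assumed by analogy with the other operations in client.py rather than shown in this diff.

# Minimal sketch (not from the package): supplying your own captions, including the
# new speaker field, when submitting a video analysis task.
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

caption_info = quanmiao_models.SubmitVideoAnalysisTaskRequestVideoCaptionInfo(
    video_captions=[
        quanmiao_models.SubmitVideoAnalysisTaskRequestVideoCaptionInfoVideoCaptions(
            speaker='narrator',   # new in 2.8.0
            start_time=0,         # time units are not stated in this diff
            end_time=4200,
            text='Opening line of the video.',
        ),
    ],
)
request = quanmiao_models.SubmitVideoAnalysisTaskRequest(
    video_caption_info=caption_info,  # new in 2.8.0; other fields omitted here
)
# Assumed wrapper name, by analogy with the other operations in client.py:
response = client.submit_video_analysis_task('<workspace-id>', request)
print(response.body)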
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: alibabacloud-quanmiaolightapp20240801
-Version: 2.7.2
+Version: 2.8.0
 Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
 Home-page: https://github.com/aliyun/alibabacloud-python-sdk
 Author: Alibaba Cloud SDK
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/setup.py
RENAMED
@@ -24,7 +24,7 @@ from setuptools import setup, find_packages
 """
 setup module for alibabacloud_quanmiaolightapp20240801.

-Created on
+Created on 15/05/2025

 @author: Alibaba Cloud SDK
 """
@@ -38,7 +38,7 @@ URL = "https://github.com/aliyun/alibabacloud-python-sdk"
 VERSION = __import__(PACKAGE).__version__
 REQUIRES = [
     "alibabacloud_tea_util>=0.3.13, <1.0.0",
-    "alibabacloud_tea_openapi>=0.3.
+    "alibabacloud_tea_openapi>=0.3.15, <1.0.0",
     "alibabacloud_openapi_util>=0.2.2, <1.0.0",
     "alibabacloud_endpoint_util>=0.0.3, <1.0.0"
 ]
alibabacloud_quanmiaolightapp20240801-2.7.2/alibabacloud_quanmiaolightapp20240801/__init__.py
DELETED
@@ -1 +0,0 @@
-__version__ = '2.7.2'
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/LICENSE
RENAMED
File without changes
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/MANIFEST.in
RENAMED
File without changes
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/README-CN.md
RENAMED
File without changes
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/README.md
RENAMED
File without changes
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/SOURCES.txt
RENAMED
File without changes
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/dependency_links.txt
RENAMED
File without changes
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/alibabacloud_quanmiaolightapp20240801.egg-info/top_level.txt
RENAMED
File without changes
{alibabacloud_quanmiaolightapp20240801-2.7.2 → alibabacloud_quanmiaolightapp20240801-2.8.0}/setup.cfg
RENAMED
File without changes