alibabacloud-quanmiaolightapp20240801 2.6.2.tar.gz → 2.6.4.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/ChangeLog.md +9 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/PKG-INFO +1 -1
- alibabacloud_quanmiaolightapp20240801-2.6.4/alibabacloud_quanmiaolightapp20240801/__init__.py +1 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801/client.py +28 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801/models.py +60 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801.egg-info/PKG-INFO +1 -1
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/setup.py +1 -1
- alibabacloud_quanmiaolightapp20240801-2.6.2/alibabacloud_quanmiaolightapp20240801/__init__.py +0 -1
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/MANIFEST.in +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/README-CN.md +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/README.md +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801.egg-info/SOURCES.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801.egg-info/dependency_links.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801.egg-info/requires.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801.egg-info/top_level.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/setup.cfg +0 -0
{alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/ChangeLog.md
RENAMED
@@ -1,3 +1,12 @@
+2025-03-18 Version: 2.6.3
+- Update API RunVideoAnalysis: add param splitInterval.
+- Update API SubmitVideoAnalysisTask: add param splitInterval.
+
+
+2025-03-17 Version: 2.6.2
+- Update API RunHotTopicSummary: update param stepForCustomSummaryStyleConfig.
+
+
 2025-03-13 Version: 2.6.1
 - Update API GetVideoAnalysisTask: update response param.
 - Update API RunVideoAnalysis: add param faceIdentitySimilarityMinScore.
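A minimal sketch of setting the new splitInterval parameter from 2.6.3 on a request, using only the model fields introduced in the models.py diff below (the video URL and interval value are placeholders; the semantics of splitInterval are not described in this diff):

    from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

    # Build a RunVideoAnalysis request; split_interval maps to the new 'splitInterval' body key.
    request = quanmiao_models.RunVideoAnalysisRequest(
        video_url='https://example.com/demo.mp4',  # placeholder URL
        split_interval=10,  # placeholder value; presumably a segment interval, not specified here
    )
    print(request.to_map()['splitInterval'])  # -> 10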
{alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: alibabacloud_quanmiaolightapp20240801
-Version: 2.6.2
+Version: 2.6.4
 Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
 Home-page: https://github.com/aliyun/alibabacloud-python-sdk
 Author: Alibaba Cloud SDK
alibabacloud_quanmiaolightapp20240801-2.6.4/alibabacloud_quanmiaolightapp20240801/__init__.py
ADDED
@@ -0,0 +1 @@
+__version__ = '2.6.4'
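Consumers can read the packaged version at runtime from this new module attribute:

    import alibabacloud_quanmiaolightapp20240801

    print(alibabacloud_quanmiaolightapp20240801.__version__)  # '2.6.4'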
{alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801/client.py
RENAMED
@@ -2381,6 +2381,8 @@ class Client(OpenApiClient):
         UtilClient.validate_model(tmp_req)
         request = quan_miao_light_app_20240801_models.RunVideoAnalysisShrinkRequest()
         OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.exclude_generate_options):
+            request.exclude_generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.exclude_generate_options, 'excludeGenerateOptions', 'json')
         if not UtilClient.is_unset(tmp_req.frame_sample_method):
             request.frame_sample_method_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.frame_sample_method, 'frameSampleMethod', 'json')
         if not UtilClient.is_unset(tmp_req.generate_options):
@@ -2390,6 +2392,8 @@ class Client(OpenApiClient):
         if not UtilClient.is_unset(tmp_req.video_roles):
             request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
         body = {}
+        if not UtilClient.is_unset(request.exclude_generate_options_shrink):
+            body['excludeGenerateOptions'] = request.exclude_generate_options_shrink
         if not UtilClient.is_unset(request.face_identity_similarity_min_score):
             body['faceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
         if not UtilClient.is_unset(request.frame_sample_method_shrink):
@@ -2408,6 +2412,8 @@ class Client(OpenApiClient):
             body['originalSessionId'] = request.original_session_id
         if not UtilClient.is_unset(request.snapshot_interval):
             body['snapshotInterval'] = request.snapshot_interval
+        if not UtilClient.is_unset(request.split_interval):
+            body['splitInterval'] = request.split_interval
         if not UtilClient.is_unset(request.task_id):
             body['taskId'] = request.task_id
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
@@ -2468,6 +2474,8 @@ class Client(OpenApiClient):
         UtilClient.validate_model(tmp_req)
         request = quan_miao_light_app_20240801_models.RunVideoAnalysisShrinkRequest()
         OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.exclude_generate_options):
+            request.exclude_generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.exclude_generate_options, 'excludeGenerateOptions', 'json')
         if not UtilClient.is_unset(tmp_req.frame_sample_method):
             request.frame_sample_method_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.frame_sample_method, 'frameSampleMethod', 'json')
         if not UtilClient.is_unset(tmp_req.generate_options):
@@ -2477,6 +2485,8 @@ class Client(OpenApiClient):
         if not UtilClient.is_unset(tmp_req.video_roles):
             request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
         body = {}
+        if not UtilClient.is_unset(request.exclude_generate_options_shrink):
+            body['excludeGenerateOptions'] = request.exclude_generate_options_shrink
         if not UtilClient.is_unset(request.face_identity_similarity_min_score):
             body['faceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
         if not UtilClient.is_unset(request.frame_sample_method_shrink):
@@ -2495,6 +2505,8 @@ class Client(OpenApiClient):
             body['originalSessionId'] = request.original_session_id
         if not UtilClient.is_unset(request.snapshot_interval):
             body['snapshotInterval'] = request.snapshot_interval
+        if not UtilClient.is_unset(request.split_interval):
+            body['splitInterval'] = request.split_interval
         if not UtilClient.is_unset(request.task_id):
             body['taskId'] = request.task_id
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
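Both the synchronous and asynchronous RunVideoAnalysis paths now "shrink" excludeGenerateOptions from a Python list into a JSON array string before adding it to the request body. A standalone sketch of that conversion using the same helper the generated client calls (the option value is a made-up placeholder, not an option name defined by this API):

    from alibabacloud_tea_util.client import Client as UtilClient
    from alibabacloud_openapi_util.client import Client as OpenApiUtilClient

    exclude_generate_options = ['placeholderOption']  # hypothetical value
    if not UtilClient.is_unset(exclude_generate_options):
        # Same call the generated client uses for the shrink step.
        shrunk = OpenApiUtilClient.array_to_string_with_specified_style(
            exclude_generate_options, 'excludeGenerateOptions', 'json')
        print(shrunk)  # a JSON array string, e.g. '["placeholderOption"]'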
@@ -2745,6 +2757,8 @@ class Client(OpenApiClient):
         UtilClient.validate_model(tmp_req)
         request = quan_miao_light_app_20240801_models.SubmitVideoAnalysisTaskShrinkRequest()
         OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.exclude_generate_options):
+            request.exclude_generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.exclude_generate_options, 'excludeGenerateOptions', 'json')
         if not UtilClient.is_unset(tmp_req.frame_sample_method):
             request.frame_sample_method_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.frame_sample_method, 'frameSampleMethod', 'json')
         if not UtilClient.is_unset(tmp_req.generate_options):
@@ -2754,6 +2768,10 @@ class Client(OpenApiClient):
         if not UtilClient.is_unset(tmp_req.video_roles):
             request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
         body = {}
+        if not UtilClient.is_unset(request.deduplication_id):
+            body['deduplicationId'] = request.deduplication_id
+        if not UtilClient.is_unset(request.exclude_generate_options_shrink):
+            body['excludeGenerateOptions'] = request.exclude_generate_options_shrink
         if not UtilClient.is_unset(request.face_identity_similarity_min_score):
             body['faceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
         if not UtilClient.is_unset(request.frame_sample_method_shrink):
@@ -2770,6 +2788,8 @@ class Client(OpenApiClient):
             body['modelId'] = request.model_id
         if not UtilClient.is_unset(request.snapshot_interval):
             body['snapshotInterval'] = request.snapshot_interval
+        if not UtilClient.is_unset(request.split_interval):
+            body['splitInterval'] = request.split_interval
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
             body['textProcessTasks'] = request.text_process_tasks_shrink
         if not UtilClient.is_unset(request.video_extra_info):
@@ -2828,6 +2848,8 @@ class Client(OpenApiClient):
         UtilClient.validate_model(tmp_req)
         request = quan_miao_light_app_20240801_models.SubmitVideoAnalysisTaskShrinkRequest()
         OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.exclude_generate_options):
+            request.exclude_generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.exclude_generate_options, 'excludeGenerateOptions', 'json')
         if not UtilClient.is_unset(tmp_req.frame_sample_method):
             request.frame_sample_method_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.frame_sample_method, 'frameSampleMethod', 'json')
         if not UtilClient.is_unset(tmp_req.generate_options):
@@ -2837,6 +2859,10 @@ class Client(OpenApiClient):
         if not UtilClient.is_unset(tmp_req.video_roles):
             request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
         body = {}
+        if not UtilClient.is_unset(request.deduplication_id):
+            body['deduplicationId'] = request.deduplication_id
+        if not UtilClient.is_unset(request.exclude_generate_options_shrink):
+            body['excludeGenerateOptions'] = request.exclude_generate_options_shrink
         if not UtilClient.is_unset(request.face_identity_similarity_min_score):
             body['faceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
         if not UtilClient.is_unset(request.frame_sample_method_shrink):
@@ -2853,6 +2879,8 @@ class Client(OpenApiClient):
             body['modelId'] = request.model_id
         if not UtilClient.is_unset(request.snapshot_interval):
             body['snapshotInterval'] = request.snapshot_interval
+        if not UtilClient.is_unset(request.split_interval):
+            body['splitInterval'] = request.split_interval
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
             body['textProcessTasks'] = request.text_process_tasks_shrink
         if not UtilClient.is_unset(request.video_extra_info):
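Combined with the model changes further down, the new task-level fields are supplied on the request object and only reach the HTTP body when set, so existing callers are unaffected. A minimal sketch (identifiers and values are placeholders; the actual submit call is omitted because its signature is not part of this diff):

    from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

    request = quanmiao_models.SubmitVideoAnalysisTaskRequest(
        video_url='https://example.com/demo.mp4',   # placeholder
        deduplication_id='task-2025-03-18-001',     # hypothetical deduplication key
        split_interval=15,                          # new splitInterval parameter
    )
    # The client only adds 'deduplicationId' / 'splitInterval' to the body when these are set.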
{alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801/models.py
RENAMED
@@ -7133,6 +7133,7 @@ class RunVideoAnalysisRequestVideoRoles(TeaModel):
 class RunVideoAnalysisRequest(TeaModel):
     def __init__(
         self,
+        exclude_generate_options: List[str] = None,
         face_identity_similarity_min_score: float = None,
         frame_sample_method: RunVideoAnalysisRequestFrameSampleMethod = None,
         generate_options: List[str] = None,
@@ -7142,6 +7143,7 @@ class RunVideoAnalysisRequest(TeaModel):
         model_id: str = None,
         original_session_id: str = None,
         snapshot_interval: float = None,
+        split_interval: int = None,
         task_id: str = None,
         text_process_tasks: List[RunVideoAnalysisRequestTextProcessTasks] = None,
         video_extra_info: str = None,
@@ -7151,6 +7153,7 @@ class RunVideoAnalysisRequest(TeaModel):
         video_shot_face_identity_count: int = None,
         video_url: str = None,
     ):
+        self.exclude_generate_options = exclude_generate_options
         self.face_identity_similarity_min_score = face_identity_similarity_min_score
         self.frame_sample_method = frame_sample_method
         self.generate_options = generate_options
@@ -7160,6 +7163,7 @@ class RunVideoAnalysisRequest(TeaModel):
         self.model_id = model_id
         self.original_session_id = original_session_id
         self.snapshot_interval = snapshot_interval
+        self.split_interval = split_interval
         self.task_id = task_id
         self.text_process_tasks = text_process_tasks
         self.video_extra_info = video_extra_info
@@ -7187,6 +7191,8 @@ class RunVideoAnalysisRequest(TeaModel):
             return _map

         result = dict()
+        if self.exclude_generate_options is not None:
+            result['excludeGenerateOptions'] = self.exclude_generate_options
         if self.face_identity_similarity_min_score is not None:
             result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
         if self.frame_sample_method is not None:
@@ -7205,6 +7211,8 @@ class RunVideoAnalysisRequest(TeaModel):
             result['originalSessionId'] = self.original_session_id
         if self.snapshot_interval is not None:
             result['snapshotInterval'] = self.snapshot_interval
+        if self.split_interval is not None:
+            result['splitInterval'] = self.split_interval
         if self.task_id is not None:
             result['taskId'] = self.task_id
         result['textProcessTasks'] = []
@@ -7229,6 +7237,8 @@ class RunVideoAnalysisRequest(TeaModel):

     def from_map(self, m: dict = None):
         m = m or dict()
+        if m.get('excludeGenerateOptions') is not None:
+            self.exclude_generate_options = m.get('excludeGenerateOptions')
         if m.get('faceIdentitySimilarityMinScore') is not None:
             self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
         if m.get('frameSampleMethod') is not None:
@@ -7248,6 +7258,8 @@ class RunVideoAnalysisRequest(TeaModel):
             self.original_session_id = m.get('originalSessionId')
         if m.get('snapshotInterval') is not None:
             self.snapshot_interval = m.get('snapshotInterval')
+        if m.get('splitInterval') is not None:
+            self.split_interval = m.get('splitInterval')
         if m.get('taskId') is not None:
             self.task_id = m.get('taskId')
         self.text_process_tasks = []
@@ -7276,6 +7288,7 @@ class RunVideoAnalysisRequest(TeaModel):
 class RunVideoAnalysisShrinkRequest(TeaModel):
     def __init__(
         self,
+        exclude_generate_options_shrink: str = None,
         face_identity_similarity_min_score: float = None,
         frame_sample_method_shrink: str = None,
         generate_options_shrink: str = None,
@@ -7285,6 +7298,7 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
         model_id: str = None,
         original_session_id: str = None,
         snapshot_interval: float = None,
+        split_interval: int = None,
         task_id: str = None,
         text_process_tasks_shrink: str = None,
         video_extra_info: str = None,
@@ -7294,6 +7308,7 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
         video_shot_face_identity_count: int = None,
         video_url: str = None,
     ):
+        self.exclude_generate_options_shrink = exclude_generate_options_shrink
         self.face_identity_similarity_min_score = face_identity_similarity_min_score
         self.frame_sample_method_shrink = frame_sample_method_shrink
         self.generate_options_shrink = generate_options_shrink
@@ -7303,6 +7318,7 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
         self.model_id = model_id
         self.original_session_id = original_session_id
         self.snapshot_interval = snapshot_interval
+        self.split_interval = split_interval
         self.task_id = task_id
         self.text_process_tasks_shrink = text_process_tasks_shrink
         self.video_extra_info = video_extra_info
@@ -7321,6 +7337,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
             return _map

         result = dict()
+        if self.exclude_generate_options_shrink is not None:
+            result['excludeGenerateOptions'] = self.exclude_generate_options_shrink
         if self.face_identity_similarity_min_score is not None:
             result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
         if self.frame_sample_method_shrink is not None:
@@ -7339,6 +7357,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
             result['originalSessionId'] = self.original_session_id
         if self.snapshot_interval is not None:
             result['snapshotInterval'] = self.snapshot_interval
+        if self.split_interval is not None:
+            result['splitInterval'] = self.split_interval
         if self.task_id is not None:
             result['taskId'] = self.task_id
         if self.text_process_tasks_shrink is not None:
@@ -7359,6 +7379,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):

     def from_map(self, m: dict = None):
         m = m or dict()
+        if m.get('excludeGenerateOptions') is not None:
+            self.exclude_generate_options_shrink = m.get('excludeGenerateOptions')
         if m.get('faceIdentitySimilarityMinScore') is not None:
             self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
         if m.get('frameSampleMethod') is not None:
@@ -7377,6 +7399,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
             self.original_session_id = m.get('originalSessionId')
         if m.get('snapshotInterval') is not None:
             self.snapshot_interval = m.get('snapshotInterval')
+        if m.get('splitInterval') is not None:
+            self.split_interval = m.get('splitInterval')
         if m.get('taskId') is not None:
             self.task_id = m.get('taskId')
         if m.get('textProcessTasks') is not None:
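A short round trip through the new RunVideoAnalysisRequest fields and their map keys, as defined above (the option value is a placeholder):

    from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

    req = quanmiao_models.RunVideoAnalysisRequest(
        exclude_generate_options=['placeholderOption'],  # hypothetical option value
        split_interval=10,
    )
    m = req.to_map()
    assert m['excludeGenerateOptions'] == ['placeholderOption']
    assert m['splitInterval'] == 10

    restored = quanmiao_models.RunVideoAnalysisRequest()
    restored.from_map(m)
    assert restored.split_interval == 10
    assert restored.exclude_generate_options == ['placeholderOption']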
@@ -9006,6 +9030,8 @@ class SubmitVideoAnalysisTaskRequestVideoRoles(TeaModel):
 class SubmitVideoAnalysisTaskRequest(TeaModel):
     def __init__(
         self,
+        deduplication_id: str = None,
+        exclude_generate_options: List[str] = None,
         face_identity_similarity_min_score: float = None,
         frame_sample_method: SubmitVideoAnalysisTaskRequestFrameSampleMethod = None,
         generate_options: List[str] = None,
@@ -9014,6 +9040,7 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
         model_custom_prompt_template_id: str = None,
         model_id: str = None,
         snapshot_interval: float = None,
+        split_interval: int = None,
         text_process_tasks: List[SubmitVideoAnalysisTaskRequestTextProcessTasks] = None,
         video_extra_info: str = None,
         video_model_custom_prompt_template: str = None,
@@ -9022,6 +9049,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
         video_shot_face_identity_count: int = None,
         video_url: str = None,
     ):
+        self.deduplication_id = deduplication_id
+        self.exclude_generate_options = exclude_generate_options
         self.face_identity_similarity_min_score = face_identity_similarity_min_score
         self.frame_sample_method = frame_sample_method
         self.generate_options = generate_options
@@ -9030,6 +9059,7 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
         self.model_custom_prompt_template_id = model_custom_prompt_template_id
         self.model_id = model_id
         self.snapshot_interval = snapshot_interval
+        self.split_interval = split_interval
         self.text_process_tasks = text_process_tasks
         self.video_extra_info = video_extra_info
         self.video_model_custom_prompt_template = video_model_custom_prompt_template
@@ -9057,6 +9087,10 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
             return _map

         result = dict()
+        if self.deduplication_id is not None:
+            result['deduplicationId'] = self.deduplication_id
+        if self.exclude_generate_options is not None:
+            result['excludeGenerateOptions'] = self.exclude_generate_options
         if self.face_identity_similarity_min_score is not None:
             result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
         if self.frame_sample_method is not None:
@@ -9073,6 +9107,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
             result['modelId'] = self.model_id
         if self.snapshot_interval is not None:
             result['snapshotInterval'] = self.snapshot_interval
+        if self.split_interval is not None:
+            result['splitInterval'] = self.split_interval
         result['textProcessTasks'] = []
         if self.text_process_tasks is not None:
             for k in self.text_process_tasks:
@@ -9095,6 +9131,10 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):

     def from_map(self, m: dict = None):
         m = m or dict()
+        if m.get('deduplicationId') is not None:
+            self.deduplication_id = m.get('deduplicationId')
+        if m.get('excludeGenerateOptions') is not None:
+            self.exclude_generate_options = m.get('excludeGenerateOptions')
         if m.get('faceIdentitySimilarityMinScore') is not None:
             self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
         if m.get('frameSampleMethod') is not None:
@@ -9112,6 +9152,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
         self.model_id = m.get('modelId')
         if m.get('snapshotInterval') is not None:
             self.snapshot_interval = m.get('snapshotInterval')
+        if m.get('splitInterval') is not None:
+            self.split_interval = m.get('splitInterval')
         self.text_process_tasks = []
         if m.get('textProcessTasks') is not None:
             for k in m.get('textProcessTasks'):
@@ -9138,6 +9180,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
 class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
     def __init__(
         self,
+        deduplication_id: str = None,
+        exclude_generate_options_shrink: str = None,
         face_identity_similarity_min_score: float = None,
         frame_sample_method_shrink: str = None,
         generate_options_shrink: str = None,
@@ -9146,6 +9190,7 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
         model_custom_prompt_template_id: str = None,
         model_id: str = None,
         snapshot_interval: float = None,
+        split_interval: int = None,
         text_process_tasks_shrink: str = None,
         video_extra_info: str = None,
         video_model_custom_prompt_template: str = None,
@@ -9154,6 +9199,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
         video_shot_face_identity_count: int = None,
         video_url: str = None,
     ):
+        self.deduplication_id = deduplication_id
+        self.exclude_generate_options_shrink = exclude_generate_options_shrink
         self.face_identity_similarity_min_score = face_identity_similarity_min_score
         self.frame_sample_method_shrink = frame_sample_method_shrink
         self.generate_options_shrink = generate_options_shrink
@@ -9162,6 +9209,7 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
         self.model_custom_prompt_template_id = model_custom_prompt_template_id
         self.model_id = model_id
         self.snapshot_interval = snapshot_interval
+        self.split_interval = split_interval
         self.text_process_tasks_shrink = text_process_tasks_shrink
         self.video_extra_info = video_extra_info
         self.video_model_custom_prompt_template = video_model_custom_prompt_template
@@ -9180,6 +9228,10 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
             return _map

         result = dict()
+        if self.deduplication_id is not None:
+            result['deduplicationId'] = self.deduplication_id
+        if self.exclude_generate_options_shrink is not None:
+            result['excludeGenerateOptions'] = self.exclude_generate_options_shrink
         if self.face_identity_similarity_min_score is not None:
             result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
         if self.frame_sample_method_shrink is not None:
@@ -9196,6 +9248,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
             result['modelId'] = self.model_id
         if self.snapshot_interval is not None:
             result['snapshotInterval'] = self.snapshot_interval
+        if self.split_interval is not None:
+            result['splitInterval'] = self.split_interval
         if self.text_process_tasks_shrink is not None:
             result['textProcessTasks'] = self.text_process_tasks_shrink
         if self.video_extra_info is not None:
@@ -9214,6 +9268,10 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):

     def from_map(self, m: dict = None):
         m = m or dict()
+        if m.get('deduplicationId') is not None:
+            self.deduplication_id = m.get('deduplicationId')
+        if m.get('excludeGenerateOptions') is not None:
+            self.exclude_generate_options_shrink = m.get('excludeGenerateOptions')
         if m.get('faceIdentitySimilarityMinScore') is not None:
             self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
         if m.get('frameSampleMethod') is not None:
@@ -9230,6 +9288,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
         self.model_id = m.get('modelId')
         if m.get('snapshotInterval') is not None:
             self.snapshot_interval = m.get('snapshotInterval')
+        if m.get('splitInterval') is not None:
+            self.split_interval = m.get('splitInterval')
         if m.get('textProcessTasks') is not None:
             self.text_process_tasks_shrink = m.get('textProcessTasks')
         if m.get('videoExtraInfo') is not None:
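On the wire, the list-valued fields travel as JSON strings, which is what the *ShrinkRequest models hold. A sketch of populating SubmitVideoAnalysisTaskShrinkRequest from a raw map using the key mappings added above (all values are placeholders):

    from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

    raw = {
        'deduplicationId': 'task-2025-03-18-001',            # hypothetical value
        'excludeGenerateOptions': '["placeholderOption"]',   # already-shrunk JSON string
        'splitInterval': 15,
    }
    shrink_req = quanmiao_models.SubmitVideoAnalysisTaskShrinkRequest()
    shrink_req.from_map(raw)
    print(shrink_req.deduplication_id, shrink_req.split_interval)
    print(shrink_req.exclude_generate_options_shrink)  # the JSON string, stored as-is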
{alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/alibabacloud_quanmiaolightapp20240801.egg-info/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: alibabacloud-quanmiaolightapp20240801
-Version: 2.6.2
+Version: 2.6.4
 Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
 Home-page: https://github.com/aliyun/alibabacloud-python-sdk
 Author: Alibaba Cloud SDK
alibabacloud_quanmiaolightapp20240801-2.6.2/alibabacloud_quanmiaolightapp20240801/__init__.py
DELETED
@@ -1 +0,0 @@
-__version__ = '2.6.2'
{alibabacloud_quanmiaolightapp20240801-2.6.2 → alibabacloud_quanmiaolightapp20240801-2.6.4}/LICENSE
RENAMED
File without changes

The remaining renamed files (MANIFEST.in, README-CN.md, README.md, the egg-info SOURCES.txt, dependency_links.txt, requires.txt and top_level.txt, and setup.cfg) likewise contain no changes.