tccli-intl-en 3.1.11.1__py2.py3-none-any.whl → 3.1.13.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. tccli/__init__.py +1 -1
  2. tccli/services/__init__.py +6 -0
  3. tccli/services/ai3d/__init__.py +4 -0
  4. tccli/services/ai3d/ai3d_client.py +266 -0
  5. tccli/services/ai3d/v20250513/api.json +264 -0
  6. tccli/services/ai3d/v20250513/examples.json +20 -0
  7. tccli/services/cvm/v20170312/api.json +3 -3
  8. tccli/services/cvm/v20170312/examples.json +31 -31
  9. tccli/services/faceid/v20180301/api.json +2 -2
  10. tccli/services/hunyuan/__init__.py +4 -0
  11. tccli/services/hunyuan/hunyuan_client.py +266 -0
  12. tccli/services/hunyuan/v20230901/api.json +270 -0
  13. tccli/services/hunyuan/v20230901/examples.json +20 -0
  14. tccli/services/mdl/mdl_client.py +192 -24
  15. tccli/services/mdl/v20200326/api.json +418 -5
  16. tccli/services/mdl/v20200326/examples.json +24 -0
  17. tccli/services/mps/mps_client.py +56 -0
  18. tccli/services/mps/v20190612/api.json +208 -9
  19. tccli/services/mps/v20190612/examples.json +36 -22
  20. tccli/services/teo/teo_client.py +56 -0
  21. tccli/services/teo/v20220901/api.json +101 -2
  22. tccli/services/teo/v20220901/examples.json +8 -0
  23. {tccli_intl_en-3.1.11.1.dist-info → tccli_intl_en-3.1.13.1.dist-info}/METADATA +2 -2
  24. {tccli_intl_en-3.1.11.1.dist-info → tccli_intl_en-3.1.13.1.dist-info}/RECORD +28 -20
  25. {tccli_intl_en-3.1.11.1.dist-info → tccli_intl_en-3.1.13.1.dist-info}/LICENSE +0 -0
  26. {tccli_intl_en-3.1.11.1.dist-info → tccli_intl_en-3.1.13.1.dist-info}/WHEEL +0 -0
  27. {tccli_intl_en-3.1.11.1.dist-info → tccli_intl_en-3.1.13.1.dist-info}/entry_points.txt +0 -0
  28. {tccli_intl_en-3.1.11.1.dist-info → tccli_intl_en-3.1.13.1.dist-info}/top_level.txt +0 -0
@@ -43,7 +43,7 @@
  "status": "online"
  },
  "CreateBlindWatermarkTemplate": {
- "document": "This API is used to create a user-defined digital watermark template with an upper limit of 1000.",
+ "document": "This API is used to create a user-defined digital watermark template.",
  "input": "CreateBlindWatermarkTemplateRequest",
  "name": "Creates a digital watermark template",
  "output": "CreateBlindWatermarkTemplateResponse",
@@ -476,6 +476,13 @@
  "output": "DescribeTranscodeTemplatesResponse",
  "status": "online"
  },
+ "DescribeUsageData": {
+ "document": "This API is used to return the daily Media Processing Service (MPS) usage information within the specified query time range.\n 1. MPS statistical data from the last 365 days can be queried.\n 2. The query time span should not exceed 90 days.",
+ "input": "DescribeUsageDataRequest",
+ "name": "Queries usage information",
+ "output": "DescribeUsageDataResponse",
+ "status": "online"
+ },
  "DescribeWatermarkTemplates": {
  "document": "This API is used to query custom watermarking templates and supports paged queries by filters.",
  "input": "DescribeWatermarkTemplatesRequest",
@@ -722,7 +729,7 @@
  "status": "online"
  },
  "ProcessMedia": {
- "document": "This API is used to initiate a processing task for URL video links or media files in COS. Features include:.\nThis API is used to perform video transcoding, including standard transcoding, TSC transcoding, and audio/video enhancement.\nThis API is used to generate animated images.\nThis API is used to take screenshots at specified time points.\nThis API is used to take sampled screenshots from videos.\nThis API is used to take sprite screenshots of videos.\nThis API is used to transcode to adaptive bitrate streaming.\nThis API is used to perform intelligent content moderation, such as pornography detection and sensitive information detection.\nThis API is used to perform intelligent content analysis such as tag, category, cover, frame tag, video splitting, highlight, opening and ending clips, and game tracking.\nThis API is used to perform intelligent content recognition such as human face, full text, text keyword, full speech, speech keyword, speech translation, and object recognition.\nThis API is used to perform media quality inspection, such as media format diagnosis, audio and video content detection (jitter, blur, low light, overexposure, screen glitch, noise, mosaic, QR code, and other issues), and no-reference scoring.\n11. Smart subtitle (such as ASR, hotword, and speech translation).\n\nThis API is used to perform intelligent erasure (watermark removal, subtitle removal, privacy protection).",
+ "document": "This API is used to initiate a processing task for video URLs or media files in Cloud Object Storage (COS). Features include:\n- Audio/Video transcoding (such as standard transcoding, top speed codec (TSC) transcoding, audio/video enhancement, visible watermark addition, and digital watermark addition).\n- Adaptive bitrate streaming conversion for audios/videos.\n- Video-to-GIF conversion.\n- Time point screenshot of videos.\n- Sampled screenshot of videos.\n- Image sprite of video screenshots.\n- Media quality inspection (such as media format diagnosis, audio/video content detection, and scoring without reference, where audio/video content detection mainly covers jitter, blur, low light, overexposure, screen glitches, noise, mosaic, QR code, and other issues).\n- Smart subtitle (such as subtitle generation and translation).\n- Smart erasing (such as watermark removal, subtitle removal, and privacy protection).\n- Smart content moderation (such as pornography detection and sensitive information detection).\n- Smart content analysis (such as tags, classifications, covers, frame tags, video splitting, highlights, opening and ending clips, and marking points for games).\n- Smart content recognition (such as human faces, full texts, text keywords, full speech, speech keywords, speech translation, and object recognition).",
  "input": "ProcessMediaRequest",
  "name": "Starts a media processing task",
  "output": "ProcessMediaResponse",
@@ -9146,7 +9153,7 @@
  },
  {
  "disabled": false,
- "document": "Sharding type. available values: <li>ts-segment: HLS+ts segment</li> <li>ts-byterange: HLS+ts byte range</li> <li>mp4-segment: HLS+mp4 segment</li> <li>mp4-byterange: HLS+mp4 byte range</li> <li>ts-packed-audio: ts+packed audio</li> <li>mp4-packed-audio: mp4+packed audio</li> default value: ts-segment. \nNote: the shard format of the adaptive bitrate stream is based on this field.",
+ "document": "Segment type. Valid values: <li>ts-segment: HLS+TS segment</li>; <li>ts-byterange: HLS+TS byte range</li>; <li>mp4-segment: HLS+MP4 segment</li>; <li>mp4-byterange: HLS/DASH+MP4 byte range</li>; <li>ts-packed-audio: TS+Packed Audio</li>; <li>mp4-packed-audio: MP4+Packed Audio</li>. The default value is ts-segment.\n \nNote: The segment format for the adaptive bitrate streaming is based on this field. The value of SegmentType can only be mp4-byterange in DASH format.",
  "example": "ts-segment",
  "member": "string",
  "name": "SegmentType",
@@ -11362,7 +11369,7 @@
  "members": [
  {
  "disabled": false,
- "document": "Unique ID filter of video content analysis templates. Array length limit: 10.",
+ "document": "Filter condition for the unique identifier of the video content analysis template. The array can contain up to 100 unique identifiers.",
  "example": "[30]",
  "member": "int64",
  "name": "Definitions",
@@ -11445,7 +11452,7 @@
  "members": [
  {
  "disabled": false,
- "document": "Unique ID filter of video content recognition templates. Array length limit: 10.",
+ "document": "Filter condition for the unique identifier of the video content recognition template. The array can contain up to 100 unique identifiers.",
  "example": "[30]",
  "member": "int64",
  "name": "Definitions",
@@ -13824,6 +13831,70 @@
  ],
  "type": "object"
  },
+ "DescribeUsageDataRequest": {
+ "document": "DescribeUsageData request structure.",
+ "members": [
+ {
+ "disabled": false,
+ "document": "Start date. Use the [ISO date and time format](https://www.tencentcloud.com/document/product/266/11732?from_cn_redirect=1#iso-.E6.97.A5.E6.9C.9F.E6.A0.BC.E5.BC.8F).",
+ "example": "2019-07-02T00:00:00+08:00",
+ "member": "string",
+ "name": "StartTime",
+ "required": true,
+ "type": "string"
+ },
+ {
+ "disabled": false,
+ "document": "End date, which should be greater than or equal to the start date. Use the [ISO date and time format](https://www.tencentcloud.com/document/product/266/11732?from_cn_redirect=1#iso-.E6.97.A5.E6.9C.9F.E6.A0.BC.E5.BC.8F).",
+ "example": "2019-07-03T00:00:00+08:00",
+ "member": "string",
+ "name": "EndTime",
+ "required": true,
+ "type": "string"
+ },
+ {
+ "disabled": false,
+ "document": "Queries the MPS task type. The transcoding task is queried by default.\n<li>Transcode: transcoding.</li>\n<li>Enhance: enhancement.</li>\n<li>AIAnalysis: intelligent analysis.</li>\n<li>AIRecognition: intelligent recognition.</li>\n<li>AIReview: content moderation.</li>\n<li>Snapshot: screenshot.</li>\n<li>AnimatedGraphics: conversion to GIF.</li>\n<li>AiQualityControl: media quality inspection.</li>\n<li>Evaluation: video assessment.</li>\n<li>ImageProcess: image processing.</li>\n<li>AddBlindWatermark: add a basic copyright digital watermark.</li>\n<li>AddNagraWatermark: add a NAGRA digital watermark.</li>\n<li>ExtractBlindWatermark: extract a basic copyright digital watermark.</li>",
+ "example": "[\"Transcode\"]",
+ "member": "string",
+ "name": "Types",
+ "required": false,
+ "type": "list"
+ },
+ {
+ "disabled": false,
+ "document": "MPS park. The ap-guangzhou park is returned by default.\n<li>ap-guangzhou: Guangzhou.</li>\n<li>ap-hongkong: Hong Kong (China).</li>\n<li>ap-taipei: Taipei (China).</li>\n<li>ap-singapore: Singapore.</li>\n<li>ap-mumbai: India.</li>\n<li>ap-jakarta: Jakarta.</li>\n<li>ap-seoul: Seoul.</li>\n<li>ap-bangkok: Thailand.</li>\n<li>ap-tokyo: Japan.</li>\n<li>na-siliconvalley: Silicon Valley.</li>\n<li>na-ashburn: Virginia.</li>\n<li>na-toronto: Toronto.</li>\n<li>sa-saopaulo: São Paulo.</li>\n<li>eu-frankfurt: Frankfurt.</li>\n<li>eu-moscow: Russia.</li>\n<li>aws: AWS.</li>",
+ "example": "[\"ap-guangzhou\"]",
+ "member": "string",
+ "name": "ProcessRegions",
+ "required": false,
+ "type": "list"
+ }
+ ],
+ "type": "object"
+ },
+ "DescribeUsageDataResponse": {
+ "document": "DescribeUsageData response structure.",
+ "members": [
+ {
+ "disabled": false,
+ "document": "MPS statistical data overview, which displays an overview and detailed data of the queried task.",
+ "example": "None",
+ "member": "TaskStatData",
+ "name": "Data",
+ "output_required": true,
+ "type": "list",
+ "value_allowed_null": false
+ },
+ {
+ "document": "The unique request ID, generated by the server, will be returned for every request (if the request fails to reach the server for other reasons, the request will not obtain a RequestId). RequestId is required for locating a problem.",
+ "member": "string",
+ "name": "RequestId",
+ "type": "string"
+ }
+ ],
+ "type": "object"
+ },
  "DescribeWatermarkTemplatesRequest": {
  "document": "DescribeWatermarkTemplates request structure.",
  "members": [
@@ -18131,7 +18202,7 @@
  {
  "disabled": false,
  "document": "Confidence of the intelligent description, with a value range from 0 to 100.",
- "example": "90",
+ "example": "90.0",
  "member": "float",
  "name": "Confidence",
  "output_required": true,
@@ -18171,12 +18242,42 @@
  {
  "disabled": false,
  "document": "Address of the mind map of a summary task.\nNote: This field may return null, indicating that no valid value can be obtained.",
- "example": "https://xxxx.cos.myqcloud.com",
+ "example": "https://xxxx.cos.myqcloud.com/mindoutput/mindmap_4114033726.png",
  "member": "string",
  "name": "MindMapUrl",
  "output_required": false,
  "type": "string",
  "value_allowed_null": true
+ },
+ {
+ "disabled": false,
+ "document": "Path of the mind map of a summary task.",
+ "example": "/mindoutput/mindmap_4114033726.png",
+ "member": "string",
+ "name": "MindMapPath",
+ "output_required": false,
+ "type": "string",
+ "value_allowed_null": false
+ },
+ {
+ "disabled": false,
+ "document": "Subtitle file path of the video.",
+ "example": "/subtitleoutput/subtitle_149957943.vtt",
+ "member": "string",
+ "name": "SubtitlePath",
+ "output_required": false,
+ "type": "string",
+ "value_allowed_null": false
+ },
+ {
+ "disabled": false,
+ "document": "Storage location of the summary file.",
+ "example": "None",
+ "member": "TaskOutputStorage",
+ "name": "OutputStorage",
+ "output_required": false,
+ "type": "object",
+ "value_allowed_null": false
  }
  ],
  "usage": "out"
@@ -20350,7 +20451,7 @@
  },
  {
  "disabled": false,
- "document": "HLS segment type. Valid values: <li>ts-segment: HLS+TS segment.</li> <li>ts-byterange: HLS+TS byte range.</li> <li>mp4-segment: HLS+MP4 segment.</li> <li>mp4-byterange: HLS+MP4 byte range.</li> <li>ts-packed-audio: TS+Packed audio.</li> <li>mp4-packed-audio: MP4+Packed audio.</li> Default value: ts-segment.\nNote: The HLS segment format for adaptive bitrate streaming is based on this field.",
+ "document": "Segment type. Valid values: <li>ts-segment: HLS+TS segment</li>; <li>ts-byterange: HLS+TS byte range</li>; <li>mp4-segment: HLS+MP4 segment</li>; <li>mp4-byterange: HLS/DASH+MP4 byte range</li>; <li>ts-packed-audio: TS+Packed Audio</li>; <li>mp4-packed-audio: MP4+Packed Audio</li>. The default value is ts-segment.\nNote: The HLS segment format for the adaptive bitrate streaming is based on this field. The value of SegmentType can only be mp4-byterange in DASH format.",
  "example": "ts-segment",
  "member": "string",
  "name": "SegmentType",
@@ -23137,7 +23238,7 @@
  },
  {
  "disabled": false,
- "document": "Unique identifier of the image processing template.",
+ "document": "Unique identifier of the image processing template.\nThe image template feature is in beta testing. If you want to use it, submit a ticket for application.",
  "example": "20001",
  "member": "uint64",
  "name": "Definition",
@@ -27401,6 +27502,32 @@
  ],
  "usage": "out"
  },
+ "SpecificationDataItem": {
+ "document": "Statistical data for the task of the specified specification.",
+ "members": [
+ {
+ "disabled": false,
+ "document": "Task specification.",
+ "example": "",
+ "member": "string",
+ "name": "Specification",
+ "required": true,
+ "type": "string",
+ "value_allowed_null": false
+ },
+ {
+ "disabled": false,
+ "document": "Statistical data.",
+ "example": "",
+ "member": "TaskStatDataItem",
+ "name": "Data",
+ "required": true,
+ "type": "list",
+ "value_allowed_null": false
+ }
+ ],
+ "usage": "out"
+ },
  "SpekeDrm": {
  "document": "FairPlay, WideVine, PlayReady, and other DRM encryption technologies.",
  "members": [
@@ -28159,6 +28286,78 @@
  ],
  "usage": "out"
  },
+ "TaskStatData": {
+ "document": "Statistical data of the task.",
+ "members": [
+ {
+ "disabled": false,
+ "document": "Task type.\n<li>Transcode: transcoding.</li>\n<li>Enhance: enhancement.</li>\n<li>AIAnalysis: intelligent analysis.</li>\n<li>AIRecognition: intelligent recognition.</li>\n<li>AIReview: content moderation.</li>\n<li>Snapshot: screenshot.</li>\n<li>AnimatedGraphics: conversion to GIF.</li>\n<li>ImageProcess: image processing.</li>",
+ "example": "Transcode",
+ "member": "string",
+ "name": "TaskType",
+ "output_required": true,
+ "type": "string",
+ "value_allowed_null": false
+ },
+ {
+ "disabled": false,
+ "document": "Statistical data overview of the number of tasks.\n<li>Transcode: The unit of usage is seconds.</li>\n<li>Enhance: The unit of usage is seconds.</li>\n<li>AIAnalysis: The unit of usage is seconds.</li>\n<li>AIRecognition: The unit of usage is seconds.</li>\n<li>AIReview: The unit of usage is seconds.</li>\n<li>Snapshot: The unit of usage is images.</li>\n<li>AnimatedGraphics: The unit of usage is seconds.</li>\n<li>ImageProcess: The unit of usage is images.</li>",
+ "example": "None",
+ "member": "TaskStatDataItem",
+ "name": "Summary",
+ "output_required": true,
+ "type": "list",
+ "value_allowed_null": false
+ },
+ {
+ "disabled": false,
+ "document": "Statistical data details for tasks of various specifications.\n1. Transcoding specification:\n<li>Audio: audio-only.</li>\n<li>Remuxing: conversion to muxing.</li>\n<li>Other transcoding specifications: {TYPE}.{CODEC}.{SPECIFICATION}.</li> Specifically, valid values for TYPE:\n    Standard: standard transcoding.\n    TESHD-10: TSC transcoding for videos.\n    TESHD-20: TSC transcoding for audios.\n    TESHD-30: TSC transcoding for audios/videos.\n    TESHD-30-SDK: duration-based billing of TSC transcoding SDK for audios/videos.\n    TESHD-30-SDKCores: core number-based billing of TSC transcoding SDK for audios/videos.\n    Edit: video editing.\n  Specifically, valid values for CODEC:\n    H264: H.264 encoding.\n    H265: H.265 encoding.\n    AV1: AV1 encoding.\n    MV-HEVC: MV-HEVC encoding.\n  Specifically, valid values for SPECIFICATION:\n    SD: standard definition.\n    HD: high definition.\n    FHD: full HD.\n    2K: 2K.\n    4K: 4K.\nFor example, TESHD-10.H265.HD indicates TSC transcoding using the H.265 encoding method.\n2. Enhancement specification: video enhancement format: {TYPE}.{CODEC}.{SPECIFICATION}.{FPS}, where valid values for CODEC and SPECIFICATION follow the transcoding descriptions mentioned above, and FPS is valid only when the atomic enhancement type is used; audio enhancement format: {TYPE}.\nValid values for enhancement TYPE:\n<li>Enhance: common enhancement type, which might be any atomic enhancement type.</li>\n<li>Atomic enhancement type.</li> Valid values for video atomic enhancement type:\n    Sdr2hdr: SDR2HDR.\n    SuperResolution: super resolution.\n    InsertFrame: frame interpolation.\n    ComprehensiveEnhancement: comprehensive enhancement.\n    NoiseReduction: video noise reduction.\n    ColorEnhancement: color enhancement.\n    RemoveScratches: scratch removal.\n    Deburr: artifacts removal.\n    DetailEnhancement: detail enhancement.\n    LightEnhancement: low-light enhancement.\n    FaceEnhancement: face enhancement.\n  Valid values for audio atomic enhancement type:\n    AudioNoiseReduction\n    VolumeBalance\n    AudioBeautify\n    AudioSeparation\n\n3. Screenshot specification:\n<li>ImageSprite: sprite.</li>\n<li>SampleSnapshot: sampled screenshot.</li>\n<li>SnapshotByTime: time point screenshot.</li>\n4. Image processing specification: {TYPE}.{CODEC}.{SPECIFICATION}.\n<li>ImageCompression: image encoding.</li>\n<li>ImageSuperResolution: image super resolution.</li>\n<li>EnhanceImageColor: image color enhancement.</li>\n5. Intelligent analysis specification:\n<li>AIAnalysis: major category for analysis.</li>\n<li>VideoTag: video tag.</li>\n<li>VideoClassification: video category.</li>\n<li>SmartCover: smart cover.</li>\n<li>FrameLabel: frame tag.</li>\n<li>VideoSplit: video splitting.</li>\n<li>Highlights: highlights.</li>\n<li>OpeningAndEnding: opening and ending clips.</li>\n6. Intelligent recognition specification:\n<li>AIRecognition: major category for recognition without splitting.</li>\n<li>FaceRecognition: face recognition.</li>\n<li>TextRecognition: optical character recognition.</li>\n<li>ObjectRecognition: object recognition.</li>\n<li>VoiceRecognition: automatic speech recognition.</li>\n<li>VoiceTranslation: speech translation.</li>\n7. There are no segmentation specifications for content moderation and conversion to GIF.",
+ "example": "None",
+ "member": "SpecificationDataItem",
+ "name": "Details",
+ "output_required": true,
+ "type": "list",
+ "value_allowed_null": false
+ }
+ ],
+ "usage": "out"
+ },
+ "TaskStatDataItem": {
+ "document": "Statistical data of the task, including the number of tasks and usage.",
+ "members": [
+ {
+ "disabled": false,
+ "document": "Start time of the time interval where the data resides. Use the [ISO date and time format](https://www.tencentcloud.com/document/product/266/11732?from_cn_redirect=1#iso-.E6.97.A5.E6.9C.9F.E6.A0.BC.E5.BC.8F). For example, when the time granularity is day, 2018-12-01T00:00:00+08:00 indicates the interval from December 1, 2018 (inclusive) to December 2, 2018 (exclusive).",
+ "example": "",
+ "member": "string",
+ "name": "Time",
+ "required": true,
+ "type": "string",
+ "value_allowed_null": false
+ },
+ {
+ "disabled": false,
+ "document": "Number of tasks.",
+ "example": "",
+ "member": "int64",
+ "name": "Count",
+ "required": true,
+ "type": "int",
+ "value_allowed_null": false
+ },
+ {
+ "disabled": false,
+ "document": "Task usage.",
+ "example": "",
+ "member": "int64",
+ "name": "Usage",
+ "required": true,
+ "type": "int",
+ "value_allowed_null": false
+ }
+ ],
+ "usage": "out"
+ },
  "TerrorismConfigureInfo": {
  "document": "The parameters for detecting sensitive information.",
  "members": [
@@ -10,35 +10,41 @@
  ],
  "CreateAIAnalysisTemplate": [
  {
- "document": "This example shows you how to create a custom video analysis template to perform all intelligent analysis tasks.",
- "input": "https://mps.tencentcloudapi.com/?Action=CreateAIAnalysisTemplate\n&Name=Intelligent analysis template\n&Comment=Template 3\n&ClassificationConfigure.Switch=ON\n&TagConfigure.Switch=ON\n&CoverConfigure.Switch=NO\n&FrameTagConfigure.Switch=ON\n&<Common request parameters>",
+ "document": "This example shows you how to create a custom video content analysis template with all intelligent analysis tasks enabled.",
+ "input": "https://mps.tencentcloudapi.com/?Action=CreateAIAnalysisTemplate\n&Name=Intelligent analysis template.\n&Comment=Template 3.\n&ClassificationConfigure.Switch=ON\n&TagConfigure.Switch=ON\n&CoverConfigure.Switch=NO\n&FrameTagConfigure.Switch=ON\n&<Common request parameters>",
  "output": "{\n \"Response\": {\n \"Definition\": 33,\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
- "title": "Creating a template to perform all content analysis tasks"
+ "title": "Creating a Template with All Content Analysis Tasks Enabled"
  },
  {
- "document": "This example shows you how to create a custom video analysis template to perform intelligent categorization.",
- "input": "https://mps.tencentcloudapi.com/?Action=CreateAIAnalysisTemplate\n&Name=Intelligent analysis template\n&Comment=Template 1\n&ClassificationConfigure.Switch=ON\n&<Common request parameters>",
+ "document": "This example shows you how to create a custom video content analysis template with the intelligent classification task enabled.",
+ "input": "https://mps.tencentcloudapi.com/?Action=CreateAIAnalysisTemplate\n&Name=Intelligent analysis template.\n&Comment=Template 1.\n&ClassificationConfigure.Switch=ON\n&<Common request parameters>",
  "output": "{\n \"Response\": {\n \"Definition\": 30,\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
- "title": "Creating a template for a video analysis task"
+ "title": "Creating a Template to Specify an Analysis Task"
  },
  {
- "document": "This example shows you how to create a custom video analysis template to perform intelligent categorization and labeling.",
- "input": "https://mps.tencentcloudapi.com/?Action=CreateAIAnalysisTemplate\n&Name=Intelligent analysis template\n&Comment=Template 2\n&ClassificationConfigure.Switch=ON\n&TagConfigure.Switch=ON\n&<Common request parameters>",
+ "document": "This example shows you how to create a custom video content analysis template with the intelligent classification and intelligent tag tasks enabled.",
+ "input": "https://mps.tencentcloudapi.com/?Action=CreateAIAnalysisTemplate\n&Name=Intelligent analysis template.\n&Comment=Template 2.\n&ClassificationConfigure.Switch=ON\n&TagConfigure.Switch=ON\n&<Common request parameters>",
  "output": "{\n \"Response\": {\n \"Definition\": 31,\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
- "title": "Creating a template for multiple analysis tasks"
+ "title": "Creating a Template to Specify Multiple Analysis Tasks"
  }
  ],
  "CreateAIRecognitionTemplate": [
  {
- "document": "This example shows you how to create a custom video recognition template with face recognition enabled. The default face library is used, and the face recognition filter score is 90.",
- "input": "POST / HTTP/1.1\nHost: mps.tencentcloudapi.com\nContent-Type: application/json\nX-TC-Action: CreateAIRecognitionTemplate\n<Common request parameters>\n\n\n\n{\n \"Comment\": \"Template 2\",\n \"FaceConfigure\": {\n \"Switch\": \"ON\",\n \"Score\": \"90\",\n \"FaceLibrary\": \"Default\"\n },\n \"Name\": \"Intelligent Identification Template\"\n}",
+ "document": "This example shows you how to create a custom video content recognition template with face recognition tasks enabled. The default face library is used, and the face recognition filtering score is 90.",
+ "input": "POST / HTTP/1.1\nHost: mps.tencentcloudapi.com\nContent-Type: application/json\nX-TC-Action: CreateAIRecognitionTemplate\n<Common request parameters>\n\n{\n \"Comment\": \"Template 2.\",\n \"FaceConfigure\": {\n \"Switch\": \"ON\",\n \"Score\": \"90\",\n \"FaceLibrary\": \"Default\"\n },\n \"Name\": \"Intelligent recognition template.\"\n}",
+ "output": "{\n \"Response\": {\n \"Definition\": 31,\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
+ "title": "Creating a Template for Multiple Video Content Recognition Tasks"
+ },
+ {
+ "document": "This example shows you how to create a custom video content recognition template with face recognition tasks enabled. The default face library and user-defined face libraries are used, and the face recognition filtering score is 90.",
+ "input": "POST / HTTP/1.1\nHost: mps.tencentcloudapi.com\nContent-Type: application/json\nX-TC-Action: CreateAIRecognitionTemplate\n<Common request parameters>\n\n{\n \"Comment\": \"Template 3\",\n \"FaceConfigure\": {\n \"Switch\": \"ON\",\n \"FaceLibrary\": \"All\"\n },\n \"Name\": \"Intelligent recognition template.\"\n}",
  "output": "{\n \"Response\": {\n \"Definition\": 32,\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
- "title": "Creating a Template for Multiple Video Recognition Tasks"
+ "title": "Creating a Template for Multiple Recognition Tasks, with the Frame Extraction Interval Specified"
  },
  {
  "document": "This example shows you how to create a speech translation template.",
- "input": "POST / HTTP/1.1\nHost: mps.tencentcloudapi.com\nContent-Type: application/json\nX-TC-Action: CreateAIRecognitionTemplate\n<Common request parameters>\n\n\n\n{\n \"Name\": \"test\"\n}",
- "output": "{\n \"Response\": {\n \"Definition\": 31,\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
+ "input": "POST / HTTP/1.1\nHost: mps.tencentcloudapi.com\nContent-Type: application/json\nX-TC-Action: CreateAIRecognitionTemplate\n<Common request parameters>\n\n{\n \"Name\": \"recog\"\n}",
+ "output": "{\n \"Response\": {\n \"Definition\": 278654,\n \"RequestId\": \"62cca75c-7dd3-4819-ad9d-13b48a4b4018\"\n }\n}",
  "title": "Creating a Speech Translation Template"
  }
  ],
@@ -422,8 +428,8 @@
  {
  "document": " ",
  "input": "https://mps.tencentcloudapi.com/?Action=DescribeAIRecognitionTemplates\n&Definitions.0=30\n&<Common request parameters>",
- "output": "{\n \"Response\": {\n \"TotalCount\": 1,\n \"AIRecognitionTemplateSet\":[\n {\n \"Definition\": 30,\n \"Name\": \"Template 1\",\n \"Type\": \"Preset\",\n \"Comment\": \"Intelligent content recognition template\",\n \"FaceConfigure\":{\n \"Switch\": \"ON\",\n \"Score\": 0.0,\n \"FaceLibrary\": \"All\"\n \"DefaultLibraryLabelSet\": [\n \"xx\"\n ],\n \"UserDefineLibraryLabelSet\": [\n \"xx\"\n ]\n },\n \"OcrFullTextConfigure\": {\n \"Switch\": \"ON\"\n },\n \"OcrWordsConfigure\": {\n \"Switch\": \"OFF\",\n \"LabelSet\": [\n \"xx\"\n ]\n },\n \"AsrFullTextConfigure\": {\n \"Switch\": \"ON\",\n \"SubtitleFormat\": \"xx\"\n },\n \"AsrWordsConfigure\": {\n \"Switch\": \"OFF\",\n \"LabelSet\": [\n \"xx\"\n ]\n },\n \"CreateTime\": \"2019-01-01T12:00:00Z\",\n \"UpdateTime\": \"2019-01-01T16:00:00Z\"\n }\n ],\n \"RequestId\": \"19ae8d8e-dce3-4151-9d4b-5594384987a9\"\n }\n}",
- "title": "Obtaining a Video Recognition Template with ID 30"
+ "output": "{\n \"Response\": {\n \"TotalCount\": 1,\n \"AIRecognitionTemplateSet\": [\n {\n \"Definition\": 30,\n \"Name\": \"Presetting Template30\",\n \"Comment\": \"Default template with all recognition switches enabled. Use only user-defined libraries without filter tags.\",\n \"Type\": \"Preset\",\n \"FaceConfigure\": {\n \"Switch\": \"ON\",\n \"Score\": 95,\n \"DefaultLibraryLabelSet\": [],\n \"UserDefineLibraryLabelSet\": [],\n \"FaceLibrary\": \"UserDefine\"\n },\n \"OcrFullTextConfigure\": {\n \"Switch\": \"ON\"\n },\n \"OcrWordsConfigure\": {\n \"Switch\": \"ON\",\n \"LabelSet\": []\n },\n \"AsrFullTextConfigure\": {\n \"Switch\": \"ON\",\n \"SubtitleFormat\": \"vtt\"\n },\n \"AsrWordsConfigure\": {\n \"Switch\": \"ON\",\n \"LabelSet\": []\n },\n \"TranslateConfigure\": {\n \"Switch\": \"OFF\",\n \"SourceLanguage\": \"en\",\n \"DestinationLanguage\": \"zh\",\n \"SubtitleFormat\": \"vtt\"\n },\n \"CreateTime\": \"2019-06-13T11:07:07+08:00\",\n \"UpdateTime\": \"2020-01-06T08:21:46+08:00\"\n }\n ],\n \"RequestId\": \"9a12345af0-4a9c-ae02-704f3d5a8040\"\n }\n}",
+ "title": "Obtaining a Video Content Recognition Template with the ID of 30"
  }
  ],
  "DescribeAdaptiveDynamicStreamingTemplates": [
@@ -634,6 +640,14 @@
  "title": "Obtaining Transcoding Templates"
  }
  ],
+ "DescribeUsageData": [
+ {
+ "document": "This example shows you how to query usage.",
+ "input": "POST / HTTP/1.1\nHost: mps.tencentcloudapi.com\nContent-Type: application/json\nX-TC-Action: DescribeUsageData\n<Common request parameters>\n\n{\n \"EndTime\": \"2019-07-03T00:00:00+08:00\",\n \"StartTime\": \"2019-07-02T00:00:00+08:00\"\n}",
+ "output": "{\n \"Response\": {\n \"Data\": [\n {\n \"TaskType\": \"Transcode\",\n \"Summary\": [\n {\n \"Time\": \"2019-07-02T00:00:00+08:00\",\n \"Count\": 22,\n \"Usage\": 2200\n },\n {\n \"Time\": \"2019-07-03T00:00:00+08:00\",\n \"Count\": 22,\n \"Usage\": 2200\n }\n ],\n \"Details\": [\n {\n \"Specification\": \"Audio\",\n \"Data\": [\n {\n \"Time\": \"2019-07-02T00:00:00+08:00\",\n \"Count\": 1,\n \"Usage\": 10\n },\n {\n \"Time\": \"2019-07-03T00:00:00+08:00\",\n \"Count\": 1,\n \"Usage\": 10\n }\n ]\n },\n {\n \"Specification\": \"Standard.H265.4K\",\n \"Data\": [\n {\n \"Time\": \"2019-07-02T00:00:00+08:00\",\n \"Count\": 1,\n \"Usage\": 10\n },\n {\n \"Time\": \"2019-07-03T00:00:00+08:00\",\n \"Count\": 1,\n \"Usage\": 10\n }\n ]\n },\n {\n \"Specification\": \"TESHD-10.H265.4K\",\n \"Data\": [\n {\n \"Time\": \"2019-07-02T00:00:00+08:00\",\n \"Count\": 1,\n \"Usage\": 10\n },\n {\n \"Time\": \"2019-07-03T00:00:00+08:00\",\n \"Count\": 1,\n \"Usage\": 10\n }\n ]\n }\n ]\n }\n ],\n \"RequestId\": \"requestId\"\n }\n}",
+ "title": "Querying MPS Usage"
+ }
+ ],
  "DescribeWatermarkTemplates": [
  {
  "document": " ",
@@ -754,22 +768,22 @@
  ],
  "ModifyAIAnalysisTemplate": [
  {
- "document": "This example shows you how to disable intelligent thumbnail generation in a custom video analysis template.",
+ "document": "This example shows you how to modify a custom video content analysis template to disable the smart cover task.",
  "input": "https://mps.tencentcloudapi.com/?Action=ModifyAIAnalysisTemplate\n&Definition=30\n&CoverConfigure.Switch=OFF\n&<Common request parameters>",
  "output": "{\n \"Response\": {\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
- "title": "Disabling Intelligent Thumbnail Generation"
+ "title": "Disabling a Smart Cover Task"
  },
  {
- "document": "This example shows you how to enable intelligent tagging and disable intelligent thumbnail generation in a custom video analysis template.",
+ "document": "This example shows you how to modify a custom video content analysis template to enable the smart tag task and disable the smart cover task.",
  "input": "https://mps.tencentcloudapi.com/?Action=ModifyAIAnalysisTemplate\n&Definition=30\n&TagConfigure.Switch=ON\n&CoverConfigure.Switch=OFF\n&<Common request parameters>",
  "output": "{\n \"Response\": {\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
- "title": "Enabling Intelligent Tagging and Disabling Intelligent Thumbnail Generation"
+ "title": "Enabling and Disabling a Content Analysis Task Simultaneously"
  },
  {
- "document": "This example shows you how to enable intelligent thumbnail generation in a custom video analysis template.",
+ "document": "This example shows you how to modify a custom video content analysis template to enable the smart cover task.",
  "input": "https://mps.tencentcloudapi.com/?Action=ModifyAIAnalysisTemplate\n&Definition=30\n&CoverConfigure.Switch=ON\n&<Common request parameters>",
  "output": "{\n \"Response\": {\n \"RequestId\": \"12ae8d8e-dce3-4151-9d4b-5594145287e1\"\n }\n}",
- "title": "Enabling Intelligent Thumbnail Generation"
+ "title": "Enabling a Smart Cover Task"
  }
  ],
  "ModifyAIRecognitionTemplate": [
@@ -11899,6 +11899,61 @@ def doDeleteZone(args, parsed_globals):
      FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


+ def doDescribeTimingL7OriginPullData(args, parsed_globals):
+     g_param = parse_global_arg(parsed_globals)
+
+     if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
+         cred = credential.CVMRoleCredential()
+     elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
+         cred = credential.STSAssumeRoleCredential(
+             g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
+             g_param[OptionsDefine.RoleSessionName.replace('-', '_')], endpoint=g_param["sts_cred_endpoint"]
+         )
+     elif os.getenv(OptionsDefine.ENV_TKE_REGION) \
+             and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) \
+             and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) \
+             and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
+         cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
+     else:
+         cred = credential.Credential(
+             g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
+         )
+     http_profile = HttpProfile(
+         reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
+         reqMethod="POST",
+         endpoint=g_param[OptionsDefine.Endpoint],
+         proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
+     )
+     profile = ClientProfile(httpProfile=http_profile, signMethod="TC3-HMAC-SHA256")
+     if g_param[OptionsDefine.Language]:
+         profile.language = g_param[OptionsDefine.Language]
+     mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
+     client = mod.TeoClient(cred, g_param[OptionsDefine.Region], profile)
+     client._sdkVersion += ("_CLI_" + __version__)
+     models = MODELS_MAP[g_param[OptionsDefine.Version]]
+     model = models.DescribeTimingL7OriginPullDataRequest()
+     model.from_json_string(json.dumps(args))
+     start_time = time.time()
+     while True:
+         rsp = client.DescribeTimingL7OriginPullData(model)
+         result = rsp.to_json_string()
+         try:
+             json_obj = json.loads(result)
+         except TypeError as e:
+             json_obj = json.loads(result.decode('utf-8'))  # python3.3
+         if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
+             break
+         cur_time = time.time()
+         if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
+             raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
+                               (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
+                                search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
+         else:
+             print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
+         time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
+     FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
+
+
  def doApplyFreeCertificate(args, parsed_globals):
      g_param = parse_global_arg(parsed_globals)

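The generated action above follows the standard tccli pattern: resolve credentials (CVM role, STS assume-role, TKE OIDC, or static keys), build an HttpProfile/ClientProfile, instantiate TeoClient, deserialize the CLI arguments into the request model, then poll with the optional waiter. Stripped of that plumbing, it reduces to a direct SDK call; a hedged sketch in which the credentials and time window are placeholders and the remaining required members are whatever teo/v20220901/api.json defines:

```python
import json

from tencentcloud.common import credential
from tencentcloud.teo.v20220901 import teo_client, models

cred = credential.Credential("SECRET_ID", "SECRET_KEY")  # placeholder credentials
client = teo_client.TeoClient(cred, "ap-guangzhou")

req = models.DescribeTimingL7OriginPullDataRequest()
req.from_json_string(json.dumps({
    "StartTime": "2022-07-31T00:00:00+08:00",  # placeholder window
    "EndTime": "2022-08-01T00:00:00+08:00",
    # plus the metric/zone filters required by teo/v20220901/api.json
}))
rsp = client.DescribeTimingL7OriginPullData(req)
print(rsp.to_json_string())
```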
@@ -13613,6 +13668,7 @@ ACTION_MAP = {
      "DescribeFunctions": doDescribeFunctions,
      "ReclaimZone": doReclaimZone,
      "DeleteZone": doDeleteZone,
+     "DescribeTimingL7OriginPullData": doDescribeTimingL7OriginPullData,
      "ApplyFreeCertificate": doApplyFreeCertificate,
      "DescribeDDosAttackEvent": doDescribeDDosAttackEvent,
      "DescribeEnvironments": doDescribeEnvironments,