alibabacloud-aimiaobi20230801 1.37.1__py3-none-any.whl → 1.37.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

alibabacloud_aimiaobi20230801/__init__.py
@@ -1 +1 @@
-__version__ = '1.37.1'
+__version__ = '1.37.3'

alibabacloud_aimiaobi20230801/client.py
@@ -7119,17 +7119,21 @@ class Client(OpenApiClient):
 
     def get_dataset_document_with_options(
         self,
-        request: ai_miao_bi_20230801_models.GetDatasetDocumentRequest,
+        tmp_req: ai_miao_bi_20230801_models.GetDatasetDocumentRequest,
         runtime: util_models.RuntimeOptions,
     ) -> ai_miao_bi_20230801_models.GetDatasetDocumentResponse:
         """
         @summary 获取数据集文档
 
-        @param request: GetDatasetDocumentRequest
+        @param tmp_req: GetDatasetDocumentRequest
         @param runtime: runtime options for this request RuntimeOptions
         @return: GetDatasetDocumentResponse
         """
-        UtilClient.validate_model(request)
+        UtilClient.validate_model(tmp_req)
+        request = ai_miao_bi_20230801_models.GetDatasetDocumentShrinkRequest()
+        OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.include_fields):
+            request.include_fields_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.include_fields, 'IncludeFields', 'json')
         body = {}
         if not UtilClient.is_unset(request.dataset_id):
             body['DatasetId'] = request.dataset_id
@@ -7139,6 +7143,8 @@ class Client(OpenApiClient):
             body['DocId'] = request.doc_id
         if not UtilClient.is_unset(request.doc_uuid):
             body['DocUuid'] = request.doc_uuid
+        if not UtilClient.is_unset(request.include_fields_shrink):
+            body['IncludeFields'] = request.include_fields_shrink
         if not UtilClient.is_unset(request.workspace_id):
             body['WorkspaceId'] = request.workspace_id
         req = open_api_models.OpenApiRequest(
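
The two hunks above add an optional IncludeFields parameter to GetDatasetDocument: callers pass a Python list on the request model (include_fields), and the client serializes it to a JSON string on a generated "shrink" request before placing it in the request body. A minimal calling sketch, assuming the standard Tea/OpenAPI client setup; the credentials, endpoint, workspace ID, and the field names inside include_fields are illustrative placeholders, not values taken from this diff:

    from alibabacloud_tea_openapi import models as open_api_models
    from alibabacloud_tea_util import models as util_models
    from alibabacloud_aimiaobi20230801.client import Client
    from alibabacloud_aimiaobi20230801 import models as ai_miao_bi_20230801_models

    # Hypothetical client configuration; the endpoint is shown for illustration only.
    config = open_api_models.Config(
        access_key_id='<ACCESS_KEY_ID>',
        access_key_secret='<ACCESS_KEY_SECRET>',
        endpoint='aimiaobi.cn-beijing.aliyuncs.com',
    )
    client = Client(config)

    request = ai_miao_bi_20230801_models.GetDatasetDocumentRequest(
        workspace_id='<WORKSPACE_ID>',           # required
        dataset_id=123,
        doc_uuid='<DOC_UUID>',
        include_fields=['Content', 'Metadata'],  # new in 1.37.3; field names are illustrative
    )
    response = client.get_dataset_document_with_options(request, util_models.RuntimeOptions())
    print(response.body)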

@@ -7162,17 +7168,21 @@ class Client(OpenApiClient):
 
     async def get_dataset_document_with_options_async(
         self,
-        request: ai_miao_bi_20230801_models.GetDatasetDocumentRequest,
+        tmp_req: ai_miao_bi_20230801_models.GetDatasetDocumentRequest,
         runtime: util_models.RuntimeOptions,
     ) -> ai_miao_bi_20230801_models.GetDatasetDocumentResponse:
         """
         @summary 获取数据集文档
 
-        @param request: GetDatasetDocumentRequest
+        @param tmp_req: GetDatasetDocumentRequest
         @param runtime: runtime options for this request RuntimeOptions
         @return: GetDatasetDocumentResponse
         """
-        UtilClient.validate_model(request)
+        UtilClient.validate_model(tmp_req)
+        request = ai_miao_bi_20230801_models.GetDatasetDocumentShrinkRequest()
+        OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.include_fields):
+            request.include_fields_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.include_fields, 'IncludeFields', 'json')
         body = {}
         if not UtilClient.is_unset(request.dataset_id):
             body['DatasetId'] = request.dataset_id
@@ -7182,6 +7192,8 @@ class Client(OpenApiClient):
             body['DocId'] = request.doc_id
         if not UtilClient.is_unset(request.doc_uuid):
             body['DocUuid'] = request.doc_uuid
+        if not UtilClient.is_unset(request.include_fields_shrink):
+            body['IncludeFields'] = request.include_fields_shrink
         if not UtilClient.is_unset(request.workspace_id):
             body['WorkspaceId'] = request.workspace_id
         req = open_api_models.OpenApiRequest(
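
The async variant applies the same IncludeFields handling. A sketch of driving it with asyncio, assuming the client and request from the synchronous sketch above:

    import asyncio

    async def fetch_document():
        # client and request are built exactly as in the synchronous sketch above
        return await client.get_dataset_document_with_options_async(request, util_models.RuntimeOptions())

    response = asyncio.run(fetch_document())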

@@ -11390,36 +11402,36 @@ class Client(OpenApiClient):
         @return: ListDocumentRetrieveResponse
         """
         UtilClient.validate_model(request)
-        query = {}
+        body = {}
         if not UtilClient.is_unset(request.content_type):
-            query['ContentType'] = request.content_type
+            body['ContentType'] = request.content_type
         if not UtilClient.is_unset(request.element_scope):
-            query['ElementScope'] = request.element_scope
+            body['ElementScope'] = request.element_scope
         if not UtilClient.is_unset(request.end_date):
-            query['EndDate'] = request.end_date
-        if not UtilClient.is_unset(request.office):
-            query['Office'] = request.office
-        if not UtilClient.is_unset(request.region):
-            query['Region'] = request.region
-        if not UtilClient.is_unset(request.source):
-            query['Source'] = request.source
-        if not UtilClient.is_unset(request.start_date):
-            query['StartDate'] = request.start_date
-        if not UtilClient.is_unset(request.sub_content_type):
-            query['SubContentType'] = request.sub_content_type
-        if not UtilClient.is_unset(request.word_size):
-            query['WordSize'] = request.word_size
-        body = {}
+            body['EndDate'] = request.end_date
         if not UtilClient.is_unset(request.max_results):
             body['MaxResults'] = request.max_results
         if not UtilClient.is_unset(request.next_token):
             body['NextToken'] = request.next_token
+        if not UtilClient.is_unset(request.office):
+            body['Office'] = request.office
         if not UtilClient.is_unset(request.query):
             body['Query'] = request.query
+        if not UtilClient.is_unset(request.region):
+            body['Region'] = request.region
+        if not UtilClient.is_unset(request.source):
+            body['Source'] = request.source
+        if not UtilClient.is_unset(request.start_date):
+            body['StartDate'] = request.start_date
+        if not UtilClient.is_unset(request.sub_content_type):
+            body['SubContentType'] = request.sub_content_type
+        if not UtilClient.is_unset(request.subject_classify):
+            body['SubjectClassify'] = request.subject_classify
+        if not UtilClient.is_unset(request.word_size):
+            body['WordSize'] = request.word_size
         if not UtilClient.is_unset(request.workspace_id):
             body['WorkspaceId'] = request.workspace_id
         req = open_api_models.OpenApiRequest(
-            query=OpenApiUtilClient.query(query),
             body=OpenApiUtilClient.parse_to_map(body)
         )
         params = open_api_models.Params(
@@ -11451,36 +11463,36 @@ class Client(OpenApiClient):
         @return: ListDocumentRetrieveResponse
         """
         UtilClient.validate_model(request)
-        query = {}
+        body = {}
         if not UtilClient.is_unset(request.content_type):
-            query['ContentType'] = request.content_type
+            body['ContentType'] = request.content_type
         if not UtilClient.is_unset(request.element_scope):
-            query['ElementScope'] = request.element_scope
+            body['ElementScope'] = request.element_scope
         if not UtilClient.is_unset(request.end_date):
-            query['EndDate'] = request.end_date
-        if not UtilClient.is_unset(request.office):
-            query['Office'] = request.office
-        if not UtilClient.is_unset(request.region):
-            query['Region'] = request.region
-        if not UtilClient.is_unset(request.source):
-            query['Source'] = request.source
-        if not UtilClient.is_unset(request.start_date):
-            query['StartDate'] = request.start_date
-        if not UtilClient.is_unset(request.sub_content_type):
-            query['SubContentType'] = request.sub_content_type
-        if not UtilClient.is_unset(request.word_size):
-            query['WordSize'] = request.word_size
-        body = {}
+            body['EndDate'] = request.end_date
         if not UtilClient.is_unset(request.max_results):
             body['MaxResults'] = request.max_results
         if not UtilClient.is_unset(request.next_token):
             body['NextToken'] = request.next_token
+        if not UtilClient.is_unset(request.office):
+            body['Office'] = request.office
         if not UtilClient.is_unset(request.query):
             body['Query'] = request.query
+        if not UtilClient.is_unset(request.region):
+            body['Region'] = request.region
+        if not UtilClient.is_unset(request.source):
+            body['Source'] = request.source
+        if not UtilClient.is_unset(request.start_date):
+            body['StartDate'] = request.start_date
+        if not UtilClient.is_unset(request.sub_content_type):
+            body['SubContentType'] = request.sub_content_type
+        if not UtilClient.is_unset(request.subject_classify):
+            body['SubjectClassify'] = request.subject_classify
+        if not UtilClient.is_unset(request.word_size):
+            body['WordSize'] = request.word_size
         if not UtilClient.is_unset(request.workspace_id):
             body['WorkspaceId'] = request.workspace_id
         req = open_api_models.OpenApiRequest(
-            query=OpenApiUtilClient.query(query),
             body=OpenApiUtilClient.parse_to_map(body)
         )
         params = open_api_models.Params(
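
In both the synchronous and asynchronous ListDocumentRetrieve methods, the filter parameters (ContentType, ElementScope, EndDate, Office, Region, Source, StartDate, SubContentType, WordSize) move from the query string into the request body, a new SubjectClassify filter is added, and the OpenApiRequest no longer carries a query map. Callers keep populating the same request model, so only the wire placement changes. A minimal sketch, reusing the placeholder client from the earlier example and assuming the generated list_document_retrieve_with_options wrapper keeps its usual signature; all filter values are illustrative:

    retrieve_request = ai_miao_bi_20230801_models.ListDocumentRetrieveRequest(
        workspace_id='<WORKSPACE_ID>',      # required
        query='quarterly earnings',
        content_type='news',                # illustrative value
        subject_classify='economy',         # new in 1.37.3; illustrative value
        start_date='2024-01-01',
        end_date='2024-12-31',
        max_results=20,
    )
    retrieve_response = client.list_document_retrieve_with_options(
        retrieve_request, util_models.RuntimeOptions()
    )
    print(retrieve_response.body)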

alibabacloud_aimiaobi20230801/models.py
@@ -944,6 +944,140 @@ class AddAuditTermsResponse(TeaModel):
         return self
 
 
+class AddDatasetDocumentRequestDocumentMetadataAsrSentences(TeaModel):
+    def __init__(
+        self,
+        end_time: int = None,
+        start_time: int = None,
+        text: str = None,
+    ):
+        self.end_time = end_time
+        self.start_time = start_time
+        self.text = text
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.end_time is not None:
+            result['EndTime'] = self.end_time
+        if self.start_time is not None:
+            result['StartTime'] = self.start_time
+        if self.text is not None:
+            result['Text'] = self.text
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('EndTime') is not None:
+            self.end_time = m.get('EndTime')
+        if m.get('StartTime') is not None:
+            self.start_time = m.get('StartTime')
+        if m.get('Text') is not None:
+            self.text = m.get('Text')
+        return self
+
+
+class AddDatasetDocumentRequestDocumentMetadataVideoShots(TeaModel):
+    def __init__(
+        self,
+        end_time: int = None,
+        start_time: int = None,
+        text: str = None,
+    ):
+        self.end_time = end_time
+        self.start_time = start_time
+        self.text = text
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.end_time is not None:
+            result['EndTime'] = self.end_time
+        if self.start_time is not None:
+            result['StartTime'] = self.start_time
+        if self.text is not None:
+            result['Text'] = self.text
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('EndTime') is not None:
+            self.end_time = m.get('EndTime')
+        if m.get('StartTime') is not None:
+            self.start_time = m.get('StartTime')
+        if m.get('Text') is not None:
+            self.text = m.get('Text')
+        return self
+
+
+class AddDatasetDocumentRequestDocumentMetadata(TeaModel):
+    def __init__(
+        self,
+        asr_sentences: List[AddDatasetDocumentRequestDocumentMetadataAsrSentences] = None,
+        text: str = None,
+        video_shots: List[AddDatasetDocumentRequestDocumentMetadataVideoShots] = None,
+    ):
+        self.asr_sentences = asr_sentences
+        self.text = text
+        self.video_shots = video_shots
+
+    def validate(self):
+        if self.asr_sentences:
+            for k in self.asr_sentences:
+                if k:
+                    k.validate()
+        if self.video_shots:
+            for k in self.video_shots:
+                if k:
+                    k.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        result['AsrSentences'] = []
+        if self.asr_sentences is not None:
+            for k in self.asr_sentences:
+                result['AsrSentences'].append(k.to_map() if k else None)
+        if self.text is not None:
+            result['Text'] = self.text
+        result['VideoShots'] = []
+        if self.video_shots is not None:
+            for k in self.video_shots:
+                result['VideoShots'].append(k.to_map() if k else None)
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        self.asr_sentences = []
+        if m.get('AsrSentences') is not None:
+            for k in m.get('AsrSentences'):
+                temp_model = AddDatasetDocumentRequestDocumentMetadataAsrSentences()
+                self.asr_sentences.append(temp_model.from_map(k))
+        if m.get('Text') is not None:
+            self.text = m.get('Text')
+        self.video_shots = []
+        if m.get('VideoShots') is not None:
+            for k in m.get('VideoShots'):
+                temp_model = AddDatasetDocumentRequestDocumentMetadataVideoShots()
+                self.video_shots.append(temp_model.from_map(k))
+        return self
+
+
 class AddDatasetDocumentRequestDocumentMultimodalMedias(TeaModel):
     def __init__(
         self,
@@ -994,6 +1128,7 @@ class AddDatasetDocumentRequestDocument(TeaModel):
         extend_1: str = None,
         extend_2: str = None,
         extend_3: str = None,
+        metadata: AddDatasetDocumentRequestDocumentMetadata = None,
         multimodal_index_name: str = None,
         multimodal_medias: List[AddDatasetDocumentRequestDocumentMultimodalMedias] = None,
         pub_time: str = None,
@@ -1010,6 +1145,7 @@ class AddDatasetDocumentRequestDocument(TeaModel):
         self.extend_1 = extend_1
         self.extend_2 = extend_2
         self.extend_3 = extend_3
+        self.metadata = metadata
         self.multimodal_index_name = multimodal_index_name
         self.multimodal_medias = multimodal_medias
         self.pub_time = pub_time
@@ -1019,6 +1155,8 @@ class AddDatasetDocumentRequestDocument(TeaModel):
         self.url = url
 
     def validate(self):
+        if self.metadata:
+            self.metadata.validate()
         if self.multimodal_medias:
             for k in self.multimodal_medias:
                 if k:
@@ -1046,6 +1184,8 @@ class AddDatasetDocumentRequestDocument(TeaModel):
             result['Extend2'] = self.extend_2
         if self.extend_3 is not None:
             result['Extend3'] = self.extend_3
+        if self.metadata is not None:
+            result['Metadata'] = self.metadata.to_map()
         if self.multimodal_index_name is not None:
             result['MultimodalIndexName'] = self.multimodal_index_name
         result['MultimodalMedias'] = []
@@ -1082,6 +1222,9 @@ class AddDatasetDocumentRequestDocument(TeaModel):
             self.extend_2 = m.get('Extend2')
         if m.get('Extend3') is not None:
             self.extend_3 = m.get('Extend3')
+        if m.get('Metadata') is not None:
+            temp_model = AddDatasetDocumentRequestDocumentMetadata()
+            self.metadata = temp_model.from_map(m['Metadata'])
         if m.get('MultimodalIndexName') is not None:
            self.multimodal_index_name = m.get('MultimodalIndexName')
         self.multimodal_medias = []
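
The models.py hunks above attach structured media metadata to AddDatasetDocument: each document can now carry a Metadata object with a plain Text field plus AsrSentences and VideoShots lists, whose entries hold StartTime, EndTime, and Text. A sketch of building that part of a request; the time unit is not stated in this diff, and all values (including the URL) are illustrative:

    metadata = ai_miao_bi_20230801_models.AddDatasetDocumentRequestDocumentMetadata(
        text='Full transcript of the clip',
        asr_sentences=[
            ai_miao_bi_20230801_models.AddDatasetDocumentRequestDocumentMetadataAsrSentences(
                start_time=0, end_time=4200, text='Welcome to the briefing.'
            ),
        ],
        video_shots=[
            ai_miao_bi_20230801_models.AddDatasetDocumentRequestDocumentMetadataVideoShots(
                start_time=0, end_time=15000, text='Opening shot of the venue.'
            ),
        ],
    )
    document = ai_miao_bi_20230801_models.AddDatasetDocumentRequestDocument(
        url='https://example.com/briefing.mp4',
        metadata=metadata,   # new in 1.37.3
    )
    # the document is then attached to the AddDatasetDocument request in the usual way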

@@ -15058,12 +15201,72 @@ class GetDatasetDocumentRequest(TeaModel):
         dataset_name: str = None,
         doc_id: str = None,
         doc_uuid: str = None,
+        include_fields: List[str] = None,
+        workspace_id: str = None,
+    ):
+        self.dataset_id = dataset_id
+        self.dataset_name = dataset_name
+        self.doc_id = doc_id
+        self.doc_uuid = doc_uuid
+        self.include_fields = include_fields
+        # This parameter is required.
+        self.workspace_id = workspace_id
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.dataset_id is not None:
+            result['DatasetId'] = self.dataset_id
+        if self.dataset_name is not None:
+            result['DatasetName'] = self.dataset_name
+        if self.doc_id is not None:
+            result['DocId'] = self.doc_id
+        if self.doc_uuid is not None:
+            result['DocUuid'] = self.doc_uuid
+        if self.include_fields is not None:
+            result['IncludeFields'] = self.include_fields
+        if self.workspace_id is not None:
+            result['WorkspaceId'] = self.workspace_id
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('DatasetId') is not None:
+            self.dataset_id = m.get('DatasetId')
+        if m.get('DatasetName') is not None:
+            self.dataset_name = m.get('DatasetName')
+        if m.get('DocId') is not None:
+            self.doc_id = m.get('DocId')
+        if m.get('DocUuid') is not None:
+            self.doc_uuid = m.get('DocUuid')
+        if m.get('IncludeFields') is not None:
+            self.include_fields = m.get('IncludeFields')
+        if m.get('WorkspaceId') is not None:
+            self.workspace_id = m.get('WorkspaceId')
+        return self
+
+
+class GetDatasetDocumentShrinkRequest(TeaModel):
+    def __init__(
+        self,
+        dataset_id: int = None,
+        dataset_name: str = None,
+        doc_id: str = None,
+        doc_uuid: str = None,
+        include_fields_shrink: str = None,
         workspace_id: str = None,
     ):
         self.dataset_id = dataset_id
         self.dataset_name = dataset_name
         self.doc_id = doc_id
         self.doc_uuid = doc_uuid
+        self.include_fields_shrink = include_fields_shrink
         # This parameter is required.
         self.workspace_id = workspace_id
 
@@ -15084,6 +15287,8 @@ class GetDatasetDocumentRequest(TeaModel):
             result['DocId'] = self.doc_id
         if self.doc_uuid is not None:
             result['DocUuid'] = self.doc_uuid
+        if self.include_fields_shrink is not None:
+            result['IncludeFields'] = self.include_fields_shrink
         if self.workspace_id is not None:
             result['WorkspaceId'] = self.workspace_id
         return result
@@ -15098,20 +15303,159 @@ class GetDatasetDocumentRequest(TeaModel):
             self.doc_id = m.get('DocId')
         if m.get('DocUuid') is not None:
             self.doc_uuid = m.get('DocUuid')
+        if m.get('IncludeFields') is not None:
+            self.include_fields_shrink = m.get('IncludeFields')
         if m.get('WorkspaceId') is not None:
             self.workspace_id = m.get('WorkspaceId')
         return self
 
 
+class GetDatasetDocumentResponseBodyDataMetadataAsrSentences(TeaModel):
+    def __init__(
+        self,
+        end_time: int = None,
+        start_time: int = None,
+        text: str = None,
+    ):
+        self.end_time = end_time
+        self.start_time = start_time
+        self.text = text
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.end_time is not None:
+            result['EndTime'] = self.end_time
+        if self.start_time is not None:
+            result['StartTime'] = self.start_time
+        if self.text is not None:
+            result['Text'] = self.text
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('EndTime') is not None:
+            self.end_time = m.get('EndTime')
+        if m.get('StartTime') is not None:
+            self.start_time = m.get('StartTime')
+        if m.get('Text') is not None:
+            self.text = m.get('Text')
+        return self
+
+
+class GetDatasetDocumentResponseBodyDataMetadataVideoShots(TeaModel):
+    def __init__(
+        self,
+        end_time: int = None,
+        start_time: int = None,
+        text: str = None,
+    ):
+        self.end_time = end_time
+        self.start_time = start_time
+        self.text = text
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.end_time is not None:
+            result['EndTime'] = self.end_time
+        if self.start_time is not None:
+            result['StartTime'] = self.start_time
+        if self.text is not None:
+            result['Text'] = self.text
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('EndTime') is not None:
+            self.end_time = m.get('EndTime')
+        if m.get('StartTime') is not None:
+            self.start_time = m.get('StartTime')
+        if m.get('Text') is not None:
+            self.text = m.get('Text')
+        return self
+
+
+class GetDatasetDocumentResponseBodyDataMetadata(TeaModel):
+    def __init__(
+        self,
+        asr_sentences: List[GetDatasetDocumentResponseBodyDataMetadataAsrSentences] = None,
+        text: str = None,
+        video_shots: List[GetDatasetDocumentResponseBodyDataMetadataVideoShots] = None,
+    ):
+        self.asr_sentences = asr_sentences
+        self.text = text
+        self.video_shots = video_shots
+
+    def validate(self):
+        if self.asr_sentences:
+            for k in self.asr_sentences:
+                if k:
+                    k.validate()
+        if self.video_shots:
+            for k in self.video_shots:
+                if k:
+                    k.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        result['AsrSentences'] = []
+        if self.asr_sentences is not None:
+            for k in self.asr_sentences:
+                result['AsrSentences'].append(k.to_map() if k else None)
+        if self.text is not None:
+            result['Text'] = self.text
+        result['VideoShots'] = []
+        if self.video_shots is not None:
+            for k in self.video_shots:
+                result['VideoShots'].append(k.to_map() if k else None)
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        self.asr_sentences = []
+        if m.get('AsrSentences') is not None:
+            for k in m.get('AsrSentences'):
+                temp_model = GetDatasetDocumentResponseBodyDataMetadataAsrSentences()
+                self.asr_sentences.append(temp_model.from_map(k))
+        if m.get('Text') is not None:
+            self.text = m.get('Text')
+        self.video_shots = []
+        if m.get('VideoShots') is not None:
+            for k in m.get('VideoShots'):
+                temp_model = GetDatasetDocumentResponseBodyDataMetadataVideoShots()
+                self.video_shots.append(temp_model.from_map(k))
+        return self
+
+
 class GetDatasetDocumentResponseBodyData(TeaModel):
     def __init__(
         self,
         content: str = None,
         disable_handle_multimodal_media: bool = None,
         doc_id: str = None,
+        doc_type: str = None,
         doc_uuid: str = None,
+        metadata: GetDatasetDocumentResponseBodyDataMetadata = None,
         pub_time: str = None,
         source_from: str = None,
+        status: int = None,
         summary: str = None,
         title: str = None,
         url: str = None,

@@ -15119,15 +15463,19 @@ class GetDatasetDocumentResponseBodyData(TeaModel):
         self.content = content
         self.disable_handle_multimodal_media = disable_handle_multimodal_media
         self.doc_id = doc_id
+        self.doc_type = doc_type
         self.doc_uuid = doc_uuid
+        self.metadata = metadata
         self.pub_time = pub_time
         self.source_from = source_from
+        self.status = status
         self.summary = summary
         self.title = title
         self.url = url
 
     def validate(self):
-        pass
+        if self.metadata:
+            self.metadata.validate()
 
     def to_map(self):
         _map = super().to_map()
@@ -15141,12 +15489,18 @@ class GetDatasetDocumentResponseBodyData(TeaModel):
             result['DisableHandleMultimodalMedia'] = self.disable_handle_multimodal_media
         if self.doc_id is not None:
             result['DocId'] = self.doc_id
+        if self.doc_type is not None:
+            result['DocType'] = self.doc_type
         if self.doc_uuid is not None:
             result['DocUuid'] = self.doc_uuid
+        if self.metadata is not None:
+            result['Metadata'] = self.metadata.to_map()
         if self.pub_time is not None:
             result['PubTime'] = self.pub_time
         if self.source_from is not None:
             result['SourceFrom'] = self.source_from
+        if self.status is not None:
+            result['Status'] = self.status
         if self.summary is not None:
             result['Summary'] = self.summary
         if self.title is not None:
@@ -15163,12 +15517,19 @@ class GetDatasetDocumentResponseBodyData(TeaModel):
             self.disable_handle_multimodal_media = m.get('DisableHandleMultimodalMedia')
         if m.get('DocId') is not None:
             self.doc_id = m.get('DocId')
+        if m.get('DocType') is not None:
+            self.doc_type = m.get('DocType')
         if m.get('DocUuid') is not None:
             self.doc_uuid = m.get('DocUuid')
+        if m.get('Metadata') is not None:
+            temp_model = GetDatasetDocumentResponseBodyDataMetadata()
+            self.metadata = temp_model.from_map(m['Metadata'])
         if m.get('PubTime') is not None:
             self.pub_time = m.get('PubTime')
         if m.get('SourceFrom') is not None:
             self.source_from = m.get('SourceFrom')
+        if m.get('Status') is not None:
+            self.status = m.get('Status')
         if m.get('Summary') is not None:
             self.summary = m.get('Summary')
         if m.get('Title') is not None:
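
GetDatasetDocumentResponseBodyData gains DocType, Status, and a Metadata object mirroring the request-side shape (Text, AsrSentences, VideoShots). A sketch of reading the new fields, assuming response is the GetDatasetDocumentResponse from the earlier sketch and that the generated body/data attribute layout follows the usual pattern:

    data = response.body.data
    print(data.doc_type, data.status)            # new scalar fields in 1.37.3
    if data.metadata is not None:
        print(data.metadata.text)
        for sentence in (data.metadata.asr_sentences or []):
            print(sentence.start_time, sentence.end_time, sentence.text)
        for shot in (data.metadata.video_shots or []):
            print(shot.start_time, shot.end_time, shot.text)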

@@ -27538,6 +27899,7 @@ class ListDocumentRetrieveRequest(TeaModel):
         source: str = None,
         start_date: str = None,
         sub_content_type: str = None,
+        subject_classify: str = None,
         word_size: str = None,
         workspace_id: str = None,
     ):
@@ -27552,6 +27914,7 @@ class ListDocumentRetrieveRequest(TeaModel):
         self.source = source
         self.start_date = start_date
         self.sub_content_type = sub_content_type
+        self.subject_classify = subject_classify
         self.word_size = word_size
         # This parameter is required.
         self.workspace_id = workspace_id
@@ -27587,6 +27950,8 @@ class ListDocumentRetrieveRequest(TeaModel):
             result['StartDate'] = self.start_date
         if self.sub_content_type is not None:
             result['SubContentType'] = self.sub_content_type
+        if self.subject_classify is not None:
+            result['SubjectClassify'] = self.subject_classify
         if self.word_size is not None:
             result['WordSize'] = self.word_size
         if self.workspace_id is not None:
@@ -27617,6 +27982,8 @@ class ListDocumentRetrieveRequest(TeaModel):
             self.start_date = m.get('StartDate')
         if m.get('SubContentType') is not None:
             self.sub_content_type = m.get('SubContentType')
+        if m.get('SubjectClassify') is not None:
+            self.subject_classify = m.get('SubjectClassify')
         if m.get('WordSize') is not None:
             self.word_size = m.get('WordSize')
         if m.get('WorkspaceId') is not None:

alibabacloud_aimiaobi20230801-1.37.3.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: alibabacloud-aimiaobi20230801
-Version: 1.37.1
+Version: 1.37.3
 Summary: Alibaba Cloud AiMiaoBi (20230801) SDK Library for Python
 Home-page: https://github.com/aliyun/alibabacloud-python-sdk
 Author: Alibaba Cloud SDK

alibabacloud_aimiaobi20230801-1.37.3.dist-info/RECORD (added)
@@ -0,0 +1,8 @@
+alibabacloud_aimiaobi20230801/__init__.py,sha256=UEF8ZmWbj6uhzspQbM5G6iOY7OUZNPUiLEG7uUY66GU,22
+alibabacloud_aimiaobi20230801/client.py,sha256=otS4EuUeX13f9Nh8cA_Nuc5l0I4VS1ojp1mZOc6csLk,918184
+alibabacloud_aimiaobi20230801/models.py,sha256=6kIeNB8w14ObbuJ58OJXxotskZv0ALcYQNen6gP2MIg,2106507
+alibabacloud_aimiaobi20230801-1.37.3.dist-info/LICENSE,sha256=0CFItL6bHvxqS44T6vlLoW2R4Zaic304OO3WxN0oXF0,600
+alibabacloud_aimiaobi20230801-1.37.3.dist-info/METADATA,sha256=X-QkOCQchv9IFXNmmB5VhrLFwB20y5fyXGXdhO655CI,2348
+alibabacloud_aimiaobi20230801-1.37.3.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+alibabacloud_aimiaobi20230801-1.37.3.dist-info/top_level.txt,sha256=8_10N8zQLrK-NI6L5TUyufvojDqjPl1Q-dHKwoC_b5Q,30
+alibabacloud_aimiaobi20230801-1.37.3.dist-info/RECORD,,

alibabacloud_aimiaobi20230801-1.37.1.dist-info/RECORD (removed)
@@ -1,8 +0,0 @@
-alibabacloud_aimiaobi20230801/__init__.py,sha256=Qo9x4_7jE4ioMTSFuUr0gjUxyFcHks8xIxyvRu90rXE,22
-alibabacloud_aimiaobi20230801/client.py,sha256=YgKNsgxg81GvQTHtPYLRAVVncQLPLjMlBBeE2YWm1sU,917146
-alibabacloud_aimiaobi20230801/models.py,sha256=5JmmFFUFgSNZylhP1LSsWAHhSA8tEOCTgWkJn4TtE2g,2094356
-alibabacloud_aimiaobi20230801-1.37.1.dist-info/LICENSE,sha256=0CFItL6bHvxqS44T6vlLoW2R4Zaic304OO3WxN0oXF0,600
-alibabacloud_aimiaobi20230801-1.37.1.dist-info/METADATA,sha256=nNFbCbBZCUmcabdEPqx7KDcAFKULyhObfZnRVhbDNmM,2348
-alibabacloud_aimiaobi20230801-1.37.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-alibabacloud_aimiaobi20230801-1.37.1.dist-info/top_level.txt,sha256=8_10N8zQLrK-NI6L5TUyufvojDqjPl1Q-dHKwoC_b5Q,30
-alibabacloud_aimiaobi20230801-1.37.1.dist-info/RECORD,,