alibabacloud-sls20201230 5.3.0__py3-none-any.whl → 5.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alibabacloud_sls20201230/__init__.py +1 -1
- alibabacloud_sls20201230/client.py +1079 -201
- alibabacloud_sls20201230/models.py +652 -44
- {alibabacloud_sls20201230-5.3.0.dist-info → alibabacloud_sls20201230-5.4.0.dist-info}/METADATA +3 -3
- alibabacloud_sls20201230-5.4.0.dist-info/RECORD +8 -0
- alibabacloud_sls20201230-5.3.0.dist-info/RECORD +0 -8
- {alibabacloud_sls20201230-5.3.0.dist-info → alibabacloud_sls20201230-5.4.0.dist-info}/LICENSE +0 -0
- {alibabacloud_sls20201230-5.3.0.dist-info → alibabacloud_sls20201230-5.4.0.dist-info}/WHEEL +0 -0
- {alibabacloud_sls20201230-5.3.0.dist-info → alibabacloud_sls20201230-5.4.0.dist-info}/top_level.txt +0 -0
@@ -4020,6 +4020,7 @@ class Logstore(TeaModel):
|
|
4020
4020
|
logstore_name: str = None,
|
4021
4021
|
max_split_shard: int = None,
|
4022
4022
|
mode: str = None,
|
4023
|
+
processor_id: str = None,
|
4023
4024
|
product_type: str = None,
|
4024
4025
|
shard_count: int = None,
|
4025
4026
|
telemetry_type: str = None,
|
@@ -4037,6 +4038,7 @@ class Logstore(TeaModel):
|
|
4037
4038
|
self.logstore_name = logstore_name
|
4038
4039
|
self.max_split_shard = max_split_shard
|
4039
4040
|
self.mode = mode
|
4041
|
+
self.processor_id = processor_id
|
4040
4042
|
self.product_type = product_type
|
4041
4043
|
# This parameter is required.
|
4042
4044
|
self.shard_count = shard_count
|
@@ -4076,6 +4078,8 @@ class Logstore(TeaModel):
|
|
4076
4078
|
result['maxSplitShard'] = self.max_split_shard
|
4077
4079
|
if self.mode is not None:
|
4078
4080
|
result['mode'] = self.mode
|
4081
|
+
if self.processor_id is not None:
|
4082
|
+
result['processorId'] = self.processor_id
|
4079
4083
|
if self.product_type is not None:
|
4080
4084
|
result['productType'] = self.product_type
|
4081
4085
|
if self.shard_count is not None:
|
@@ -4111,6 +4115,8 @@ class Logstore(TeaModel):
|
|
4111
4115
|
self.max_split_shard = m.get('maxSplitShard')
|
4112
4116
|
if m.get('mode') is not None:
|
4113
4117
|
self.mode = m.get('mode')
|
4118
|
+
if m.get('processorId') is not None:
|
4119
|
+
self.processor_id = m.get('processorId')
|
4114
4120
|
if m.get('productType') is not None:
|
4115
4121
|
self.product_type = m.get('productType')
|
4116
4122
|
if m.get('shardCount') is not None:
|
@@ -4796,13 +4802,22 @@ class CreateAlertRequest(TeaModel):
|
|
4796
4802
|
name: str = None,
|
4797
4803
|
schedule: Schedule = None,
|
4798
4804
|
):
|
4805
|
+
# The detailed configurations of the alert rule.
|
4806
|
+
#
|
4799
4807
|
# This parameter is required.
|
4800
4808
|
self.configuration = configuration
|
4809
|
+
# The description of the alert rule.
|
4801
4810
|
self.description = description
|
4811
|
+
# The display name of the alert rule.
|
4812
|
+
#
|
4802
4813
|
# This parameter is required.
|
4803
4814
|
self.display_name = display_name
|
4815
|
+
# The name of the alert rule. Make sure that the name is unique in a project.
|
4816
|
+
#
|
4804
4817
|
# This parameter is required.
|
4805
4818
|
self.name = name
|
4819
|
+
# The scheduling configurations of the alert rule.
|
4820
|
+
#
|
4806
4821
|
# This parameter is required.
|
4807
4822
|
self.schedule = schedule
|
4808
4823
|
|
@@ -5804,31 +5819,33 @@ class CreateLogStoreRequest(TeaModel):
|
|
5804
5819
|
logstore_name: str = None,
|
5805
5820
|
max_split_shard: int = None,
|
5806
5821
|
mode: str = None,
|
5822
|
+
processor_id: str = None,
|
5807
5823
|
shard_count: int = None,
|
5808
5824
|
telemetry_type: str = None,
|
5809
5825
|
ttl: int = None,
|
5810
5826
|
):
|
5811
|
-
# Specifies whether to record public IP addresses. Default value: false.
|
5827
|
+
# Specifies whether to record public IP addresses. Default value: false.
|
5812
5828
|
#
|
5813
5829
|
# * true
|
5814
5830
|
# * false
|
5815
5831
|
self.append_meta = append_meta
|
5816
|
-
# Specifies whether to enable automatic sharding.
|
5832
|
+
# Specifies whether to enable automatic sharding.
|
5817
5833
|
#
|
5818
5834
|
# * true
|
5819
5835
|
# * false
|
5820
5836
|
self.auto_split = auto_split
|
5821
|
-
# Specifies whether to enable the web tracking feature. Default value: false.
|
5837
|
+
# Specifies whether to enable the web tracking feature. Default value: false.
|
5822
5838
|
#
|
5823
5839
|
# * true
|
5824
5840
|
# * false
|
5825
5841
|
self.enable_tracking = enable_tracking
|
5826
|
-
# The data structure of the encryption configuration.
|
5842
|
+
# The data structure of the encryption configuration. The following parameters are included: `enable`, `encrypt_type`, and `user_cmk_info`. For more information, see [EncryptConf](https://help.aliyun.com/document_detail/409461.html).
|
5827
5843
|
self.encrypt_conf = encrypt_conf
|
5828
|
-
# The retention period of data in the hot storage tier of the Logstore.
|
5844
|
+
# The retention period of data in the hot storage tier of the Logstore. Valid values: 7 to 3000. Unit: days.
|
5829
5845
|
#
|
5830
|
-
#
|
5846
|
+
# After the retention period that is specified for the hot storage tier elapses, the data is moved to the Infrequent Access (IA) storage tier. For more information, see [Enable hot and cold-tiered storage for a Logstore](https://help.aliyun.com/document_detail/308645.html).
|
5831
5847
|
self.hot_ttl = hot_ttl
|
5848
|
+
# The retention period of data in the IA storage tier of the Logstore. You must set this parameter to at least 30 days. After the data retention period that you specify for the IA storage tier elapses, the data is moved to the Archive storage tier.
|
5832
5849
|
self.infrequent_access_ttl = infrequent_access_ttl
|
5833
5850
|
# The name of the Logstore. The name must meet the following requirements:
|
5834
5851
|
#
|
@@ -5839,27 +5856,28 @@ class CreateLogStoreRequest(TeaModel):
|
|
5839
5856
|
#
|
5840
5857
|
# This parameter is required.
|
5841
5858
|
self.logstore_name = logstore_name
|
5842
|
-
# The maximum number of shards into which existing shards can be automatically split. Valid values: 1 to
|
5859
|
+
# The maximum number of shards into which existing shards can be automatically split. Valid values: 1 to 256.
|
5843
5860
|
#
|
5844
|
-
# >
|
5861
|
+
# > If you set autoSplit to true, you must specify maxSplitShard.
|
5845
5862
|
self.max_split_shard = max_split_shard
|
5846
|
-
# The type of the Logstore. Log Service provides
|
5863
|
+
# The type of the Logstore. Simple Log Service provides two types of Logstores: Standard Logstores and Query Logstores. Valid values:
|
5847
5864
|
#
|
5848
5865
|
# * **standard**: Standard Logstore. This type of Logstore supports the log analysis feature and is suitable for scenarios such as real-time monitoring and interactive analysis. You can also use this type of Logstore to build a comprehensive observability system.
|
5849
|
-
# * **query**: Query Logstore. This type of Logstore supports high-performance queries. The index traffic fee of a
|
5866
|
+
# * **query**: Query Logstore. This type of Logstore supports high-performance queries. The index traffic fee of a query Logstore is approximately half that of a Standard Logstore. Query Logstores do not support SQL analysis. Query Logstores are suitable for scenarios in which the amount of data is large, the log retention period is long, or log analysis is not required. If logs are stored for weeks or months, the log retention period is considered long.
|
5850
5867
|
self.mode = mode
|
5868
|
+
self.processor_id = processor_id
|
5851
5869
|
# The number of shards.
|
5852
5870
|
#
|
5853
|
-
# >
|
5871
|
+
# > You cannot call the CreateLogStore operation to change the number of shards. You can call the SplitShard or MergeShards operation to change the number of shards.
|
5854
5872
|
#
|
5855
5873
|
# This parameter is required.
|
5856
5874
|
self.shard_count = shard_count
|
5857
5875
|
# The type of the observable data. Valid values:
|
5858
5876
|
#
|
5859
|
-
# * None:
|
5860
|
-
# * Metrics
|
5877
|
+
# * **None** (default): log data
|
5878
|
+
# * **Metrics**: metric data
|
5861
5879
|
self.telemetry_type = telemetry_type
|
5862
|
-
# The retention period of data. Unit: days. Valid values: 1 to 3000. If you set this parameter to 3650,
|
5880
|
+
# The retention period of data. Unit: days. Valid values: 1 to 3000. If you set this parameter to 3650, logs are permanently stored.
|
5863
5881
|
#
|
5864
5882
|
# This parameter is required.
|
5865
5883
|
self.ttl = ttl
|
@@ -5892,6 +5910,8 @@ class CreateLogStoreRequest(TeaModel):
|
|
5892
5910
|
result['maxSplitShard'] = self.max_split_shard
|
5893
5911
|
if self.mode is not None:
|
5894
5912
|
result['mode'] = self.mode
|
5913
|
+
if self.processor_id is not None:
|
5914
|
+
result['processorId'] = self.processor_id
|
5895
5915
|
if self.shard_count is not None:
|
5896
5916
|
result['shardCount'] = self.shard_count
|
5897
5917
|
if self.telemetry_type is not None:
|
@@ -5921,6 +5941,8 @@ class CreateLogStoreRequest(TeaModel):
|
|
5921
5941
|
self.max_split_shard = m.get('maxSplitShard')
|
5922
5942
|
if m.get('mode') is not None:
|
5923
5943
|
self.mode = m.get('mode')
|
5944
|
+
if m.get('processorId') is not None:
|
5945
|
+
self.processor_id = m.get('processorId')
|
5924
5946
|
if m.get('shardCount') is not None:
|
5925
5947
|
self.shard_count = m.get('shardCount')
|
5926
5948
|
if m.get('telemetryType') is not None:
|
@@ -6343,6 +6365,105 @@ class CreateMachineGroupResponse(TeaModel):
|
|
6343
6365
|
return self
|
6344
6366
|
|
6345
6367
|
|
6368
|
+
class CreateMetricStoreRequest(TeaModel):
|
6369
|
+
def __init__(
|
6370
|
+
self,
|
6371
|
+
auto_split: bool = None,
|
6372
|
+
max_split_shard: int = None,
|
6373
|
+
metric_type: str = None,
|
6374
|
+
mode: str = None,
|
6375
|
+
name: str = None,
|
6376
|
+
shard_count: int = None,
|
6377
|
+
ttl: int = None,
|
6378
|
+
):
|
6379
|
+
self.auto_split = auto_split
|
6380
|
+
self.max_split_shard = max_split_shard
|
6381
|
+
self.metric_type = metric_type
|
6382
|
+
self.mode = mode
|
6383
|
+
# This parameter is required.
|
6384
|
+
self.name = name
|
6385
|
+
# This parameter is required.
|
6386
|
+
self.shard_count = shard_count
|
6387
|
+
# This parameter is required.
|
6388
|
+
self.ttl = ttl
|
6389
|
+
|
6390
|
+
def validate(self):
|
6391
|
+
pass
|
6392
|
+
|
6393
|
+
def to_map(self):
|
6394
|
+
_map = super().to_map()
|
6395
|
+
if _map is not None:
|
6396
|
+
return _map
|
6397
|
+
|
6398
|
+
result = dict()
|
6399
|
+
if self.auto_split is not None:
|
6400
|
+
result['autoSplit'] = self.auto_split
|
6401
|
+
if self.max_split_shard is not None:
|
6402
|
+
result['maxSplitShard'] = self.max_split_shard
|
6403
|
+
if self.metric_type is not None:
|
6404
|
+
result['metricType'] = self.metric_type
|
6405
|
+
if self.mode is not None:
|
6406
|
+
result['mode'] = self.mode
|
6407
|
+
if self.name is not None:
|
6408
|
+
result['name'] = self.name
|
6409
|
+
if self.shard_count is not None:
|
6410
|
+
result['shardCount'] = self.shard_count
|
6411
|
+
if self.ttl is not None:
|
6412
|
+
result['ttl'] = self.ttl
|
6413
|
+
return result
|
6414
|
+
|
6415
|
+
def from_map(self, m: dict = None):
|
6416
|
+
m = m or dict()
|
6417
|
+
if m.get('autoSplit') is not None:
|
6418
|
+
self.auto_split = m.get('autoSplit')
|
6419
|
+
if m.get('maxSplitShard') is not None:
|
6420
|
+
self.max_split_shard = m.get('maxSplitShard')
|
6421
|
+
if m.get('metricType') is not None:
|
6422
|
+
self.metric_type = m.get('metricType')
|
6423
|
+
if m.get('mode') is not None:
|
6424
|
+
self.mode = m.get('mode')
|
6425
|
+
if m.get('name') is not None:
|
6426
|
+
self.name = m.get('name')
|
6427
|
+
if m.get('shardCount') is not None:
|
6428
|
+
self.shard_count = m.get('shardCount')
|
6429
|
+
if m.get('ttl') is not None:
|
6430
|
+
self.ttl = m.get('ttl')
|
6431
|
+
return self
|
6432
|
+
|
6433
|
+
|
6434
|
+
class CreateMetricStoreResponse(TeaModel):
|
6435
|
+
def __init__(
|
6436
|
+
self,
|
6437
|
+
headers: Dict[str, str] = None,
|
6438
|
+
status_code: int = None,
|
6439
|
+
):
|
6440
|
+
self.headers = headers
|
6441
|
+
self.status_code = status_code
|
6442
|
+
|
6443
|
+
def validate(self):
|
6444
|
+
pass
|
6445
|
+
|
6446
|
+
def to_map(self):
|
6447
|
+
_map = super().to_map()
|
6448
|
+
if _map is not None:
|
6449
|
+
return _map
|
6450
|
+
|
6451
|
+
result = dict()
|
6452
|
+
if self.headers is not None:
|
6453
|
+
result['headers'] = self.headers
|
6454
|
+
if self.status_code is not None:
|
6455
|
+
result['statusCode'] = self.status_code
|
6456
|
+
return result
|
6457
|
+
|
6458
|
+
def from_map(self, m: dict = None):
|
6459
|
+
m = m or dict()
|
6460
|
+
if m.get('headers') is not None:
|
6461
|
+
self.headers = m.get('headers')
|
6462
|
+
if m.get('statusCode') is not None:
|
6463
|
+
self.status_code = m.get('statusCode')
|
6464
|
+
return self
|
6465
|
+
|
6466
|
+
|
6346
6467
|
class CreateOSSExportRequest(TeaModel):
|
6347
6468
|
def __init__(
|
6348
6469
|
self,
|
@@ -6351,11 +6472,18 @@ class CreateOSSExportRequest(TeaModel):
|
|
6351
6472
|
display_name: str = None,
|
6352
6473
|
name: str = None,
|
6353
6474
|
):
|
6475
|
+
# The configuration details of the job.
|
6476
|
+
#
|
6354
6477
|
# This parameter is required.
|
6355
6478
|
self.configuration = configuration
|
6479
|
+
# The description of the job.
|
6356
6480
|
self.description = description
|
6481
|
+
# The display name of the job.
|
6482
|
+
#
|
6357
6483
|
# This parameter is required.
|
6358
6484
|
self.display_name = display_name
|
6485
|
+
# The unique identifier of the OSS data shipping job.
|
6486
|
+
#
|
6359
6487
|
# This parameter is required.
|
6360
6488
|
self.name = name
|
6361
6489
|
|
@@ -6434,11 +6562,18 @@ class CreateOSSHDFSExportRequest(TeaModel):
|
|
6434
6562
|
display_name: str = None,
|
6435
6563
|
name: str = None,
|
6436
6564
|
):
|
6565
|
+
# The configuration details of the job.
|
6566
|
+
#
|
6437
6567
|
# This parameter is required.
|
6438
6568
|
self.configuration = configuration
|
6569
|
+
# The description of the job.
|
6439
6570
|
self.description = description
|
6571
|
+
# The display name of the job.
|
6572
|
+
#
|
6440
6573
|
# This parameter is required.
|
6441
6574
|
self.display_name = display_name
|
6575
|
+
# The unique identifier of the OSS data shipping job.
|
6576
|
+
#
|
6442
6577
|
# This parameter is required.
|
6443
6578
|
self.name = name
|
6444
6579
|
|
@@ -6611,7 +6746,7 @@ class CreateOssExternalStoreRequestParameterColumns(TeaModel):
|
|
6611
6746
|
#
|
6612
6747
|
# This parameter is required.
|
6613
6748
|
self.name = name
|
6614
|
-
# The type of the field.
|
6749
|
+
# The data type of the field.
|
6615
6750
|
#
|
6616
6751
|
# This parameter is required.
|
6617
6752
|
self.type = type
|
@@ -6650,11 +6785,11 @@ class CreateOssExternalStoreRequestParameter(TeaModel):
|
|
6650
6785
|
endpoint: str = None,
|
6651
6786
|
objects: List[str] = None,
|
6652
6787
|
):
|
6653
|
-
# The AccessKey ID
|
6788
|
+
# The AccessKey ID.
|
6654
6789
|
#
|
6655
6790
|
# This parameter is required.
|
6656
6791
|
self.accessid = accessid
|
6657
|
-
# The AccessKey secret
|
6792
|
+
# The AccessKey secret.
|
6658
6793
|
#
|
6659
6794
|
# This parameter is required.
|
6660
6795
|
self.accesskey = accesskey
|
@@ -6662,15 +6797,15 @@ class CreateOssExternalStoreRequestParameter(TeaModel):
|
|
6662
6797
|
#
|
6663
6798
|
# This parameter is required.
|
6664
6799
|
self.bucket = bucket
|
6665
|
-
# The
|
6800
|
+
# The associated fields.
|
6666
6801
|
#
|
6667
6802
|
# This parameter is required.
|
6668
6803
|
self.columns = columns
|
6669
|
-
# The
|
6804
|
+
# The OSS endpoint. For more information, see [Regions and endpoints](https://help.aliyun.com/document_detail/31837.html).
|
6670
6805
|
#
|
6671
6806
|
# This parameter is required.
|
6672
6807
|
self.endpoint = endpoint
|
6673
|
-
# The
|
6808
|
+
# The associated OSS objects. Valid values of n: 1 to 100.
|
6674
6809
|
#
|
6675
6810
|
# This parameter is required.
|
6676
6811
|
self.objects = objects
|
@@ -6734,7 +6869,7 @@ class CreateOssExternalStoreRequest(TeaModel):
|
|
6734
6869
|
#
|
6735
6870
|
# This parameter is required.
|
6736
6871
|
self.external_store_name = external_store_name
|
6737
|
-
# The parameters
|
6872
|
+
# The parameters of the external store.
|
6738
6873
|
#
|
6739
6874
|
# This parameter is required.
|
6740
6875
|
self.parameter = parameter
|
@@ -7272,8 +7407,12 @@ class CreateSqlInstanceRequest(TeaModel):
|
|
7272
7407
|
cu: int = None,
|
7273
7408
|
use_as_default: bool = None,
|
7274
7409
|
):
|
7410
|
+
# The number of compute units (CUs). When you use the Dedicated SQL feature, CUs are used in parallel.
|
7411
|
+
#
|
7275
7412
|
# This parameter is required.
|
7276
7413
|
self.cu = cu
|
7414
|
+
# Specifies whether to enable the Dedicated SQL feature for the project. If you set this parameter to true, the Dedicated SQL feature is enabled for the specified project and takes effect for all query statements that you execute in the project, including the query statements for alerts and dashboards.
|
7415
|
+
#
|
7277
7416
|
# This parameter is required.
|
7278
7417
|
self.use_as_default = use_as_default
|
7279
7418
|
|
@@ -7341,10 +7480,20 @@ class CreateStoreViewRequest(TeaModel):
|
|
7341
7480
|
store_type: str = None,
|
7342
7481
|
stores: List[StoreViewStore] = None,
|
7343
7482
|
):
|
7483
|
+
# The name of the dataset.
|
7484
|
+
#
|
7485
|
+
# * The name can contain lowercase letters, digits, and underscores (_).
|
7486
|
+
# * The name must start with a lowercase letter.
|
7487
|
+
# * The name must be 3 to 62 characters in length.
|
7488
|
+
#
|
7344
7489
|
# This parameter is required.
|
7345
7490
|
self.name = name
|
7491
|
+
# The type of the dataset. Valid values: metricstore and logstore.
|
7492
|
+
#
|
7346
7493
|
# This parameter is required.
|
7347
7494
|
self.store_type = store_type
|
7495
|
+
# The Logstores or Metricstores.
|
7496
|
+
#
|
7348
7497
|
# This parameter is required.
|
7349
7498
|
self.stores = stores
|
7350
7499
|
|
@@ -8112,6 +8261,39 @@ class DeleteMachineGroupResponse(TeaModel):
|
|
8112
8261
|
return self
|
8113
8262
|
|
8114
8263
|
|
8264
|
+
class DeleteMetricStoreResponse(TeaModel):
|
8265
|
+
def __init__(
|
8266
|
+
self,
|
8267
|
+
headers: Dict[str, str] = None,
|
8268
|
+
status_code: int = None,
|
8269
|
+
):
|
8270
|
+
self.headers = headers
|
8271
|
+
self.status_code = status_code
|
8272
|
+
|
8273
|
+
def validate(self):
|
8274
|
+
pass
|
8275
|
+
|
8276
|
+
def to_map(self):
|
8277
|
+
_map = super().to_map()
|
8278
|
+
if _map is not None:
|
8279
|
+
return _map
|
8280
|
+
|
8281
|
+
result = dict()
|
8282
|
+
if self.headers is not None:
|
8283
|
+
result['headers'] = self.headers
|
8284
|
+
if self.status_code is not None:
|
8285
|
+
result['statusCode'] = self.status_code
|
8286
|
+
return result
|
8287
|
+
|
8288
|
+
def from_map(self, m: dict = None):
|
8289
|
+
m = m or dict()
|
8290
|
+
if m.get('headers') is not None:
|
8291
|
+
self.headers = m.get('headers')
|
8292
|
+
if m.get('statusCode') is not None:
|
8293
|
+
self.status_code = m.get('statusCode')
|
8294
|
+
return self
|
8295
|
+
|
8296
|
+
|
8115
8297
|
class DeleteOSSExportResponse(TeaModel):
|
8116
8298
|
def __init__(
|
8117
8299
|
self,
|
@@ -9358,11 +9540,11 @@ class GetContextLogsRequest(TeaModel):
|
|
9358
9540
|
pack_meta: str = None,
|
9359
9541
|
type: str = None,
|
9360
9542
|
):
|
9361
|
-
# The number of logs that you want to obtain and are generated before the generation time of the start log. Valid values: (0,100]
|
9543
|
+
# The number of logs that you want to obtain and are generated before the generation time of the start log. Valid values: `(0,100]`.
|
9362
9544
|
#
|
9363
9545
|
# This parameter is required.
|
9364
9546
|
self.back_lines = back_lines
|
9365
|
-
# The number of logs that you want to obtain and are generated after the generation time of the start log. Valid values: (0,100]
|
9547
|
+
# The number of logs that you want to obtain and are generated after the generation time of the start log. Valid values: `(0,100]`.
|
9366
9548
|
#
|
9367
9549
|
# This parameter is required.
|
9368
9550
|
self.forward_lines = forward_lines
|
@@ -10561,6 +10743,7 @@ class GetLogStoreMeteringModeResponseBody(TeaModel):
|
|
10561
10743
|
self,
|
10562
10744
|
metering_mode: str = None,
|
10563
10745
|
):
|
10746
|
+
# The billing mode. Default value: ChargeByFunction. Valid values: ChargeByFunction and ChargeByDataIngest.
|
10564
10747
|
self.metering_mode = metering_mode
|
10565
10748
|
|
10566
10749
|
def validate(self):
|
@@ -10871,28 +11054,29 @@ class GetLogsV2Request(TeaModel):
|
|
10871
11054
|
to: int = None,
|
10872
11055
|
topic: str = None,
|
10873
11056
|
):
|
10874
|
-
# Specifies whether to page forward or backward for the scan-based query or
|
11057
|
+
# Specifies whether to page forward or backward for the scan-based query or phrase search.
|
10875
11058
|
self.forward = forward
|
10876
11059
|
# The beginning of the time range to query. The value is the log time that is specified when log data is written.
|
10877
11060
|
#
|
10878
|
-
# The time range that is specified in this operation is a left-closed, right-open interval. The interval includes the start time specified by the from parameter, but does not include the end time specified by the to parameter. If you specify the same value for the from and to parameters, the interval is invalid, and an error message is returned. The value is a UNIX
|
11061
|
+
# The time range that is specified in this operation is a left-closed, right-open interval. The interval includes the start time specified by the from parameter, but does not include the end time specified by the to parameter. If you specify the same value for the from and to parameters, the interval is invalid, and an error message is returned. The value is a timestamp that follows the UNIX time format. It is the number of seconds that have elapsed since January 1, 1970, 00:00:00 UTC.
|
10879
11062
|
#
|
10880
11063
|
# This parameter is required.
|
10881
11064
|
self.from_ = from_
|
11065
|
+
# Specifies whether to highlight the returned result.
|
10882
11066
|
self.highlight = highlight
|
10883
|
-
# The maximum number of logs to return for the request. This parameter takes effect only when the query parameter is set to a search statement.
|
11067
|
+
# The maximum number of logs to return for the request. This parameter takes effect only when the query parameter is set to a search statement. Valid values: 0 to 100. Default value: 100.
|
10884
11068
|
self.line = line
|
10885
11069
|
# The line from which the query starts. This parameter takes effect only when the query parameter is set to a search statement. Default value: 0.
|
10886
11070
|
self.offset = offset
|
10887
11071
|
# Specifies whether to enable the SQL enhancement feature. By default, the feature is disabled.
|
10888
11072
|
self.power_sql = power_sql
|
10889
|
-
# The search statement or
|
11073
|
+
# The search statement or query statement. For more information, see the "Log search overview" and "Log analysis overview" topics.
|
10890
11074
|
#
|
10891
|
-
# If you add set session parallel_sql=true; to the analytic statement in the query parameter, Dedicated SQL is used.
|
11075
|
+
# If you add set session parallel_sql=true; to the analytic statement in the query parameter, Dedicated SQL is used. Example: \\* | set session parallel_sql=true; select count(\\*) as pv.
|
10892
11076
|
#
|
10893
|
-
# Note: If you specify an analytic statement in the query parameter, the line and offset parameters do not take effect in this operation. In this case, we recommend that you set the line and offset parameters to 0 and use the LIMIT clause to
|
11077
|
+
# Note: If you specify an analytic statement in the query parameter, the line and offset parameters do not take effect in this operation. In this case, we recommend that you set the line and offset parameters to 0 and use the LIMIT clause to specify the number of logs to return on each page. For more information, see the "Perform paged queries" topic.
|
10894
11078
|
self.query = query
|
10895
|
-
# Specifies whether to return logs in reverse chronological order of log timestamps. The log timestamps are accurate to
|
11079
|
+
# Specifies whether to return logs in reverse chronological order of log timestamps. The log timestamps are accurate to minutes. Valid values:
|
10896
11080
|
#
|
10897
11081
|
# true: Logs are returned in reverse chronological order of log timestamps. false (default): Logs are returned in chronological order of log timestamps. Note: The reverse parameter takes effect only when the query parameter is set to a search statement. The reverse parameter specifies the method used to sort returned logs. If the query parameter is set to a query statement, the reverse parameter does not take effect. The method used to sort returned logs is specified by the ORDER BY clause in the analytic statement. If you use the keyword asc in the ORDER BY clause, the logs are sorted in chronological order. If you use the keyword desc in the ORDER BY clause, the logs are sorted in reverse chronological order. By default, asc is used in the ORDER BY clause.
|
10898
11082
|
self.reverse = reverse
|
@@ -10900,7 +11084,7 @@ class GetLogsV2Request(TeaModel):
|
|
10900
11084
|
self.session = session
|
10901
11085
|
# The end of the time range to query. The value is the log time that is specified when log data is written.
|
10902
11086
|
#
|
10903
|
-
# The time range that is specified in this operation is a left-closed, right-open interval. The interval includes the start time specified by the from parameter, but does not include the end time specified by the to parameter. If you specify the same value for the from and to parameters, the interval is invalid, and an error message is returned. The value is a UNIX
|
11087
|
+
# The time range that is specified in this operation is a left-closed, right-open interval. The interval includes the start time specified by the from parameter, but does not include the end time specified by the to parameter. If you specify the same value for the from and to parameters, the interval is invalid, and an error message is returned. The value is a timestamp that follows the UNIX time format. It is the number of seconds that have elapsed since January 1, 1970, 00:00:00 UTC.
|
10904
11088
|
#
|
10905
11089
|
# This parameter is required.
|
10906
11090
|
self.to = to
|
@@ -11048,7 +11232,7 @@ class GetLogsV2ResponseBodyMeta(TeaModel):
|
|
11048
11232
|
# Indicates whether the query is an SQL query.
|
11049
11233
|
self.has_sql = has_sql
|
11050
11234
|
self.highlights = highlights
|
11051
|
-
# Indicates whether the returned result is accurate.
|
11235
|
+
# Indicates whether the returned result is accurate to seconds.
|
11052
11236
|
self.is_accurate = is_accurate
|
11053
11237
|
# All keys in the query result.
|
11054
11238
|
self.keys = keys
|
@@ -11194,7 +11378,7 @@ class GetLogsV2ResponseBody(TeaModel):
|
|
11194
11378
|
):
|
11195
11379
|
# The returned result.
|
11196
11380
|
self.data = data
|
11197
|
-
# The metadata
|
11381
|
+
# The metadata of the returned data.
|
11198
11382
|
self.meta = meta
|
11199
11383
|
|
11200
11384
|
def validate(self):
|
@@ -11461,11 +11645,128 @@ class GetMachineGroupResponse(TeaModel):
|
|
11461
11645
|
return self
|
11462
11646
|
|
11463
11647
|
|
11648
|
+
class GetMetricStoreResponseBody(TeaModel):
|
11649
|
+
def __init__(
|
11650
|
+
self,
|
11651
|
+
auto_split: bool = None,
|
11652
|
+
create_time: int = None,
|
11653
|
+
last_modify_time: int = None,
|
11654
|
+
max_split_shard: int = None,
|
11655
|
+
metric_type: str = None,
|
11656
|
+
mode: str = None,
|
11657
|
+
name: str = None,
|
11658
|
+
shard_count: int = None,
|
11659
|
+
ttl: int = None,
|
11660
|
+
):
|
11661
|
+
self.auto_split = auto_split
|
11662
|
+
self.create_time = create_time
|
11663
|
+
self.last_modify_time = last_modify_time
|
11664
|
+
self.max_split_shard = max_split_shard
|
11665
|
+
self.metric_type = metric_type
|
11666
|
+
self.mode = mode
|
11667
|
+
self.name = name
|
11668
|
+
self.shard_count = shard_count
|
11669
|
+
self.ttl = ttl
|
11670
|
+
|
11671
|
+
def validate(self):
|
11672
|
+
pass
|
11673
|
+
|
11674
|
+
def to_map(self):
|
11675
|
+
_map = super().to_map()
|
11676
|
+
if _map is not None:
|
11677
|
+
return _map
|
11678
|
+
|
11679
|
+
result = dict()
|
11680
|
+
if self.auto_split is not None:
|
11681
|
+
result['autoSplit'] = self.auto_split
|
11682
|
+
if self.create_time is not None:
|
11683
|
+
result['createTime'] = self.create_time
|
11684
|
+
if self.last_modify_time is not None:
|
11685
|
+
result['lastModifyTime'] = self.last_modify_time
|
11686
|
+
if self.max_split_shard is not None:
|
11687
|
+
result['maxSplitShard'] = self.max_split_shard
|
11688
|
+
if self.metric_type is not None:
|
11689
|
+
result['metricType'] = self.metric_type
|
11690
|
+
if self.mode is not None:
|
11691
|
+
result['mode'] = self.mode
|
11692
|
+
if self.name is not None:
|
11693
|
+
result['name'] = self.name
|
11694
|
+
if self.shard_count is not None:
|
11695
|
+
result['shardCount'] = self.shard_count
|
11696
|
+
if self.ttl is not None:
|
11697
|
+
result['ttl'] = self.ttl
|
11698
|
+
return result
|
11699
|
+
|
11700
|
+
def from_map(self, m: dict = None):
|
11701
|
+
m = m or dict()
|
11702
|
+
if m.get('autoSplit') is not None:
|
11703
|
+
self.auto_split = m.get('autoSplit')
|
11704
|
+
if m.get('createTime') is not None:
|
11705
|
+
self.create_time = m.get('createTime')
|
11706
|
+
if m.get('lastModifyTime') is not None:
|
11707
|
+
self.last_modify_time = m.get('lastModifyTime')
|
11708
|
+
if m.get('maxSplitShard') is not None:
|
11709
|
+
self.max_split_shard = m.get('maxSplitShard')
|
11710
|
+
if m.get('metricType') is not None:
|
11711
|
+
self.metric_type = m.get('metricType')
|
11712
|
+
if m.get('mode') is not None:
|
11713
|
+
self.mode = m.get('mode')
|
11714
|
+
if m.get('name') is not None:
|
11715
|
+
self.name = m.get('name')
|
11716
|
+
if m.get('shardCount') is not None:
|
11717
|
+
self.shard_count = m.get('shardCount')
|
11718
|
+
if m.get('ttl') is not None:
|
11719
|
+
self.ttl = m.get('ttl')
|
11720
|
+
return self
|
11721
|
+
|
11722
|
+
|
11723
|
+
class GetMetricStoreResponse(TeaModel):
|
11724
|
+
def __init__(
|
11725
|
+
self,
|
11726
|
+
headers: Dict[str, str] = None,
|
11727
|
+
status_code: int = None,
|
11728
|
+
body: GetMetricStoreResponseBody = None,
|
11729
|
+
):
|
11730
|
+
self.headers = headers
|
11731
|
+
self.status_code = status_code
|
11732
|
+
self.body = body
|
11733
|
+
|
11734
|
+
def validate(self):
|
11735
|
+
if self.body:
|
11736
|
+
self.body.validate()
|
11737
|
+
|
11738
|
+
def to_map(self):
|
11739
|
+
_map = super().to_map()
|
11740
|
+
if _map is not None:
|
11741
|
+
return _map
|
11742
|
+
|
11743
|
+
result = dict()
|
11744
|
+
if self.headers is not None:
|
11745
|
+
result['headers'] = self.headers
|
11746
|
+
if self.status_code is not None:
|
11747
|
+
result['statusCode'] = self.status_code
|
11748
|
+
if self.body is not None:
|
11749
|
+
result['body'] = self.body.to_map()
|
11750
|
+
return result
|
11751
|
+
|
11752
|
+
def from_map(self, m: dict = None):
|
11753
|
+
m = m or dict()
|
11754
|
+
if m.get('headers') is not None:
|
11755
|
+
self.headers = m.get('headers')
|
11756
|
+
if m.get('statusCode') is not None:
|
11757
|
+
self.status_code = m.get('statusCode')
|
11758
|
+
if m.get('body') is not None:
|
11759
|
+
temp_model = GetMetricStoreResponseBody()
|
11760
|
+
self.body = temp_model.from_map(m['body'])
|
11761
|
+
return self
|
11762
|
+
|
11763
|
+
|
11464
11764
|
class GetMetricStoreMeteringModeResponseBody(TeaModel):
|
11465
11765
|
def __init__(
|
11466
11766
|
self,
|
11467
11767
|
metering_mode: str = None,
|
11468
11768
|
):
|
11769
|
+
# The billing mode. Default value: ChargeByFunction. Valid values: ChargeByFunction and ChargeByDataIngest.
|
11469
11770
|
self.metering_mode = metering_mode
|
11470
11771
|
|
11471
11772
|
def validate(self):
|
@@ -12040,7 +12341,27 @@ class GetStoreViewResponseBody(TeaModel):
|
|
12040
12341
|
store_type: str = None,
|
12041
12342
|
stores: List[StoreViewStore] = None,
|
12042
12343
|
):
|
12344
|
+
# The type of the dataset.
|
12345
|
+
#
|
12346
|
+
# Valid values:
|
12347
|
+
#
|
12348
|
+
# * metricstore
|
12349
|
+
#
|
12350
|
+
# <!-- -->
|
12351
|
+
#
|
12352
|
+
# <!-- -->
|
12353
|
+
#
|
12354
|
+
# <!-- -->
|
12355
|
+
#
|
12356
|
+
# * logstore
|
12357
|
+
#
|
12358
|
+
# <!-- -->
|
12359
|
+
#
|
12360
|
+
# <!-- -->
|
12361
|
+
#
|
12362
|
+
# <!-- -->
|
12043
12363
|
self.store_type = store_type
|
12364
|
+
# The Logstores or Metricstores.
|
12044
12365
|
self.stores = stores
|
12045
12366
|
|
12046
12367
|
def validate(self):
|
@@ -12123,8 +12444,11 @@ class GetStoreViewIndexResponseBodyIndexes(TeaModel):
|
|
12123
12444
|
logstore: str = None,
|
12124
12445
|
project: str = None,
|
12125
12446
|
):
|
12447
|
+
# The index configurations of the Logstore.
|
12126
12448
|
self.index = index
|
12449
|
+
# The name of the Logstore.
|
12127
12450
|
self.logstore = logstore
|
12451
|
+
# The name of the project to which the Logstore belongs.
|
12128
12452
|
self.project = project
|
12129
12453
|
|
12130
12454
|
def validate(self):
|
@@ -12162,6 +12486,7 @@ class GetStoreViewIndexResponseBody(TeaModel):
|
|
12162
12486
|
self,
|
12163
12487
|
indexes: List[GetStoreViewIndexResponseBodyIndexes] = None,
|
12164
12488
|
):
|
12489
|
+
# The index configurations.
|
12165
12490
|
self.indexes = indexes
|
12166
12491
|
|
12167
12492
|
def validate(self):
|
@@ -12279,8 +12604,11 @@ class ListAlertsResponseBody(TeaModel):
|
|
12279
12604
|
results: List[Alert] = None,
|
12280
12605
|
total: int = None,
|
12281
12606
|
):
|
12607
|
+
# The number of alert rules that are returned.
|
12282
12608
|
self.count = count
|
12609
|
+
# The alert rules.
|
12283
12610
|
self.results = results
|
12611
|
+
# The total number of alert rules in the project.
|
12284
12612
|
self.total = total
|
12285
12613
|
|
12286
12614
|
def validate(self):
|
@@ -13219,7 +13547,7 @@ class ListConfigRequest(TeaModel):
|
|
13219
13547
|
offset: int = None,
|
13220
13548
|
size: int = None,
|
13221
13549
|
):
|
13222
|
-
# The name of the Logtail configuration.
|
13550
|
+
# The name of the Logtail configuration, which is used for fuzzy match.
|
13223
13551
|
self.config_name = config_name
|
13224
13552
|
# The name of the Logstore.
|
13225
13553
|
#
|
@@ -14731,6 +15059,131 @@ class ListMachinesResponse(TeaModel):
|
|
14731
15059
|
return self
|
14732
15060
|
|
14733
15061
|
|
15062
|
+
class ListMetricStoresRequest(TeaModel):
    """Query parameters of the ListMetricStores operation."""

    # Map key -> attribute; serialization keys must stay exactly as the API expects.
    _FIELDS = (
        ('mode', 'mode'),
        ('name', 'name'),
        ('offset', 'offset'),
        ('size', 'size'),
    )

    def __init__(
        self,
        mode: str = None,
        name: str = None,
        offset: int = None,
        size: int = None,
    ):
        # Metricstore mode filter.
        self.mode = mode
        # Metricstore name filter.
        self.name = name
        # Paging offset.
        self.offset = offset
        # Page size.
        self.size = size

    def validate(self):
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        mapped = dict()
        # Serialize only the attributes that were explicitly set.
        for key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                mapped[key] = value
        return mapped

    def from_map(self, m: dict = None):
        m = m or dict()
        for key, attr in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
|
15105
|
+
|
15106
|
+
|
15107
|
+
class ListMetricStoresResponseBody(TeaModel):
    """Payload of the ListMetricStores response: one page of Metricstore names."""

    def __init__(
        self,
        count: int = None,
        metricstores: List[str] = None,
        total: int = None,
    ):
        # `count` field of the response — presumably the number of names in this page; verify against API docs.
        self.count = count
        # The returned Metricstore names.
        self.metricstores = metricstores
        # `total` field of the response — presumably the overall number of matches; verify against API docs.
        self.total = total

    def validate(self):
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        # Emit only the fields that are set, preserving the wire key names.
        pairs = (
            ('count', self.count),
            ('metricstores', self.metricstores),
            ('total', self.total),
        )
        return {key: value for key, value in pairs if value is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('count') is not None:
            self.count = m.get('count')
        if m.get('metricstores') is not None:
            self.metricstores = m.get('metricstores')
        if m.get('total') is not None:
            self.total = m.get('total')
        return self
|
15144
|
+
|
15145
|
+
|
15146
|
+
class ListMetricStoresResponse(TeaModel):
    """Full ListMetricStores API response: HTTP headers, status code, and parsed body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: ListMetricStoresResponseBody = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # HTTP status code of the response.
        self.status_code = status_code
        # Deserialized response payload.
        self.body = body

    def validate(self):
        # Only the nested body carries validation logic.
        if self.body:
            self.body.validate()

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        mapped = dict()
        if self.headers is not None:
            mapped['headers'] = self.headers
        if self.status_code is not None:
            mapped['statusCode'] = self.status_code
        if self.body is not None:
            mapped['body'] = self.body.to_map()
        return mapped

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            # Rebuild the body model from its nested map.
            self.body = ListMetricStoresResponseBody().from_map(m['body'])
        return self
|
15185
|
+
|
15186
|
+
|
14734
15187
|
class ListOSSExportsRequest(TeaModel):
|
14735
15188
|
def __init__(
|
14736
15189
|
self,
|
@@ -15031,8 +15484,11 @@ class ListOSSIngestionsResponseBody(TeaModel):
|
|
15031
15484
|
results: List[OSSIngestion] = None,
|
15032
15485
|
total: int = None,
|
15033
15486
|
):
|
15487
|
+
# The number of OSS data import jobs that are returned.
|
15034
15488
|
self.count = count
|
15489
|
+
# The OSS data import jobs.
|
15035
15490
|
self.results = results
|
15491
|
+
# The total number of OSS data import jobs in the project.
|
15036
15492
|
self.total = total
|
15037
15493
|
|
15038
15494
|
def validate(self):
|
@@ -15390,6 +15846,7 @@ class ListScheduledSQLsRequest(TeaModel):
|
|
15390
15846
|
offset: int = None,
|
15391
15847
|
size: int = None,
|
15392
15848
|
):
|
15849
|
+
# The name of the Logstore.
|
15393
15850
|
self.logstore = logstore
|
15394
15851
|
self.offset = offset
|
15395
15852
|
self.size = size
|
@@ -15565,9 +16022,31 @@ class ListStoreViewsRequest(TeaModel):
|
|
15565
16022
|
size: int = None,
|
15566
16023
|
store_type: str = None,
|
15567
16024
|
):
|
16025
|
+
# The dataset name that is used for fuzzy match.
|
15568
16026
|
self.name = name
|
16027
|
+
# The offset of the datasets to return. Default value: 0.
|
15569
16028
|
self.offset = offset
|
16029
|
+
# The number of datasets to return. Default value: 100.
|
15570
16030
|
self.size = size
|
16031
|
+
# The type of the datasets to return. By default, datasets are not filtered by type.
|
16032
|
+
#
|
16033
|
+
# Valid values:
|
16034
|
+
#
|
16035
|
+
# * metricstore
|
16036
|
+
#
|
16037
|
+
# <!-- -->
|
16038
|
+
#
|
16039
|
+
# <!-- -->
|
16040
|
+
#
|
16041
|
+
# <!-- -->
|
16042
|
+
#
|
16043
|
+
# * logstore
|
16044
|
+
#
|
16045
|
+
# <!-- -->
|
16046
|
+
#
|
16047
|
+
# <!-- -->
|
16048
|
+
#
|
16049
|
+
# <!-- -->
|
15571
16050
|
self.store_type = store_type
|
15572
16051
|
|
15573
16052
|
def validate(self):
|
@@ -15609,8 +16088,11 @@ class ListStoreViewsResponseBody(TeaModel):
|
|
15609
16088
|
storeviews: List[str] = None,
|
15610
16089
|
total: int = None,
|
15611
16090
|
):
|
16091
|
+
# The number of returned datasets.
|
15612
16092
|
self.count = count
|
16093
|
+
# The dataset names.
|
15613
16094
|
self.storeviews = storeviews
|
16095
|
+
# The total number of datasets in the project.
|
15614
16096
|
self.total = total
|
15615
16097
|
|
15616
16098
|
def validate(self):
|
@@ -17162,11 +17644,18 @@ class UpdateAlertRequest(TeaModel):
|
|
17162
17644
|
display_name: str = None,
|
17163
17645
|
schedule: Schedule = None,
|
17164
17646
|
):
|
17647
|
+
# The detailed configurations of the alert rule.
|
17648
|
+
#
|
17165
17649
|
# This parameter is required.
|
17166
17650
|
self.configuration = configuration
|
17651
|
+
# The description of the alert rule.
|
17167
17652
|
self.description = description
|
17653
|
+
# The display name of the alert rule.
|
17654
|
+
#
|
17168
17655
|
# This parameter is required.
|
17169
17656
|
self.display_name = display_name
|
17657
|
+
# The scheduling settings of the alert rule.
|
17658
|
+
#
|
17170
17659
|
# This parameter is required.
|
17171
17660
|
self.schedule = schedule
|
17172
17661
|
|
@@ -17875,6 +18364,7 @@ class UpdateLogStoreRequest(TeaModel):
|
|
17875
18364
|
logstore_name: str = None,
|
17876
18365
|
max_split_shard: int = None,
|
17877
18366
|
mode: str = None,
|
18367
|
+
processor_id: str = None,
|
17878
18368
|
shard_count: int = None,
|
17879
18369
|
telemetry_type: str = None,
|
17880
18370
|
ttl: int = None,
|
@@ -17896,32 +18386,34 @@ class UpdateLogStoreRequest(TeaModel):
|
|
17896
18386
|
self.enable_tracking = enable_tracking
|
17897
18387
|
# The data structure of the encryption configuration.
|
17898
18388
|
self.encrypt_conf = encrypt_conf
|
17899
|
-
# The retention period of data in the hot storage tier of the Logstore.
|
18389
|
+
# The retention period of data in the hot storage tier of the Logstore. Valid values: 7 to 3000. Unit: days. After the retention period that is specified for the hot storage tier elapses, the data is moved to the Infrequent Access (IA) storage tier. For more information, see [Enable hot and cold-tiered storage for a Logstore](https://help.aliyun.com/document_detail/308645.html).
|
17900
18390
|
self.hot_ttl = hot_ttl
|
18391
|
+
# The retention period of data in the IA storage tier of the Logstore. You must set this parameter to at least 30 days. After the data retention period that you specify for the IA storage tier elapses, the data is moved to the Archive storage tier.
|
17901
18392
|
self.infrequent_access_ttl = infrequent_access_ttl
|
17902
18393
|
# The name of the Logstore.
|
17903
18394
|
#
|
17904
18395
|
# This parameter is required.
|
17905
18396
|
self.logstore_name = logstore_name
|
17906
|
-
# The maximum number of shards into which existing shards can be automatically split. Valid values: 1 to
|
18397
|
+
# The maximum number of shards into which existing shards can be automatically split. Valid values: 1 to 256.
|
17907
18398
|
#
|
17908
|
-
# >
|
18399
|
+
# > If you set autoSplit to true, you must specify maxSplitShard.
|
17909
18400
|
self.max_split_shard = max_split_shard
|
17910
|
-
# The type of the Logstore. Simple Log Service provides two types of Logstores: Standard Logstores and Query Logstores.
|
18401
|
+
# The type of the Logstore. Simple Log Service provides two types of Logstores: Standard Logstores and Query Logstores. Valid values:
|
17911
18402
|
#
|
17912
18403
|
# * **standard**: Standard Logstore. This type of Logstore supports the log analysis feature and is suitable for scenarios such as real-time monitoring and interactive analysis. You can also use this type of Logstore to build a comprehensive observability system.
|
17913
|
-
# * **query**: Query Logstore. This type of Logstore supports high-performance queries. The index traffic fee of a Query Logstore is approximately half that of a Standard Logstore. Query Logstores do not support SQL analysis. Query Logstores are suitable for scenarios in which the
|
18404
|
+
# * **query**: Query Logstore. This type of Logstore supports high-performance queries. The index traffic fee of a Query Logstore is approximately half that of a Standard Logstore. Query Logstores do not support SQL analysis. Query Logstores are suitable for scenarios in which the amount of data is large, the log retention period is long, or log analysis is not required. If logs are stored for weeks or months, the log retention period is considered long.
|
17914
18405
|
self.mode = mode
|
18406
|
+
self.processor_id = processor_id
|
17915
18407
|
# The number of shards.
|
17916
18408
|
#
|
17917
|
-
# >
|
18409
|
+
# > You cannot call the UpdateLogStore operation to change the number of shards. You can call the SplitShard or MergeShards operation to change the number of shards.
|
17918
18410
|
self.shard_count = shard_count
|
17919
|
-
# The type of the
|
18411
|
+
# The type of the observable data. Valid values:
|
17920
18412
|
#
|
17921
|
-
# * None:
|
17922
|
-
# * Metrics:
|
18413
|
+
# * None (default): log data.
|
18414
|
+
# * Metrics: metric data.
|
17923
18415
|
self.telemetry_type = telemetry_type
|
17924
|
-
# The retention period of data. Unit:
|
18416
|
+
# The retention period of data. Unit: days. Valid values: 1 to 3650. If you set this parameter to 3650, logs are permanently stored.
|
17925
18417
|
#
|
17926
18418
|
# This parameter is required.
|
17927
18419
|
self.ttl = ttl
|
@@ -17954,6 +18446,8 @@ class UpdateLogStoreRequest(TeaModel):
|
|
17954
18446
|
result['maxSplitShard'] = self.max_split_shard
|
17955
18447
|
if self.mode is not None:
|
17956
18448
|
result['mode'] = self.mode
|
18449
|
+
if self.processor_id is not None:
|
18450
|
+
result['processorId'] = self.processor_id
|
17957
18451
|
if self.shard_count is not None:
|
17958
18452
|
result['shardCount'] = self.shard_count
|
17959
18453
|
if self.telemetry_type is not None:
|
@@ -17983,6 +18477,8 @@ class UpdateLogStoreRequest(TeaModel):
|
|
17983
18477
|
self.max_split_shard = m.get('maxSplitShard')
|
17984
18478
|
if m.get('mode') is not None:
|
17985
18479
|
self.mode = m.get('mode')
|
18480
|
+
if m.get('processorId') is not None:
|
18481
|
+
self.processor_id = m.get('processorId')
|
17986
18482
|
if m.get('shardCount') is not None:
|
17987
18483
|
self.shard_count = m.get('shardCount')
|
17988
18484
|
if m.get('telemetryType') is not None:
|
@@ -18533,6 +19029,84 @@ class UpdateMachineGroupMachineResponse(TeaModel):
|
|
18533
19029
|
return self
|
18534
19030
|
|
18535
19031
|
|
19032
|
+
class UpdateMetricStoreRequest(TeaModel):
    """Request body of the UpdateMetricStore operation."""

    def __init__(
        self,
        auto_split: bool = None,
        max_split_shard: int = None,
        mode: str = None,
        ttl: int = None,
    ):
        # Whether automatic shard splitting is enabled.
        self.auto_split = auto_split
        # Upper bound on the number of shards produced by auto split.
        self.max_split_shard = max_split_shard
        # The Metricstore mode.
        self.mode = mode
        # Data retention period — presumably in days, matching other store TTLs in this SDK; verify.
        self.ttl = ttl

    def validate(self):
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        # Keep the camelCase wire keys exactly as the service expects.
        pairs = (
            ('autoSplit', self.auto_split),
            ('maxSplitShard', self.max_split_shard),
            ('mode', self.mode),
            ('ttl', self.ttl),
        )
        return {key: value for key, value in pairs if value is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for key, attr in (
            ('autoSplit', 'auto_split'),
            ('maxSplitShard', 'max_split_shard'),
            ('mode', 'mode'),
            ('ttl', 'ttl'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
|
19075
|
+
|
19076
|
+
|
19077
|
+
class UpdateMetricStoreResponse(TeaModel):
    """Response of the UpdateMetricStore operation; carries no body, only headers and status."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # HTTP status code of the response.
        self.status_code = status_code

    def validate(self):
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        mapped = dict()
        if self.headers is not None:
            mapped['headers'] = self.headers
        if self.status_code is not None:
            mapped['statusCode'] = self.status_code
        return mapped

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        return self
|
19108
|
+
|
19109
|
+
|
18536
19110
|
class UpdateMetricStoreMeteringModeRequest(TeaModel):
|
18537
19111
|
def __init__(
|
18538
19112
|
self,
|
@@ -18601,8 +19175,11 @@ class UpdateOSSExportRequest(TeaModel):
|
|
18601
19175
|
description: str = None,
|
18602
19176
|
display_name: str = None,
|
18603
19177
|
):
|
19178
|
+
# The configuration details of the job.
|
18604
19179
|
self.configuration = configuration
|
19180
|
+
# The description of the job.
|
18605
19181
|
self.description = description
|
19182
|
+
# The display name of the job.
|
18606
19183
|
self.display_name = display_name
|
18607
19184
|
|
18608
19185
|
def validate(self):
|
@@ -18675,8 +19252,11 @@ class UpdateOSSHDFSExportRequest(TeaModel):
|
|
18675
19252
|
description: str = None,
|
18676
19253
|
display_name: str = None,
|
18677
19254
|
):
|
19255
|
+
# The configuration details of the job.
|
18678
19256
|
self.configuration = configuration
|
19257
|
+
# The description of the job.
|
18679
19258
|
self.description = description
|
19259
|
+
# The display name of the job.
|
18680
19260
|
self.display_name = display_name
|
18681
19261
|
|
18682
19262
|
def validate(self):
|
@@ -18750,11 +19330,17 @@ class UpdateOSSIngestionRequest(TeaModel):
|
|
18750
19330
|
display_name: str = None,
|
18751
19331
|
schedule: Schedule = None,
|
18752
19332
|
):
|
19333
|
+
# The configurations of the OSS data import job.
|
19334
|
+
#
|
18753
19335
|
# This parameter is required.
|
18754
19336
|
self.configuration = configuration
|
19337
|
+
# The description of the OSS data import job.
|
18755
19338
|
self.description = description
|
19339
|
+
# The display name of the OSS data import job.
|
19340
|
+
#
|
18756
19341
|
# This parameter is required.
|
18757
19342
|
self.display_name = display_name
|
19343
|
+
# The scheduling type. By default, you do not need to specify this parameter. If you want to import data at regular intervals, such as importing data every Monday at 08: 00., you can specify a cron expression.
|
18758
19344
|
self.schedule = schedule
|
18759
19345
|
|
18760
19346
|
def validate(self):
|
@@ -19533,8 +20119,30 @@ class UpdateStoreViewRequest(TeaModel):
|
|
19533
20119
|
store_type: str = None,
|
19534
20120
|
stores: List[StoreViewStore] = None,
|
19535
20121
|
):
|
20122
|
+
# The type of the dataset.
|
20123
|
+
#
|
20124
|
+
# Valid values:
|
20125
|
+
#
|
20126
|
+
# * metricstore
|
20127
|
+
#
|
20128
|
+
# <!-- -->
|
20129
|
+
#
|
20130
|
+
# <!-- -->
|
20131
|
+
#
|
20132
|
+
# <!-- -->
|
20133
|
+
#
|
20134
|
+
# * logstore
|
20135
|
+
#
|
20136
|
+
# <!-- -->
|
20137
|
+
#
|
20138
|
+
# <!-- -->
|
20139
|
+
#
|
20140
|
+
# <!-- -->
|
20141
|
+
#
|
19536
20142
|
# This parameter is required.
|
19537
20143
|
self.store_type = store_type
|
20144
|
+
# The Logstores or Metricstores.
|
20145
|
+
#
|
19538
20146
|
# This parameter is required.
|
19539
20147
|
self.stores = stores
|
19540
20148
|
|