alibabacloud-r-kvstore20150101 2.24.1__tar.gz → 2.25.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (17)
  1. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/ChangeLog.md +6 -0
  2. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/PKG-INFO +1 -1
  3. alibabacloud_r-kvstore20150101-2.25.1/alibabacloud_r_kvstore20150101/__init__.py +1 -0
  4. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/alibabacloud_r_kvstore20150101/client.py +86 -8
  5. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/alibabacloud_r_kvstore20150101/models.py +442 -5
  6. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/alibabacloud_r_kvstore20150101.egg-info/PKG-INFO +1 -1
  7. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/setup.py +1 -1
  8. alibabacloud_r-kvstore20150101-2.24.1/alibabacloud_r_kvstore20150101/__init__.py +0 -1
  9. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/LICENSE +0 -0
  10. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/MANIFEST.in +0 -0
  11. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/README-CN.md +0 -0
  12. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/README.md +0 -0
  13. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/alibabacloud_r_kvstore20150101.egg-info/SOURCES.txt +0 -0
  14. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/alibabacloud_r_kvstore20150101.egg-info/dependency_links.txt +0 -0
  15. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/alibabacloud_r_kvstore20150101.egg-info/requires.txt +0 -0
  16. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/alibabacloud_r_kvstore20150101.egg-info/top_level.txt +0 -0
  17. {alibabacloud_r-kvstore20150101-2.24.1 → alibabacloud_r-kvstore20150101-2.25.1}/setup.cfg +0 -0
@@ -1,3 +1,9 @@
+ 2024-01-12 Version: 2.25.0
+ - Generated python 2015-01-01 for R-kvstore.
+
+ 2023-12-27 Version: 2.24.1
+ - Generated python 2015-01-01 for R-kvstore.
+
  2023-12-13 Version: 2.24.0
  - Generated python 2015-01-01 for R-kvstore.
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: alibabacloud_r-kvstore20150101
- Version: 2.24.1
+ Version: 2.25.1
  Summary: Alibaba Cloud R-kvstore (20150101) SDK Library for Python
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
  Author: Alibaba Cloud SDK
@@ -1303,6 +1303,8 @@ class Client(OpenApiClient):
  query['Capacity'] = request.capacity
  if not UtilClient.is_unset(request.charge_type):
  query['ChargeType'] = request.charge_type
+ if not UtilClient.is_unset(request.cluster_backup_id):
+ query['ClusterBackupId'] = request.cluster_backup_id
  if not UtilClient.is_unset(request.connection_string_prefix):
  query['ConnectionStringPrefix'] = request.connection_string_prefix
  if not UtilClient.is_unset(request.coupon_no):
@@ -1425,6 +1427,8 @@ class Client(OpenApiClient):
  query['Capacity'] = request.capacity
  if not UtilClient.is_unset(request.charge_type):
  query['ChargeType'] = request.charge_type
+ if not UtilClient.is_unset(request.cluster_backup_id):
+ query['ClusterBackupId'] = request.cluster_backup_id
  if not UtilClient.is_unset(request.connection_string_prefix):
  query['ConnectionStringPrefix'] = request.connection_string_prefix
  if not UtilClient.is_unset(request.coupon_no):
@@ -1735,6 +1739,8 @@ class Client(OpenApiClient):
  query['ChargeType'] = request.charge_type
  if not UtilClient.is_unset(request.client_token):
  query['ClientToken'] = request.client_token
+ if not UtilClient.is_unset(request.cluster_backup_id):
+ query['ClusterBackupId'] = request.cluster_backup_id
  if not UtilClient.is_unset(request.coupon_no):
  query['CouponNo'] = request.coupon_no
  if not UtilClient.is_unset(request.dry_run):
@@ -1849,6 +1855,8 @@ class Client(OpenApiClient):
  query['ChargeType'] = request.charge_type
  if not UtilClient.is_unset(request.client_token):
  query['ClientToken'] = request.client_token
+ if not UtilClient.is_unset(request.cluster_backup_id):
+ query['ClusterBackupId'] = request.cluster_backup_id
  if not UtilClient.is_unset(request.coupon_no):
  query['CouponNo'] = request.coupon_no
  if not UtilClient.is_unset(request.dry_run):
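The four hunks above wire a new ClusterBackupId query parameter into both create_instance and create_tair_instance. A minimal, hedged usage sketch follows; the endpoint, credentials, IDs, and every request field other than cluster_backup_id are placeholders drawn from general knowledge of the API, not from this diff, and the assumption that the field points at an existing cluster backup set is also an assumption:

# Hypothetical sketch: create a Tair instance, passing the new cluster_backup_id field.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_r_kvstore20150101.client import Client
from alibabacloud_r_kvstore20150101 import models as r_kvstore_models

config = open_api_models.Config(
    access_key_id='<your-access-key-id>',        # placeholder
    access_key_secret='<your-access-key-secret>',  # placeholder
    endpoint='r-kvstore.aliyuncs.com',
)
client = Client(config)

request = r_kvstore_models.CreateTairInstanceRequest(
    region_id='cn-hangzhou',
    zone_id='cn-hangzhou-h',
    instance_class='tair.rdb.1g',
    instance_type='tair_rdb',
    vpc_id='vpc-xxxxxxxx',
    v_switch_id='vsw-xxxxxxxx',
    # New in 2.25.x: presumably restores the new instance from an existing cluster backup set.
    cluster_backup_id='cb-xxxxxxxx',
)
response = client.create_tair_instance(request)
print(response.body.to_map())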
@@ -3389,6 +3397,8 @@ class Client(OpenApiClient):
  query = {}
  if not UtilClient.is_unset(request.backup_id):
  query['BackupId'] = request.backup_id
+ if not UtilClient.is_unset(request.backup_job_id):
+ query['BackupJobId'] = request.backup_job_id
  if not UtilClient.is_unset(request.end_time):
  query['EndTime'] = request.end_time
  if not UtilClient.is_unset(request.instance_id):
@@ -3447,6 +3457,8 @@ class Client(OpenApiClient):
  query = {}
  if not UtilClient.is_unset(request.backup_id):
  query['BackupId'] = request.backup_id
+ if not UtilClient.is_unset(request.backup_job_id):
+ query['BackupJobId'] = request.backup_job_id
  if not UtilClient.is_unset(request.end_time):
  query['EndTime'] = request.end_time
  if not UtilClient.is_unset(request.instance_id):
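DescribeBackups now also forwards a BackupJobId filter. A hedged sketch of filtering by that job ID follows; only the backup_job_id field comes from this diff, while the credentials, endpoint, and ID values are placeholders:

# Hypothetical sketch: query backups for one backup job via the new BackupJobId parameter.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_r_kvstore20150101.client import Client
from alibabacloud_r_kvstore20150101 import models as r_kvstore_models

config = open_api_models.Config(access_key_id='<ak>', access_key_secret='<sk>',
                                endpoint='r-kvstore.aliyuncs.com')
client = Client(config)

request = r_kvstore_models.DescribeBackupsRequest(
    instance_id='r-bp1xxxxxxxxxxxxx',
    backup_job_id=12345,  # new in 2.25.x; declared as int in the models.py hunk below
)
response = client.describe_backups(request)
print(response.body.to_map())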
@@ -3808,6 +3820,72 @@ class Client(OpenApiClient):
  runtime = util_models.RuntimeOptions()
  return await self.describe_cache_analysis_report_list_with_options_async(request, runtime)
 
+ def describe_cluster_backup_list_with_options(
+ self,
+ request: r_kvstore_20150101_models.DescribeClusterBackupListRequest,
+ runtime: util_models.RuntimeOptions,
+ ) -> r_kvstore_20150101_models.DescribeClusterBackupListResponse:
+ UtilClient.validate_model(request)
+ query = OpenApiUtilClient.query(UtilClient.to_map(request))
+ req = open_api_models.OpenApiRequest(
+ query=OpenApiUtilClient.query(query)
+ )
+ params = open_api_models.Params(
+ action='DescribeClusterBackupList',
+ version='2015-01-01',
+ protocol='HTTPS',
+ pathname='/',
+ method='GET',
+ auth_type='AK',
+ style='RPC',
+ req_body_type='formData',
+ body_type='json'
+ )
+ return TeaCore.from_map(
+ r_kvstore_20150101_models.DescribeClusterBackupListResponse(),
+ self.call_api(params, req, runtime)
+ )
+
+ async def describe_cluster_backup_list_with_options_async(
+ self,
+ request: r_kvstore_20150101_models.DescribeClusterBackupListRequest,
+ runtime: util_models.RuntimeOptions,
+ ) -> r_kvstore_20150101_models.DescribeClusterBackupListResponse:
+ UtilClient.validate_model(request)
+ query = OpenApiUtilClient.query(UtilClient.to_map(request))
+ req = open_api_models.OpenApiRequest(
+ query=OpenApiUtilClient.query(query)
+ )
+ params = open_api_models.Params(
+ action='DescribeClusterBackupList',
+ version='2015-01-01',
+ protocol='HTTPS',
+ pathname='/',
+ method='GET',
+ auth_type='AK',
+ style='RPC',
+ req_body_type='formData',
+ body_type='json'
+ )
+ return TeaCore.from_map(
+ r_kvstore_20150101_models.DescribeClusterBackupListResponse(),
+ await self.call_api_async(params, req, runtime)
+ )
+
+ def describe_cluster_backup_list(
+ self,
+ request: r_kvstore_20150101_models.DescribeClusterBackupListRequest,
+ ) -> r_kvstore_20150101_models.DescribeClusterBackupListResponse:
+ runtime = util_models.RuntimeOptions()
+ return self.describe_cluster_backup_list_with_options(request, runtime)
+
+ async def describe_cluster_backup_list_async(
+ self,
+ request: r_kvstore_20150101_models.DescribeClusterBackupListRequest,
+ ) -> r_kvstore_20150101_models.DescribeClusterBackupListResponse:
+ runtime = util_models.RuntimeOptions()
+ return await self.describe_cluster_backup_list_with_options_async(request, runtime)
+
  def describe_cluster_member_info_with_options(
  self,
  request: r_kvstore_20150101_models.DescribeClusterMemberInfoRequest,
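The hunk above adds the new DescribeClusterBackupList operation (sync and async variants). A minimal sketch of calling it follows; the endpoint, credentials, time range, and IDs are placeholders, and the request/response models it relies on appear later in this diff under models.py:

# Hypothetical sketch: list cluster-level backup sets with the new operation.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_r_kvstore20150101.client import Client
from alibabacloud_r_kvstore20150101 import models as r_kvstore_models

config = open_api_models.Config(access_key_id='<ak>', access_key_secret='<sk>',
                                endpoint='r-kvstore.aliyuncs.com')
client = Client(config)

request = r_kvstore_models.DescribeClusterBackupListRequest(
    instance_id='r-bp1xxxxxxxxxxxxx',
    start_time='2024-01-01T00:00Z',   # placeholder; format assumed to match other backup APIs
    end_time='2024-01-12T00:00Z',
    page_number=1,
    page_size=30,
)
response = client.describe_cluster_backup_list(request)
for cluster_backup in response.body.cluster_backups or []:
    # Each cluster-level backup set aggregates the per-shard backups in its Backups list.
    print(cluster_backup.cluster_backup_id, cluster_backup.cluster_backup_status)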
@@ -8482,7 +8560,7 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> r_kvstore_20150101_models.InitializeKvstorePermissionResponse:
  """
- The log management feature of ApsaraDB for Redis requires the resources of [Log Service](~~48869~~). To use the log management feature of ApsaraDB for Redis, you can call this operation to associate the RAM role named AliyunServiceRoleForKvstore with the ApsaraDB for Redis instance. For more information, see [Associated RAM roles of ApsaraDB for Redis](~~184337~~).
+ The log management feature of ApsaraDB for Redis requires the resources of [Log Service](~~48869~~). To use the log management feature of ApsaraDB for Redis, you can call this operation to associate the RAM role named AliyunServiceRoleForKvstore with the ApsaraDB for Redis instance. For more information, see [Associated RAM roles of ApsaraDB for Redis] (~~184337~~).
 
  @param request: InitializeKvstorePermissionRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -8527,7 +8605,7 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> r_kvstore_20150101_models.InitializeKvstorePermissionResponse:
  """
- The log management feature of ApsaraDB for Redis requires the resources of [Log Service](~~48869~~). To use the log management feature of ApsaraDB for Redis, you can call this operation to associate the RAM role named AliyunServiceRoleForKvstore with the ApsaraDB for Redis instance. For more information, see [Associated RAM roles of ApsaraDB for Redis](~~184337~~).
+ The log management feature of ApsaraDB for Redis requires the resources of [Log Service](~~48869~~). To use the log management feature of ApsaraDB for Redis, you can call this operation to associate the RAM role named AliyunServiceRoleForKvstore with the ApsaraDB for Redis instance. For more information, see [Associated RAM roles of ApsaraDB for Redis] (~~184337~~).
 
  @param request: InitializeKvstorePermissionRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -8571,7 +8649,7 @@ class Client(OpenApiClient):
  request: r_kvstore_20150101_models.InitializeKvstorePermissionRequest,
  ) -> r_kvstore_20150101_models.InitializeKvstorePermissionResponse:
  """
- The log management feature of ApsaraDB for Redis requires the resources of [Log Service](~~48869~~). To use the log management feature of ApsaraDB for Redis, you can call this operation to associate the RAM role named AliyunServiceRoleForKvstore with the ApsaraDB for Redis instance. For more information, see [Associated RAM roles of ApsaraDB for Redis](~~184337~~).
+ The log management feature of ApsaraDB for Redis requires the resources of [Log Service](~~48869~~). To use the log management feature of ApsaraDB for Redis, you can call this operation to associate the RAM role named AliyunServiceRoleForKvstore with the ApsaraDB for Redis instance. For more information, see [Associated RAM roles of ApsaraDB for Redis] (~~184337~~).
 
  @param request: InitializeKvstorePermissionRequest
  @return: InitializeKvstorePermissionResponse
@@ -8584,7 +8662,7 @@ class Client(OpenApiClient):
  request: r_kvstore_20150101_models.InitializeKvstorePermissionRequest,
  ) -> r_kvstore_20150101_models.InitializeKvstorePermissionResponse:
  """
- The log management feature of ApsaraDB for Redis requires the resources of [Log Service](~~48869~~). To use the log management feature of ApsaraDB for Redis, you can call this operation to associate the RAM role named AliyunServiceRoleForKvstore with the ApsaraDB for Redis instance. For more information, see [Associated RAM roles of ApsaraDB for Redis](~~184337~~).
+ The log management feature of ApsaraDB for Redis requires the resources of [Log Service](~~48869~~). To use the log management feature of ApsaraDB for Redis, you can call this operation to associate the RAM role named AliyunServiceRoleForKvstore with the ApsaraDB for Redis instance. For more information, see [Associated RAM roles of ApsaraDB for Redis] (~~184337~~).
 
  @param request: InitializeKvstorePermissionRequest
  @return: InitializeKvstorePermissionResponse
@@ -11750,7 +11828,7 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> r_kvstore_20150101_models.ModifySecurityGroupConfigurationResponse:
  """
- > After you call this operation, the security groups that are added to the whitelists of the ApsaraDB for Redis instance are deleted, and the security group specified by the *SecurityGroupId** parameter are added to the whitelists. For more information about how to reset security groups in the ApsaraDB for Redis console, see [Add security groups](~~148267~~).
+ > After you call this operation, the security groups that are added to the whitelists of the ApsaraDB for Redis instance are deleted, and the security group specified by the *SecurityGroupId** parameter is added to the whitelists. For more information about how to reset security groups in the ApsaraDB for Redis console, see [Add security groups](~~148267~~).
 
  @param request: ModifySecurityGroupConfigurationRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -11797,7 +11875,7 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> r_kvstore_20150101_models.ModifySecurityGroupConfigurationResponse:
  """
- > After you call this operation, the security groups that are added to the whitelists of the ApsaraDB for Redis instance are deleted, and the security group specified by the *SecurityGroupId** parameter are added to the whitelists. For more information about how to reset security groups in the ApsaraDB for Redis console, see [Add security groups](~~148267~~).
+ > After you call this operation, the security groups that are added to the whitelists of the ApsaraDB for Redis instance are deleted, and the security group specified by the *SecurityGroupId** parameter is added to the whitelists. For more information about how to reset security groups in the ApsaraDB for Redis console, see [Add security groups](~~148267~~).
 
  @param request: ModifySecurityGroupConfigurationRequest
  @param runtime: runtime options for this request RuntimeOptions
@@ -11843,7 +11921,7 @@ class Client(OpenApiClient):
  request: r_kvstore_20150101_models.ModifySecurityGroupConfigurationRequest,
  ) -> r_kvstore_20150101_models.ModifySecurityGroupConfigurationResponse:
  """
- > After you call this operation, the security groups that are added to the whitelists of the ApsaraDB for Redis instance are deleted, and the security group specified by the *SecurityGroupId** parameter are added to the whitelists. For more information about how to reset security groups in the ApsaraDB for Redis console, see [Add security groups](~~148267~~).
+ > After you call this operation, the security groups that are added to the whitelists of the ApsaraDB for Redis instance are deleted, and the security group specified by the *SecurityGroupId** parameter is added to the whitelists. For more information about how to reset security groups in the ApsaraDB for Redis console, see [Add security groups](~~148267~~).
 
  @param request: ModifySecurityGroupConfigurationRequest
  @return: ModifySecurityGroupConfigurationResponse
@@ -11856,7 +11934,7 @@ class Client(OpenApiClient):
  request: r_kvstore_20150101_models.ModifySecurityGroupConfigurationRequest,
  ) -> r_kvstore_20150101_models.ModifySecurityGroupConfigurationResponse:
  """
- > After you call this operation, the security groups that are added to the whitelists of the ApsaraDB for Redis instance are deleted, and the security group specified by the *SecurityGroupId** parameter are added to the whitelists. For more information about how to reset security groups in the ApsaraDB for Redis console, see [Add security groups](~~148267~~).
+ > After you call this operation, the security groups that are added to the whitelists of the ApsaraDB for Redis instance are deleted, and the security group specified by the *SecurityGroupId** parameter is added to the whitelists. For more information about how to reset security groups in the ApsaraDB for Redis console, see [Add security groups](~~148267~~).
 
  @param request: ModifySecurityGroupConfigurationRequest
  @return: ModifySecurityGroupConfigurationResponse
@@ -32,6 +32,10 @@ class AddShardingNodeRequest(TeaModel):
  self.business_info = business_info
  # The ID of the coupon.
  self.coupon_no = coupon_no
+ # Specifies whether to enable forced transmission during a configuration change. Valid values:
+ #
+ # * **false** (default): Before the configuration change, the system checks the minor version of the instance. If the minor version of the instance is outdated, an error is reported. You must update the minor version of the instance and try again.
+ # * **true**: The system skips the version check and directly performs the configuration change.
  self.force_trans = force_trans
  # The ID of the instance.
  self.instance_id = instance_id
@@ -42,12 +46,11 @@ class AddShardingNodeRequest(TeaModel):
  self.security_token = security_token
  # The number of data shards that you want to add. Default value: **1**.
  #
- # >
- #
- # * A cluster instance must contain 2 to 256 data shards. You can add a maximum of 64 data shards at a time.
+ # > The instance can contain 2 to 256 data shards. You can add up to 64 data shards at a time. Make sure that the number of shards does not exceed this limit.
  self.shard_count = shard_count
  # The source of the operation. This parameter is used only for internal maintenance. You do not need to specify this parameter.
  self.source_biz = source_biz
+ # The vSwitch ID. You can specify a different vSwitch within the same virtual private cloud (VPC). In this case, the new data shards are created in the specified vSwitch. If you do not specify this parameter, the new data shards are created in the original vSwitch.
  self.v_switch_id = v_switch_id
 
  def validate(self):
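The newly documented force_trans and v_switch_id fields of AddShardingNodeRequest can be combined when scaling out. A hedged sketch follows; the IDs and credentials are placeholders, add_sharding_node itself is an existing operation rather than part of this diff, and the boolean type of force_trans is an assumption based on its documented true/false values:

# Hypothetical sketch: add shards into a specific vSwitch while skipping the
# minor-version check via force_trans (see the field documentation above).
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_r_kvstore20150101.client import Client
from alibabacloud_r_kvstore20150101 import models as r_kvstore_models

config = open_api_models.Config(access_key_id='<ak>', access_key_secret='<sk>',
                                endpoint='r-kvstore.aliyuncs.com')
client = Client(config)

request = r_kvstore_models.AddShardingNodeRequest(
    instance_id='r-bp1xxxxxxxxxxxxx',
    shard_count=2,                        # at most 64 shards added per call, 2-256 total
    v_switch_id='vsw-bp1xxxxxxxxxxxxx',   # optional: another vSwitch in the same VPC
    force_trans=True,                     # assumed bool; skips the minor-version check
)
response = client.add_sharding_node(request)
print(response.body.to_map())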
@@ -1527,6 +1530,7 @@ class CreateInstanceRequest(TeaModel):
  business_info: str = None,
  capacity: int = None,
  charge_type: str = None,
+ cluster_backup_id: str = None,
  connection_string_prefix: str = None,
  coupon_no: str = None,
  dedicated_host_group_id: str = None,
@@ -1601,6 +1605,7 @@ class CreateInstanceRequest(TeaModel):
  # * **PrePaid**: subscription
  # * **PostPaid**: pay-as-you-go
  self.charge_type = charge_type
+ self.cluster_backup_id = cluster_backup_id
  # The operation that you want to perform. Set the value to **AllocateInstancePublicConnection**.
  self.connection_string_prefix = connection_string_prefix
  # The coupon code. Default value: `youhuiquan_promotion_option_id_for_blank`.
@@ -1726,6 +1731,8 @@ class CreateInstanceRequest(TeaModel):
  result['Capacity'] = self.capacity
  if self.charge_type is not None:
  result['ChargeType'] = self.charge_type
+ if self.cluster_backup_id is not None:
+ result['ClusterBackupId'] = self.cluster_backup_id
  if self.connection_string_prefix is not None:
  result['ConnectionStringPrefix'] = self.connection_string_prefix
  if self.coupon_no is not None:
@@ -1818,6 +1825,8 @@ class CreateInstanceRequest(TeaModel):
  self.capacity = m.get('Capacity')
  if m.get('ChargeType') is not None:
  self.charge_type = m.get('ChargeType')
+ if m.get('ClusterBackupId') is not None:
+ self.cluster_backup_id = m.get('ClusterBackupId')
  if m.get('ConnectionStringPrefix') is not None:
  self.connection_string_prefix = m.get('ConnectionStringPrefix')
  if m.get('CouponNo') is not None:
@@ -2424,6 +2433,7 @@ class CreateTairInstanceRequest(TeaModel):
  business_info: str = None,
  charge_type: str = None,
  client_token: str = None,
+ cluster_backup_id: str = None,
  coupon_no: str = None,
  dry_run: bool = None,
  engine_version: str = None,
@@ -2485,6 +2495,7 @@ class CreateTairInstanceRequest(TeaModel):
  self.charge_type = charge_type
  # The client token that is used to ensure the idempotence of the request. You can use the client to generate the value, but you must make sure that the token is unique among different requests. The token is case-sensitive. The token can contain only ASCII characters and cannot exceed 64 characters in length.
  self.client_token = client_token
+ self.cluster_backup_id = cluster_backup_id
  # The coupon code.
  self.coupon_no = coupon_no
  # Specifies whether to perform a dry run. Default value: false. Valid values:
@@ -2616,6 +2627,8 @@ class CreateTairInstanceRequest(TeaModel):
  result['ChargeType'] = self.charge_type
  if self.client_token is not None:
  result['ClientToken'] = self.client_token
+ if self.cluster_backup_id is not None:
+ result['ClusterBackupId'] = self.cluster_backup_id
  if self.coupon_no is not None:
  result['CouponNo'] = self.coupon_no
  if self.dry_run is not None:
@@ -2700,6 +2713,8 @@ class CreateTairInstanceRequest(TeaModel):
  self.charge_type = m.get('ChargeType')
  if m.get('ClientToken') is not None:
  self.client_token = m.get('ClientToken')
+ if m.get('ClusterBackupId') is not None:
+ self.cluster_backup_id = m.get('ClusterBackupId')
  if m.get('CouponNo') is not None:
  self.coupon_no = m.get('CouponNo')
  if m.get('DryRun') is not None:
@@ -3387,6 +3402,10 @@ class DeleteShardingNodeRequest(TeaModel):
  security_token: str = None,
  shard_count: int = None,
  ):
+ # Specifies whether to enable forced transmission during a configuration change. Valid values:
+ #
+ # * **false** (default): Before the configuration change, the system checks the minor version of the instance. If the minor version of the instance is outdated, an error is reported. You must update the minor version of the instance and try again.
+ # * **true**: The system skips the version check and directly performs the configuration change.
  self.force_trans = force_trans
  # The ID of the instance.
  self.instance_id = instance_id
@@ -6213,6 +6232,7 @@ class DescribeBackupsRequest(TeaModel):
  def __init__(
  self,
  backup_id: int = None,
+ backup_job_id: int = None,
  end_time: str = None,
  instance_id: str = None,
  need_aof: str = None,
@@ -6227,6 +6247,7 @@ class DescribeBackupsRequest(TeaModel):
  ):
  # The ID of the backup file.
  self.backup_id = backup_id
+ self.backup_job_id = backup_job_id
  # The end of the time range to query. Specify the time in the *yyyy-MM-dd*T*HH:mm*Z format. The time must be in UTC. The end time must be later than the start time.
  self.end_time = end_time
  # The ID of the instance whose backup files you want to query.
@@ -6261,6 +6282,8 @@ class DescribeBackupsRequest(TeaModel):
  result = dict()
  if self.backup_id is not None:
  result['BackupId'] = self.backup_id
+ if self.backup_job_id is not None:
+ result['BackupJobId'] = self.backup_job_id
  if self.end_time is not None:
  result['EndTime'] = self.end_time
  if self.instance_id is not None:
@@ -6289,6 +6312,8 @@ class DescribeBackupsRequest(TeaModel):
  m = m or dict()
  if m.get('BackupId') is not None:
  self.backup_id = m.get('BackupId')
+ if m.get('BackupJobId') is not None:
+ self.backup_job_id = m.get('BackupJobId')
  if m.get('EndTime') is not None:
  self.end_time = m.get('EndTime')
  if m.get('InstanceId') is not None:
@@ -7212,6 +7237,413 @@ class DescribeCacheAnalysisReportListResponse(TeaModel):
  return self
 
 
+ class DescribeClusterBackupListRequest(TeaModel):
+ def __init__(
+ self,
+ cluster_backup_id: str = None,
+ end_time: str = None,
+ instance_id: str = None,
+ owner_account: str = None,
+ owner_id: int = None,
+ page_number: int = None,
+ page_size: int = None,
+ region_id: str = None,
+ resource_owner_account: str = None,
+ resource_owner_id: int = None,
+ security_token: str = None,
+ start_time: str = None,
+ ):
+ self.cluster_backup_id = cluster_backup_id
+ self.end_time = end_time
+ self.instance_id = instance_id
+ self.owner_account = owner_account
+ self.owner_id = owner_id
+ self.page_number = page_number
+ self.page_size = page_size
+ self.region_id = region_id
+ self.resource_owner_account = resource_owner_account
+ self.resource_owner_id = resource_owner_id
+ self.security_token = security_token
+ self.start_time = start_time
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.cluster_backup_id is not None:
+ result['ClusterBackupId'] = self.cluster_backup_id
+ if self.end_time is not None:
+ result['EndTime'] = self.end_time
+ if self.instance_id is not None:
+ result['InstanceId'] = self.instance_id
+ if self.owner_account is not None:
+ result['OwnerAccount'] = self.owner_account
+ if self.owner_id is not None:
+ result['OwnerId'] = self.owner_id
+ if self.page_number is not None:
+ result['PageNumber'] = self.page_number
+ if self.page_size is not None:
+ result['PageSize'] = self.page_size
+ if self.region_id is not None:
+ result['RegionId'] = self.region_id
+ if self.resource_owner_account is not None:
+ result['ResourceOwnerAccount'] = self.resource_owner_account
+ if self.resource_owner_id is not None:
+ result['ResourceOwnerId'] = self.resource_owner_id
+ if self.security_token is not None:
+ result['SecurityToken'] = self.security_token
+ if self.start_time is not None:
+ result['StartTime'] = self.start_time
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('ClusterBackupId') is not None:
+ self.cluster_backup_id = m.get('ClusterBackupId')
+ if m.get('EndTime') is not None:
+ self.end_time = m.get('EndTime')
+ if m.get('InstanceId') is not None:
+ self.instance_id = m.get('InstanceId')
+ if m.get('OwnerAccount') is not None:
+ self.owner_account = m.get('OwnerAccount')
+ if m.get('OwnerId') is not None:
+ self.owner_id = m.get('OwnerId')
+ if m.get('PageNumber') is not None:
+ self.page_number = m.get('PageNumber')
+ if m.get('PageSize') is not None:
+ self.page_size = m.get('PageSize')
+ if m.get('RegionId') is not None:
+ self.region_id = m.get('RegionId')
+ if m.get('ResourceOwnerAccount') is not None:
+ self.resource_owner_account = m.get('ResourceOwnerAccount')
+ if m.get('ResourceOwnerId') is not None:
+ self.resource_owner_id = m.get('ResourceOwnerId')
+ if m.get('SecurityToken') is not None:
+ self.security_token = m.get('SecurityToken')
+ if m.get('StartTime') is not None:
+ self.start_time = m.get('StartTime')
+ return self
+
+
+ class DescribeClusterBackupListResponseBodyClusterBackupsBackupsExtraInfo(TeaModel):
+ def __init__(
+ self,
+ custins_db_version: str = None,
+ ):
+ self.custins_db_version = custins_db_version
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.custins_db_version is not None:
+ result['CustinsDbVersion'] = self.custins_db_version
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('CustinsDbVersion') is not None:
+ self.custins_db_version = m.get('CustinsDbVersion')
+ return self
+
+
+ class DescribeClusterBackupListResponseBodyClusterBackupsBackups(TeaModel):
+ def __init__(
+ self,
+ backup_download_url: str = None,
+ backup_end_time: str = None,
+ backup_id: str = None,
+ backup_intranet_download_url: str = None,
+ backup_name: str = None,
+ backup_size: str = None,
+ backup_start_time: str = None,
+ backup_status: str = None,
+ engine: str = None,
+ extra_info: DescribeClusterBackupListResponseBodyClusterBackupsBackupsExtraInfo = None,
+ instance_name: str = None,
+ is_avail: str = None,
+ ):
+ self.backup_download_url = backup_download_url
+ self.backup_end_time = backup_end_time
+ self.backup_id = backup_id
+ self.backup_intranet_download_url = backup_intranet_download_url
+ self.backup_name = backup_name
+ self.backup_size = backup_size
+ self.backup_start_time = backup_start_time
+ self.backup_status = backup_status
+ self.engine = engine
+ self.extra_info = extra_info
+ self.instance_name = instance_name
+ self.is_avail = is_avail
+
+ def validate(self):
+ if self.extra_info:
+ self.extra_info.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.backup_download_url is not None:
+ result['BackupDownloadURL'] = self.backup_download_url
+ if self.backup_end_time is not None:
+ result['BackupEndTime'] = self.backup_end_time
+ if self.backup_id is not None:
+ result['BackupId'] = self.backup_id
+ if self.backup_intranet_download_url is not None:
+ result['BackupIntranetDownloadURL'] = self.backup_intranet_download_url
+ if self.backup_name is not None:
+ result['BackupName'] = self.backup_name
+ if self.backup_size is not None:
+ result['BackupSize'] = self.backup_size
+ if self.backup_start_time is not None:
+ result['BackupStartTime'] = self.backup_start_time
+ if self.backup_status is not None:
+ result['BackupStatus'] = self.backup_status
+ if self.engine is not None:
+ result['Engine'] = self.engine
+ if self.extra_info is not None:
+ result['ExtraInfo'] = self.extra_info.to_map()
+ if self.instance_name is not None:
+ result['InstanceName'] = self.instance_name
+ if self.is_avail is not None:
+ result['IsAvail'] = self.is_avail
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('BackupDownloadURL') is not None:
+ self.backup_download_url = m.get('BackupDownloadURL')
+ if m.get('BackupEndTime') is not None:
+ self.backup_end_time = m.get('BackupEndTime')
+ if m.get('BackupId') is not None:
+ self.backup_id = m.get('BackupId')
+ if m.get('BackupIntranetDownloadURL') is not None:
+ self.backup_intranet_download_url = m.get('BackupIntranetDownloadURL')
+ if m.get('BackupName') is not None:
+ self.backup_name = m.get('BackupName')
+ if m.get('BackupSize') is not None:
+ self.backup_size = m.get('BackupSize')
+ if m.get('BackupStartTime') is not None:
+ self.backup_start_time = m.get('BackupStartTime')
+ if m.get('BackupStatus') is not None:
+ self.backup_status = m.get('BackupStatus')
+ if m.get('Engine') is not None:
+ self.engine = m.get('Engine')
+ if m.get('ExtraInfo') is not None:
+ temp_model = DescribeClusterBackupListResponseBodyClusterBackupsBackupsExtraInfo()
+ self.extra_info = temp_model.from_map(m['ExtraInfo'])
+ if m.get('InstanceName') is not None:
+ self.instance_name = m.get('InstanceName')
+ if m.get('IsAvail') is not None:
+ self.is_avail = m.get('IsAvail')
+ return self
+
+
+ class DescribeClusterBackupListResponseBodyClusterBackups(TeaModel):
+ def __init__(
+ self,
+ backups: List[DescribeClusterBackupListResponseBodyClusterBackupsBackups] = None,
+ cluster_backup_end_time: str = None,
+ cluster_backup_id: str = None,
+ cluster_backup_mode: str = None,
+ cluster_backup_size: str = None,
+ cluster_backup_start_time: str = None,
+ cluster_backup_status: str = None,
+ is_avail: int = None,
+ progress: str = None,
+ shard_class_memory: int = None,
+ ):
+ self.backups = backups
+ self.cluster_backup_end_time = cluster_backup_end_time
+ self.cluster_backup_id = cluster_backup_id
+ self.cluster_backup_mode = cluster_backup_mode
+ self.cluster_backup_size = cluster_backup_size
+ self.cluster_backup_start_time = cluster_backup_start_time
+ self.cluster_backup_status = cluster_backup_status
+ self.is_avail = is_avail
+ self.progress = progress
+ self.shard_class_memory = shard_class_memory
+
+ def validate(self):
+ if self.backups:
+ for k in self.backups:
+ if k:
+ k.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ result['Backups'] = []
+ if self.backups is not None:
+ for k in self.backups:
+ result['Backups'].append(k.to_map() if k else None)
+ if self.cluster_backup_end_time is not None:
+ result['ClusterBackupEndTime'] = self.cluster_backup_end_time
+ if self.cluster_backup_id is not None:
+ result['ClusterBackupId'] = self.cluster_backup_id
+ if self.cluster_backup_mode is not None:
+ result['ClusterBackupMode'] = self.cluster_backup_mode
+ if self.cluster_backup_size is not None:
+ result['ClusterBackupSize'] = self.cluster_backup_size
+ if self.cluster_backup_start_time is not None:
+ result['ClusterBackupStartTime'] = self.cluster_backup_start_time
+ if self.cluster_backup_status is not None:
+ result['ClusterBackupStatus'] = self.cluster_backup_status
+ if self.is_avail is not None:
+ result['IsAvail'] = self.is_avail
+ if self.progress is not None:
+ result['Progress'] = self.progress
+ if self.shard_class_memory is not None:
+ result['ShardClassMemory'] = self.shard_class_memory
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ self.backups = []
+ if m.get('Backups') is not None:
+ for k in m.get('Backups'):
+ temp_model = DescribeClusterBackupListResponseBodyClusterBackupsBackups()
+ self.backups.append(temp_model.from_map(k))
+ if m.get('ClusterBackupEndTime') is not None:
+ self.cluster_backup_end_time = m.get('ClusterBackupEndTime')
+ if m.get('ClusterBackupId') is not None:
+ self.cluster_backup_id = m.get('ClusterBackupId')
+ if m.get('ClusterBackupMode') is not None:
+ self.cluster_backup_mode = m.get('ClusterBackupMode')
+ if m.get('ClusterBackupSize') is not None:
+ self.cluster_backup_size = m.get('ClusterBackupSize')
+ if m.get('ClusterBackupStartTime') is not None:
+ self.cluster_backup_start_time = m.get('ClusterBackupStartTime')
+ if m.get('ClusterBackupStatus') is not None:
+ self.cluster_backup_status = m.get('ClusterBackupStatus')
+ if m.get('IsAvail') is not None:
+ self.is_avail = m.get('IsAvail')
+ if m.get('Progress') is not None:
+ self.progress = m.get('Progress')
+ if m.get('ShardClassMemory') is not None:
+ self.shard_class_memory = m.get('ShardClassMemory')
+ return self
+
+
+ class DescribeClusterBackupListResponseBody(TeaModel):
+ def __init__(
+ self,
+ cluster_backups: List[DescribeClusterBackupListResponseBodyClusterBackups] = None,
+ max_results: int = None,
+ page_number: int = None,
+ page_size: int = None,
+ request_id: str = None,
+ ):
+ self.cluster_backups = cluster_backups
+ self.max_results = max_results
+ self.page_number = page_number
+ self.page_size = page_size
+ self.request_id = request_id
+
+ def validate(self):
+ if self.cluster_backups:
+ for k in self.cluster_backups:
+ if k:
+ k.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ result['ClusterBackups'] = []
+ if self.cluster_backups is not None:
+ for k in self.cluster_backups:
+ result['ClusterBackups'].append(k.to_map() if k else None)
+ if self.max_results is not None:
+ result['MaxResults'] = self.max_results
+ if self.page_number is not None:
+ result['PageNumber'] = self.page_number
+ if self.page_size is not None:
+ result['PageSize'] = self.page_size
+ if self.request_id is not None:
+ result['RequestId'] = self.request_id
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ self.cluster_backups = []
+ if m.get('ClusterBackups') is not None:
+ for k in m.get('ClusterBackups'):
+ temp_model = DescribeClusterBackupListResponseBodyClusterBackups()
+ self.cluster_backups.append(temp_model.from_map(k))
+ if m.get('MaxResults') is not None:
+ self.max_results = m.get('MaxResults')
+ if m.get('PageNumber') is not None:
+ self.page_number = m.get('PageNumber')
+ if m.get('PageSize') is not None:
+ self.page_size = m.get('PageSize')
+ if m.get('RequestId') is not None:
+ self.request_id = m.get('RequestId')
+ return self
+
+
+ class DescribeClusterBackupListResponse(TeaModel):
+ def __init__(
+ self,
+ headers: Dict[str, str] = None,
+ status_code: int = None,
+ body: DescribeClusterBackupListResponseBody = None,
+ ):
+ self.headers = headers
+ self.status_code = status_code
+ self.body = body
+
+ def validate(self):
+ self.validate_required(self.headers, 'headers')
+ self.validate_required(self.status_code, 'status_code')
+ self.validate_required(self.body, 'body')
+ if self.body:
+ self.body.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.headers is not None:
+ result['headers'] = self.headers
+ if self.status_code is not None:
+ result['statusCode'] = self.status_code
+ if self.body is not None:
+ result['body'] = self.body.to_map()
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('headers') is not None:
+ self.headers = m.get('headers')
+ if m.get('statusCode') is not None:
+ self.status_code = m.get('statusCode')
+ if m.get('body') is not None:
+ temp_model = DescribeClusterBackupListResponseBody()
+ self.body = temp_model.from_map(m['body'])
+ return self
+
+
  class DescribeClusterMemberInfoRequest(TeaModel):
  def __init__(
  self,
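The new response classes above follow the usual TeaModel pattern, so they can be populated from a plain dict. A small sketch using only keys defined in this hunk follows; the sample values are invented:

# Hypothetical sketch: hydrate the new response-body model from a plain dict.
from alibabacloud_r_kvstore20150101 import models as r_kvstore_models

payload = {
    'RequestId': 'ABCD-1234',
    'PageNumber': 1,
    'PageSize': 30,
    'ClusterBackups': [{
        'ClusterBackupId': 'cb-xxxxxxxx',
        'ClusterBackupStatus': 'OK',
        'Backups': [{'BackupId': '123456', 'Engine': 'redis'}],
    }],
}
body = r_kvstore_models.DescribeClusterBackupListResponseBody().from_map(payload)
body.validate()
print(body.cluster_backups[0].backups[0].backup_id)              # -> 123456
print(body.to_map()['ClusterBackups'][0]['ClusterBackupId'])      # -> cb-xxxxxxxx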
@@ -13765,6 +14197,9 @@ class DescribeIntranetAttributeResponseBody(TeaModel):
  #
  # > If no extra internal bandwidth is purchased, this parameter is not returned.
  self.bandwidth_expire_time = bandwidth_expire_time
+ # The billing methods of unexpired bandwith plans. Valid values:
+ # - **0**: Pay-as-you-go
+ # - **1**: Subscription
  self.bandwidth_pre_paid = bandwidth_pre_paid
  # The time when the extra internal bandwidth that you purchased for temporary use expires. The time follows the ISO 8601 standard in the *yyyy-MM-dd*T*HH:mm:ss*Z format. The time is displayed in UTC.
  #
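The newly documented BandwidthPrePaid values can be mapped to readable labels when inspecting DescribeIntranetAttribute results. A hedged sketch follows; describe_intranet_attribute is an existing operation and its request field name, the credentials, and the instance ID are assumptions, while the 0/1 mapping itself comes from the comment added above:

# Hypothetical sketch: interpret the newly documented BandwidthPrePaid values.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_r_kvstore20150101.client import Client
from alibabacloud_r_kvstore20150101 import models as r_kvstore_models

config = open_api_models.Config(access_key_id='<ak>', access_key_secret='<sk>',
                                endpoint='r-kvstore.aliyuncs.com')
client = Client(config)

request = r_kvstore_models.DescribeIntranetAttributeRequest(instance_id='r-bp1xxxxxxxxxxxxx')
body = client.describe_intranet_attribute(request).body
billing = {'0': 'pay-as-you-go', '1': 'subscription'}
print(billing.get(str(body.bandwidth_pre_paid), 'unknown'))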
@@ -15690,6 +16125,7 @@ class DescribePriceResponseBodyOrder(TeaModel):
  self.original_amount = original_amount
  # Details about promotion rule IDs.
  self.rule_ids = rule_ids
+ # Indicates whether the discount information is displayed.
  self.show_discount_info = show_discount_info
  # The transaction price of the order.
  self.trade_amount = trade_amount
@@ -15864,9 +16300,9 @@ class DescribePriceResponseBodySubOrdersSubOrder(TeaModel):
  self.discount_amount = discount_amount
  # The instance ID.
  self.instance_id = instance_id
- # The list price of the order.
+ # The original price of the order.
  self.original_amount = original_amount
- # The ID of the promotion rule.
+ # The rule IDs.
  self.rule_ids = rule_ids
  # The final price of the order.
  self.trade_amount = trade_amount
@@ -18343,6 +18779,7 @@ class DescribeZonesResponseBody(TeaModel):
  request_id: str = None,
  zones: DescribeZonesResponseBodyZones = None,
  ):
+ # The ID of the request.
  self.request_id = request_id
  # The queried zones.
  self.zones = zones
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: alibabacloud-r-kvstore20150101
- Version: 2.24.1
+ Version: 2.25.1
  Summary: Alibaba Cloud R-kvstore (20150101) SDK Library for Python
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
  Author: Alibaba Cloud SDK
@@ -24,7 +24,7 @@ from setuptools import setup, find_packages
  """
  setup module for alibabacloud_r-kvstore20150101.
 
- Created on 27/12/2023
+ Created on 18/01/2024
 
  @author: Alibaba Cloud SDK
  """