@volcengine/pulumi-volcenginecc 0.0.31 → 0.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190) hide show
  1. package/cen/getInterRegionBandwidth.d.ts +72 -0
  2. package/cen/getInterRegionBandwidth.js +28 -0
  3. package/cen/getInterRegionBandwidth.js.map +1 -0
  4. package/cen/getInterRegionBandwidths.d.ts +22 -0
  5. package/cen/getInterRegionBandwidths.js +24 -0
  6. package/cen/getInterRegionBandwidths.js.map +1 -0
  7. package/cen/getRouteEntries.d.ts +22 -0
  8. package/cen/getRouteEntries.js +24 -0
  9. package/cen/getRouteEntries.js.map +1 -0
  10. package/cen/getRouteEntry.d.ts +72 -0
  11. package/cen/getRouteEntry.js +28 -0
  12. package/cen/getRouteEntry.js.map +1 -0
  13. package/cen/getServiceRouteEntries.d.ts +22 -0
  14. package/cen/getServiceRouteEntries.js +24 -0
  15. package/cen/getServiceRouteEntries.js.map +1 -0
  16. package/cen/getServiceRouteEntry.d.ts +73 -0
  17. package/cen/getServiceRouteEntry.js +28 -0
  18. package/cen/getServiceRouteEntry.js.map +1 -0
  19. package/cen/index.d.ts +27 -0
  20. package/cen/index.js +34 -1
  21. package/cen/index.js.map +1 -1
  22. package/cen/interRegionBandwidth.d.ts +152 -0
  23. package/cen/interRegionBandwidth.js +101 -0
  24. package/cen/interRegionBandwidth.js.map +1 -0
  25. package/cen/routeEntry.d.ts +152 -0
  26. package/cen/routeEntry.js +104 -0
  27. package/cen/routeEntry.js.map +1 -0
  28. package/cen/serviceRouteEntry.d.ts +138 -0
  29. package/cen/serviceRouteEntry.js +83 -0
  30. package/cen/serviceRouteEntry.js.map +1 -0
  31. package/clb/getListener.d.ts +4 -0
  32. package/clb/getListener.js.map +1 -1
  33. package/clb/listener.d.ts +3 -0
  34. package/clb/listener.js +2 -0
  35. package/clb/listener.js.map +1 -1
  36. package/mongodb/getSslState.d.ts +56 -0
  37. package/mongodb/getSslState.js +28 -0
  38. package/mongodb/getSslState.js.map +1 -0
  39. package/mongodb/getSslStates.d.ts +22 -0
  40. package/mongodb/getSslStates.js +24 -0
  41. package/mongodb/getSslStates.js.map +1 -0
  42. package/mongodb/index.d.ts +9 -0
  43. package/mongodb/index.js +12 -1
  44. package/mongodb/index.js.map +1 -1
  45. package/mongodb/sslState.d.ts +105 -0
  46. package/mongodb/sslState.js +81 -0
  47. package/mongodb/sslState.js.map +1 -0
  48. package/package.json +1 -1
  49. package/privatelink/endpointService.d.ts +15 -3
  50. package/privatelink/endpointService.js +2 -0
  51. package/privatelink/endpointService.js.map +1 -1
  52. package/privatelink/getEndpointService.d.ts +5 -1
  53. package/privatelink/getEndpointService.js.map +1 -1
  54. package/privatelink/getVpcEndpointConnection.d.ts +89 -0
  55. package/privatelink/getVpcEndpointConnection.js +28 -0
  56. package/privatelink/getVpcEndpointConnection.js.map +1 -0
  57. package/privatelink/getVpcEndpointConnections.d.ts +22 -0
  58. package/privatelink/getVpcEndpointConnections.js +24 -0
  59. package/privatelink/getVpcEndpointConnections.js.map +1 -0
  60. package/privatelink/index.d.ts +9 -0
  61. package/privatelink/index.js +12 -1
  62. package/privatelink/index.js.map +1 -1
  63. package/privatelink/vpcEndpointConnection.d.ts +156 -0
  64. package/privatelink/vpcEndpointConnection.js +88 -0
  65. package/privatelink/vpcEndpointConnection.js.map +1 -0
  66. package/rabbitmq/allowList.d.ts +151 -0
  67. package/rabbitmq/allowList.js +100 -0
  68. package/rabbitmq/allowList.js.map +1 -0
  69. package/rabbitmq/getAllowList.d.ts +73 -0
  70. package/rabbitmq/getAllowList.js +28 -0
  71. package/rabbitmq/getAllowList.js.map +1 -0
  72. package/rabbitmq/getAllowLists.d.ts +22 -0
  73. package/rabbitmq/getAllowLists.js +24 -0
  74. package/rabbitmq/getAllowLists.js.map +1 -0
  75. package/rabbitmq/index.d.ts +9 -0
  76. package/rabbitmq/index.js +12 -1
  77. package/rabbitmq/index.js.map +1 -1
  78. package/redis/allowList.d.ts +148 -0
  79. package/redis/allowList.js +78 -0
  80. package/redis/allowList.js.map +1 -0
  81. package/redis/getAllowList.d.ts +81 -0
  82. package/redis/getAllowList.js +28 -0
  83. package/redis/getAllowList.js.map +1 -0
  84. package/redis/getAllowLists.d.ts +22 -0
  85. package/redis/getAllowLists.js +24 -0
  86. package/redis/getAllowLists.js.map +1 -0
  87. package/redis/index.d.ts +9 -0
  88. package/redis/index.js +12 -1
  89. package/redis/index.js.map +1 -1
  90. package/storageebs/getSnapshotGroup.d.ts +81 -0
  91. package/storageebs/getSnapshotGroup.js +28 -0
  92. package/storageebs/getSnapshotGroup.js.map +1 -0
  93. package/storageebs/getSnapshotGroups.d.ts +22 -0
  94. package/storageebs/getSnapshotGroups.js +24 -0
  95. package/storageebs/getSnapshotGroups.js.map +1 -0
  96. package/storageebs/index.d.ts +9 -0
  97. package/storageebs/index.js +12 -1
  98. package/storageebs/index.js.map +1 -1
  99. package/storageebs/snapshotGroup.d.ts +168 -0
  100. package/storageebs/snapshotGroup.js +105 -0
  101. package/storageebs/snapshotGroup.js.map +1 -0
  102. package/tls/alarmNotifyGroup.d.ts +113 -0
  103. package/tls/alarmNotifyGroup.js +75 -0
  104. package/tls/alarmNotifyGroup.js.map +1 -0
  105. package/tls/getAlarmNotifyGroup.d.ts +69 -0
  106. package/tls/getAlarmNotifyGroup.js +28 -0
  107. package/tls/getAlarmNotifyGroup.js.map +1 -0
  108. package/tls/getAlarmNotifyGroups.d.ts +22 -0
  109. package/tls/getAlarmNotifyGroups.js +24 -0
  110. package/tls/getAlarmNotifyGroups.js.map +1 -0
  111. package/tls/getRule.d.ts +109 -0
  112. package/tls/getRule.js +28 -0
  113. package/tls/getRule.js.map +1 -0
  114. package/tls/getRules.d.ts +22 -0
  115. package/tls/getRules.js +24 -0
  116. package/tls/getRules.js.map +1 -0
  117. package/tls/getShipper.d.ts +101 -0
  118. package/tls/getShipper.js +28 -0
  119. package/tls/getShipper.js.map +1 -0
  120. package/tls/getShippers.d.ts +22 -0
  121. package/tls/getShippers.js +24 -0
  122. package/tls/getShippers.js.map +1 -0
  123. package/tls/index.d.ts +27 -0
  124. package/tls/index.js +34 -1
  125. package/tls/index.js.map +1 -1
  126. package/tls/rule.d.ts +221 -0
  127. package/tls/rule.js +98 -0
  128. package/tls/rule.js.map +1 -0
  129. package/tls/shipper.d.ts +211 -0
  130. package/tls/shipper.js +97 -0
  131. package/tls/shipper.js.map +1 -0
  132. package/tos/bucketEncryption.d.ts +94 -0
  133. package/tos/bucketEncryption.js +78 -0
  134. package/tos/bucketEncryption.js.map +1 -0
  135. package/tos/bucketInventory.d.ts +155 -0
  136. package/tos/bucketInventory.js +95 -0
  137. package/tos/bucketInventory.js.map +1 -0
  138. package/tos/getBucketEncryption.d.ts +48 -0
  139. package/tos/getBucketEncryption.js +28 -0
  140. package/tos/getBucketEncryption.js.map +1 -0
  141. package/tos/getBucketEncryptions.d.ts +22 -0
  142. package/tos/getBucketEncryptions.js +24 -0
  143. package/tos/getBucketEncryptions.js.map +1 -0
  144. package/tos/getBucketInventories.d.ts +22 -0
  145. package/tos/getBucketInventories.js +24 -0
  146. package/tos/getBucketInventories.js.map +1 -0
  147. package/tos/getBucketInventory.d.ts +73 -0
  148. package/tos/getBucketInventory.js +28 -0
  149. package/tos/getBucketInventory.js.map +1 -0
  150. package/tos/index.d.ts +18 -0
  151. package/tos/index.js +23 -1
  152. package/tos/index.js.map +1 -1
  153. package/types/input.d.ts +934 -31
  154. package/types/output.d.ts +2309 -360
  155. package/vefaas/getRelease.d.ts +120 -0
  156. package/vefaas/getRelease.js +28 -0
  157. package/vefaas/getRelease.js.map +1 -0
  158. package/vefaas/getReleases.d.ts +22 -0
  159. package/vefaas/getReleases.js +24 -0
  160. package/vefaas/getReleases.js.map +1 -0
  161. package/vefaas/index.d.ts +9 -0
  162. package/vefaas/index.js +12 -1
  163. package/vefaas/index.js.map +1 -1
  164. package/vefaas/release.d.ts +253 -0
  165. package/vefaas/release.js +120 -0
  166. package/vefaas/release.js.map +1 -0
  167. package/vepfs/getMountService.d.ts +93 -0
  168. package/vepfs/getMountService.js +28 -0
  169. package/vepfs/getMountService.js.map +1 -0
  170. package/vepfs/getMountServices.d.ts +22 -0
  171. package/vepfs/getMountServices.js +24 -0
  172. package/vepfs/getMountServices.js.map +1 -0
  173. package/vepfs/index.d.ts +9 -0
  174. package/vepfs/index.js +12 -1
  175. package/vepfs/index.js.map +1 -1
  176. package/vepfs/mountService.d.ts +172 -0
  177. package/vepfs/mountService.js +99 -0
  178. package/vepfs/mountService.js.map +1 -0
  179. package/vke/getKubeconfig.d.ts +72 -0
  180. package/vke/getKubeconfig.js +28 -0
  181. package/vke/getKubeconfig.js.map +1 -0
  182. package/vke/getKubeconfigs.d.ts +22 -0
  183. package/vke/getKubeconfigs.js +24 -0
  184. package/vke/getKubeconfigs.js.map +1 -0
  185. package/vke/index.d.ts +9 -0
  186. package/vke/index.js +12 -1
  187. package/vke/index.js.map +1 -1
  188. package/vke/kubeconfig.d.ts +142 -0
  189. package/vke/kubeconfig.js +93 -0
  190. package/vke/kubeconfig.js.map +1 -0
package/types/output.d.ts CHANGED
@@ -8043,6 +8043,34 @@ export declare namespace cen {
8043
8043
  */
8044
8044
  value: string;
8045
8045
  }
8046
+ interface GetServiceRouteEntryPublishToInstance {
8047
+ /**
8048
+ * Network instance ID for published cloud service access route.
8049
+ */
8050
+ instanceId: string;
8051
+ /**
8052
+ * Region for published cloud service access route.
8053
+ */
8054
+ instanceRegionId: string;
8055
+ /**
8056
+ * Network instance type for published cloud service access route.
8057
+ */
8058
+ instanceType: string;
8059
+ }
8060
+ interface ServiceRouteEntryPublishToInstance {
8061
+ /**
8062
+ * Network instance ID for published cloud service access route.
8063
+ */
8064
+ instanceId: string;
8065
+ /**
8066
+ * Region for published cloud service access route.
8067
+ */
8068
+ instanceRegionId: string;
8069
+ /**
8070
+ * Network instance type for published cloud service access route.
8071
+ */
8072
+ instanceType: string;
8073
+ }
8046
8074
  }
8047
8075
  export declare namespace clb {
8048
8076
  interface AclAclEntry {
@@ -8389,6 +8417,24 @@ export declare namespace clb {
8389
8417
  */
8390
8418
  value: string;
8391
8419
  }
8420
+ interface GetListenerDomainExtension {
8421
+ /**
8422
+ * Certificate ID of the extended domain name.
8423
+ */
8424
+ certCenterCertificateId: string;
8425
+ /**
8426
+ * Certificate source for the extended domain name to be added. Value: cert_center: SSL certificate from Volcano Engine Certificate Center. This parameter is required when adding an extended domain name.
8427
+ */
8428
+ certificateSource: string;
8429
+ /**
8430
+ * Domain name. Supports both wildcard and exact domain names. Specifications: 1. Must contain at least one '.', and cannot start or end with '.'. 2. Only letters, numbers, '.', '-', and '*' are allowed. 3. Length must be between 1 and 128 characters. 4. Wildcard domain: Use '*' to replace one or more characters. 5. Exact domain: A domain name that strictly follows domain name specifications.
8431
+ */
8432
+ domain: string;
8433
+ /**
8434
+ * Extended domain name ID.
8435
+ */
8436
+ domainExtensionId: string;
8437
+ }
8392
8438
  interface GetListenerHealthCheck {
8393
8439
  /**
8394
8440
  * Domain name for health check. Must be set to the actual address provided by the backend server. This parameter takes effect when Protocol is set to HTTP or HTTPS and HealthCheck.Enabled is on. Must contain at least one period (.), and cannot start or end with a period (.). Each string can include letters, numbers, hyphens (-), and periods (.), with hyphens (-) not allowed at the beginning or end of the string. Length limit: 1 to 128 characters. If this parameter is not provided or no value is specified, the default is empty, meaning CLB uses the private IP address of each backend server for health checks.
@@ -8757,6 +8803,20 @@ export declare namespace clb {
8757
8803
  */
8758
8804
  value: string;
8759
8805
  }
8806
+ interface ListenerDomainExtension {
8807
+ /**
8808
+ * Certificate ID of the extended domain name.
8809
+ */
8810
+ certCenterCertificateId: string;
8811
+ /**
8812
+ * Certificate source for the extended domain name to be added. Value: cert_center: SSL certificate from Volcano Engine Certificate Center. This parameter is required when adding an extended domain name.
8813
+ */
8814
+ certificateSource: string;
8815
+ /**
8816
+ * Domain name. Supports both wildcard and exact domain names. Specifications: 1. Must contain at least one '.', and cannot start or end with '.'. 2. Only letters, numbers, '.', '-', and '*' are allowed. 3. Length must be between 1 and 128 characters. 4. Wildcard domain: Use '*' to replace one or more characters. 5. Exact domain: A domain name that strictly follows domain name specifications.
8817
+ */
8818
+ domain: string;
8819
+ }
8760
8820
  interface ListenerHealthCheck {
8761
8821
  /**
8762
8822
  * Domain name for health check. Must be set to the actual address provided by the backend server. This parameter takes effect when Protocol is set to HTTP or HTTPS and HealthCheck.Enabled is on. Must contain at least one period (.), and cannot start or end with a period (.). Each string can include letters, numbers, hyphens (-), and periods (.), with hyphens (-) not allowed at the beginning or end of the string. Length limit: 1 to 128 characters. If this parameter is not provided or no value is specified, the default is empty, meaning CLB uses the private IP address of each backend server for health checks.
@@ -15596,10 +15656,6 @@ export declare namespace privatelink {
15596
15656
  value: string;
15597
15657
  }
15598
15658
  interface EndpointServiceResource {
15599
- /**
15600
- * Endpoint service ID.
15601
- */
15602
- instanceId: string;
15603
15659
  /**
15604
15660
  * Service resource ID to be added to the endpoint service.
15605
15661
  */
@@ -15638,14 +15694,14 @@ export declare namespace privatelink {
15638
15694
  value: string;
15639
15695
  }
15640
15696
  interface GetEndpointServiceResource {
15641
- /**
15642
- * Endpoint service ID.
15643
- */
15644
- instanceId: string;
15645
15697
  /**
15646
15698
  * Service resource ID to be added to the endpoint service.
15647
15699
  */
15648
15700
  resourceId: string;
15701
+ /**
15702
+ * Type of service resource.
15703
+ */
15704
+ resourceType: string;
15649
15705
  /**
15650
15706
  * Availability zone where the load balancer provides service.
15651
15707
  */
@@ -15661,6 +15717,54 @@ export declare namespace privatelink {
15661
15717
  */
15662
15718
  value: string;
15663
15719
  }
15720
+ interface GetVpcEndpointConnectionResourcesAllocate {
15721
+ /**
15722
+ * Specify the service resource for the endpoint connection
15723
+ */
15724
+ resourceId: string;
15725
+ /**
15726
+ * Availability zone ID of the service resource to be specified
15727
+ */
15728
+ zoneId: string;
15729
+ }
15730
+ interface GetVpcEndpointConnectionZone {
15731
+ /**
15732
+ * Endpoint network interface ID. If a seamless service resource replacement is in progress, this indicates the endpoint network interface ID connected to the replaced service resource
15733
+ */
15734
+ networkInterfaceId: string;
15735
+ /**
15736
+ * The private IPv4 address of the terminal node NIC. If a smooth service resource replacement is in progress, this refers to the IPv4 address of the terminal node NIC connected to the service resource being replaced.
15737
+ */
15738
+ networkInterfaceIp: string;
15739
+ /**
15740
+ * Private IPv6 address of the endpoint network interface. If a seamless service resource replacement is in progress, this indicates the IPv6 address of the endpoint network interface connected to the replaced service resource. If the endpoint only supports IPv4, this parameter is not returned
15741
+ */
15742
+ networkInterfaceIpv6: string;
15743
+ /**
15744
+ * Service resource ID
15745
+ */
15746
+ resourceId: string;
15747
+ /**
15748
+ * Status of the service resource
15749
+ */
15750
+ serviceStatus: string;
15751
+ /**
15752
+ * ID of the subnet to which the endpoint network interface belongs
15753
+ */
15754
+ subnetId: string;
15755
+ /**
15756
+ * The domain name of the terminal node availability zone.
15757
+ */
15758
+ zoneDomain: string;
15759
+ /**
15760
+ * Endpoint zone ID
15761
+ */
15762
+ zoneId: string;
15763
+ /**
15764
+ * Status of the endpoint zone. PendingAcceptance: waiting for connection. Connecting: connecting. Connected: connected. Disconnecting: disconnecting. Rejected: connection rejected. Failed: connection failed
15765
+ */
15766
+ zoneStatus: string;
15767
+ }
15664
15768
  interface GetVpcEndpointTag {
15665
15769
  /**
15666
15770
  * Tag key of the endpoint user tag. Length limit: 1–128 characters. Case sensitive. Cannot start with 'or' or 'sys:' in any case combination. Cannot start or end with a space. Allowed characters: letters, numbers, spaces ( ), underscores (_), periods (.), colons (:), slashes (/), equal signs (=), plus signs (+), hyphens (-), and @.
@@ -15709,6 +15813,54 @@ export declare namespace privatelink {
15709
15813
  */
15710
15814
  zoneStatus: string;
15711
15815
  }
15816
+ interface VpcEndpointConnectionResourcesAllocate {
15817
+ /**
15818
+ * Specify the service resource for the endpoint connection
15819
+ */
15820
+ resourceId: string;
15821
+ /**
15822
+ * Availability zone ID of the service resource to be specified
15823
+ */
15824
+ zoneId: string;
15825
+ }
15826
+ interface VpcEndpointConnectionZone {
15827
+ /**
15828
+ * Endpoint network interface ID. If a seamless service resource replacement is in progress, this indicates the endpoint network interface ID connected to the replaced service resource
15829
+ */
15830
+ networkInterfaceId: string;
15831
+ /**
15832
+ * The private IPv4 address of the terminal node NIC. If a smooth service resource replacement is in progress, this refers to the IPv4 address of the terminal node NIC connected to the service resource being replaced.
15833
+ */
15834
+ networkInterfaceIp: string;
15835
+ /**
15836
+ * Private IPv6 address of the endpoint network interface. If a seamless service resource replacement is in progress, this indicates the IPv6 address of the endpoint network interface connected to the replaced service resource. If the endpoint only supports IPv4, this parameter is not returned
15837
+ */
15838
+ networkInterfaceIpv6: string;
15839
+ /**
15840
+ * Service resource ID
15841
+ */
15842
+ resourceId: string;
15843
+ /**
15844
+ * Status of the service resource
15845
+ */
15846
+ serviceStatus: string;
15847
+ /**
15848
+ * ID of the subnet to which the endpoint network interface belongs
15849
+ */
15850
+ subnetId: string;
15851
+ /**
15852
+ * The domain name of the terminal node availability zone.
15853
+ */
15854
+ zoneDomain: string;
15855
+ /**
15856
+ * Endpoint zone ID
15857
+ */
15858
+ zoneId: string;
15859
+ /**
15860
+ * Status of the endpoint zone. PendingAcceptance: waiting for connection. Connecting: connecting. Connected: connected. Disconnecting: disconnecting. Rejected: connection rejected. Failed: connection failed
15861
+ */
15862
+ zoneStatus: string;
15863
+ }
15712
15864
  interface VpcEndpointTag {
15713
15865
  /**
15714
15866
  * Tag key of the endpoint user tag. Length limit: 1–128 characters. Case sensitive. Cannot start with 'or' or 'sys:' in any case combination. Cannot start or end with a space. Allowed characters: letters, numbers, spaces ( ), underscores (_), periods (.), colons (:), slashes (/), equal signs (=), plus signs (+), hyphens (-), and @.
@@ -15865,6 +16017,26 @@ export declare namespace privatezone {
15865
16017
  }
15866
16018
  }
15867
16019
  export declare namespace rabbitmq {
16020
+ interface AllowListAssociatedInstance {
16021
+ /**
16022
+ * Instance ID
16023
+ */
16024
+ instanceId: string;
16025
+ }
16026
+ interface GetAllowListAssociatedInstance {
16027
+ /**
16028
+ * Instance ID
16029
+ */
16030
+ instanceId: string;
16031
+ /**
16032
+ * Instance Name
16033
+ */
16034
+ instanceName: string;
16035
+ /**
16036
+ * VPC ID of the instance
16037
+ */
16038
+ vpc: string;
16039
+ }
15868
16040
  interface GetInstanceChargeDetail {
15869
16041
  /**
15870
16042
  * Whether to automatically renew the subscription instance after expiration.
@@ -18087,6 +18259,70 @@ export declare namespace rdspostgresql {
18087
18259
  }
18088
18260
  }
18089
18261
  export declare namespace redis {
18262
+ interface AllowListAssociatedInstance {
18263
+ /**
18264
+ * Instance ID bound to the current allowlist
18265
+ */
18266
+ instanceId: string;
18267
+ /**
18268
+ * Instance names bound to the current allowlist
18269
+ */
18270
+ instanceName: string;
18271
+ /**
18272
+ * Project name associated with the instance
18273
+ */
18274
+ projectName: string;
18275
+ /**
18276
+ * Private network ID associated with the instance
18277
+ */
18278
+ vpc: string;
18279
+ }
18280
+ interface AllowListSecurityGroupBindInfo {
18281
+ /**
18282
+ * Security group association mode. The value range is as follows: IngressDirectionIp: ingress IP, which allows IPs involved in TCP and ALL protocols in the source address of the security group ingress direction to access the database. If the source address is configured as a security group, it will be ignored. AssociateEcsIp: associate ECS IP, which allows cloud servers within the security group to access the database. Currently, only importing IP information of the primary network interface is supported
18283
+ */
18284
+ bindMode: string;
18285
+ /**
18286
+ * Associated security group ID
18287
+ */
18288
+ securityGroupId: string;
18289
+ }
18290
+ interface GetAllowListAssociatedInstance {
18291
+ /**
18292
+ * Instance ID bound to the current allowlist
18293
+ */
18294
+ instanceId: string;
18295
+ /**
18296
+ * Instance names bound to the current allowlist
18297
+ */
18298
+ instanceName: string;
18299
+ /**
18300
+ * Project name associated with the instance
18301
+ */
18302
+ projectName: string;
18303
+ /**
18304
+ * Private network ID associated with the instance
18305
+ */
18306
+ vpc: string;
18307
+ }
18308
+ interface GetAllowListSecurityGroupBindInfo {
18309
+ /**
18310
+ * Security group association mode. The value range is as follows: IngressDirectionIp: ingress IP, which allows IPs involved in TCP and ALL protocols in the source address of the security group ingress direction to access the database. If the source address is configured as a security group, it will be ignored. AssociateEcsIp: associate ECS IP, which allows cloud servers within the security group to access the database. Currently, only importing IP information of the primary network interface is supported
18311
+ */
18312
+ bindMode: string;
18313
+ /**
18314
+ * IP list of security groups associated with the allowlist
18315
+ */
18316
+ ipLists: string[];
18317
+ /**
18318
+ * Associated security group ID
18319
+ */
18320
+ securityGroupId: string;
18321
+ /**
18322
+ * Name of the associated security group
18323
+ */
18324
+ securityGroupName: string;
18325
+ }
18090
18326
  interface GetInstanceCapacity {
18091
18327
  /**
18092
18328
  * Total memory capacity of the current instance. Unit: MiB.
@@ -18620,99 +18856,308 @@ export declare namespace rocketmq {
18620
18856
  }
18621
18857
  }
18622
18858
  export declare namespace storageebs {
18623
- interface GetSnapshotTag {
18859
+ interface GetSnapshotGroupSnapshot {
18624
18860
  /**
18625
- * User tag key added to the resource. Naming rules: Cannot start with volc: or sys: in any case. Keys starting with volc: or sys: are reserved system tag keys and cannot be created. Only language characters, numbers, spaces, and the following English symbols are allowed: '_', '.', ':', '/', '=', '+', '-', '@'. Length must be between 1 and 128 characters.
18861
+ * Snapshot creation time
18626
18862
  */
18627
- key: string;
18863
+ creationTime: string;
18628
18864
  /**
18629
- * User tag value added to the resource. Naming rules: Only language characters, numbers, spaces, and the following English symbols are allowed: '_', '.', ':', '/', '=', '+', '-', '@'. Can be empty. Length must be between 0 and 256 characters.
18865
+ * Snapshot description
18630
18866
  */
18631
- value: string;
18632
- }
18633
- interface GetVolumeBaselinePerformance {
18867
+ description: string;
18634
18868
  /**
18635
- * Total IOPS of the disk, which is the sum of the baseline IOPS and extra IOPS.
18869
+ * Image ID
18636
18870
  */
18637
- iops: number;
18871
+ imageId: string;
18638
18872
  /**
18639
- * The total throughput of the cloud disk is the sum of its baseline throughput and additional throughput.
18873
+ * Whether the snapshot has enabled ultra-fast availability. Values are as follows: - true: Ultra-fast availability enabled - false: Ultra-fast availability not enabled
18640
18874
  */
18641
- throughput: number;
18642
- }
18643
- interface GetVolumeExtraPerformance {
18875
+ instantAccess: boolean;
18644
18876
  /**
18645
- * Type of extra performance. Value description: Balance: Balanced extra performance; IOPS: IOPS extra performance; Throughput: Throughput extra performance.
18877
+ * Dump progress
18646
18878
  */
18647
- extraPerformanceTypeId: string;
18879
+ progress: number;
18648
18880
  /**
18649
- * Extra IOPS of the disk.
18881
+ * Project of the snapshot
18650
18882
  */
18651
- iops: number;
18883
+ projectName: string;
18652
18884
  /**
18653
- * Extra throughput of the disk.
18885
+ * Retention days for automatic snapshots
18654
18886
  */
18655
- throughput: number;
18656
- }
18657
- interface GetVolumeTag {
18887
+ retentionDays: number;
18658
18888
  /**
18659
- * Tag key.
18889
+ * Whether the snapshot is shared with others. Values are as follows: - true: Snapshot is shared with others - false: Snapshot is not shared with others
18660
18890
  */
18661
- key: string;
18891
+ shared: boolean;
18662
18892
  /**
18663
- * Tag value.
18893
+ * Snapshot consistency group ID
18664
18894
  */
18665
- value: string;
18666
- }
18667
- interface GetVolumeTotalPerformance {
18895
+ snapshotGroupId: string;
18668
18896
  /**
18669
- * Total IOPS of the disk, which is the sum of the baseline IOPS and extra IOPS.
18897
+ * Snapshot ID
18670
18898
  */
18671
- iops: number;
18899
+ snapshotId: string;
18672
18900
  /**
18673
- * The total throughput of the cloud disk is the sum of its baseline throughput and additional throughput.
18901
+ * Snapshot name
18674
18902
  */
18675
- throughput: number;
18676
- }
18677
- interface SnapshotTag {
18903
+ snapshotName: string;
18678
18904
  /**
18679
- * User tag key added to the resource. Naming rules: Cannot start with volc: or sys: in any case. Keys starting with volc: or sys: are reserved system tag keys and cannot be created. Only language characters, numbers, spaces, and the following English symbols are allowed: '_', '.', ':', '/', '=', '+', '-', '@'. Length must be between 1 and 128 characters.
18905
+ * Snapshot type. Values are as follows: - user: Manual snapshot - auto: Automatic snapshot
18680
18906
  */
18681
- key: string;
18907
+ snapshotType: string;
18682
18908
  /**
18683
- * User tag value added to the resource. Naming rules: Only language characters, numbers, spaces, and the following English symbols are allowed: '_', '.', ':', '/', '=', '+', '-', '@'. Can be empty. Length must be between 0 and 256 characters.
18909
+ * Snapshot status. Values are as follows: - available: Available - creating: Creating - rollbacking: Rolling back - deleted: Deleted - failed: Error
18684
18910
  */
18685
- value: string;
18686
- }
18687
- interface VolumeBaselinePerformance {
18911
+ status: string;
18688
18912
  /**
18689
- * Total IOPS of the disk, which is the sum of the baseline IOPS and extra IOPS.
18913
+ * Tag information
18690
18914
  */
18691
- iops: number;
18915
+ tags: outputs.storageebs.GetSnapshotGroupSnapshotTag[];
18692
18916
  /**
18693
- * The total throughput of the cloud disk is the sum of its baseline throughput and additional throughput.
18917
+ * Cloud disk ID
18694
18918
  */
18695
- throughput: number;
18696
- }
18697
- interface VolumeExtraPerformance {
18919
+ volumeId: string;
18698
18920
  /**
18699
- * Type of extra performance. Value description: Balance: Balanced extra performance; IOPS: IOPS extra performance; Throughput: Throughput extra performance.
18921
+ * Cloud disk category. Values are as follows: - system: System disk - data: Data disk
18700
18922
  */
18701
- extraPerformanceTypeId: string;
18923
+ volumeKind: string;
18702
18924
  /**
18703
- * Extra IOPS of the disk.
18925
+ * Cloud disk name
18704
18926
  */
18705
- iops: number;
18927
+ volumeName: string;
18706
18928
  /**
18707
- * Extra throughput of the disk.
18929
+ * Cloud disk size (GiB)
18708
18930
  */
18709
- throughput: number;
18710
- }
18711
- interface VolumeTag {
18931
+ volumeSize: number;
18712
18932
  /**
18713
- * Tag key.
18933
+ * Cloud disk status. Values are as follows: - available: Available - attaching: Attaching - attached: Attached - detaching: Detaching - creating: Creating - deleting: Deleting - error: Error - extending: Expanding
18714
18934
  */
18715
- key: string;
18935
+ volumeStatus: string;
18936
+ /**
18937
+ * Cloud disk type. Values are as follows: - ESSD*PL0: Ultra-fast SSD cloud disk, PL0 specification - ESSD*FlexPL: Ultra-fast SSD cloud disk, FlexPL specification - TSSD_TL0: Throughput SSD cloud disk
18938
+ */
18939
+ volumeType: string;
18940
+ /**
18941
+ * Zone ID. If ultra-fast availability is enabled for the snapshot, you can create a cloud disk in this zone using the ultra-fast available snapshot
18942
+ */
18943
+ zoneId: string;
18944
+ }
18945
+ interface GetSnapshotGroupSnapshotTag {
18946
+ /**
18947
+ * Tag key
18948
+ */
18949
+ key: string;
18950
+ /**
18951
+ * Tag value
18952
+ */
18953
+ value: string;
18954
+ }
18955
+ interface GetSnapshotGroupTag {
18956
+ /**
18957
+ * Tag key
18958
+ */
18959
+ key: string;
18960
+ /**
18961
+ * Tag value
18962
+ */
18963
+ value: string;
18964
+ }
18965
+ interface GetSnapshotTag {
18966
+ /**
18967
+ * User tag key added to the resource. Naming rules: Cannot start with volc: or sys: in any case. Keys starting with volc: or sys: are reserved system tag keys and cannot be created. Only language characters, numbers, spaces, and the following English symbols are allowed: '_', '.', ':', '/', '=', '+', '-', '@'. Length must be between 1 and 128 characters.
18968
+ */
18969
+ key: string;
18970
+ /**
18971
+ * User tag value added to the resource. Naming rules: Only language characters, numbers, spaces, and the following English symbols are allowed: '_', '.', ':', '/', '=', '+', '-', '@'. Can be empty. Length must be between 0 and 256 characters.
18972
+ */
18973
+ value: string;
18974
+ }
18975
+ interface GetVolumeBaselinePerformance {
18976
+ /**
18977
+ * Total IOPS of the disk, which is the sum of the baseline IOPS and extra IOPS.
18978
+ */
18979
+ iops: number;
18980
+ /**
18981
+ * The total throughput of the cloud disk is the sum of its baseline throughput and additional throughput.
18982
+ */
18983
+ throughput: number;
18984
+ }
18985
+ interface GetVolumeExtraPerformance {
18986
+ /**
18987
+ * Type of extra performance. Value description: Balance: Balanced extra performance; IOPS: IOPS extra performance; Throughput: Throughput extra performance.
18988
+ */
18989
+ extraPerformanceTypeId: string;
18990
+ /**
18991
+ * Extra IOPS of the disk.
18992
+ */
18993
+ iops: number;
18994
+ /**
18995
+ * Extra throughput of the disk.
18996
+ */
18997
+ throughput: number;
18998
+ }
18999
+ interface GetVolumeTag {
19000
+ /**
19001
+ * Tag key.
19002
+ */
19003
+ key: string;
19004
+ /**
19005
+ * Tag value.
19006
+ */
19007
+ value: string;
19008
+ }
19009
+ interface GetVolumeTotalPerformance {
19010
+ /**
19011
+ * Total IOPS of the disk, which is the sum of the baseline IOPS and extra IOPS.
19012
+ */
19013
+ iops: number;
19014
+ /**
19015
+ * The total throughput of the cloud disk is the sum of its baseline throughput and additional throughput.
19016
+ */
19017
+ throughput: number;
19018
+ }
19019
+ interface SnapshotGroupSnapshot {
19020
+ /**
19021
+ * Snapshot creation time
19022
+ */
19023
+ creationTime: string;
19024
+ /**
19025
+ * Snapshot description
19026
+ */
19027
+ description: string;
19028
+ /**
19029
+ * Image ID
19030
+ */
19031
+ imageId: string;
19032
+ /**
19033
+ * Whether the snapshot has enabled ultra-fast availability. Values are as follows: - true: Ultra-fast availability enabled - false: Ultra-fast availability not enabled
19034
+ */
19035
+ instantAccess: boolean;
19036
+ /**
19037
+ * Dump progress
19038
+ */
19039
+ progress: number;
19040
+ /**
19041
+ * Project of the snapshot
19042
+ */
19043
+ projectName: string;
19044
+ /**
19045
+ * Retention days for automatic snapshots
19046
+ */
19047
+ retentionDays: number;
19048
+ /**
19049
+ * Whether the snapshot is shared with others. Values are as follows: - true: Snapshot is shared with others - false: Snapshot is not shared with others
19050
+ */
19051
+ shared: boolean;
19052
+ /**
19053
+ * Snapshot consistency group ID
19054
+ */
19055
+ snapshotGroupId: string;
19056
+ /**
19057
+ * Snapshot ID
19058
+ */
19059
+ snapshotId: string;
19060
+ /**
19061
+ * Snapshot name
19062
+ */
19063
+ snapshotName: string;
19064
+ /**
19065
+ * Snapshot type. Values are as follows: - user: Manual snapshot - auto: Automatic snapshot
19066
+ */
19067
+ snapshotType: string;
19068
+ /**
19069
+ * Snapshot status. Values are as follows: - available: Available - creating: Creating - rollbacking: Rolling back - deleted: Deleted - failed: Error
19070
+ */
19071
+ status: string;
19072
+ tags: outputs.storageebs.SnapshotGroupSnapshotTag[];
19073
+ /**
19074
+ * Cloud disk ID
19075
+ */
19076
+ volumeId: string;
19077
+ /**
19078
+ * Cloud disk category. Values are as follows: - system: System disk - data: Data disk
19079
+ */
19080
+ volumeKind: string;
19081
+ /**
19082
+ * Cloud disk name
19083
+ */
19084
+ volumeName: string;
19085
+ /**
19086
+ * Cloud disk size (GiB)
19087
+ */
19088
+ volumeSize: number;
19089
+ /**
19090
+ * Cloud disk status. Values are as follows: - available: Available - attaching: Attaching - attached: Attached - detaching: Detaching - creating: Creating - deleting: Deleting - error: Error - extending: Expanding
19091
+ */
19092
+ volumeStatus: string;
19093
+ /**
19094
+ * Cloud disk type. Values are as follows: - ESSD*PL0: Ultra-fast SSD cloud disk, PL0 specification - ESSD*FlexPL: Ultra-fast SSD cloud disk, FlexPL specification - TSSD_TL0: Throughput SSD cloud disk
19095
+ */
19096
+ volumeType: string;
19097
+ /**
19098
+ * Zone ID. If ultra-fast availability is enabled for the snapshot, you can create a cloud disk in this zone using the ultra-fast available snapshot
19099
+ */
19100
+ zoneId: string;
19101
+ }
19102
+ interface SnapshotGroupSnapshotTag {
19103
+ /**
19104
+ * Tag key
19105
+ */
19106
+ key: string;
19107
+ /**
19108
+ * Tag value
19109
+ */
19110
+ value: string;
19111
+ }
19112
+ interface SnapshotGroupTag {
19113
+ /**
19114
+ * Tag key
19115
+ */
19116
+ key: string;
19117
+ /**
19118
+ * Tag value
19119
+ */
19120
+ value: string;
19121
+ }
19122
+ interface SnapshotTag {
19123
+ /**
19124
+ * User tag key added to the resource. Naming rules: Cannot start with volc: or sys: in any case. Keys starting with volc: or sys: are reserved system tag keys and cannot be created. Only language characters, numbers, spaces, and the following English symbols are allowed: '_', '.', ':', '/', '=', '+', '-', '@'. Length must be between 1 and 128 characters.
19125
+ */
19126
+ key: string;
19127
+ /**
19128
+ * User tag value added to the resource. Naming rules: Only language characters, numbers, spaces, and the following English symbols are allowed: '_', '.', ':', '/', '=', '+', '-', '@'. Can be empty. Length must be between 0 and 256 characters.
19129
+ */
19130
+ value: string;
19131
+ }
19132
+ interface VolumeBaselinePerformance {
19133
+ /**
19134
+ * Total IOPS of the disk, which is the sum of the baseline IOPS and extra IOPS.
19135
+ */
19136
+ iops: number;
19137
+ /**
19138
+ * The total throughput of the cloud disk is the sum of its baseline throughput and additional throughput.
19139
+ */
19140
+ throughput: number;
19141
+ }
19142
+ interface VolumeExtraPerformance {
19143
+ /**
19144
+ * Type of extra performance. Value description: Balance: Balanced extra performance; IOPS: IOPS extra performance; Throughput: Throughput extra performance.
19145
+ */
19146
+ extraPerformanceTypeId: string;
19147
+ /**
19148
+ * Extra IOPS of the disk.
19149
+ */
19150
+ iops: number;
19151
+ /**
19152
+ * Extra throughput of the disk.
19153
+ */
19154
+ throughput: number;
19155
+ }
19156
+ interface VolumeTag {
19157
+ /**
19158
+ * Tag key.
19159
+ */
19160
+ key: string;
18716
19161
  /**
18717
19162
  * Tag value.
18718
19163
  */
@@ -18722,25 +19167,1368 @@ export declare namespace storageebs {
18722
19167
  /**
18723
19168
  * Total IOPS of the disk, which is the sum of the baseline IOPS and extra IOPS.
18724
19169
  */
18725
- iops: number;
19170
+ iops: number;
19171
+ /**
19172
+ * The total throughput of the cloud disk is the sum of its baseline throughput and additional throughput.
19173
+ */
19174
+ throughput: number;
19175
+ }
19176
+ }
19177
+ export declare namespace tls {
19178
+ interface AlarmNotifyGroupNoticeRule {
19179
+ /**
19180
+ * Whether there is an end node afterwards.
19181
+ */
19182
+ hasEndNode: boolean;
19183
+ /**
19184
+ * Condition for whether to proceed to the next level.
19185
+ */
19186
+ hasNext: boolean;
19187
+ receiverInfos: outputs.tls.AlarmNotifyGroupNoticeRuleReceiverInfo[];
19188
+ /**
19189
+ * Rule node. JSON format.
19190
+ */
19191
+ ruleNode: string;
19192
+ }
19193
+ interface AlarmNotifyGroupNoticeRuleReceiverInfo {
19194
+ /**
19195
+ * Alarm content template ID.
19196
+ */
19197
+ alarmContentTemplateId: string;
19198
+ /**
19199
+ * User group name to notify when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19200
+ */
19201
+ alarmWebhookAtGroups: string[];
19202
+ /**
19203
+ * Username to notify when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19204
+ */
19205
+ alarmWebhookAtUsers: string[];
19206
+ /**
19207
+ * Alarm webhook integration configuration ID.
19208
+ */
19209
+ alarmWebhookIntegrationId: string;
19210
+ /**
19211
+ * Name of the alarm Webhook integration configuration.
19212
+ */
19213
+ alarmWebhookIntegrationName: string;
19214
+ /**
19215
+ * Whether to notify everyone when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19216
+ */
19217
+ alarmWebhookIsAtAll: boolean;
19218
+ /**
19219
+ * End time for receiving alarm notifications. Uses 24-hour format: HH:mm:ss, with a valid range of 00:00:00–23:59:59. StartTime cannot be greater than EndTime.
19220
+ */
19221
+ endTime: string;
19222
+ /**
19223
+ * Custom WebHook request body. It is recommended to set the request body according to the callback interface requirements of the corresponding service.
19224
+ */
19225
+ generalWebhookBody: string;
19226
+ generalWebhookHeaders: outputs.tls.AlarmNotifyGroupNoticeRuleReceiverInfoGeneralWebhookHeader[];
19227
+ /**
19228
+ * Custom callback method for the interface. Only POST or PUT is supported.
19229
+ */
19230
+ generalWebhookMethod: string;
19231
+ /**
19232
+ * Custom callback URL for the interface.
19233
+ */
19234
+ generalWebhookUrl: string;
19235
+ /**
19236
+ * Notification channels. Supports one or more channels. Options: Email, Sms, Phone, GeneralWebhook, Lark, DingTalk, WeChat.
19237
+ */
19238
+ receiverChannels: string[];
19239
+ /**
19240
+ * IAM user or user group name.
19241
+ */
19242
+ receiverNames: string[];
19243
+ /**
19244
+ * Recipient type. Options: User: IAM user; UserGroup: IAM user group.
19245
+ */
19246
+ receiverType: string;
19247
+ /**
19248
+ * Alarm notification start time. Uses 24-hour format (HH:mm:ss), valid range is 00:00:00–23:59:59. StartTime cannot be later than EndTime.
19249
+ */
19250
+ startTime: string;
19251
+ }
19252
+ interface AlarmNotifyGroupNoticeRuleReceiverInfoGeneralWebhookHeader {
19253
+ /**
19254
+ * Custom request header key.
19255
+ */
19256
+ key: string;
19257
+ /**
19258
+ * Custom request header value.
19259
+ */
19260
+ value: string;
19261
+ }
19262
+ interface AlarmNotifyGroupReceiver {
19263
+ /**
19264
+ * Alarm content template ID.
19265
+ */
19266
+ alarmContentTemplateId: string;
19267
+ /**
19268
+ * User group name to notify when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19269
+ */
19270
+ alarmWebhookAtGroups: string[];
19271
+ /**
19272
+ * Username to notify when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19273
+ */
19274
+ alarmWebhookAtUsers: string[];
19275
+ /**
19276
+ * Alarm webhook integration configuration ID.
19277
+ */
19278
+ alarmWebhookIntegrationId: string;
19279
+ /**
19280
+ * Name of the alarm Webhook integration configuration.
19281
+ */
19282
+ alarmWebhookIntegrationName: string;
19283
+ /**
19284
+ * Whether to notify everyone when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19285
+ */
19286
+ alarmWebhookIsAtAll: boolean;
19287
+ /**
19288
+ * End time for receiving alarm notifications. Uses 24-hour format: HH:mm:ss, with a valid range of 00:00:00–23:59:59. StartTime cannot be greater than EndTime.
19289
+ */
19290
+ endTime: string;
19291
+ /**
19292
+ * Custom WebHook request body. It is recommended to set the request body according to the callback interface requirements of the corresponding service.
19293
+ */
19294
+ generalWebhookBody: string;
19295
+ generalWebhookHeaders: outputs.tls.AlarmNotifyGroupReceiverGeneralWebhookHeader[];
19296
+ /**
19297
+ * Custom callback method for the interface. Only POST or PUT is supported.
19298
+ */
19299
+ generalWebhookMethod: string;
19300
+ /**
19301
+ * Custom callback URL for the interface.
19302
+ */
19303
+ generalWebhookUrl: string;
19304
+ /**
19305
+ * Notification channels. Supports one or more channels. Options: Email, Sms, Phone, GeneralWebhook, Lark, DingTalk, WeChat.
19306
+ */
19307
+ receiverChannels: string[];
19308
+ /**
19309
+ * IAM user or user group name.
19310
+ */
19311
+ receiverNames: string[];
19312
+ /**
19313
+ * Recipient type. Options: User: IAM user; UserGroup: IAM user group.
19314
+ */
19315
+ receiverType: string;
19316
+ /**
19317
+ * Alarm notification start time. Uses 24-hour format (HH:mm:ss), valid range is 00:00:00–23:59:59. StartTime cannot be later than EndTime.
19318
+ */
19319
+ startTime: string;
19320
+ }
19321
+ interface AlarmNotifyGroupReceiverGeneralWebhookHeader {
19322
+ /**
19323
+ * Custom request header key.
19324
+ */
19325
+ key: string;
19326
+ /**
19327
+ * Custom request header value.
19328
+ */
19329
+ value: string;
19330
+ }
19331
+ interface GetAlarmNotifyGroupNoticeRule {
19332
+ /**
19333
+ * Whether there is an end node afterwards.
19334
+ */
19335
+ hasEndNode: boolean;
19336
+ /**
19337
+ * Condition for whether to proceed to the next level.
19338
+ */
19339
+ hasNext: boolean;
19340
+ /**
19341
+ * Notification channel information.
19342
+ */
19343
+ receiverInfos: outputs.tls.GetAlarmNotifyGroupNoticeRuleReceiverInfo[];
19344
+ /**
19345
+ * Rule node. JSON format.
19346
+ */
19347
+ ruleNode: string;
19348
+ }
19349
+ interface GetAlarmNotifyGroupNoticeRuleReceiverInfo {
19350
+ /**
19351
+ * Alarm content template ID.
19352
+ */
19353
+ alarmContentTemplateId: string;
19354
+ /**
19355
+ * User group name to notify when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19356
+ */
19357
+ alarmWebhookAtGroups: string[];
19358
+ /**
19359
+ * Username to notify when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19360
+ */
19361
+ alarmWebhookAtUsers: string[];
19362
+ /**
19363
+ * Alarm webhook integration configuration ID.
19364
+ */
19365
+ alarmWebhookIntegrationId: string;
19366
+ /**
19367
+ * Name of the alarm Webhook integration configuration.
19368
+ */
19369
+ alarmWebhookIntegrationName: string;
19370
+ /**
19371
+ * Whether to notify everyone when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19372
+ */
19373
+ alarmWebhookIsAtAll: boolean;
19374
+ /**
19375
+ * End time for receiving alarm notifications. Uses 24-hour format: HH:mm:ss, with a valid range of 00:00:00–23:59:59. StartTime cannot be greater than EndTime.
19376
+ */
19377
+ endTime: string;
19378
+ /**
19379
+ * Custom WebHook request body. It is recommended to set the request body according to the callback interface requirements of the corresponding service.
19380
+ */
19381
+ generalWebhookBody: string;
19382
+ /**
19383
+ * Custom callback request headers for the interface.
19384
+ */
19385
+ generalWebhookHeaders: outputs.tls.GetAlarmNotifyGroupNoticeRuleReceiverInfoGeneralWebhookHeader[];
19386
+ /**
19387
+ * Custom callback method for the interface. Only POST or PUT is supported.
19388
+ */
19389
+ generalWebhookMethod: string;
19390
+ /**
19391
+ * Custom callback URL for the interface.
19392
+ */
19393
+ generalWebhookUrl: string;
19394
+ /**
19395
+ * Notification channels. Supports one or more channels. Options: Email, Sms, Phone, GeneralWebhook, Lark, DingTalk, WeChat.
19396
+ */
19397
+ receiverChannels: string[];
19398
+ /**
19399
+ * IAM user or user group name.
19400
+ */
19401
+ receiverNames: string[];
19402
+ /**
19403
+ * Recipient type. Options: User: IAM user; UserGroup: IAM user group.
19404
+ */
19405
+ receiverType: string;
19406
+ /**
19407
+ * Alarm notification start time. Uses 24-hour format (HH:mm:ss), valid range is 00:00:00–23:59:59. StartTime cannot be later than EndTime.
19408
+ */
19409
+ startTime: string;
19410
+ }
19411
+ interface GetAlarmNotifyGroupNoticeRuleReceiverInfoGeneralWebhookHeader {
19412
+ /**
19413
+ * Custom request header key.
19414
+ */
19415
+ key: string;
19416
+ /**
19417
+ * Custom request header value.
19418
+ */
19419
+ value: string;
19420
+ }
19421
+ interface GetAlarmNotifyGroupReceiver {
19422
+ /**
19423
+ * Alarm content template ID.
19424
+ */
19425
+ alarmContentTemplateId: string;
19426
+ /**
19427
+ * User group name to notify when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19428
+ */
19429
+ alarmWebhookAtGroups: string[];
19430
+ /**
19431
+ * Username to notify when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19432
+ */
19433
+ alarmWebhookAtUsers: string[];
19434
+ /**
19435
+ * Alarm webhook integration configuration ID.
19436
+ */
19437
+ alarmWebhookIntegrationId: string;
19438
+ /**
19439
+ * Name of the alarm Webhook integration configuration.
19440
+ */
19441
+ alarmWebhookIntegrationName: string;
19442
+ /**
19443
+ * Whether to notify everyone when sending notifications to Feishu, DingTalk, or WeCom via Webhook integration.
19444
+ */
19445
+ alarmWebhookIsAtAll: boolean;
19446
+ /**
19447
+ * End time for receiving alarm notifications. Uses 24-hour format: HH:mm:ss, with a valid range of 00:00:00–23:59:59. StartTime cannot be greater than EndTime.
19448
+ */
19449
+ endTime: string;
19450
+ /**
19451
+ * Custom WebHook request body. It is recommended to set the request body according to the callback interface requirements of the corresponding service.
19452
+ */
19453
+ generalWebhookBody: string;
19454
+ /**
19455
+ * Custom callback request headers for the interface.
19456
+ */
19457
+ generalWebhookHeaders: outputs.tls.GetAlarmNotifyGroupReceiverGeneralWebhookHeader[];
19458
+ /**
19459
+ * Custom callback method for the interface. Only POST or PUT is supported.
19460
+ */
19461
+ generalWebhookMethod: string;
19462
+ /**
19463
+ * Custom callback URL for the interface.
19464
+ */
19465
+ generalWebhookUrl: string;
19466
+ /**
19467
+ * Notification channels. Supports one or more channels. Options: Email, Sms, Phone, GeneralWebhook, Lark, DingTalk, WeChat.
19468
+ */
19469
+ receiverChannels: string[];
19470
+ /**
19471
+ * IAM user or user group name.
19472
+ */
19473
+ receiverNames: string[];
19474
+ /**
19475
+ * Recipient type. Options: User: IAM user; UserGroup: IAM user group.
19476
+ */
19477
+ receiverType: string;
19478
+ /**
19479
+ * Alarm notification start time. Uses 24-hour format (HH:mm:ss), valid range is 00:00:00–23:59:59. StartTime cannot be later than EndTime.
19480
+ */
19481
+ startTime: string;
19482
+ }
19483
+ interface GetAlarmNotifyGroupReceiverGeneralWebhookHeader {
19484
+ /**
19485
+ * Custom request header key.
19486
+ */
19487
+ key: string;
19488
+ /**
19489
+ * Custom request header value.
19490
+ */
19491
+ value: string;
19492
+ }
19493
+ interface GetImportTaskImportSourceInfo {
19494
+ /**
19495
+ * Kafka data source information. When sourceType is kafka, the KafkaSourceInfo field is required
19496
+ */
19497
+ kafkaSourceInfo: outputs.tls.GetImportTaskImportSourceInfoKafkaSourceInfo;
19498
+ /**
19499
+ * TOS data source information. When sourceType is tos, the TosSourceInfo field is required.
19500
+ */
19501
+ tosSourceInfo: outputs.tls.GetImportTaskImportSourceInfoTosSourceInfo;
19502
+ }
19503
+ interface GetImportTaskImportSourceInfoKafkaSourceInfo {
19504
+ /**
19505
+ * Data encoding format. Available options: UTF-8, GBK.
19506
+ */
19507
+ encode: string;
19508
+ /**
19509
+ * Kafka consumer group. If not specified, the system will automatically create a Kafka consumer group.
19510
+ */
19511
+ group: string;
19512
+ /**
19513
+ * The service addresses for different types of Kafka clusters vary. Details are as follows: Message Queue Kafka Edition: Use the access point of the Kafka instance. For more information, see Access Point. If the Kafka instance and the Log Service Project are in the same region, you can use private network access; otherwise, use public network access. Self-hosted Kafka clusters: Use the IP address and port number or the domain name and port number of the Kafka Broker. Only public network access is supported. Separate multiple service addresses with a comma (,).
19514
+ */
19515
+ host: string;
19516
+ /**
19517
+ * Starting position for data import. Options: 0: Earliest time, start importing from the first record in the specified Kafka Topic. 1: Latest time, start importing from the most recently generated record in the specified Kafka Topic.
19518
+ */
19519
+ initialOffset: number;
19520
+ /**
19521
+ * If you are using Message Queue Kafka Edition, set this to the Kafka instance ID.
19522
+ */
19523
+ instanceId: string;
19524
+ /**
19525
+ * Whether to enable authentication. If you use a public service address, it is recommended to enable authentication.
19526
+ */
19527
+ isNeedAuth: boolean;
19528
+ /**
19529
+ * Password authentication mechanism. Available options: PLAIN, SCRAM-SHA-256, and SCRAM-SHA-512.
19530
+ */
19531
+ mechanism: string;
19532
+ /**
19533
+ * Kafka SASL user password for authentication.
19534
+ */
19535
+ password: string;
19536
+ /**
19537
+ * Secure transmission protocol. Options include plaintext, sasl*ssl, ssl, and sasl*plaintext
19538
+ */
19539
+ protocol: string;
19540
+ /**
19541
+ * Specify log time. Options: 0: Use Kafka message timestamp. 1: Use current system time.
19542
+ */
19543
+ timeSourceDefault: number;
19544
+ /**
19545
+ * Kafka Topic name. Separate multiple Kafka Topics with commas (,).
19546
+ */
19547
+ topic: string;
19548
+ /**
19549
+ * Kafka SASL username for authentication.
19550
+ */
19551
+ username: string;
19552
+ }
19553
+ interface GetImportTaskImportSourceInfoTosSourceInfo {
19554
+ /**
19555
+ * TOS bucket name
19556
+ */
19557
+ bucket: string;
19558
+ /**
19559
+ * Compression mode for data in the TOS bucket. none: No compression. snappy: Compress using snappy. gzip: Compress using gzip. lz4: Compress using lz4.
19560
+ */
19561
+ compressType: string;
19562
+ /**
19563
+ * Path of the file to be imported in the TOS bucket.
19564
+ */
19565
+ prefix: string;
19566
+ /**
19567
+ * Region where the TOS bucket is located. Cross-region data import is supported
19568
+ */
19569
+ region: string;
19570
+ }
19571
+ interface GetImportTaskTargetInfo {
19572
+ /**
19573
+ * Log extraction rule.
19574
+ */
19575
+ extractRule: outputs.tls.GetImportTaskTargetInfoExtractRule;
19576
+ /**
19577
+ * Log sample. When LogType is set to multiline_log, you must configure log samples. It is recommended to provide more than two log entries as examples to ensure the regular expression matches the first line of each log. Use real samples from the production environment.
19578
+ */
19579
+ logSample: string;
19580
+ /**
19581
+ * Specify log parsing type during import. delimiter*log: CSV type. multiline*log: multiline full text type. minimalist*log: single line full text type. json*log: JSON type.
19582
+ */
19583
+ logType: string;
19584
+ /**
19585
+ * Region.
19586
+ */
19587
+ region: string;
19588
+ }
19589
+ interface GetImportTaskTargetInfoExtractRule {
19590
+ /**
19591
+ * Basic content of log extraction rules.
19592
+ */
19593
+ extractRule: outputs.tls.GetImportTaskTargetInfoExtractRuleExtractRule;
19594
+ /**
19595
+ * Number of skipped rows. Only valid when the log type is delimiterLog and the import type is tos.
19596
+ */
19597
+ skipLineCount: number;
19598
+ /**
19599
+ * Time extraction regular expression, used to extract the time value from the TimeKey field and parse it as the collection time
19600
+ */
19601
+ timeExtractRegex: string;
19602
+ /**
19603
+ * Time zone. Supports machine time zone (default) and custom time zone. Custom time zone supports GMT and UTC. GMT format: GMT+08:00. UTC format: Asia/Shanghai.
19604
+ */
19605
+ timeZone: string;
19606
+ }
19607
+ interface GetImportTaskTargetInfoExtractRuleExtractRule {
19608
+ /**
19609
+ * Regular expression for identifying the first line of each log. The matched part is treated as the start of the log. When LogType is set to multiline_log, you must configure a log sample
19610
+ */
19611
+ beginRegex: string;
19612
+ /**
19613
+ * Delimiter. Only valid when LogType is delimiter_log.
19614
+ */
19615
+ delimiter: string;
19616
+ /**
19617
+ * Enable nanoseconds.
19618
+ */
19619
+ enableNanosecond: boolean;
19620
+ /**
19621
+ * Filter key regular expression.
19622
+ */
19623
+ filterKeyRegexes: outputs.tls.GetImportTaskTargetInfoExtractRuleExtractRuleFilterKeyRegex[];
19624
+ /**
19625
+ * List of log field names (Key). Valid only when LogType is delimiter_log. Supports up to 100 field names. Duplicate field names are not allowed, and all field names cannot be left blank
19626
+ */
19627
+ keys: string[];
19628
+ /**
19629
+ * Log regular expression
19630
+ */
19631
+ logRegex: string;
19632
+ /**
19633
+ * Log template.
19634
+ */
19635
+ logTemplate: outputs.tls.GetImportTaskTargetInfoExtractRuleExtractRuleLogTemplate;
19636
+ /**
19637
+ * Quotation mark. Content enclosed by the quotation mark will not be separated and will be parsed as a complete field. Only valid when LogType is delimiter_log.
19638
+ */
19639
+ quote: string;
19640
+ /**
19641
+ * Parsing format for the time field. If you use a specified time field in the log as the log timestamp, you must fill in TimeKey and TimeFormat. TimeKey and TimeFormat must be paired. For configuration details, see time format.
19642
+ */
19643
+ timeFormat: string;
19644
+ /**
19645
+ * Name of the log time field. If you use a specific time field in the log as the log timestamp, you must provide both TimeKey and TimeFormat. TimeKey and TimeFormat must appear in pairs
19646
+ */
19647
+ timeKey: string;
19648
+ /**
19649
+ * Time sample. Used to verify whether the entered time parsing format is correct
19650
+ */
19651
+ timeSample: string;
19652
+ /**
19653
+ * When uploading logs that failed to parse, specify the key name for the failed logs. UnMatchUpLoadSwitch=true and UnMatchLogKey must be used together.
19654
+ */
19655
+ unMatchLogKey: string;
19656
+ /**
19657
+ * Whether to upload logs that failed to parse. UnMatchUpLoadSwitch=true and UnMatchLogKey must be paired. true: Upload logs that failed to parse. false: Do not upload logs that failed to parse.
19658
+ */
19659
+ unMatchUpLoadSwitch: boolean;
19660
+ }
19661
+ interface GetImportTaskTargetInfoExtractRuleExtractRuleFilterKeyRegex {
19662
+ /**
19663
+ * Key.
19664
+ */
19665
+ key: string;
19666
+ /**
19667
+ * Regular expression.
19668
+ */
19669
+ regex: string;
19670
+ }
19671
+ interface GetImportTaskTargetInfoExtractRuleExtractRuleLogTemplate {
19672
+ /**
19673
+ * Format.
19674
+ */
19675
+ format: string;
19676
+ /**
19677
+ * Type
19678
+ */
19679
+ type: string;
19680
+ }
19681
+ interface GetImportTaskTaskStatistics {
19682
+ /**
19683
+ * Total resource bytes enumerated
19684
+ */
19685
+ bytesTotal: number;
19686
+ /**
19687
+ * Bytes transferred.
19688
+ */
19689
+ bytesTransferred: number;
19690
+ /**
19691
+ * Number of resources failed to import.
19692
+ */
19693
+ failed: number;
19694
+ /**
19695
+ * Number of resources not found.
19696
+ */
19697
+ notExist: number;
19698
+ /**
19699
+ * Number of resources skipped during import
19700
+ */
19701
+ skipped: number;
19702
+ /**
19703
+ * Task status. Status of the import task. Preparing: Preparing for import. Importing: Importing data. Success: Import completed successfully. Failed: Import failed. Stopped: Import paused.
19704
+ */
19705
+ taskStatus: string;
19706
+ /**
19707
+ * Total number of resources enumerated.
19708
+ */
19709
+ total: number;
19710
+ /**
19711
+ * Number of records transferred.
19712
+ */
19713
+ transferred: number;
19714
+ }
19715
+ interface GetIndexFullText {
19716
+ /**
19717
+ * Case sensitivity. true: Case sensitive. false: Not case sensitive.
19718
+ */
19719
+ caseSensitive: boolean;
19720
+ /**
19721
+ * Token separators for the full-text index. Each character in the string represents a token separator. Length: 1–256 bytes. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
19722
+ */
19723
+ delimiter: string;
19724
+ /**
19725
+ * When searching, specify whether to segment Chinese content in logs according to Chinese grammar. The default is false. true: For Chinese characters in logs, segment the log according to common Chinese grammar rules. Custom segmentation symbols for Chinese content are not supported. For non-Chinese characters in logs, segment the log using the segmentation symbols specified in the parameter. false: Segment the log using the segmentation symbols specified in the parameter.
19726
+ */
19727
+ includeChinese: boolean;
19728
+ }
19729
+ interface GetIndexKeyValue {
19730
+ /**
19731
+ * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
19732
+ */
19733
+ key: string;
19734
+ /**
19735
+ * Field description information required for configuring key-value indexes.
19736
+ */
19737
+ value: outputs.tls.GetIndexKeyValueValue;
19738
+ }
19739
+ interface GetIndexKeyValueValue {
19740
+ /**
19741
+ * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
19742
+ */
19743
+ autoIndexFlag: boolean;
19744
+ /**
19745
+ * Whether to distinguish case. Default is false.
19746
+ */
19747
+ caseSensitive: boolean;
19748
+ /**
19749
+ * Token separators for the field. Default is empty (""). Each character in the string represents a token separator. Length: 0–256 bytes. If the length is 0, segmentation is disabled. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
19750
+ */
19751
+ delimiter: string;
19752
+ /**
19753
+ * When searching, determines whether to segment Chinese log content according to Chinese syntax. Enabled: Chinese characters in logs are segmented based on common Chinese syntax; custom segmentation for Chinese content is not supported. Non-Chinese characters in logs are segmented using the token separators specified in the parameter. Disabled: Logs are segmented using the token separators specified in the parameter.
19754
+ */
19755
+ includeChinese: boolean;
19756
+ /**
19757
+ * Create indexes for all fields with text values in the JSON field.
19758
+ */
19759
+ indexAll: boolean;
19760
+ /**
19761
+ * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
19762
+ */
19763
+ indexSqlAll: boolean;
19764
+ /**
19765
+ * When ValueType is json, you can configure key-value indexes for JSON subfields using the JsonKeys field, which is an array of KeyValueInfo. Each element in the JsonKeys list is a KeyValueInfo, and you can further nest text, long, and double type subfields. For subfields in JSON key-value indexes, use . to indicate the hierarchy between JSON fields. For example, the JSON field namelist contains a text-type subfield totalcount and a JSON-type info, which itself contains the field name. Each field name should be configured as totalcount and info.name, respectively. The SqlFlag setting for subfield key-value indexes defined in JsonKeys must match their parent field, meaning they must be enabled or disabled together. The default is disabled. Subfields do not support separate settings for Delimiter, CaseSensitive, or IncludeChinese. For text-type fields, CaseSensitive, Delimiter, and IncludeChinese always inherit the parent field settings. This is only set when ValueType is json.
19766
+ */
19767
+ jsonKeys: outputs.tls.GetIndexKeyValueValueJsonKey[];
19768
+ /**
19769
+ * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
19770
+ */
19771
+ sqlFlag: boolean;
19772
+ /**
19773
+ * Field type. Currently supports long, double, text, and json. The long and double types do not support configuring delimiters, including Chinese, or case sensitivity. Only the json type supports further configuration of JsonKeys subfields.
19774
+ */
19775
+ valueType: string;
19776
+ }
19777
/**
 * A single KeyValueInfo entry describing a key-value index configured on a JSON subfield.
 */
interface GetIndexKeyValueValueJsonKey {
    /**
     * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
     */
    key: string;
    /**
     * Field description information required for configuring key-value indexes.
     */
    value: outputs.tls.GetIndexKeyValueValueJsonKeyValue;
}
19787
/**
 * Index configuration (field description) for one JSON-subfield key-value index entry.
 */
interface GetIndexKeyValueValueJsonKeyValue {
    /**
     * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
     */
    autoIndexFlag: boolean;
    /**
     * Whether to distinguish case. Default is false.
     */
    caseSensitive: boolean;
    /**
     * Token separators for the field. Default is empty (""). Each character in the string represents a token separator. Length: 0–256 bytes. If the length is 0, segmentation is disabled. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
     */
    delimiter: string;
    /**
     * When searching, determines whether to segment Chinese log content according to Chinese syntax. Enabled: Chinese characters in logs are segmented based on common Chinese syntax; custom segmentation for Chinese content is not supported. Non-Chinese characters in logs are segmented using the token separators specified in the parameter. Disabled: Logs are segmented using the token separators specified in the parameter.
     */
    includeChinese: boolean;
    /**
     * Create indexes for all fields with text values in the JSON field.
     */
    indexAll: boolean;
    /**
     * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
     */
    indexSqlAll: boolean;
    /**
     * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
     */
    sqlFlag: boolean;
    /**
     * Field type. Currently supports long, double, text, and json. The long and double types do not support configuring delimiters, including Chinese, or case sensitivity. Only the json type supports further configuration of JsonKeys subfields.
     */
    valueType: string;
}
19821
/**
 * A single key-value index entry (KeyValueInfo) in the user-inner key-value index list.
 */
interface GetIndexUserInnerKeyValue {
    /**
     * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
     */
    key: string;
    /**
     * Field description information required for configuring key-value indexes.
     */
    value: outputs.tls.GetIndexUserInnerKeyValueValue;
}
19831
/**
 * Index configuration (field description) for one user-inner key-value index entry.
 * When valueType is "json", nested subfield indexes can be declared via jsonKeys.
 */
interface GetIndexUserInnerKeyValueValue {
    /**
     * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
     */
    autoIndexFlag: boolean;
    /**
     * Whether to distinguish case. Default is false.
     */
    caseSensitive: boolean;
    /**
     * Token separators for the field. Default is empty (""). Each character in the string represents a token separator. Length: 0–256 bytes. If the length is 0, segmentation is disabled. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
     */
    delimiter: string;
    /**
     * When searching, determines whether to segment Chinese log content according to Chinese syntax. Enabled: Chinese characters in logs are segmented based on common Chinese syntax; custom segmentation for Chinese content is not supported. Non-Chinese characters in logs are segmented using the token separators specified in the parameter. Disabled: Logs are segmented using the token separators specified in the parameter.
     */
    includeChinese: boolean;
    /**
     * Create indexes for all fields with text values in the JSON field.
     */
    indexAll: boolean;
    /**
     * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
     */
    indexSqlAll: boolean;
    /**
     * When ValueType is json, you can configure key-value indexes for JSON subfields using the JsonKeys field, which is an array of KeyValueInfo. Each element in the JsonKeys list is a KeyValueInfo, and you can further nest text, long, and double type subfields. For subfields in JSON key-value indexes, use . to indicate the hierarchy between JSON fields. For example, the JSON field namelist contains a text-type subfield totalcount and a JSON-type info, which itself contains the field name. Each field name should be configured as totalcount and info.name, respectively. The SqlFlag setting for subfield key-value indexes defined in JsonKeys must match their parent field, meaning they must be enabled or disabled together. The default is disabled. Subfields do not support separate settings for Delimiter, CaseSensitive, or IncludeChinese. For text-type fields, CaseSensitive, Delimiter, and IncludeChinese always inherit the parent field settings. This is only set when ValueType is json.
     */
    jsonKeys: outputs.tls.GetIndexUserInnerKeyValueValueJsonKey[];
    /**
     * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
     */
    sqlFlag: boolean;
    /**
     * Field type. Currently supports long, double, text, and json. The long and double types do not support configuring delimiters, including Chinese, or case sensitivity. Only the json type supports further configuration of JsonKeys subfields.
     */
    valueType: string;
}
19869
/**
 * A single KeyValueInfo entry describing a key-value index on a JSON subfield of a
 * user-inner key-value index.
 */
interface GetIndexUserInnerKeyValueValueJsonKey {
    /**
     * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
     */
    key: string;
    /**
     * Field description information required for configuring key-value indexes.
     */
    value: outputs.tls.GetIndexUserInnerKeyValueValueJsonKeyValue;
}
19879
/**
 * Index configuration (field description) for one JSON-subfield entry of a user-inner
 * key-value index. This is a leaf: it carries no nested jsonKeys.
 */
interface GetIndexUserInnerKeyValueValueJsonKeyValue {
    /**
     * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
     */
    autoIndexFlag: boolean;
    /**
     * Whether to distinguish case. Default is false.
     */
    caseSensitive: boolean;
    /**
     * Token separators for the field. Default is empty (""). Each character in the string represents a token separator. Length: 0–256 bytes. If the length is 0, segmentation is disabled. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
     */
    delimiter: string;
    /**
     * When searching, determines whether to segment Chinese log content according to Chinese syntax. Enabled: Chinese characters in logs are segmented based on common Chinese syntax; custom segmentation for Chinese content is not supported. Non-Chinese characters in logs are segmented using the token separators specified in the parameter. Disabled: Logs are segmented using the token separators specified in the parameter.
     */
    includeChinese: boolean;
    /**
     * Create indexes for all fields with text values in the JSON field.
     */
    indexAll: boolean;
    /**
     * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
     */
    indexSqlAll: boolean;
    /**
     * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
     */
    sqlFlag: boolean;
    /**
     * Field type. Currently supports long, double, text, and json. The long and double types do not support configuring delimiters, including Chinese, or case sensitivity. Only the json type supports further configuration of JsonKeys subfields.
     */
    valueType: string;
}
19913
/**
 * A single tag (key-value pair) attached to a TLS project.
 */
interface GetProjectTag {
    /**
     * Tag key.
     */
    key: string;
    /**
     * Tag value.
     */
    value: string;
}
19923
/**
 * Container collection rule of a TLS collection rule: which containers to collect from
 * (allow/deny lists by container name, environment variable, and label), which stream(s)
 * to read, and optional Kubernetes-specific matching.
 */
interface GetRuleContainerRule {
    /**
     * Container name to collect. If no container name is specified, all containers in the machine group will be collected. Supports regex matching; for example, setting the container name to ^(container-test)$ will collect all containers named container-test.
     */
    containerNameRegex: string;
    /**
     * Whether to add environment variables as log tags to the raw log data. When enabled, the log service adds container environment variable fields to the logs. Setting multiple key-value pairs adds multiple fields. For example, set Key to source and Value to data_source. If the container has the environment variable source=DC, the log will include the field __tag__data_source__: DC.
     */
    envTags: outputs.tls.GetRuleContainerRuleEnvTag[];
    /**
     * The container environment variable denylist specifies which containers are excluded from collection. If the denylist is not enabled, all containers are collected. When the container environment variable denylist is enabled, Key is required and Value is optional. If Value is empty, all containers whose environment variables contain the Key are excluded from collection. If Value is not empty, only containers with a matching key-value pair are excluded, meaning only containers whose environment variables contain the Key and whose value matches the Value are excluded. Value supports regular expressions. For example, if Key is set to module and Value is set to ^(tcp|udp)$, containers with environment variables module:tcp or module:udp are excluded from collection. Multiple key-value pairs are evaluated with a logical OR, so any container environment variable matching any key-value pair will be excluded from collection. When the container environment variable denylist is enabled, Keys must be unique.
     */
    excludeContainerEnvRegexes: outputs.tls.GetRuleContainerRuleExcludeContainerEnvRegex[];
    /**
     * Container label denylist specifies containers to exclude from collection. If the denylist is not enabled, all containers will be collected. If the container label denylist is enabled, Key is required and Value is optional. If Value is empty, all containers with the Key in their label will be excluded. If Value is not empty, only containers matching the key-value pair will be excluded—that is, containers with the Key in their label and a Value matching the specified pattern. Value supports regex matching; for example, set Key to app and Value to ^(test1|test2)$ to exclude containers with labels app:test1 and app:test2. Multiple key-value pairs use logical OR; any container label matching any pair will be excluded from collection. When the container label denylist is enabled, Keys must not be duplicated.
     */
    excludeContainerLabelRegexes: outputs.tls.GetRuleContainerRuleExcludeContainerLabelRegex[];
    /**
     * The container environment variable allowlist specifies which containers to collect using container environment variables. If the allowlist is not enabled, all containers are collected. When the container environment variable allowlist is enabled, you must enter key-value pairs. Key is required; Value is optional. If Value is empty, all containers containing the Key in their environment variables are collected. If Value is not empty, only containers with the Key in their environment variables and a Value matching the specified value are collected. Value supports regular expression matching. For example, if Key is set to module and Value is set to ^(tcp|udp)$, only containers with module:tcp or module:udp in their environment variables are collected. The logical relationship between multiple key-value pairs is OR, meaning any container environment variable matching any key-value pair will be included in the collection scope. When the container environment variable allowlist is enabled, Keys must not be duplicated.
     */
    includeContainerEnvRegexes: outputs.tls.GetRuleContainerRuleIncludeContainerEnvRegex[];
    /**
     * The container label allowlist specifies which containers to collect based on container labels. If the allowlist is not enabled, all containers are collected. When the container label allowlist is enabled, you must specify key-value pairs. Key is required and Value is optional. If Value is empty, all containers whose labels contain the Key are collected. If Value is not empty, only containers whose labels contain the Key and whose value matches the Value are collected. Value supports regular expressions. For example, if Key is set to app and Value is set to ^(test1|test2)$, only containers with labels app:test1 or app:test2 are collected. Multiple key-value pairs are evaluated with a logical OR, so any container label matching any key-value pair will be included in the collection scope. When the container label allowlist is enabled, Keys must be unique.
     */
    includeContainerLabelRegexes: outputs.tls.GetRuleContainerRuleIncludeContainerLabelRegex[];
    /**
     * Kubernetes container collection rules.
     */
    kubernetesRule: outputs.tls.GetRuleContainerRuleKubernetesRule;
    /**
     * Collection information. stdout: Collects container standard output (stdout). stderr: Collects container standard error (stderr). all: Collects both container standard output (stdout) and standard error (stderr).
     */
    stream: string;
}
19957
/**
 * Key-value pair used as an environment-variable log tag
 * (see GetRuleContainerRule.envTags).
 */
interface GetRuleContainerRuleEnvTag {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
19967
/**
 * Key-value pair of the container environment-variable denylist
 * (see GetRuleContainerRule.excludeContainerEnvRegexes).
 */
interface GetRuleContainerRuleExcludeContainerEnvRegex {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
19977
/**
 * Key-value pair of the container label denylist
 * (see GetRuleContainerRule.excludeContainerLabelRegexes).
 */
interface GetRuleContainerRuleExcludeContainerLabelRegex {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
19987
/**
 * Key-value pair of the container environment-variable allowlist
 * (see GetRuleContainerRule.includeContainerEnvRegexes).
 */
interface GetRuleContainerRuleIncludeContainerEnvRegex {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
19997
/**
 * Key-value pair of the container label allowlist
 * (see GetRuleContainerRule.includeContainerLabelRegexes).
 */
interface GetRuleContainerRuleIncludeContainerLabelRegex {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
20007
/**
 * Kubernetes-specific container collection rules: Pod/Namespace/workload matching and
 * optional promotion of Pod labels/annotations to log tags.
 */
interface GetRuleContainerRuleKubernetesRule {
    /**
     * Whether to add Kubernetes Annotations as log tags to the original log data. When enabled, the log service will add fields related to Kubernetes Pod Annotations to the logs. Setting multiple key-value pairs means adding multiple fields. For example, if you set Key to sink and Value to data_sink, and the Pod contains the annotation sink=ck, a new field __tag__data_sink__: ck will be added to the log.
     */
    annotationTags: outputs.tls.GetRuleContainerRuleKubernetesRuleAnnotationTag[];
    /**
     * Whether to add all Kubernetes Labels as log tags to the original log data. When enabled, the log service adds fields for all Labels in the Kubernetes Pod to the logs. For example, if the Pod contains Label source=DC and destination=CS, the logs will include fields __tag__source__: DC and __tag__destination__: CS.
     */
    enableAllLabelTag: boolean;
    /**
     * Pod Annotation Denylist is used to specify containers that are not collected. If you need to set a Pod Annotation Denylist, Key is required and Value is optional. If Value is empty, all containers under Pods whose Pod Annotation contains the specified Key are excluded. If Value is not empty, only containers under Pods whose Pod Annotation contains the Key and whose value matches the specified Value are excluded. By default, Value uses string matching, meaning it only matches when Value and the Pod Annotation value are exactly the same. If the value starts with ^ and ends with $, it is treated as a regular expression. For example, if Key is set to app and Value is set to ^(test1|test2)$, it matches containers under Pods with Pod Annotation app:test1 or app:test2. Note: The logical relationship between multiple Key-Value pairs is OR, meaning a Pod Annotation that matches any pair will be excluded from the collection scope. When enabling the Pod Annotation Denylist, Keys must not be duplicated. For regex fuzzy matching, you must add .*. For example, ^(http.*)$ matches strings starting with http.
     */
    excludePodAnnotationRegexes: outputs.tls.GetRuleContainerRuleKubernetesRuleExcludePodAnnotationRegex[];
    /**
     * Specify containers to exclude from collection using the Pod Label denylist. If not enabled, all containers are collected. To set a Pod Label denylist, Key is required and Value is optional. If Value is empty, all containers with the specified Key in their Pod Label are excluded from collection. If Value is not empty, only containers with the specified Key and a Value matching the Value in their Pod Label are excluded. Value supports regular expressions. For example, set Key to module and Value to ^(tcp|udp)$ to exclude containers with module:tcp or module:udp in their Pod Label. Note: The logical relationship between multiple Key-Value pairs is OR. Any Pod Label matching any Key-Value pair will be excluded from collection. When enabling the Pod Label denylist, Keys must not be duplicated. For fuzzy regular expression matching, you must add .*. For example, ^(http.*)$ matches any string starting with http.
     */
    excludePodLabelRegexes: outputs.tls.GetRuleContainerRuleKubernetesRuleExcludePodLabelRegex[];
    /**
     * Pod Annotation Allowlist is used to specify containers to be collected. If you need to set a Pod Annotation Allowlist, Key is required and Value is optional. If Value is empty, all containers under Pods that contain the Key in Pod Annotation will match. If Value is not empty, only containers under Pods that contain the Key and whose value matches Value in Pod Annotation will match. Value defaults to string matching, meaning only when Value and the Pod Annotation value are exactly the same will they match. If the value starts with ^ and ends with $, it is a regex match. For example, set Key to app and Value to ^(test1|test2)$ to match containers under Pods with Pod Annotation app:test1 or app:test2. Note: The logical relationship between multiple Key-Value pairs is OR, meaning as long as Pod Annotation matches any pair, it will be included in the collection scope. When enabling Pod Annotation Allowlist, Keys must not be duplicated. For regex fuzzy matching, you must add .*. For example, ^(http.*)$ matches strings starting with http.
     */
    includePodAnnotationRegexes: outputs.tls.GetRuleContainerRuleKubernetesRuleIncludePodAnnotationRegex[];
    /**
     * Pod Label Allowlist is used to specify which containers to collect. If Pod Label Allowlist is not enabled, all containers will be collected. If you need to set a Pod Label Allowlist, Key is required: if Value is empty, all containers with the Key in their Pod Label will be collected. If Value is not empty, only containers with the Key in their Pod Label and a Value matching the specified pattern will be collected; regular expression matching is supported. For example, if you set Key to module and Value to ^(tcp|udp)$, only containers with module:tcp or module:udp in their Pod Label will be collected. Note: The logical relationship between multiple key-value pairs is OR, meaning any Pod Label matching any key-value pair will be included in the collection scope. When enabling Pod Label Allowlist, Keys cannot be duplicated. For fuzzy regular expression matching, you must add .*, for example, ^(http.*)$ matches strings starting with http.
     */
    includePodLabelRegexes: outputs.tls.GetRuleContainerRuleKubernetesRuleIncludePodLabelRegex[];
    /**
     * Whether to add Kubernetes labels as log tags to the raw log data. When enabled, the log service adds Kubernetes Pod label fields to the logs. Setting multiple key-value pairs adds multiple fields. For example, if you set Key to source and Value to data_source, and the Pod contains the label source=DC, the log will include the field __tag__data_source__: DC.
     */
    labelTags: outputs.tls.GetRuleContainerRuleKubernetesRuleLabelTag[];
    /**
     * Name of the Kubernetes Namespace to collect. If no Namespace name is specified, all containers will be collected. Namespace names support regular expression matching. For example, if you set the Namespace name to ^(tcp|udp)$, it will collect all containers under the tcp and udp namespaces.
     */
    namespaceNameRegex: string;
    /**
     * Pod name is used to specify the Pod whose containers will be collected. If no Pod name is specified, all containers will be collected. Pod name supports regular expression matching. For example, setting the Pod name to ^(http.*)$ collects all containers under Pods whose names start with http.
     */
    podNameRegex: string;
    /**
     * Specify the container to collect by workload name. If no workload name is specified, all containers are collected. Workload names support regular expressions. For example, setting the workload name to ^(http.*)$ collects all containers under workloads starting with http.
     */
    workloadNameRegex: string;
    /**
     * Specify the container to collect by workload type. Only one type can be selected. If no type is specified, containers of all types will be collected. Supported workload types: Deployment: stateless workload. StatefulSet: stateful workload. DaemonSet: daemon process. Job: task. CronJob: scheduled task.
     */
    workloadType: string;
}
20053
/**
 * Key-value pair used to promote a Pod annotation to a log tag
 * (see GetRuleContainerRuleKubernetesRule.annotationTags).
 */
interface GetRuleContainerRuleKubernetesRuleAnnotationTag {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
20063
/**
 * Key-value pair of the Pod annotation denylist
 * (see GetRuleContainerRuleKubernetesRule.excludePodAnnotationRegexes).
 */
interface GetRuleContainerRuleKubernetesRuleExcludePodAnnotationRegex {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
20073
/**
 * Key-value pair of the Pod label denylist
 * (see GetRuleContainerRuleKubernetesRule.excludePodLabelRegexes).
 */
interface GetRuleContainerRuleKubernetesRuleExcludePodLabelRegex {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
20083
/**
 * Key-value pair of the Pod annotation allowlist
 * (see GetRuleContainerRuleKubernetesRule.includePodAnnotationRegexes).
 */
interface GetRuleContainerRuleKubernetesRuleIncludePodAnnotationRegex {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
20093
/**
 * Key-value pair of the Pod label allowlist
 * (see GetRuleContainerRuleKubernetesRule.includePodLabelRegexes).
 */
interface GetRuleContainerRuleKubernetesRuleIncludePodLabelRegex {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
20103
/**
 * Key-value pair used to promote a Pod label to a log tag
 * (see GetRuleContainerRuleKubernetesRule.labelTags).
 */
interface GetRuleContainerRuleKubernetesRuleLabelTag {
    /**
     * Key.
     */
    key: string;
    /**
     * Value.
     */
    val: string;
}
20113
/**
 * A path (file or directory) excluded from log collection.
 */
interface GetRuleExcludePath {
    /**
     * Collection path type. File: File name. Path: Directory.
     */
    type: string;
    /**
     * Collection path. Must be specified as an absolute path. When Type is Path, Value indicates a directory. When Type is File, Value indicates a file name.
     */
    value: string;
}
20123
/**
 * Log extraction rule of a TLS collection rule: how raw log lines are split into
 * fields (delimiter/regex/template), how the log timestamp is parsed, and how
 * unmatched lines are handled.
 */
interface GetRuleExtractRule {
    /**
     * Regular expression that the first line of the log must match. Valid only when LogType is multiline_log or fullregex_log. Must be a valid regular expression.
     */
    beginRegex: string;
    /**
     * Log delimiter. Valid only when LogType is delimiter_log.
     */
    delimiter: string;
    /**
     * Enable nanosecond precision time. When enabled, log time parsing will include and report nanosecond precision. true: Enable nanosecond precision time. false: Disable nanosecond precision time.
     */
    enableNanosecond: boolean;
    /**
     * Filter rule list. When LogType is minimalist_log or multiline_log, you can configure up to 1 filter rule, and the filter field name key must be content. When LogType is delimiter_log, json_log, or fullregex_log, you can configure up to 5 filter rules, and the filter field name key cannot be duplicated or empty. The regular expression for the filter field's log content must be a valid regular expression and is limited to 256 characters.
     */
    filterKeyRegexes: outputs.tls.GetRuleExtractRuleFilterKeyRegex[];
    /**
     * List of log field names (Key). This is only valid when LogType is delimiter_log or fullregex_log. You can configure up to 100 field names. When LogType is delimiter_log, field names must be unique and cannot all be empty. When LogType is fullregex_log, field names must be unique and cannot be empty.
     */
    keys: string[];
    /**
     * The entire log must match the specified regular expression. This is only valid when the collected log type is fullregex_log. Must be a valid regular expression.
     */
    logRegex: string;
    /**
     * Automatically extract log fields based on the specified log template.
     */
    logTemplate: outputs.tls.GetRuleExtractRuleLogTemplate;
    /**
     * Quoting character. Content wrapped by a quoting character will not be split but parsed as a complete field. Only valid when LogType is delimiter_log.
     */
    quote: string;
    /**
     * Regular expression for extracting time, used to extract the time value from the TimeKey field and parse it as the collection time.
     */
    timeExtractRegex: string;
    /**
     * Parsing format for the time field. If you use a specific time field in the log as the log timestamp, you must provide both TimeKey and TimeFormat. TimeKey and TimeFormat must be used together.
     */
    timeFormat: string;
    /**
     * Name of the log time field. If you want to use a specific time field in the log as the log timestamp, you need to specify both TimeKey and TimeFormat. TimeKey and TimeFormat must be provided together.
     */
    timeKey: string;
    /**
     * Time sample. Used to check whether the entered time parsing format is correct.
     */
    timeSample: string;
    /**
     * Time zone. Supports machine time zone (default) and custom time zone. Custom time zone supports GMT and UTC. GMT format: GMT+08:00. UTC format: Asia/Shanghai.
     */
    timeZone: string;
    /**
     * Key name for unmatched logs.
     */
    unMatchLogKey: string;
    /**
     * Switch for uploading unmatched logs.
     */
    unMatchUpLoadSwitch: boolean;
}
20185
/**
 * A single filter rule: a field name and the regular expression its content must match
 * (see GetRuleExtractRule.filterKeyRegexes).
 */
interface GetRuleExtractRuleFilterKeyRegex {
    /**
     * Name of the filter field.
     */
    key: string;
    /**
     * The log content of the filter field must match the specified regular expression.
     */
    regex: string;
}
20195
/**
 * Log template used for automatic field extraction
 * (see GetRuleExtractRule.logTemplate).
 */
interface GetRuleExtractRuleLogTemplate {
    /**
     * Log template format.
     */
    format: string;
    /**
     * Log template types. Supported types: Nginx: Nginx log template.
     */
    type: string;
}
20205
/**
 * Information about a machine (host) group bound to a collection rule: identity,
 * membership counts, heartbeat health, and LogCollector auto-upgrade settings.
 */
interface GetRuleHostGroupInfo {
    /**
     * Number of hosts with heartbeat exceptions.
     */
    abnormalHeartbeatStatusCount: number;
    /**
     * Latest agent version.
     */
    agentLatestVersion: string;
    /**
     * Whether LogCollector installed on the machine group server has automatic upgrade enabled. true: The log service will check for upgrades during a specified time period each day. If upgrade conditions are met, LogCollector will be upgraded automatically without manual intervention. false (default): LogCollector does not upgrade automatically. To use a newer version of LogCollector, refer to the LogCollector upgrade procedure.
     */
    autoUpdate: boolean;
    /**
     * Machine group creation time.
     */
    createTime: string;
    /**
     * Number of hosts.
     */
    hostCount: number;
    /**
     * Machine group ID.
     */
    hostGroupId: string;
    /**
     * Name of the machine group.
     */
    hostGroupName: string;
    /**
     * Machine group type. IP: Machine IP. Label: Machine label.
     */
    hostGroupType: string;
    /**
     * Machine identifier.
     */
    hostIdentifier: string;
    /**
     * The IAM project to which the host group belongs.
     */
    iamProjectName: string;
    /**
     * Time when the machine group was modified.
     */
    modifyTime: string;
    /**
     * Number of hosts with normal heartbeat.
     */
    normalHeartbeatStatusCount: number;
    /**
     * The number of bound collection configurations.
     */
    ruleCount: number;
    /**
     * Whether to enable the LogCollector service log feature. true: enabled. false (default): disabled.
     */
    serviceLogging: boolean;
    /**
     * End time for LogCollector automatic upgrade.
     */
    updateEndTime: string;
    /**
     * LogCollector auto-update start time. Note: Only required when AutoUpdate is set to true. It is recommended to schedule auto-updates during off-peak hours. LogCollector may restart during the update process, but logs will not be lost.
     */
    updateStartTime: string;
}
20271
+ interface GetRuleUserDefineRule {
20272
+ /**
20273
+ * The Advanced parameter is used for extended configuration. After enabling extended configuration, you can customize advanced behaviors of LogCollector, such as when to release file handles. Note: If multiple release conditions are specified, the handle is released and log file monitoring ends as soon as any condition is met.
20274
+ */
20275
+ advanced: outputs.tls.GetRuleUserDefineRuleAdvanced;
20276
+ /**
20277
+ * Whether to upload the label information of the host group to the log service. The default is off. true: LogCollector uploads the label information of the host group to the specified field. You can specify the field name in the HostGroupLabelKey parameter. false (default): Host group label information is not uploaded.
20278
+ */
20279
+ enableHostGroupLabel: boolean;
20280
+ /**
20281
+ * Upload the hostname field. Default is disabled. true: Add a field to the original log to record the source hostname. Specify the hostname field name using HostnameKey. false: (default) Do not add the hostname field.
20282
+ */
20283
+ enableHostname: boolean;
20284
+ /**
20285
+ * Upload raw logs. true: Upload raw logs. false (default): Do not upload raw logs.
20286
+ */
20287
+ enableRawLog: boolean;
20288
+ /**
20289
+ * Add constant fields to logs. Constant fields must follow these restrictions: You can upload up to 5 constant fields. Field names (Key) must be unique and not empty. Length limit: 1–128 characters, including letters, numbers, and special characters (-*./), and cannot start with an underscore. Field values (Value) must not be empty, with a maximum length of 512 KiB.
20290
+ */
20291
+ fields: outputs.tls.GetRuleUserDefineRuleField[];
20292
+ /**
20293
+ * Field name used to store machine group label information
20294
+ */
20295
+ hostGroupLabelKey: string;
20296
+ /**
20297
+ * Hostname field name. Required only when EnableHostname is true.
20298
+ */
20299
+ hostnameKey: string;
20300
+ /**
20301
+ * Ignore log files that have not been updated for a specified period (in hours).
20302
+ */
20303
+ ignoreOlder: number;
20304
+ /**
20305
+ * Allow multiple log file collections. Empty: Use log file ID (including file inode, device, and checksum of the first N bytes) to uniquely identify the log file. RuleID: Use collection rule ID and log file ID to uniquely identify the log file. TopicIDRuleName: Use log topic ID, collection rule name, and log file ID to uniquely identify the log file.
20306
+ */
20307
+ multiCollectsType: string;
20308
+ /**
20309
+ * Rule for parsing the collection path. After setting the rule, fields in the collection path are extracted using the specified regular expression and added as metadata to the log data. Note: This parameter is not supported when collecting container standard output.
20310
+ */
20311
+ parsePathRule: outputs.tls.GetRuleUserDefineRuleParsePathRule;
20312
+ /**
20313
+ * LogCollector plugin configuration. After enabling plugin configuration, you can add one or more LogCollector processor plugins to parse logs with complex or variable structures.
20314
+ */
20315
+ plugin: outputs.tls.GetRuleUserDefineRulePlugin;
20316
+ /**
20317
+ * Name of the raw log field. Only effective when EnableRawLog is set to true. RawLogKey defaults to **raw**, meaning the original log data will be encapsulated in the **raw** field and uploaded to the log service along with the parsed log data.
20318
+ */
20319
+ rawLogKey: string;
20320
+ /**
20321
+ * Rules for routing log partitions. If this parameter is not set, logs are written using the default load balancing mode, and packets are written to any available Shard. If this parameter is set, logs are collected using the HashKey routing Shard mode, and the log service writes data to the Shard containing the specified Key value
20322
+ */
20323
+ shardHashKey: outputs.tls.GetRuleUserDefineRuleShardHashKey;
20324
+ /**
20325
+ * LogCollector collection policy, which specifies whether LogCollector collects incremental logs or full logs. Default is false, meaning full log collection. true: incremental collection. LogCollector only collects newly added content in the file. When new logs are written to monitored log files, LogCollector triggers log collection. For first-time collection, LogCollector automatically determines the collection position based on the incremental threshold TailSizeKb you specify. If the new file size does not exceed the incremental threshold, collection starts from the beginning of the file. If the new file size exceeds the incremental threshold, collection starts from the position at the end of the file minus the incremental threshold, collecting only incremental logs. For subsequent collections, LogCollector determines the collection position based on Checkpoint and continues collecting. false: (default) full collection. LogCollector collects logs from the beginning of each file, including historical log data.
20326
+ */
20327
+ tailFiles: boolean;
20328
+ /**
20329
+ * Backtracking threshold for incremental collection, in KiB. When LogCollector uses incremental collection, for the first collection of a log file: If the new log file size does not exceed the TailSizeKb value, collection starts from the beginning of the file. If the new log file size exceeds the TailSizeKb value, collection starts from the position that is TailSizeKb from the end of the file.
20330
+ */
20331
+ tailSizeKb: number;
20332
+ }
20333
+ interface GetRuleUserDefineRuleAdvanced {
20334
+ /**
20335
+ * After reading to the end of the log file, choose whether to release the file handle. Default is false.
20336
+ */
20337
+ closeEof: boolean;
20338
+ /**
20339
+ * Wait time to release log file handle. If no new logs are written to the log file within the specified time, the handle for that log file is released. Unit: seconds. Range: 1–300 seconds. Default: 60 seconds.
20340
+ */
20341
+ closeInactive: number;
20342
+ /**
20343
+ * Release the file handle after the log file is removed. Default is false.
20344
+ */
20345
+ closeRemoved: boolean;
20346
+ /**
20347
+ * Release the file handle after the log file is renamed. Default is false.
20348
+ */
20349
+ closeRenamed: boolean;
20350
+ /**
20351
+ * Maximum monitoring duration for LogCollector log files, in seconds. The default is 0 seconds, meaning LogCollector does not limit the monitoring duration for log files. Timing starts when LogCollector begins monitoring the log file. Once the specified duration is exceeded, LogCollector immediately releases the file handle and stops monitoring, regardless of whether the log file has been fully read.
20352
+ */
20353
+ closeTimeout: number;
20354
+ /**
20355
+ * Maximum wait time when LogCollector does not detect a line break, in seconds. The default is 5s. Timing starts when LogCollector begins reading the log file. If no line break is detected within the specified time, LogCollector sends the logs in the buffer. If the file write interval is long, a complete log entry may be split into two parts and written separately. Adjust this parameter based on your log write interval.
20356
+ */
20357
+ noLineTerminatorEofMaxTime: number;
20358
+ }
20359
+ interface GetRuleUserDefineRuleField {
20360
+ /**
20361
+ * Key.
20362
+ */
20363
+ key: string;
20364
+ /**
20365
+ * Value.
20366
+ */
20367
+ val: string;
20368
+ }
20369
+ interface GetRuleUserDefineRuleParsePathRule {
20370
+ /**
20371
+ * Field name list. The log service uses a regular expression (Regex) to parse the path sample (PathSample) into multiple fields. Keys specify the name of each field. You can configure up to 100 field names. Field names cannot be empty or duplicated.
20372
+ */
20373
+ keys: string[];
20374
+ /**
20375
+ * Sample collection path for actual scenarios. The sample collection path must be an absolute path. Wildcards *, ?, ** are not allowed in the path sample.
20376
+ */
20377
+ pathSample: string;
20378
+ /**
20379
+ * Regular expression used to extract the path field. Must match the sample collection path, otherwise extraction will fail
20380
+ */
20381
+ regex: string;
20382
+ }
20383
+ interface GetRuleUserDefineRulePlugin {
20384
+ /**
20385
+ * LogCollector plugin. For the list of supported plugins and parameter descriptions, see LogCollector plugin overview.
20386
+ */
20387
+ processors: string;
20388
+ }
20389
+ interface GetRuleUserDefineRuleShardHashKey {
20390
+ /**
20391
+ * HashKey of the log group, used to specify the shard to which the current log group will be written. The value range for this parameter is [00000000000000000000000000000000-ffffffffffffffffffffffffffffffff).
20392
+ */
20393
+ hashKey: string;
20394
+ }
20395
+ interface GetScheduleSqlTaskRequestCycle {
20396
+ /**
20397
+ * Cron expression, with a minimum granularity of minutes, using 24-hour format. For example, 0 18 * * * means execution at 18:00 every day.
20398
+ */
20399
+ cronTab: string;
20400
+ /**
20401
+ * If Type is set to Cron, you must also set the time zone.
20402
+ */
20403
+ cronTimeZone: string;
20404
+ /**
20405
+ * Scheduling period or the time point for periodic execution (minutes from 00:00). Range: 1–1440 minutes.
20406
+ */
20407
+ time: number;
20408
+ /**
20409
+ * Scheduling period type. Options: Period, Fixed, Cron.
20410
+ */
20411
+ type: string;
20412
+ }
20413
+ interface GetShipperContentInfo {
20414
+ /**
20415
+ * CSV format log content configuration.
20416
+ */
20417
+ csvInfo: outputs.tls.GetShipperContentInfoCsvInfo;
20418
+ /**
20419
+ * Log content parsing format. Delivery to TOS supports json, jsonl, csv; delivery to Kafka supports original, json.
20420
+ */
20421
+ format: string;
20422
+ /**
20423
+ * JSON/JSONL format log content configuration.
20424
+ */
20425
+ jsonInfo: outputs.tls.GetShipperContentInfoJsonInfo;
20426
+ }
20427
+ interface GetShipperContentInfoCsvInfo {
20428
+ /**
20429
+ * Delimiter. Supports comma, tab, pipe, semicolon, space.
20430
+ */
20431
+ delimiter: string;
20432
+ /**
20433
+ * Escape character. When field content contains a delimiter, use an escape character to enclose it. Supports single quote, double quote, or empty character.
20434
+ */
20435
+ escapeChar: string;
20436
+ /**
20437
+ * Fields to be delivered. Supports letters, numbers, and _-./. Cannot start with an underscore. Length: 1–128.
20438
+ */
20439
+ keys: string[];
20440
+ /**
20441
+ * Content for invalid field padding. Length: 0–128.
20442
+ */
20443
+ nonFieldContent: string;
20444
+ /**
20445
+ * Whether to print the key in the first row.
20446
+ */
20447
+ printHeader: boolean;
20448
+ }
20449
+ interface GetShipperContentInfoJsonInfo {
20450
+ /**
20451
+ * Enable flag.
20452
+ */
20453
+ enable: boolean;
20454
+ /**
20455
+ * Whether to enable escaping. Must be set to true.
20456
+ */
20457
+ escape: boolean;
20458
+ /**
20459
+ * Delivery field list; if not configured, all fields will be delivered. When delivering in JSON/JSONL format, if this parameter is not set, all fields will be delivered, including **content** (required), **source**, **path**, **time**, **image_name**, **container_name**, **pod_name**, **pod_uid**, namespace, **tag****client_ip**, and **tag****receive_time**.
20460
+ */
20461
+ keys: string[];
20462
+ }
20463
+ interface GetShipperKafkaShipperInfo {
20464
+ /**
20465
+ * Compression format; supports snappy, gzip, lz4, none.
20466
+ */
20467
+ compress: string;
20468
+ /**
20469
+ * Delivery end time, in milliseconds. If not set, delivery continues indefinitely. Note: Milliseconds will be truncated. For example, if 1776761323455 is entered, it will become 1776761323000; if 1776761323 is entered, it will become 1776761323000.
20470
+ */
20471
+ endTime: number;
20472
+ /**
20473
+ * Kafka instance.
20474
+ */
20475
+ instance: string;
20476
+ /**
20477
+ * Kafka topic name. All log data delivered through this configuration will be sent to this topic.
20478
+ */
20479
+ kafkaTopic: string;
20480
+ /**
20481
+ * Start time. If not configured, defaults to the current time. Note: Millisecond data will be ignored. For example, if 1776761323455 is entered, the result will be 1776761323000; if 1776761323 is entered, the result will be 1776761323000.
20482
+ */
20483
+ startTime: number;
20484
+ }
20485
+ interface GetShipperTosShipperInfo {
20486
+ /**
20487
+ * Select a TOS bucket. Must be in the same region as the source log topic. Can only contain numbers, hyphens (-), and letters a–z. Must start and end with a number or letter. Length: 3–63 characters.
20488
+ */
20489
+ bucket: string;
20490
+ /**
20491
+ * Compression format; supports snappy, gzip, lz4, none.
20492
+ */
20493
+ compress: string;
20494
+ /**
20495
+ * Delivery interval in seconds, range: 300–900.
20496
+ */
20497
+ interval: number;
20498
+ /**
20499
+ * Maximum raw file size per partition for delivery, in MiB. Range: 5–256.
20500
+ */
20501
+ maxSize: number;
20502
+ /**
20503
+ * Partition rule for log delivery. Subdirectory naming format supports strftime syntax. Default: %Y/%m/%d/%H/%M.
20504
+ */
20505
+ partitionFormat: string;
20506
+ /**
20507
+ * Top-level directory name for the bucket. Cannot start with / or \, and cannot use consecutive /. Cannot use .. as a folder name. Duplicate names are not allowed within the same bucket.
20508
+ */
20509
+ prefix: string;
20510
+ }
20511
+ interface GetTopicTag {
20512
+ /**
20513
+ * User tag key.
20514
+ */
20515
+ key: string;
18726
20516
  /**
18727
- * The total throughput of the cloud disk is the sum of its baseline throughput and additional throughput.
20517
+ * User tag value.
18728
20518
  */
18729
- throughput: number;
20519
+ value: string;
18730
20520
  }
18731
- }
18732
- export declare namespace tls {
18733
- interface GetImportTaskImportSourceInfo {
20521
+ interface ImportTaskImportSourceInfo {
18734
20522
  /**
18735
20523
  * Kafka data source information. When sourceType is kafka, the KafkaSourceInfo field is required
18736
20524
  */
18737
- kafkaSourceInfo: outputs.tls.GetImportTaskImportSourceInfoKafkaSourceInfo;
20525
+ kafkaSourceInfo: outputs.tls.ImportTaskImportSourceInfoKafkaSourceInfo;
18738
20526
  /**
18739
20527
  * TOS data source information. When sourceType is tos, the TosSourceInfo field is required.
18740
20528
  */
18741
- tosSourceInfo: outputs.tls.GetImportTaskImportSourceInfoTosSourceInfo;
20529
+ tosSourceInfo: outputs.tls.ImportTaskImportSourceInfoTosSourceInfo;
18742
20530
  }
18743
- interface GetImportTaskImportSourceInfoKafkaSourceInfo {
20531
+ interface ImportTaskImportSourceInfoKafkaSourceInfo {
18744
20532
  /**
18745
20533
  * Data encoding format. Available options: UTF-8, GBK.
18746
20534
  */
@@ -18790,7 +20578,7 @@ export declare namespace tls {
18790
20578
  */
18791
20579
  username: string;
18792
20580
  }
18793
- interface GetImportTaskImportSourceInfoTosSourceInfo {
20581
+ interface ImportTaskImportSourceInfoTosSourceInfo {
18794
20582
  /**
18795
20583
  * TOS bucket name
18796
20584
  */
@@ -18808,11 +20596,11 @@ export declare namespace tls {
18808
20596
  */
18809
20597
  region: string;
18810
20598
  }
18811
- interface GetImportTaskTargetInfo {
20599
+ interface ImportTaskTargetInfo {
18812
20600
  /**
18813
20601
  * Log extraction rule.
18814
20602
  */
18815
- extractRule: outputs.tls.GetImportTaskTargetInfoExtractRule;
20603
+ extractRule: outputs.tls.ImportTaskTargetInfoExtractRule;
18816
20604
  /**
18817
20605
  * Log sample. When LogType is set to multiline_log, you must configure log samples. It is recommended to provide more than two log entries as examples to ensure the regular expression matches the first line of each log. Use real samples from the production environment.
18818
20606
  */
@@ -18826,11 +20614,11 @@ export declare namespace tls {
18826
20614
  */
18827
20615
  region: string;
18828
20616
  }
18829
- interface GetImportTaskTargetInfoExtractRule {
20617
+ interface ImportTaskTargetInfoExtractRule {
18830
20618
  /**
18831
20619
  * Basic content of log extraction rules.
18832
20620
  */
18833
- extractRule: outputs.tls.GetImportTaskTargetInfoExtractRuleExtractRule;
20621
+ extractRule: outputs.tls.ImportTaskTargetInfoExtractRuleExtractRule;
18834
20622
  /**
18835
20623
  * Number of skipped rows. Only valid when the log type is delimiterLog and the import type is tos.
18836
20624
  */
@@ -18844,7 +20632,7 @@ export declare namespace tls {
18844
20632
  */
18845
20633
  timeZone: string;
18846
20634
  }
18847
- interface GetImportTaskTargetInfoExtractRuleExtractRule {
20635
+ interface ImportTaskTargetInfoExtractRuleExtractRule {
18848
20636
  /**
18849
20637
  * Regular expression for identifying the first line of each log. The matched part is treated as the start of the log. When LogType is set to multiline_log, you must configure a log sample
18850
20638
  */
@@ -18857,10 +20645,7 @@ export declare namespace tls {
18857
20645
  * Enable nanoseconds.
18858
20646
  */
18859
20647
  enableNanosecond: boolean;
18860
- /**
18861
- * Filter key regular expression.
18862
- */
18863
- filterKeyRegexes: outputs.tls.GetImportTaskTargetInfoExtractRuleExtractRuleFilterKeyRegex[];
20648
+ filterKeyRegexes: outputs.tls.ImportTaskTargetInfoExtractRuleExtractRuleFilterKeyRegex[];
18864
20649
  /**
18865
20650
  * List of log field names (Key). Valid only when LogType is delimiter_log. Supports up to 100 field names. Duplicate field names are not allowed, and all field names cannot be left blank
18866
20651
  */
@@ -18872,7 +20657,7 @@ export declare namespace tls {
18872
20657
  /**
18873
20658
  * Log template.
18874
20659
  */
18875
- logTemplate: outputs.tls.GetImportTaskTargetInfoExtractRuleExtractRuleLogTemplate;
20660
+ logTemplate: outputs.tls.ImportTaskTargetInfoExtractRuleExtractRuleLogTemplate;
18876
20661
  /**
18877
20662
  * Quotation mark. Content enclosed by the quotation mark will not be separated and will be parsed as a complete field. Only valid when LogType is delimiter_log.
18878
20663
  */
@@ -18898,7 +20683,7 @@ export declare namespace tls {
18898
20683
  */
18899
20684
  unMatchUpLoadSwitch: boolean;
18900
20685
  }
18901
- interface GetImportTaskTargetInfoExtractRuleExtractRuleFilterKeyRegex {
20686
+ interface ImportTaskTargetInfoExtractRuleExtractRuleFilterKeyRegex {
18902
20687
  /**
18903
20688
  * Key.
18904
20689
  */
@@ -18908,7 +20693,7 @@ export declare namespace tls {
18908
20693
  */
18909
20694
  regex: string;
18910
20695
  }
18911
- interface GetImportTaskTargetInfoExtractRuleExtractRuleLogTemplate {
20696
+ interface ImportTaskTargetInfoExtractRuleExtractRuleLogTemplate {
18912
20697
  /**
18913
20698
  * Format.
18914
20699
  */
@@ -18918,7 +20703,7 @@ export declare namespace tls {
18918
20703
  */
18919
20704
  type: string;
18920
20705
  }
18921
- interface GetImportTaskTaskStatistics {
20706
+ interface ImportTaskTaskStatistics {
18922
20707
  /**
18923
20708
  * Total resource bytes enumerated
18924
20709
  */
@@ -18952,7 +20737,7 @@ export declare namespace tls {
18952
20737
  */
18953
20738
  transferred: number;
18954
20739
  }
18955
- interface GetIndexFullText {
20740
+ interface IndexFullText {
18956
20741
  /**
18957
20742
  * Case sensitivity. true: Case sensitive. false: Not case sensitive.
18958
20743
  */
@@ -18966,7 +20751,7 @@ export declare namespace tls {
18966
20751
  */
18967
20752
  includeChinese: boolean;
18968
20753
  }
18969
- interface GetIndexKeyValue {
20754
+ interface IndexKeyValue {
18970
20755
  /**
18971
20756
  * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
18972
20757
  */
@@ -18974,9 +20759,9 @@ export declare namespace tls {
18974
20759
  /**
18975
20760
  * Field description information required for configuring key-value indexes.
18976
20761
  */
18977
- value: outputs.tls.GetIndexKeyValueValue;
20762
+ value: outputs.tls.IndexKeyValueValue;
18978
20763
  }
18979
- interface GetIndexKeyValueValue {
20764
+ interface IndexKeyValueValue {
18980
20765
  /**
18981
20766
  * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
18982
20767
  */
@@ -19001,10 +20786,7 @@ export declare namespace tls {
19001
20786
  * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
19002
20787
  */
19003
20788
  indexSqlAll: boolean;
19004
- /**
19005
- * When ValueType is json, you can configure key-value indexes for JSON subfields using the JsonKeys field, which is an array of KeyValueInfo. Each element in the JsonKeys list is a KeyValueInfo, and you can further nest text, long, and double type subfields. For subfields in JSON key-value indexes, use . to indicate the hierarchy between JSON fields. For example, the JSON field namelist contains a text-type subfield totalcount and a JSON-type info, which itself contains the field name. Each field name should be configured as totalcount and info.name, respectively. The SqlFlag setting for subfield key-value indexes defined in JsonKeys must match their parent field, meaning they must be enabled or disabled together. The default is disabled. Subfields do not support separate settings for Delimiter, CaseSensitive, or IncludeChinese. For text-type fields, CaseSensitive, Delimiter, and IncludeChinese always inherit the parent field settings. This is only set when ValueType is json.
19006
- */
19007
- jsonKeys: outputs.tls.GetIndexKeyValueValueJsonKey[];
20789
+ jsonKeys: outputs.tls.IndexKeyValueValueJsonKey[];
19008
20790
  /**
19009
20791
  * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
19010
20792
  */
@@ -19014,7 +20796,7 @@ export declare namespace tls {
19014
20796
  */
19015
20797
  valueType: string;
19016
20798
  }
19017
- interface GetIndexKeyValueValueJsonKey {
20799
+ interface IndexKeyValueValueJsonKey {
19018
20800
  /**
19019
20801
  * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
19020
20802
  */
@@ -19022,9 +20804,9 @@ export declare namespace tls {
19022
20804
  /**
19023
20805
  * Field description information required for configuring key-value indexes.
19024
20806
  */
19025
- value: outputs.tls.GetIndexKeyValueValueJsonKeyValue;
20807
+ value: outputs.tls.IndexKeyValueValueJsonKeyValue;
19026
20808
  }
19027
- interface GetIndexKeyValueValueJsonKeyValue {
20809
+ interface IndexKeyValueValueJsonKeyValue {
19028
20810
  /**
19029
20811
  * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
19030
20812
  */
@@ -19058,7 +20840,7 @@ export declare namespace tls {
19058
20840
  */
19059
20841
  valueType: string;
19060
20842
  }
19061
- interface GetIndexUserInnerKeyValue {
20843
+ interface IndexUserInnerKeyValue {
19062
20844
  /**
19063
20845
  * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
19064
20846
  */
@@ -19066,9 +20848,9 @@ export declare namespace tls {
19066
20848
  /**
19067
20849
  * Field description information required for configuring key-value indexes.
19068
20850
  */
19069
- value: outputs.tls.GetIndexUserInnerKeyValueValue;
20851
+ value: outputs.tls.IndexUserInnerKeyValueValue;
19070
20852
  }
19071
- interface GetIndexUserInnerKeyValueValue {
20853
+ interface IndexUserInnerKeyValueValue {
19072
20854
  /**
19073
20855
  * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
19074
20856
  */
@@ -19093,10 +20875,7 @@ export declare namespace tls {
19093
20875
  * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
19094
20876
  */
19095
20877
  indexSqlAll: boolean;
19096
- /**
19097
- * When ValueType is json, you can configure key-value indexes for JSON subfields using the JsonKeys field, which is an array of KeyValueInfo. Each element in the JsonKeys list is a KeyValueInfo, and you can further nest text, long, and double type subfields. For subfields in JSON key-value indexes, use . to indicate the hierarchy between JSON fields. For example, the JSON field namelist contains a text-type subfield totalcount and a JSON-type info, which itself contains the field name. Each field name should be configured as totalcount and info.name, respectively. The SqlFlag setting for subfield key-value indexes defined in JsonKeys must match their parent field, meaning they must be enabled or disabled together. The default is disabled. Subfields do not support separate settings for Delimiter, CaseSensitive, or IncludeChinese. For text-type fields, CaseSensitive, Delimiter, and IncludeChinese always inherit the parent field settings. This is only set when ValueType is json.
19098
- */
19099
- jsonKeys: outputs.tls.GetIndexUserInnerKeyValueValueJsonKey[];
20878
+ jsonKeys: outputs.tls.IndexUserInnerKeyValueValueJsonKey[];
19100
20879
  /**
19101
20880
  * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
19102
20881
  */
@@ -19106,7 +20885,7 @@ export declare namespace tls {
19106
20885
  */
19107
20886
  valueType: string;
19108
20887
  }
19109
- interface GetIndexUserInnerKeyValueValueJsonKey {
20888
+ interface IndexUserInnerKeyValueValueJsonKey {
19110
20889
  /**
19111
20890
  * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
19112
20891
  */
@@ -19114,9 +20893,9 @@ export declare namespace tls {
19114
20893
  /**
19115
20894
  * Field description information required for configuring key-value indexes.
19116
20895
  */
19117
- value: outputs.tls.GetIndexUserInnerKeyValueValueJsonKeyValue;
20896
+ value: outputs.tls.IndexUserInnerKeyValueValueJsonKeyValue;
19118
20897
  }
19119
- interface GetIndexUserInnerKeyValueValueJsonKeyValue {
20898
+ interface IndexUserInnerKeyValueValueJsonKeyValue {
19120
20899
  /**
19121
20900
  * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
19122
20901
  */
@@ -19150,7 +20929,7 @@ export declare namespace tls {
19150
20929
  */
19151
20930
  valueType: string;
19152
20931
  }
19153
- interface GetProjectTag {
20932
+ interface ProjectTag {
19154
20933
  /**
19155
20934
  * Tag key
19156
20935
  */
@@ -19160,472 +20939,494 @@ export declare namespace tls {
19160
20939
  */
19161
20940
  value: string;
19162
20941
  }
19163
- interface GetScheduleSqlTaskRequestCycle {
19164
- /**
19165
- * Cron expression, with a minimum granularity of minutes, using 24-hour format. For example, 0 18 * * * means execution at 18:00 every day.
19166
- */
19167
- cronTab: string;
20942
+ interface RuleContainerRule {
19168
20943
  /**
19169
- * If Type is set to Cron, you must also set the time zone.
20944
+ * Container name to collect. If no container name is specified, all containers in the machine group will be collected. Supports regex matching; for example, setting the container name to ^(container-test)$ will collect all containers named container-test.
19170
20945
  */
19171
- cronTimeZone: string;
20946
+ containerNameRegex: string;
20947
+ envTags: outputs.tls.RuleContainerRuleEnvTag[];
20948
+ excludeContainerEnvRegexes: outputs.tls.RuleContainerRuleExcludeContainerEnvRegex[];
20949
+ excludeContainerLabelRegexes: outputs.tls.RuleContainerRuleExcludeContainerLabelRegex[];
20950
+ includeContainerEnvRegexes: outputs.tls.RuleContainerRuleIncludeContainerEnvRegex[];
20951
+ includeContainerLabelRegexes: outputs.tls.RuleContainerRuleIncludeContainerLabelRegex[];
19172
20952
  /**
19173
- * Scheduling period or the time point for periodic execution (minutes from 00:00). Range: 1–1440 minutes.
20953
+ * Kubernetes container collection rules.
19174
20954
  */
19175
- time: number;
20955
+ kubernetesRule: outputs.tls.RuleContainerRuleKubernetesRule;
19176
20956
  /**
19177
- * Scheduling period type. Options: Period, Fixed, Cron.
20957
+ * Collection information. stdout: Collects container standard output (stdout). stderr: Collects container standard error (stderr). all: Collects both container standard output (stdout) and standard error (stderr).
19178
20958
  */
19179
- type: string;
20959
+ stream: string;
19180
20960
  }
19181
- interface GetTopicTag {
20961
+ interface RuleContainerRuleEnvTag {
19182
20962
  /**
19183
- * User tag key.
20963
+ * Key.
19184
20964
  */
19185
20965
  key: string;
19186
20966
  /**
19187
- * User tag value.
20967
+ * Value.
19188
20968
  */
19189
- value: string;
20969
+ val: string;
19190
20970
  }
19191
- interface ImportTaskImportSourceInfo {
20971
+ interface RuleContainerRuleExcludeContainerEnvRegex {
19192
20972
  /**
19193
- * Kafka data source information. When sourceType is kafka, the KafkaSourceInfo field is required
20973
+ * Key.
19194
20974
  */
19195
- kafkaSourceInfo: outputs.tls.ImportTaskImportSourceInfoKafkaSourceInfo;
20975
+ key: string;
19196
20976
  /**
19197
- * TOS data source information. When sourceType is tos, the TosSourceInfo field is required.
20977
+ * Value.
19198
20978
  */
19199
- tosSourceInfo: outputs.tls.ImportTaskImportSourceInfoTosSourceInfo;
20979
+ val: string;
19200
20980
  }
19201
- interface ImportTaskImportSourceInfoKafkaSourceInfo {
20981
+ interface RuleContainerRuleExcludeContainerLabelRegex {
19202
20982
  /**
19203
- * Data encoding format. Available options: UTF-8, GBK.
20983
+ * Key.
19204
20984
  */
19205
- encode: string;
20985
+ key: string;
19206
20986
  /**
19207
- * Kafka consumer group. If not specified, the system will automatically create a Kafka consumer group.
20987
+ * Value.
19208
20988
  */
19209
- group: string;
20989
+ val: string;
20990
+ }
20991
+ interface RuleContainerRuleIncludeContainerEnvRegex {
19210
20992
  /**
19211
- * The service addresses for different types of Kafka clusters vary. Details are as follows: Message Queue Kafka Edition: Use the access point of the Kafka instance. For more information, see Access Point. If the Kafka instance and the Log Service Project are in the same region, you can use private network access; otherwise, use public network access. Self-hosted Kafka clusters: Use the IP address and port number or the domain name and port number of the Kafka Broker. Only public network access is supported. Separate multiple service addresses with a comma (,).
20993
+ * Key.
19212
20994
  */
19213
- host: string;
20995
+ key: string;
19214
20996
  /**
19215
- * Starting position for data import. Options: 0: Earliest time, start importing from the first record in the specified Kafka Topic. 1: Latest time, start importing from the most recently generated record in the specified Kafka Topic.
20997
+ * Value.
19216
20998
  */
19217
- initialOffset: number;
20999
+ val: string;
21000
+ }
21001
+ interface RuleContainerRuleIncludeContainerLabelRegex {
19218
21002
  /**
19219
- * If you are using Message Queue Kafka Edition, set this to the Kafka instance ID.
21003
+ * Key.
19220
21004
  */
19221
- instanceId: string;
21005
+ key: string;
19222
21006
  /**
19223
- * Whether to enable authentication. If you use a public service address, it is recommended to enable authentication.
21007
+ * Value.
19224
21008
  */
19225
- isNeedAuth: boolean;
21009
+ val: string;
21010
+ }
21011
+ interface RuleContainerRuleKubernetesRule {
21012
+ annotationTags: outputs.tls.RuleContainerRuleKubernetesRuleAnnotationTag[];
19226
21013
  /**
19227
- * Password authentication mechanism. Available options: PLAIN, SCRAM-SHA-256, and SCRAM-SHA-512.
21014
+ * Whether to add all Kubernetes Labels as log tags to the original log data. When enabled, the log service adds fields for all Labels in the Kubernetes Pod to the logs. For example, if the Pod contains Label source=DC and destination=CS, the logs will include fields **tag**source__: DC and **tag**destination__: CS.
19228
21015
  */
19229
- mechanism: string;
21016
+ enableAllLabelTag: boolean;
21017
+ excludePodAnnotationRegexes: outputs.tls.RuleContainerRuleKubernetesRuleExcludePodAnnotationRegex[];
21018
+ excludePodLabelRegexes: outputs.tls.RuleContainerRuleKubernetesRuleExcludePodLabelRegex[];
21019
+ includePodAnnotationRegexes: outputs.tls.RuleContainerRuleKubernetesRuleIncludePodAnnotationRegex[];
21020
+ includePodLabelRegexes: outputs.tls.RuleContainerRuleKubernetesRuleIncludePodLabelRegex[];
21021
+ labelTags: outputs.tls.RuleContainerRuleKubernetesRuleLabelTag[];
19230
21022
  /**
19231
- * Kafka SASL user password for authentication.
21023
+ * Name of the Kubernetes Namespace to collect. If no Namespace name is specified, all containers will be collected. Namespace names support regular expression matching. For example, if you set the Namespace name to ^(tcp|udp)$, it will collect all containers under the tcp and udp namespaces.
19232
21024
  */
19233
- password: string;
21025
+ namespaceNameRegex: string;
19234
21026
  /**
19235
- * Secure transmission protocol. Options include plaintext, sasl*ssl, ssl, and sasl*plaintext
21027
+ * Pod name is used to specify the Pod whose containers will be collected. If no Pod name is specified, all containers will be collected. Pod name supports regular expression matching. For example, setting the Pod name to ^(http.*)$ collects all containers under Pods whose names start with http.
19236
21028
  */
19237
- protocol: string;
21029
+ podNameRegex: string;
19238
21030
  /**
19239
- * Specify log time. Options: 0: Use Kafka message timestamp. 1: Use current system time.
21031
+ * Specify the container to collect by workload name. If no workload name is specified, all containers are collected. Workload names support regular expressions. For example, setting the workload name to ^(http.*)$ collects all containers under workloads starting with http.
19240
21032
  */
19241
- timeSourceDefault: number;
21033
+ workloadNameRegex: string;
19242
21034
  /**
19243
- * Kafka Topic name. Separate multiple Kafka Topics with commas (,).
21035
+ * Specify the container to collect by workload type. Only one type can be selected. If no type is specified, containers of all types will be collected. Supported workload types: Deployment: stateless workload StatefulSet: stateful workload DaemonSet: daemon process Job: task CronJob: scheduled task
19244
21036
  */
19245
- topic: string;
21037
+ workloadType: string;
21038
+ }
21039
+ interface RuleContainerRuleKubernetesRuleAnnotationTag {
19246
21040
  /**
19247
- * Kafka SASL username for authentication.
21041
+ * Key.
19248
21042
  */
19249
- username: string;
21043
+ key: string;
21044
+ /**
21045
+ * Value.
21046
+ */
21047
+ val: string;
19250
21048
  }
19251
- interface ImportTaskImportSourceInfoTosSourceInfo {
21049
+ interface RuleContainerRuleKubernetesRuleExcludePodAnnotationRegex {
19252
21050
  /**
19253
- * TOS bucket name
21051
+ * Key.
19254
21052
  */
19255
- bucket: string;
21053
+ key: string;
19256
21054
  /**
19257
- * Compression mode for data in the TOS bucket. none: No compression. snappy: Compress using snappy. gzip: Compress using gzip. lz4: Compress using lz4.
21055
+ * Value.
19258
21056
  */
19259
- compressType: string;
21057
+ val: string;
21058
+ }
21059
+ interface RuleContainerRuleKubernetesRuleExcludePodLabelRegex {
19260
21060
  /**
19261
- * Path of the file to be imported in the TOS bucket.
21061
+ * Key.
19262
21062
  */
19263
- prefix: string;
21063
+ key: string;
19264
21064
  /**
19265
- * Region where the TOS bucket is located. Cross-region data import is supported
21065
+ * Value.
19266
21066
  */
19267
- region: string;
21067
+ val: string;
19268
21068
  }
19269
- interface ImportTaskTargetInfo {
21069
+ interface RuleContainerRuleKubernetesRuleIncludePodAnnotationRegex {
19270
21070
  /**
19271
- * Log extraction rule.
21071
+ * Key.
19272
21072
  */
19273
- extractRule: outputs.tls.ImportTaskTargetInfoExtractRule;
21073
+ key: string;
19274
21074
  /**
19275
- * Log sample. When LogType is set to multiline_log, you must configure log samples. It is recommended to provide more than two log entries as examples to ensure the regular expression matches the first line of each log. Use real samples from the production environment.
21075
+ * Value.
19276
21076
  */
19277
- logSample: string;
21077
+ val: string;
21078
+ }
21079
+ interface RuleContainerRuleKubernetesRuleIncludePodLabelRegex {
19278
21080
  /**
19279
- * Specify log parsing type during import. delimiter*log: CSV type. multiline*log: multiline full text type. minimalist*log: single line full text type. json*log: JSON type.
21081
+ * Key.
19280
21082
  */
19281
- logType: string;
21083
+ key: string;
19282
21084
  /**
19283
- * Region.
21085
+ * Value.
19284
21086
  */
19285
- region: string;
21087
+ val: string;
19286
21088
  }
19287
- interface ImportTaskTargetInfoExtractRule {
21089
+ interface RuleContainerRuleKubernetesRuleLabelTag {
19288
21090
  /**
19289
- * Basic content of log extraction rules.
21091
+ * Key.
19290
21092
  */
19291
- extractRule: outputs.tls.ImportTaskTargetInfoExtractRuleExtractRule;
21093
+ key: string;
19292
21094
  /**
19293
- * Number of skipped rows. Only valid when the log type is delimiterLog and the import type is tos.
21095
+ * Value.
19294
21096
  */
19295
- skipLineCount: number;
21097
+ val: string;
21098
+ }
21099
+ interface RuleExcludePath {
19296
21100
  /**
19297
- * Time extraction regular expression, used to extract the time value from the TimeKey field and parse it as the collection time
21101
+ * Collection path type. File: File name. Path: Directory.
19298
21102
  */
19299
- timeExtractRegex: string;
21103
+ type: string;
19300
21104
  /**
19301
- * Time zone. Supports machine time zone (default) and custom time zone. Custom time zone supports GMT and UTC. GMT format: GMT+08:00. UTC format: Asia/Shanghai.
21105
+ * Collection path. Must be specified as an absolute path. When Type is Path, Value indicates a directory. When Type is File, Value indicates a file name.
19302
21106
  */
19303
- timeZone: string;
21107
+ value: string;
19304
21108
  }
19305
- interface ImportTaskTargetInfoExtractRuleExtractRule {
21109
+ interface RuleExtractRule {
19306
21110
  /**
19307
- * Regular expression for identifying the first line of each log. The matched part is treated as the start of the log. When LogType is set to multiline_log, you must configure a log sample
21111
+ * Regular expression that the first line of the log must match. Valid only when LogType is multiline*log or fullregex*log. Must be a valid regular expression.
19308
21112
  */
19309
21113
  beginRegex: string;
19310
21114
  /**
19311
- * Delimiter. Only valid when LogType is delimiter_log.
21115
+ * Log delimiter. Valid only when LogType is delimiter_log.
19312
21116
  */
19313
21117
  delimiter: string;
19314
21118
  /**
19315
- * Enable nanoseconds.
21119
+ * Enable nanosecond precision time. When enabled, log time parsing will include and report nanosecond precision. true: Enable nanosecond precision time. false: Disable nanosecond precision time.
19316
21120
  */
19317
21121
  enableNanosecond: boolean;
19318
- filterKeyRegexes: outputs.tls.ImportTaskTargetInfoExtractRuleExtractRuleFilterKeyRegex[];
21122
+ filterKeyRegexes: outputs.tls.RuleExtractRuleFilterKeyRegex[];
19319
21123
  /**
19320
- * List of log field names (Key). Valid only when LogType is delimiter_log. Supports up to 100 field names. Duplicate field names are not allowed, and all field names cannot be left blank
21124
+ * List of log field names (Key). This is only valid when LogType is delimiter*log or fullregex*log. You can configure up to 100 field names. When LogType is delimiter*log, field names must be unique and cannot all be empty. When LogType is fullregex*log, field names must be unique and cannot be empty.
19321
21125
  */
19322
21126
  keys: string[];
19323
21127
  /**
19324
- * Log regular expression
21128
+ * The entire log must match the specified regular expression. This is only valid when the collected log type is fullregex_log. Must be a valid regular expression.
19325
21129
  */
19326
21130
  logRegex: string;
19327
21131
  /**
19328
- * Log template.
21132
+ * Automatically extract log fields based on the specified log template
19329
21133
  */
19330
- logTemplate: outputs.tls.ImportTaskTargetInfoExtractRuleExtractRuleLogTemplate;
21134
+ logTemplate: outputs.tls.RuleExtractRuleLogTemplate;
19331
21135
  /**
19332
- * Quotation mark. Content enclosed by the quotation mark will not be separated and will be parsed as a complete field. Only valid when LogType is delimiter_log.
21136
+ * Quoting character. Content wrapped by a quoting character will not be split but parsed as a complete field. Only valid when LogType is delimiter_log.
19333
21137
  */
19334
21138
  quote: string;
19335
21139
  /**
19336
- * Parsing format for the time field. If you use a specified time field in the log as the log timestamp, you must fill in TimeKey and TimeFormat. TimeKey and TimeFormat must be paired. For configuration details, see time format.
21140
+ * Regular expression for extracting time, used to extract the time value from the TimeKey field and parse it as the collection time
21141
+ */
21142
+ timeExtractRegex: string;
21143
+ /**
21144
+ * Parsing format for the time field. If you use a specific time field in the log as the log timestamp, you must provide both TimeKey and TimeFormat. TimeKey and TimeFormat must be used together.
19337
21145
  */
19338
21146
  timeFormat: string;
19339
21147
  /**
19340
- * Name of the log time field. If you use a specific time field in the log as the log timestamp, you must provide both TimeKey and TimeFormat. TimeKey and TimeFormat must appear in pairs
21148
+ * Name of the log time field. If you want to use a specific time field in the log as the log timestamp, you need to specify both TimeKey and TimeFormat. TimeKey and TimeFormat must be provided together.
19341
21149
  */
19342
21150
  timeKey: string;
19343
21151
  /**
19344
- * Time sample. Used to verify whether the entered time parsing format is correct
21152
+ * Time sample. Used to check whether the entered time parsing format is correct.
19345
21153
  */
19346
21154
  timeSample: string;
19347
21155
  /**
19348
- * When uploading logs that failed to parse, specify the key name for the failed logs. UnMatchUpLoadSwitch=true and UnMatchLogKey must be used together.
21156
+ * Time zone. Supports machine time zone (default) and custom time zone. Custom time zone supports GMT and UTC. GMT format: GMT+08:00. UTC format: Asia/Shanghai.
21157
+ */
21158
+ timeZone: string;
21159
+ /**
21160
+ * Key name for unmatched logs.
19349
21161
  */
19350
21162
  unMatchLogKey: string;
19351
21163
  /**
19352
- * Whether to upload logs that failed to parse. UnMatchUpLoadSwitch=true and UnMatchLogKey must be paired. true: Upload logs that failed to parse. false: Do not upload logs that failed to parse.
21164
+ * Switch for uploading unmatched logs.
19353
21165
  */
19354
21166
  unMatchUpLoadSwitch: boolean;
19355
21167
  }
19356
- interface ImportTaskTargetInfoExtractRuleExtractRuleFilterKeyRegex {
21168
+ interface RuleExtractRuleFilterKeyRegex {
19357
21169
  /**
19358
- * Key.
21170
+ * Name of the filter field.
19359
21171
  */
19360
21172
  key: string;
19361
21173
  /**
19362
- * Regular expression.
21174
+ * The log content of the filter field must match the specified regular expression.
19363
21175
  */
19364
21176
  regex: string;
19365
21177
  }
19366
- interface ImportTaskTargetInfoExtractRuleExtractRuleLogTemplate {
21178
+ interface RuleExtractRuleLogTemplate {
19367
21179
  /**
19368
- * Format.
21180
+ * Log template format.
19369
21181
  */
19370
21182
  format: string;
19371
21183
  /**
19372
- * Type
21184
+ * Log template types. Supported types: Nginx: Nginx log template.
19373
21185
  */
19374
21186
  type: string;
19375
21187
  }
19376
- interface ImportTaskTaskStatistics {
19377
- /**
19378
- * Total resource bytes enumerated
19379
- */
19380
- bytesTotal: number;
19381
- /**
19382
- * Bytes transferred.
19383
- */
19384
- bytesTransferred: number;
19385
- /**
19386
- * Number of resources failed to import.
19387
- */
19388
- failed: number;
19389
- /**
19390
- * Number of resources not found.
19391
- */
19392
- notExist: number;
19393
- /**
19394
- * Number of resources skipped during import
19395
- */
19396
- skipped: number;
19397
- /**
19398
- * Task status. Status of the import task. Preparing: Preparing for import. Importing: Importing data. Success: Import completed successfully. Failed: Import failed. Stopped: Import paused.
19399
- */
19400
- taskStatus: string;
19401
- /**
19402
- * Total number of resources enumerated.
19403
- */
19404
- total: number;
19405
- /**
19406
- * Number of records transferred.
19407
- */
19408
- transferred: number;
19409
- }
19410
- interface IndexFullText {
19411
- /**
19412
- * Case sensitivity. true: Case sensitive. false: Not case sensitive.
19413
- */
19414
- caseSensitive: boolean;
19415
- /**
19416
- * Token separators for the full-text index. Each character in the string represents a token separator. Length: 1–256 bytes. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
19417
- */
19418
- delimiter: string;
21188
+ interface RuleHostGroupInfo {
19419
21189
  /**
19420
- * When searching, specify whether to segment Chinese content in logs according to Chinese grammar. The default is false. true: For Chinese characters in logs, segment the log according to common Chinese grammar rules. Custom segmentation symbols for Chinese content are not supported. For non-Chinese characters in logs, segment the log using the segmentation symbols specified in the parameter. false: Segment the log using the segmentation symbols specified in the parameter.
21190
+ * Machine group ID
19421
21191
  */
19422
- includeChinese: boolean;
21192
+ hostGroupId: string;
19423
21193
  }
19424
- interface IndexKeyValue {
21194
+ interface RuleUserDefineRule {
19425
21195
  /**
19426
- * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
21196
+ * The Advanced parameter is used for extended configuration. After enabling extended configuration, you can customize advanced behaviors of LogCollector, such as when to release file handles. Note: If multiple release conditions are specified, the handle is released and log file monitoring ends as soon as any condition is met.
19427
21197
  */
19428
- key: string;
21198
+ advanced: outputs.tls.RuleUserDefineRuleAdvanced;
19429
21199
  /**
19430
- * Field description information required for configuring key-value indexes.
21200
+ * Whether to upload the label information of the host group to the log service. The default is off. true: LogCollector uploads the label information of the host group to the specified field. You can specify the field name in the HostGroupLabelKey parameter. false (default): Host group label information is not uploaded.
19431
21201
  */
19432
- value: outputs.tls.IndexKeyValueValue;
19433
- }
19434
- interface IndexKeyValueValue {
21202
+ enableHostGroupLabel: boolean;
19435
21203
  /**
19436
- * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
21204
+ * Upload the hostname field. Default is disabled. true: Add a field to the original log to record the source hostname. Specify the hostname field name using HostnameKey. false: (default) Do not add the hostname field.
19437
21205
  */
19438
- autoIndexFlag: boolean;
21206
+ enableHostname: boolean;
19439
21207
  /**
19440
- * Whether to distinguish case. Default is false.
21208
+ * Upload raw logs. true: Upload raw logs. false (default): Do not upload raw logs.
19441
21209
  */
19442
- caseSensitive: boolean;
21210
+ enableRawLog: boolean;
21211
+ fields: outputs.tls.RuleUserDefineRuleField[];
19443
21212
  /**
19444
- * Token separators for the field. Default is empty (""). Each character in the string represents a token separator. Length: 0–256 bytes. If the length is 0, segmentation is disabled. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
21213
+ * Field name used to store machine group label information
19445
21214
  */
19446
- delimiter: string;
21215
+ hostGroupLabelKey: string;
19447
21216
  /**
19448
- * When searching, determines whether to segment Chinese log content according to Chinese syntax. Enabled: Chinese characters in logs are segmented based on common Chinese syntax; custom segmentation for Chinese content is not supported. Non-Chinese characters in logs are segmented using the token separators specified in the parameter. Disabled: Logs are segmented using the token separators specified in the parameter.
21217
+ * Hostname field name. Required only when EnableHostname is true.
19449
21218
  */
19450
- includeChinese: boolean;
21219
+ hostnameKey: string;
19451
21220
  /**
19452
- * Create indexes for all fields with text values in the JSON field.
21221
+ * Ignore log files that have not been updated for a specified period (in hours).
19453
21222
  */
19454
- indexAll: boolean;
21223
+ ignoreOlder: number;
19455
21224
  /**
19456
- * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
21225
+ * Allow multiple log file collections. Empty: Use log file ID (including file inode, device, and checksum of the first N bytes) to uniquely identify the log file. RuleID: Use collection rule ID and log file ID to uniquely identify the log file. TopicIDRuleName: Use log topic ID, collection rule name, and log file ID to uniquely identify the log file.
19457
21226
  */
19458
- indexSqlAll: boolean;
19459
- jsonKeys: outputs.tls.IndexKeyValueValueJsonKey[];
21227
+ multiCollectsType: string;
19460
21228
  /**
19461
- * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
21229
+ * Rule for parsing the collection path. After setting the rule, fields in the collection path are extracted using the specified regular expression and added as metadata to the log data. Note: This parameter is not supported when collecting container standard output.
19462
21230
  */
19463
- sqlFlag: boolean;
21231
+ parsePathRule: outputs.tls.RuleUserDefineRuleParsePathRule;
19464
21232
  /**
19465
- * Field type. Currently supports long, double, text, and json. The long and double types do not support configuring delimiters, including Chinese, or case sensitivity. Only the json type supports further configuration of JsonKeys subfields.
21233
+ * LogCollector plugin configuration. After enabling plugin configuration, you can add one or more LogCollector processor plugins to parse logs with complex or variable structures.
19466
21234
  */
19467
- valueType: string;
19468
- }
19469
- interface IndexKeyValueValueJsonKey {
21235
+ plugin: outputs.tls.RuleUserDefineRulePlugin;
19470
21236
  /**
19471
- * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
21237
+ * Name of the raw log field. Only effective when EnableRawLog is set to true. RawLogKey defaults to **raw**, meaning the original log data will be encapsulated in the **raw** field and uploaded to the log service along with the parsed log data.
19472
21238
  */
19473
- key: string;
21239
+ rawLogKey: string;
19474
21240
  /**
19475
- * Field description information required for configuring key-value indexes.
21241
+ * Rules for routing log partitions. If this parameter is not set, logs are written using the default load balancing mode, and packets are written to any available Shard. If this parameter is set, logs are collected using the HashKey routing Shard mode, and the log service writes data to the Shard containing the specified Key value
19476
21242
  */
19477
- value: outputs.tls.IndexKeyValueValueJsonKeyValue;
19478
- }
19479
- interface IndexKeyValueValueJsonKeyValue {
21243
+ shardHashKey: outputs.tls.RuleUserDefineRuleShardHashKey;
19480
21244
  /**
19481
- * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
21245
+ * LogCollector collection policy, which specifies whether LogCollector collects incremental logs or full logs. Default is false, meaning full log collection. true: incremental collection. LogCollector only collects newly added content in the file. When new logs are written to monitored log files, LogCollector triggers log collection. For first-time collection, LogCollector automatically determines the collection position based on the incremental threshold TailSizeKb you specify. If the new file size does not exceed the incremental threshold, collection starts from the beginning of the file. If the new file size exceeds the incremental threshold, collection starts from the position at the end of the file minus the incremental threshold, collecting only incremental logs. For subsequent collections, LogCollector determines the collection position based on Checkpoint and continues collecting. false: (default) full collection. LogCollector collects logs from the beginning of each file, including historical log data.
19482
21246
  */
19483
- autoIndexFlag: boolean;
21247
+ tailFiles: boolean;
19484
21248
  /**
19485
- * Whether to distinguish case. Default is false.
21249
+ * Backtracking threshold for incremental collection, in KiB. When LogCollector uses incremental collection, for the first collection of a log file: If the new log file size does not exceed the TailSizeKb value, collection starts from the beginning of the file. If the new log file size exceeds the TailSizeKb value, collection starts from the position that is TailSizeKb from the end of the file.
19486
21250
  */
19487
- caseSensitive: boolean;
21251
+ tailSizeKb: number;
21252
+ }
21253
+ interface RuleUserDefineRuleAdvanced {
19488
21254
  /**
19489
- * Token separators for the field. Default is empty (""). Each character in the string represents a token separator. Length: 0–256 bytes. If the length is 0, segmentation is disabled. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
21255
+ * After reading to the end of the log file, choose whether to release the file handle. Default is false.
19490
21256
  */
19491
- delimiter: string;
21257
+ closeEof: boolean;
19492
21258
  /**
19493
- * When searching, determines whether to segment Chinese log content according to Chinese syntax. Enabled: Chinese characters in logs are segmented based on common Chinese syntax; custom segmentation for Chinese content is not supported. Non-Chinese characters in logs are segmented using the token separators specified in the parameter. Disabled: Logs are segmented using the token separators specified in the parameter.
21259
+ * Wait time to release log file handle. If no new logs are written to the log file within the specified time, the handle for that log file is released. Unit: seconds. Range: 1–300 seconds. Default: 60 seconds.
19494
21260
  */
19495
- includeChinese: boolean;
21261
+ closeInactive: number;
19496
21262
  /**
19497
- * Create indexes for all fields with text values in the JSON field.
21263
+ * Release the file handle after the log file is removed. Default is false.
19498
21264
  */
19499
- indexAll: boolean;
21265
+ closeRemoved: boolean;
19500
21266
  /**
19501
- * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
21267
+ * Release the file handle after the log file is renamed. Default is false.
19502
21268
  */
19503
- indexSqlAll: boolean;
21269
+ closeRenamed: boolean;
19504
21270
  /**
19505
- * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
21271
+ * Maximum monitoring duration for LogCollector log files, in seconds. The default is 0 seconds, meaning LogCollector does not limit the monitoring duration for log files. Timing starts when LogCollector begins monitoring the log file. Once the specified duration is exceeded, LogCollector immediately releases the file handle and stops monitoring, regardless of whether the log file has been fully read.
19506
21272
  */
19507
- sqlFlag: boolean;
21273
+ closeTimeout: number;
19508
21274
  /**
19509
- * Field type. Currently supports long, double, text, and json. The long and double types do not support configuring delimiters, including Chinese, or case sensitivity. Only the json type supports further configuration of JsonKeys subfields.
21275
+ * Maximum wait time when LogCollector does not detect a line break, in seconds. The default is 5s. Timing starts when LogCollector begins reading the log file. If no line break is detected within the specified time, LogCollector sends the logs in the buffer. If the file write interval is long, a complete log entry may be split into two parts and written separately. Adjust this parameter based on your log write interval.
19510
21276
  */
19511
- valueType: string;
21277
+ noLineTerminatorEofMaxTime: number;
19512
21278
  }
19513
- interface IndexUserInnerKeyValue {
21279
+ interface RuleUserDefineRuleField {
19514
21280
  /**
19515
- * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
21281
+ * Key.
19516
21282
  */
19517
21283
  key: string;
19518
21284
  /**
19519
- * Field description information required for configuring key-value indexes.
21285
+ * Value.
19520
21286
  */
19521
- value: outputs.tls.IndexUserInnerKeyValueValue;
21287
+ val: string;
19522
21288
  }
19523
- interface IndexUserInnerKeyValueValue {
21289
+ interface RuleUserDefineRuleParsePathRule {
19524
21290
  /**
19525
- * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
21291
+ * Field name list. The log service uses a regular expression (Regex) to parse the path sample (PathSample) into multiple fields. Keys specify the name of each field. You can configure up to 100 field names. Field names cannot be empty or duplicated.
19526
21292
  */
19527
- autoIndexFlag: boolean;
21293
+ keys: string[];
19528
21294
  /**
19529
- * Whether to distinguish case. Default is false.
21295
+ * Sample collection path for actual scenarios. The sample collection path must be an absolute path. Wildcards *, ?, ** are not allowed in the path sample.
19530
21296
  */
19531
- caseSensitive: boolean;
21297
+ pathSample: string;
19532
21298
  /**
19533
- * Token separators for the field. Default is empty (""). Each character in the string represents a token separator. Length: 0–256 bytes. If the length is 0, segmentation is disabled. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
21299
+ * Regular expression used to extract the path field. Must match the sample collection path, otherwise extraction will fail
19534
21300
  */
19535
- delimiter: string;
21301
+ regex: string;
21302
+ }
21303
+ interface RuleUserDefineRulePlugin {
19536
21304
  /**
19537
- * When searching, determines whether to segment Chinese log content according to Chinese syntax. Enabled: Chinese characters in logs are segmented based on common Chinese syntax; custom segmentation for Chinese content is not supported. Non-Chinese characters in logs are segmented using the token separators specified in the parameter. Disabled: Logs are segmented using the token separators specified in the parameter.
21305
+ * LogCollector plugin. For the list of supported plugins and parameter descriptions, see LogCollector plugin overview.
19538
21306
  */
19539
- includeChinese: boolean;
21307
+ processors: string;
21308
+ }
21309
+ interface RuleUserDefineRuleShardHashKey {
19540
21310
  /**
19541
- * Create indexes for all fields with text values in the JSON field.
21311
+ * HashKey of the log group, used to specify the shard to which the current log group will be written. The value range for this parameter is [00000000000000000000000000000000-ffffffffffffffffffffffffffffffff).
19542
21312
  */
19543
- indexAll: boolean;
21313
+ hashKey: string;
21314
+ }
21315
+ interface ScheduleSqlTaskRequestCycle {
19544
21316
  /**
19545
- * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
21317
+ * Cron expression, with a minimum granularity of minutes, using 24-hour format. For example, 0 18 * * * means execution at 18:00 every day.
19546
21318
  */
19547
- indexSqlAll: boolean;
19548
- jsonKeys: outputs.tls.IndexUserInnerKeyValueValueJsonKey[];
21319
+ cronTab: string;
19549
21320
  /**
19550
- * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
21321
+ * If Type is set to Cron, you must also set the time zone.
19551
21322
  */
19552
- sqlFlag: boolean;
21323
+ cronTimeZone: string;
19553
21324
  /**
19554
- * Field type. Currently supports long, double, text, and json. The long and double types do not support configuring delimiters, including Chinese, or case sensitivity. Only the json type supports further configuration of JsonKeys subfields.
21325
+ * Scheduling period or the time point for periodic execution (minutes from 00:00). Range: 1–1440 minutes.
19555
21326
  */
19556
- valueType: string;
21327
+ time: number;
21328
+ /**
21329
+ * Scheduling period type. Options: Period, Fixed, Cron.
21330
+ */
21331
+ type: string;
19557
21332
  }
19558
- interface IndexUserInnerKeyValueValueJsonKey {
21333
+ interface ShipperContentInfo {
19559
21334
  /**
19560
- * Specify the field names for key-value indexing. Up to 100 fields can be added. Only letters, numbers, spaces, underscores (_), hyphens (-), and slashes (/) are supported, and field names cannot start or end with a space. Each key name must be unique within the same index. Length: 1–128 characters. For subfields in JSON-type key-value indexes, use a dot (.) to indicate the hierarchy between JSON fields. For example, if the JSON field 'namelist' contains a text-type subfield 'totalcount' and a JSON-type subfield 'info', and 'info' contains the field 'name', the field names should be configured as 'totalcount' and 'info.name' respectively.
21335
+ * CSV format log content configuration.
19561
21336
  */
19562
- key: string;
21337
+ csvInfo: outputs.tls.ShipperContentInfoCsvInfo;
19563
21338
  /**
19564
- * Field description information required for configuring key-value indexes.
21339
+ * Log content parsing format. Delivery to TOS supports json, jsonl, csv; delivery to Kafka supports original, json.
19565
21340
  */
19566
- value: outputs.tls.IndexUserInnerKeyValueValueJsonKeyValue;
21341
+ format: string;
21342
+ /**
21343
+ * JSON/JSONL format log content configuration.
21344
+ */
21345
+ jsonInfo: outputs.tls.ShipperContentInfoJsonInfo;
19567
21346
  }
19568
- interface IndexUserInnerKeyValueValueJsonKeyValue {
21347
+ interface ShipperContentInfoCsvInfo {
19569
21348
  /**
19570
- * Indicates whether the index was added automatically. true: The index was added automatically. false: The index was not added automatically.
21349
+ * Delimiter. Supports comma, tab, pipe, semicolon, space.
19571
21350
  */
19572
- autoIndexFlag: boolean;
21351
+ delimiter: string;
19573
21352
  /**
19574
- * Whether to distinguish case. Default is false.
21353
+ * Escape character. When field content contains a delimiter, use an escape character to enclose it. Supports single quote, double quote, or empty character.
19575
21354
  */
19576
- caseSensitive: boolean;
21355
+ escapeChar: string;
19577
21356
  /**
19578
- * Token separators for the field. Default is empty (""). Each character in the string represents a token separator. Length: 0–256 bytes. If the length is 0, segmentation is disabled. Only one or more of the following characters are supported: letters, numbers, and !@#%^&*()-_=\\"', <>/?|;:\ \r[]{}. . Supports configuring both Chinese characters and token separators simultaneously.
21357
+ * Fields to be delivered. Supports letters, numbers, and _-./. Cannot start with an underscore. Length: 1–128.
19579
21358
  */
19580
- delimiter: string;
21359
+ keys: string[];
19581
21360
  /**
19582
- * When searching, determines whether to segment Chinese log content according to Chinese syntax. Enabled: Chinese characters in logs are segmented based on common Chinese syntax; custom segmentation for Chinese content is not supported. Non-Chinese characters in logs are segmented using the token separators specified in the parameter. Disabled: Logs are segmented using the token separators specified in the parameter.
21361
+ * Content for invalid field padding. Length: 0–128.
19583
21362
  */
19584
- includeChinese: boolean;
21363
+ nonFieldContent: string;
19585
21364
  /**
19586
- * Create indexes for all fields with text values in the JSON field.
21365
+ * Whether to print the key in the first row.
19587
21366
  */
19588
- indexAll: boolean;
21367
+ printHeader: boolean;
21368
+ }
21369
+ interface ShipperContentInfoJsonInfo {
19589
21370
  /**
19590
- * Enable automatic indexing and statistics for JSON fields. true: Enable automatic indexing and statistics. false: Disable automatic indexing and statistics.
21371
+ * Enable flag.
19591
21372
  */
19592
- indexSqlAll: boolean;
21373
+ enable: boolean;
19593
21374
  /**
19594
- * Whether analysis is enabled for the field. Default is false. After enabling statistical analysis, you can configure token delimiters and whether to include Chinese content.
21375
+ * Whether to enable escaping. Must be set to true.
19595
21376
  */
19596
- sqlFlag: boolean;
21377
+ escape: boolean;
19597
21378
  /**
19598
- * Field type. Currently supports long, double, text, and json. The long and double types do not support configuring delimiters, including Chinese, or case sensitivity. Only the json type supports further configuration of JsonKeys subfields.
21379
+ * Delivery field list; if not configured, all fields will be delivered. When delivering in JSON/JSONL format, if this parameter is not set, all fields will be delivered, including **content** (required), **source**, **path**, **time**, **image_name**, **container_name**, **pod_name**, **pod_uid**, namespace, **tag****client_ip**, and **tag****receive_time**.
19599
21380
  */
19600
- valueType: string;
21381
+ keys: string[];
19601
21382
  }
19602
- interface ProjectTag {
21383
+ interface ShipperKafkaShipperInfo {
19603
21384
  /**
19604
- * Tag key
21385
+ * Compression format; supports snappy, gzip, lz4, none.
19605
21386
  */
19606
- key: string;
21387
+ compress: string;
19607
21388
  /**
19608
- * Tag value
21389
+ * Delivery end time, in milliseconds. If not set, delivery continues indefinitely. Note: Milliseconds will be truncated. For example, if 1776761323455 is entered, it will become 1776761323000; if 1776761323 is entered, it will become 1776761323000.
19609
21390
  */
19610
- value: string;
21391
+ endTime: number;
21392
+ /**
21393
+ * Kafka instance.
21394
+ */
21395
+ instance: string;
21396
+ /**
21397
+ * Kafka topic name. All log data delivered through this configuration will be sent to this topic.
21398
+ */
21399
+ kafkaTopic: string;
21400
+ /**
21401
+ * Start time. If not configured, defaults to the current time. Note: Millisecond data will be ignored. For example, if 1776761323455 is entered, the result will be 1776761323000; if 1776761323 is entered, the result will be 1776761323000.
21402
+ */
21403
+ startTime: number;
19611
21404
  }
19612
- interface ScheduleSqlTaskRequestCycle {
21405
+ interface ShipperTosShipperInfo {
19613
21406
  /**
19614
- * Cron expression, with a minimum granularity of minutes, using 24-hour format. For example, 0 18 * * * means execution at 18:00 every day.
21407
+ * Select a TOS bucket. Must be in the same region as the source log topic. Can only contain numbers, hyphens (-), and letters a–z. Must start and end with a number or letter. Length: 3–63 characters.
19615
21408
  */
19616
- cronTab: string;
21409
+ bucket: string;
19617
21410
  /**
19618
- * If Type is set to Cron, you must also set the time zone.
21411
+ * Compression format; supports snappy, gzip, lz4, none.
19619
21412
  */
19620
- cronTimeZone: string;
21413
+ compress: string;
19621
21414
  /**
19622
- * Scheduling period or the time point for periodic execution (minutes from 00:00). Range: 11440 minutes.
21415
+ * Delivery interval in seconds, range: 300900.
19623
21416
  */
19624
- time: number;
21417
+ interval: number;
19625
21418
  /**
19626
- * Scheduling period type. Options: Period, Fixed, Cron.
21419
+ * Maximum raw file size per partition for delivery, in MiB. Range: 5–256.
19627
21420
  */
19628
- type: string;
21421
+ maxSize: number;
21422
+ /**
21423
+ * Partition rule for log delivery. Subdirectory naming format supports strftime syntax. Default: %Y/%m/%d/%H/%M.
21424
+ */
21425
+ partitionFormat: string;
21426
+ /**
21427
+ * Top-level directory name for the bucket. Cannot start with / or \, and cannot use consecutive /. Cannot use .. as a folder name. Duplicate names are not allowed within the same bucket.
21428
+ */
21429
+ prefix: string;
19629
21430
  }
19630
21431
  interface TopicTag {
19631
21432
  /**
@@ -19686,6 +21487,52 @@ export declare namespace tos {
19686
21487
  */
19687
21488
  ownerId: string;
19688
21489
  }
21490
+ interface BucketInventoryDestination {
21491
+ /**
21492
+ * Bucket information related to the inventory files.
21493
+ */
21494
+ tosBucketDestination: outputs.tos.BucketInventoryDestinationTosBucketDestination;
21495
+ }
21496
+ interface BucketInventoryDestinationTosBucketDestination {
21497
+ /**
21498
+ * Account ID of the bucket owner.
21499
+ */
21500
+ accountId: string;
21501
+ /**
21502
+ * Specify the bucket to store the inventory files.
21503
+ */
21504
+ bucket: string;
21505
+ /**
21506
+ * Inventory file format. Value is CSV, meaning the inventory file is in CSV format compressed with GZIP.
21507
+ */
21508
+ format: string;
21509
+ /**
21510
+ * Prefix for the storage path of inventory files. By default, files are saved under tos*bucket*inventory/sourceBucketName/inventoryId/YYYY-MM-DDTHH-MMZ/files in the target bucket.
21511
+ */
21512
+ prefix: string;
21513
+ /**
21514
+ * Role name used to grant permission to read all files from the source bucket and write files to the target bucket. The role must have TOS read/write and service access permissions. You can also use the default TOS role TosArchiveTOSInventory.
21515
+ */
21516
+ role: string;
21517
+ }
21518
+ interface BucketInventoryFilter {
21519
+ /**
21520
+ * Prefix matching information for exported files. If not set, an inventory of all objects in the bucket is generated by default.
21521
+ */
21522
+ prefix: string;
21523
+ }
21524
+ interface BucketInventoryOptionalFields {
21525
+ /**
21526
+ * Information about exported inventory files.
21527
+ */
21528
+ fields: string[];
21529
+ }
21530
+ interface BucketInventorySchedule {
21531
+ /**
21532
+ * Export frequency for inventory files. Options: Daily: export inventory files daily. Weekly: export inventory files weekly. Once: export inventory files once.
21533
+ */
21534
+ frequency: string;
21535
+ }
19689
21536
  interface BucketLifecycleConfig {
19690
21537
  /**
19691
21538
  * Specify expiration attributes for unmerged multipart tasks (fragments)
@@ -19858,6 +21705,52 @@ export declare namespace tos {
19858
21705
  */
19859
21706
  ownerId: string;
19860
21707
  }
21708
+ interface GetBucketInventoryDestination {
21709
+ /**
21710
+ * Bucket information related to the inventory files.
21711
+ */
21712
+ tosBucketDestination: outputs.tos.GetBucketInventoryDestinationTosBucketDestination;
21713
+ }
21714
+ interface GetBucketInventoryDestinationTosBucketDestination {
21715
+ /**
21716
+ * Account ID of the bucket owner.
21717
+ */
21718
+ accountId: string;
21719
+ /**
21720
+ * Specify the bucket to store the inventory files.
21721
+ */
21722
+ bucket: string;
21723
+ /**
21724
+ * Inventory file format. Value is CSV, meaning the inventory file is in CSV format compressed with GZIP.
21725
+ */
21726
+ format: string;
21727
+ /**
21728
+ * Prefix for the storage path of inventory files. By default, files are saved under tos*bucket*inventory/sourceBucketName/inventoryId/YYYY-MM-DDTHH-MMZ/files in the target bucket.
21729
+ */
21730
+ prefix: string;
21731
+ /**
21732
+ * Role name used to grant permission to read all files from the source bucket and write files to the target bucket. The role must have TOS read/write and service access permissions. You can also use the default TOS role TosArchiveTOSInventory.
21733
+ */
21734
+ role: string;
21735
+ }
21736
+ interface GetBucketInventoryFilter {
21737
+ /**
21738
+ * Prefix matching information for exported files. If not set, an inventory of all objects in the bucket is generated by default.
21739
+ */
21740
+ prefix: string;
21741
+ }
21742
+ interface GetBucketInventoryOptionalFields {
21743
+ /**
21744
+ * Information about exported inventory files.
21745
+ */
21746
+ fields: string[];
21747
+ }
21748
+ interface GetBucketInventorySchedule {
21749
+ /**
21750
+ * Export frequency for inventory files. Options: Daily: export inventory files daily. Weekly: export inventory files weekly. Once: export inventory files once.
21751
+ */
21752
+ frequency: string;
21753
+ }
19861
21754
  interface GetBucketLifecycleConfig {
19862
21755
  /**
19863
21756
  * Specify expiration attributes for unmerged multipart tasks (fragments)
@@ -20275,14 +22168,6 @@ export declare namespace vedbm {
20275
22168
  * Instance ID.
20276
22169
  */
20277
22170
  instanceId: string;
20278
- /**
20279
- * Instance name.
20280
- */
20281
- instanceName: string;
20282
- /**
20283
- * Instance VPC ID.
20284
- */
20285
- vpc: string;
20286
22171
  }
20287
22172
  interface DatabaseDatabasesPrivilege {
20288
22173
  /**
@@ -21318,6 +23203,38 @@ export declare namespace vepfs {
21318
23203
  */
21319
23204
  value: string;
21320
23205
  }
23206
+ interface GetMountServiceAttachFileSystem {
23207
+ /**
23208
+ * Account ID.
23209
+ */
23210
+ accountId: string;
23211
+ /**
23212
+ * User mount path.
23213
+ */
23214
+ customerPath: string;
23215
+ /**
23216
+ * File system ID.
23217
+ */
23218
+ fileSystemId: string;
23219
+ /**
23220
+ * File system name.
23221
+ */
23222
+ fileSystemName: string;
23223
+ /**
23224
+ * Binding status. Details: Attaching: Attaching. AttachError: Attachment failed. Attached: Attached. Detaching: Detaching. DetachError: Detachment failed.
23225
+ */
23226
+ status: string;
23227
+ }
23228
+ interface GetMountServiceNode {
23229
+ /**
23230
+ * Node initial password.
23231
+ */
23232
+ defaultPassword: string;
23233
+ /**
23234
+ * Node instance ID.
23235
+ */
23236
+ nodeId: string;
23237
+ }
21321
23238
  interface InstanceCapacityInfo {
21322
23239
  /**
21323
23240
  * Total file system capacity, unit: TiB. Note: Disk balancing time varies based on cluster capacity, cluster load, and other factors. For clusters above the PiB level, expansion typically requires day-level disk balancing time. The expansion capacity becomes effective (and is billed) only after disk balancing is complete.
@@ -21338,6 +23255,38 @@ export declare namespace vepfs {
21338
23255
  */
21339
23256
  value: string;
21340
23257
  }
23258
+ interface MountServiceAttachFileSystem {
23259
+ /**
23260
+ * Account ID.
23261
+ */
23262
+ accountId: string;
23263
+ /**
23264
+ * User mount path.
23265
+ */
23266
+ customerPath: string;
23267
+ /**
23268
+ * File system ID.
23269
+ */
23270
+ fileSystemId: string;
23271
+ /**
23272
+ * File system name.
23273
+ */
23274
+ fileSystemName: string;
23275
+ /**
23276
+ * Binding status. Details: Attaching: Attaching. AttachError: Attachment failed. Attached: Attached. Detaching: Detaching. DetachError: Detachment failed.
23277
+ */
23278
+ status: string;
23279
+ }
23280
+ interface MountServiceNode {
23281
+ /**
23282
+ * Node initial password.
23283
+ */
23284
+ defaultPassword: string;
23285
+ /**
23286
+ * Node instance ID.
23287
+ */
23288
+ nodeId: string;
23289
+ }
21341
23290
  }
21342
23291
  export declare namespace vke {
21343
23292
  interface AddonStatus {