alibabacloud-sls20201230 5.7.4__tar.gz → 5.8.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (17)
  1. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/ChangeLog.md +3 -0
  2. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/PKG-INFO +1 -1
  3. alibabacloud_sls20201230-5.8.0/alibabacloud_sls20201230/__init__.py +1 -0
  4. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/alibabacloud_sls20201230/client.py +516 -24
  5. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/alibabacloud_sls20201230/models.py +385 -1
  6. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/alibabacloud_sls20201230.egg-info/PKG-INFO +1 -1
  7. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/alibabacloud_sls20201230.egg-info/requires.txt +1 -1
  8. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/setup.py +2 -2
  9. alibabacloud_sls20201230-5.7.4/alibabacloud_sls20201230/__init__.py +0 -1
  10. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/LICENSE +0 -0
  11. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/MANIFEST.in +0 -0
  12. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/README-CN.md +0 -0
  13. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/README.md +0 -0
  14. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/alibabacloud_sls20201230.egg-info/SOURCES.txt +0 -0
  15. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/alibabacloud_sls20201230.egg-info/dependency_links.txt +0 -0
  16. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/alibabacloud_sls20201230.egg-info/top_level.txt +0 -0
  17. {alibabacloud_sls20201230-5.7.4 → alibabacloud_sls20201230-5.8.0}/setup.cfg +0 -0
@@ -1,3 +1,6 @@
+ 2025-06-10 Version: 5.7.4
+ - Generated python 2020-12-30 for Sls.
+
  2025-06-09 Version: 5.7.3
  - Generated python 2020-12-30 for Sls.
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: alibabacloud_sls20201230
- Version: 5.7.4
+ Version: 5.8.0
  Summary: Alibaba Cloud Log Service (20201230) SDK Library for Python
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
  Author: Alibaba Cloud SDK
@@ -0,0 +1 @@
+ __version__ = '5.8.0'
@@ -834,7 +834,7 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> sls_20201230_models.CreateAlertResponse:
  """
- @summary CreateAlert
+ @summary Creates an alert rule in a project.

  @param request: CreateAlertRequest
  @param headers: map
@@ -884,7 +884,7 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> sls_20201230_models.CreateAlertResponse:
  """
- @summary CreateAlert
+ @summary Creates an alert rule in a project.

  @param request: CreateAlertRequest
  @param headers: map
@@ -932,7 +932,7 @@ class Client(OpenApiClient):
  request: sls_20201230_models.CreateAlertRequest,
  ) -> sls_20201230_models.CreateAlertResponse:
  """
- @summary CreateAlert
+ @summary Creates an alert rule in a project.

  @param request: CreateAlertRequest
  @return: CreateAlertResponse
@@ -947,7 +947,7 @@ class Client(OpenApiClient):
  request: sls_20201230_models.CreateAlertRequest,
  ) -> sls_20201230_models.CreateAlertResponse:
  """
- @summary CreateAlert
+ @summary Creates an alert rule in a project.

  @param request: CreateAlertRequest
  @return: CreateAlertResponse
@@ -2479,6 +2479,8 @@ class Client(OpenApiClient):
  body['logSample'] = request.log_sample
  if not UtilClient.is_unset(request.processors):
  body['processors'] = request.processors
+ if not UtilClient.is_unset(request.task):
+ body['task'] = request.task
  req = open_api_models.OpenApiRequest(
  host_map=host_map,
  headers=headers,
@@ -2535,6 +2537,8 @@ class Client(OpenApiClient):
  body['logSample'] = request.log_sample
  if not UtilClient.is_unset(request.processors):
  body['processors'] = request.processors
+ if not UtilClient.is_unset(request.task):
+ body['task'] = request.task
  req = open_api_models.OpenApiRequest(
  host_map=host_map,
  headers=headers,
@@ -4980,6 +4984,106 @@ class Client(OpenApiClient):
  headers = {}
  return await self.delete_config_with_options_async(project, config_name, headers, runtime)

+ def delete_consume_processor_with_options(
+ self,
+ project: str,
+ processor_name: str,
+ headers: Dict[str, str],
+ runtime: util_models.RuntimeOptions,
+ ) -> sls_20201230_models.DeleteConsumeProcessorResponse:
+ """
+ @summary DeleteConsumeProcessor
+
+ @param headers: map
+ @param runtime: runtime options for this request RuntimeOptions
+ @return: DeleteConsumeProcessorResponse
+ """
+ host_map = {}
+ host_map['project'] = project
+ req = open_api_models.OpenApiRequest(
+ host_map=host_map,
+ headers=headers
+ )
+ params = open_api_models.Params(
+ action='DeleteConsumeProcessor',
+ version='2020-12-30',
+ protocol='HTTPS',
+ pathname=f'/consumeprocessors/{processor_name}',
+ method='DELETE',
+ auth_type='AK',
+ style='ROA',
+ req_body_type='json',
+ body_type='none'
+ )
+ return TeaCore.from_map(
+ sls_20201230_models.DeleteConsumeProcessorResponse(),
+ self.execute(params, req, runtime)
+ )
+
+ async def delete_consume_processor_with_options_async(
+ self,
+ project: str,
+ processor_name: str,
+ headers: Dict[str, str],
+ runtime: util_models.RuntimeOptions,
+ ) -> sls_20201230_models.DeleteConsumeProcessorResponse:
+ """
+ @summary DeleteConsumeProcessor
+
+ @param headers: map
+ @param runtime: runtime options for this request RuntimeOptions
+ @return: DeleteConsumeProcessorResponse
+ """
+ host_map = {}
+ host_map['project'] = project
+ req = open_api_models.OpenApiRequest(
+ host_map=host_map,
+ headers=headers
+ )
+ params = open_api_models.Params(
+ action='DeleteConsumeProcessor',
+ version='2020-12-30',
+ protocol='HTTPS',
+ pathname=f'/consumeprocessors/{processor_name}',
+ method='DELETE',
+ auth_type='AK',
+ style='ROA',
+ req_body_type='json',
+ body_type='none'
+ )
+ return TeaCore.from_map(
+ sls_20201230_models.DeleteConsumeProcessorResponse(),
+ await self.execute_async(params, req, runtime)
+ )
+
+ def delete_consume_processor(
+ self,
+ project: str,
+ processor_name: str,
+ ) -> sls_20201230_models.DeleteConsumeProcessorResponse:
+ """
+ @summary DeleteConsumeProcessor
+
+ @return: DeleteConsumeProcessorResponse
+ """
+ runtime = util_models.RuntimeOptions()
+ headers = {}
+ return self.delete_consume_processor_with_options(project, processor_name, headers, runtime)
+
+ async def delete_consume_processor_async(
+ self,
+ project: str,
+ processor_name: str,
+ ) -> sls_20201230_models.DeleteConsumeProcessorResponse:
+ """
+ @summary DeleteConsumeProcessor
+
+ @return: DeleteConsumeProcessorResponse
+ """
+ runtime = util_models.RuntimeOptions()
+ headers = {}
+ return await self.delete_consume_processor_with_options_async(project, processor_name, headers, runtime)
+
  def delete_consumer_group_with_options(
  self,
  project: str,
@@ -8974,6 +9078,106 @@ class Client(OpenApiClient):
  headers = {}
  return await self.get_config_with_options_async(project, config_name, headers, runtime)

+ def get_consume_processor_with_options(
+ self,
+ project: str,
+ processor_name: str,
+ headers: Dict[str, str],
+ runtime: util_models.RuntimeOptions,
+ ) -> sls_20201230_models.GetConsumeProcessorResponse:
+ """
+ @summary Query the details of a consumer processor
+
+ @param headers: map
+ @param runtime: runtime options for this request RuntimeOptions
+ @return: GetConsumeProcessorResponse
+ """
+ host_map = {}
+ host_map['project'] = project
+ req = open_api_models.OpenApiRequest(
+ host_map=host_map,
+ headers=headers
+ )
+ params = open_api_models.Params(
+ action='GetConsumeProcessor',
+ version='2020-12-30',
+ protocol='HTTPS',
+ pathname=f'/consumeprocessors/{processor_name}',
+ method='GET',
+ auth_type='AK',
+ style='ROA',
+ req_body_type='json',
+ body_type='json'
+ )
+ return TeaCore.from_map(
+ sls_20201230_models.GetConsumeProcessorResponse(),
+ self.execute(params, req, runtime)
+ )
+
+ async def get_consume_processor_with_options_async(
+ self,
+ project: str,
+ processor_name: str,
+ headers: Dict[str, str],
+ runtime: util_models.RuntimeOptions,
+ ) -> sls_20201230_models.GetConsumeProcessorResponse:
+ """
+ @summary Query the details of a consumer processor
+
+ @param headers: map
+ @param runtime: runtime options for this request RuntimeOptions
+ @return: GetConsumeProcessorResponse
+ """
+ host_map = {}
+ host_map['project'] = project
+ req = open_api_models.OpenApiRequest(
+ host_map=host_map,
+ headers=headers
+ )
+ params = open_api_models.Params(
+ action='GetConsumeProcessor',
+ version='2020-12-30',
+ protocol='HTTPS',
+ pathname=f'/consumeprocessors/{processor_name}',
+ method='GET',
+ auth_type='AK',
+ style='ROA',
+ req_body_type='json',
+ body_type='json'
+ )
+ return TeaCore.from_map(
+ sls_20201230_models.GetConsumeProcessorResponse(),
+ await self.execute_async(params, req, runtime)
+ )
+
+ def get_consume_processor(
+ self,
+ project: str,
+ processor_name: str,
+ ) -> sls_20201230_models.GetConsumeProcessorResponse:
+ """
+ @summary Query the details of a consumer processor
+
+ @return: GetConsumeProcessorResponse
+ """
+ runtime = util_models.RuntimeOptions()
+ headers = {}
+ return self.get_consume_processor_with_options(project, processor_name, headers, runtime)
+
+ async def get_consume_processor_async(
+ self,
+ project: str,
+ processor_name: str,
+ ) -> sls_20201230_models.GetConsumeProcessorResponse:
+ """
+ @summary Query the details of a consumer processor
+
+ @return: GetConsumeProcessorResponse
+ """
+ runtime = util_models.RuntimeOptions()
+ headers = {}
+ return await self.get_consume_processor_with_options_async(project, processor_name, headers, runtime)
+
  def get_context_logs_with_options(
  self,
  project: str,
@@ -10695,9 +10899,14 @@ class Client(OpenApiClient):
  """
  @summary Queries the raw log data in a Logstore of a project. The returned result contains the raw log data within a specific time range. The returned result is compressed before transmission.

- @description You can call this operation by using Alibaba Cloud SDK for Go, Java, TypeScript, or Python.
- You can call this operation by using Simple Log Service SDK for Go or Java.
- For more information, see [GetLogs](https://help.aliyun.com/document_detail/29029.html).
+ @description You can call this operation by using Simple Log Service SDK for Go, Java, or Python. You can call this operation by using Alibaba Cloud SDK for all programming languages.
+ When you call this operation, take note of the compression method that you use. The supported compression algorithms vary based on the programming language. For more information, see the description of the Accept-Encoding parameter in this topic.
+ For more information, see [GetLogs](https://help.aliyun.com/document_detail/2771313.html).
+ ### Authentication resources
+ The following table describes the authorization information that is required for this operation. You can add the information to the Action element of a Resource Access Management (RAM) policy statement to grant a RAM user or a RAM role the permissions to call this operation.
+ |Action|Resource|
+ |:---|:---|
+ |`log:GetLogStoreLogs`|`acs:log:{#regionId}:{#accountId}:project/{#ProjectName}`|

  @param request: GetLogsV2Request
  @param headers: GetLogsV2Headers
@@ -10767,9 +10976,14 @@ class Client(OpenApiClient):
  """
  @summary Queries the raw log data in a Logstore of a project. The returned result contains the raw log data within a specific time range. The returned result is compressed before transmission.

- @description You can call this operation by using Alibaba Cloud SDK for Go, Java, TypeScript, or Python.
- You can call this operation by using Simple Log Service SDK for Go or Java.
- For more information, see [GetLogs](https://help.aliyun.com/document_detail/29029.html).
+ @description You can call this operation by using Simple Log Service SDK for Go, Java, or Python. You can call this operation by using Alibaba Cloud SDK for all programming languages.
+ When you call this operation, take note of the compression method that you use. The supported compression algorithms vary based on the programming language. For more information, see the description of the Accept-Encoding parameter in this topic.
+ For more information, see [GetLogs](https://help.aliyun.com/document_detail/2771313.html).
+ ### Authentication resources
+ The following table describes the authorization information that is required for this operation. You can add the information to the Action element of a Resource Access Management (RAM) policy statement to grant a RAM user or a RAM role the permissions to call this operation.
+ |Action|Resource|
+ |:---|:---|
+ |`log:GetLogStoreLogs`|`acs:log:{#regionId}:{#accountId}:project/{#ProjectName}`|

  @param request: GetLogsV2Request
  @param headers: GetLogsV2Headers
@@ -10837,9 +11051,14 @@ class Client(OpenApiClient):
  """
  @summary Queries the raw log data in a Logstore of a project. The returned result contains the raw log data within a specific time range. The returned result is compressed before transmission.

- @description You can call this operation by using Alibaba Cloud SDK for Go, Java, TypeScript, or Python.
- You can call this operation by using Simple Log Service SDK for Go or Java.
- For more information, see [GetLogs](https://help.aliyun.com/document_detail/29029.html).
+ @description You can call this operation by using Simple Log Service SDK for Go, Java, or Python. You can call this operation by using Alibaba Cloud SDK for all programming languages.
+ When you call this operation, take note of the compression method that you use. The supported compression algorithms vary based on the programming language. For more information, see the description of the Accept-Encoding parameter in this topic.
+ For more information, see [GetLogs](https://help.aliyun.com/document_detail/2771313.html).
+ ### Authentication resources
+ The following table describes the authorization information that is required for this operation. You can add the information to the Action element of a Resource Access Management (RAM) policy statement to grant a RAM user or a RAM role the permissions to call this operation.
+ |Action|Resource|
+ |:---|:---|
+ |`log:GetLogStoreLogs`|`acs:log:{#regionId}:{#accountId}:project/{#ProjectName}`|

  @param request: GetLogsV2Request
  @return: GetLogsV2Response
@@ -10857,9 +11076,14 @@ class Client(OpenApiClient):
  """
  @summary Queries the raw log data in a Logstore of a project. The returned result contains the raw log data within a specific time range. The returned result is compressed before transmission.

- @description You can call this operation by using Alibaba Cloud SDK for Go, Java, TypeScript, or Python.
- You can call this operation by using Simple Log Service SDK for Go or Java.
- For more information, see [GetLogs](https://help.aliyun.com/document_detail/29029.html).
+ @description You can call this operation by using Simple Log Service SDK for Go, Java, or Python. You can call this operation by using Alibaba Cloud SDK for all programming languages.
+ When you call this operation, take note of the compression method that you use. The supported compression algorithms vary based on the programming language. For more information, see the description of the Accept-Encoding parameter in this topic.
+ For more information, see [GetLogs](https://help.aliyun.com/document_detail/2771313.html).
+ ### Authentication resources
+ The following table describes the authorization information that is required for this operation. You can add the information to the Action element of a Resource Access Management (RAM) policy statement to grant a RAM user or a RAM role the permissions to call this operation.
+ |Action|Resource|
+ |:---|:---|
+ |`log:GetLogStoreLogs`|`acs:log:{#regionId}:{#accountId}:project/{#ProjectName}`|

  @param request: GetLogsV2Request
  @return: GetLogsV2Response
@@ -13884,6 +14108,132 @@ class Client(OpenApiClient):
  headers = {}
  return await self.list_config_with_options_async(project, request, headers, runtime)

+ def list_consume_processors_with_options(
+ self,
+ project: str,
+ request: sls_20201230_models.ListConsumeProcessorsRequest,
+ headers: Dict[str, str],
+ runtime: util_models.RuntimeOptions,
+ ) -> sls_20201230_models.ListConsumeProcessorsResponse:
+ """
+ @summary Queries a list of consumption processors that meet specific conditions.
+
+ @param request: ListConsumeProcessorsRequest
+ @param headers: map
+ @param runtime: runtime options for this request RuntimeOptions
+ @return: ListConsumeProcessorsResponse
+ """
+ UtilClient.validate_model(request)
+ host_map = {}
+ host_map['project'] = project
+ query = {}
+ if not UtilClient.is_unset(request.display_name):
+ query['displayName'] = request.display_name
+ if not UtilClient.is_unset(request.offset):
+ query['offset'] = request.offset
+ if not UtilClient.is_unset(request.processor_name):
+ query['processorName'] = request.processor_name
+ if not UtilClient.is_unset(request.size):
+ query['size'] = request.size
+ req = open_api_models.OpenApiRequest(
+ host_map=host_map,
+ headers=headers,
+ query=OpenApiUtilClient.query(query)
+ )
+ params = open_api_models.Params(
+ action='ListConsumeProcessors',
+ version='2020-12-30',
+ protocol='HTTPS',
+ pathname=f'/consumeprocessors',
+ method='GET',
+ auth_type='AK',
+ style='ROA',
+ req_body_type='json',
+ body_type='json'
+ )
+ return TeaCore.from_map(
+ sls_20201230_models.ListConsumeProcessorsResponse(),
+ self.execute(params, req, runtime)
+ )
+
+ async def list_consume_processors_with_options_async(
+ self,
+ project: str,
+ request: sls_20201230_models.ListConsumeProcessorsRequest,
+ headers: Dict[str, str],
+ runtime: util_models.RuntimeOptions,
+ ) -> sls_20201230_models.ListConsumeProcessorsResponse:
+ """
+ @summary Queries a list of consumption processors that meet specific conditions.
+
+ @param request: ListConsumeProcessorsRequest
+ @param headers: map
+ @param runtime: runtime options for this request RuntimeOptions
+ @return: ListConsumeProcessorsResponse
+ """
+ UtilClient.validate_model(request)
+ host_map = {}
+ host_map['project'] = project
+ query = {}
+ if not UtilClient.is_unset(request.display_name):
+ query['displayName'] = request.display_name
+ if not UtilClient.is_unset(request.offset):
+ query['offset'] = request.offset
+ if not UtilClient.is_unset(request.processor_name):
+ query['processorName'] = request.processor_name
+ if not UtilClient.is_unset(request.size):
+ query['size'] = request.size
+ req = open_api_models.OpenApiRequest(
+ host_map=host_map,
+ headers=headers,
+ query=OpenApiUtilClient.query(query)
+ )
+ params = open_api_models.Params(
+ action='ListConsumeProcessors',
+ version='2020-12-30',
+ protocol='HTTPS',
+ pathname=f'/consumeprocessors',
+ method='GET',
+ auth_type='AK',
+ style='ROA',
+ req_body_type='json',
+ body_type='json'
+ )
+ return TeaCore.from_map(
+ sls_20201230_models.ListConsumeProcessorsResponse(),
+ await self.execute_async(params, req, runtime)
+ )
+
+ def list_consume_processors(
+ self,
+ project: str,
+ request: sls_20201230_models.ListConsumeProcessorsRequest,
+ ) -> sls_20201230_models.ListConsumeProcessorsResponse:
+ """
+ @summary Queries a list of consumption processors that meet specific conditions.
+
+ @param request: ListConsumeProcessorsRequest
+ @return: ListConsumeProcessorsResponse
+ """
+ runtime = util_models.RuntimeOptions()
+ headers = {}
+ return self.list_consume_processors_with_options(project, request, headers, runtime)
+
+ async def list_consume_processors_async(
+ self,
+ project: str,
+ request: sls_20201230_models.ListConsumeProcessorsRequest,
+ ) -> sls_20201230_models.ListConsumeProcessorsResponse:
+ """
+ @summary Queries a list of consumption processors that meet specific conditions.
+
+ @param request: ListConsumeProcessorsRequest
+ @return: ListConsumeProcessorsResponse
+ """
+ runtime = util_models.RuntimeOptions()
+ headers = {}
+ return await self.list_consume_processors_with_options_async(project, request, headers, runtime)
+
  def list_consumer_group_with_options(
  self,
  project: str,
@@ -14891,6 +15241,8 @@ class Client(OpenApiClient):
  query = {}
  if not UtilClient.is_unset(request.config_name):
  query['configName'] = request.config_name
+ if not UtilClient.is_unset(request.config_type):
+ query['configType'] = request.config_type
  if not UtilClient.is_unset(request.logstore_name):
  query['logstoreName'] = request.logstore_name
  if not UtilClient.is_unset(request.offset):
@@ -14941,6 +15293,8 @@ class Client(OpenApiClient):
  query = {}
  if not UtilClient.is_unset(request.config_name):
  query['configName'] = request.config_name
+ if not UtilClient.is_unset(request.config_type):
+ query['configType'] = request.config_type
  if not UtilClient.is_unset(request.logstore_name):
  query['logstoreName'] = request.logstore_name
  if not UtilClient.is_unset(request.offset):
@@ -17166,6 +17520,132 @@ class Client(OpenApiClient):
  headers = {}
  return await self.put_annotation_data_with_options_async(dataset_id, request, headers, runtime)

+ def put_consume_processor_with_options(
+ self,
+ project: str,
+ processor_name: str,
+ request: sls_20201230_models.PutConsumeProcessorRequest,
+ headers: Dict[str, str],
+ runtime: util_models.RuntimeOptions,
+ ) -> sls_20201230_models.PutConsumeProcessorResponse:
+ """
+ @summary Creates or updates a consumption processor.
+
+ @param request: PutConsumeProcessorRequest
+ @param headers: map
+ @param runtime: runtime options for this request RuntimeOptions
+ @return: PutConsumeProcessorResponse
+ """
+ UtilClient.validate_model(request)
+ host_map = {}
+ host_map['project'] = project
+ body = {}
+ if not UtilClient.is_unset(request.configuration):
+ body['configuration'] = request.configuration
+ if not UtilClient.is_unset(request.description):
+ body['description'] = request.description
+ if not UtilClient.is_unset(request.display_name):
+ body['displayName'] = request.display_name
+ req = open_api_models.OpenApiRequest(
+ host_map=host_map,
+ headers=headers,
+ body=OpenApiUtilClient.parse_to_map(body)
+ )
+ params = open_api_models.Params(
+ action='PutConsumeProcessor',
+ version='2020-12-30',
+ protocol='HTTPS',
+ pathname=f'/consumeprocessors/{processor_name}',
+ method='PUT',
+ auth_type='AK',
+ style='ROA',
+ req_body_type='json',
+ body_type='none'
+ )
+ return TeaCore.from_map(
+ sls_20201230_models.PutConsumeProcessorResponse(),
+ self.execute(params, req, runtime)
+ )
+
+ async def put_consume_processor_with_options_async(
+ self,
+ project: str,
+ processor_name: str,
+ request: sls_20201230_models.PutConsumeProcessorRequest,
+ headers: Dict[str, str],
+ runtime: util_models.RuntimeOptions,
+ ) -> sls_20201230_models.PutConsumeProcessorResponse:
+ """
+ @summary Creates or updates a consumption processor.
+
+ @param request: PutConsumeProcessorRequest
+ @param headers: map
+ @param runtime: runtime options for this request RuntimeOptions
+ @return: PutConsumeProcessorResponse
+ """
+ UtilClient.validate_model(request)
+ host_map = {}
+ host_map['project'] = project
+ body = {}
+ if not UtilClient.is_unset(request.configuration):
+ body['configuration'] = request.configuration
+ if not UtilClient.is_unset(request.description):
+ body['description'] = request.description
+ if not UtilClient.is_unset(request.display_name):
+ body['displayName'] = request.display_name
+ req = open_api_models.OpenApiRequest(
+ host_map=host_map,
+ headers=headers,
+ body=OpenApiUtilClient.parse_to_map(body)
+ )
+ params = open_api_models.Params(
+ action='PutConsumeProcessor',
+ version='2020-12-30',
+ protocol='HTTPS',
+ pathname=f'/consumeprocessors/{processor_name}',
+ method='PUT',
+ auth_type='AK',
+ style='ROA',
+ req_body_type='json',
+ body_type='none'
+ )
+ return TeaCore.from_map(
+ sls_20201230_models.PutConsumeProcessorResponse(),
+ await self.execute_async(params, req, runtime)
+ )
+
+ def put_consume_processor(
+ self,
+ project: str,
+ processor_name: str,
+ request: sls_20201230_models.PutConsumeProcessorRequest,
+ ) -> sls_20201230_models.PutConsumeProcessorResponse:
+ """
+ @summary Creates or updates a consumption processor.
+
+ @param request: PutConsumeProcessorRequest
+ @return: PutConsumeProcessorResponse
+ """
+ runtime = util_models.RuntimeOptions()
+ headers = {}
+ return self.put_consume_processor_with_options(project, processor_name, request, headers, runtime)
+
+ async def put_consume_processor_async(
+ self,
+ project: str,
+ processor_name: str,
+ request: sls_20201230_models.PutConsumeProcessorRequest,
+ ) -> sls_20201230_models.PutConsumeProcessorResponse:
+ """
+ @summary Creates or updates a consumption processor.
+
+ @param request: PutConsumeProcessorRequest
+ @return: PutConsumeProcessorResponse
+ """
+ runtime = util_models.RuntimeOptions()
+ headers = {}
+ return await self.put_consume_processor_with_options_async(project, processor_name, request, headers, runtime)
+
  def put_ingest_processor_with_options(
  self,
  project: str,
@@ -17598,7 +18078,7 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> sls_20201230_models.PutProjectTransferAccelerationResponse:
  """
- @summary 设置project传输加速状态
+ @summary Enables or disables transfer acceleration.

  @param request: PutProjectTransferAccelerationRequest
  @param headers: map
@@ -17640,7 +18120,7 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> sls_20201230_models.PutProjectTransferAccelerationResponse:
  """
- @summary 设置project传输加速状态
+ @summary Enables or disables transfer acceleration.

  @param request: PutProjectTransferAccelerationRequest
  @param headers: map
@@ -17680,7 +18160,7 @@ class Client(OpenApiClient):
  request: sls_20201230_models.PutProjectTransferAccelerationRequest,
  ) -> sls_20201230_models.PutProjectTransferAccelerationResponse:
  """
- @summary 设置project传输加速状态
+ @summary Enables or disables transfer acceleration.

  @param request: PutProjectTransferAccelerationRequest
  @return: PutProjectTransferAccelerationResponse
@@ -17695,7 +18175,7 @@ class Client(OpenApiClient):
  request: sls_20201230_models.PutProjectTransferAccelerationRequest,
  ) -> sls_20201230_models.PutProjectTransferAccelerationResponse:
  """
- @summary 设置project传输加速状态
+ @summary Enables or disables transfer acceleration.

  @param request: PutProjectTransferAccelerationRequest
  @return: PutProjectTransferAccelerationResponse
@@ -21510,6 +21990,8 @@ class Client(OpenApiClient):
  body['logSample'] = request.log_sample
  if not UtilClient.is_unset(request.processors):
  body['processors'] = request.processors
+ if not UtilClient.is_unset(request.task):
+ body['task'] = request.task
  req = open_api_models.OpenApiRequest(
  host_map=host_map,
  headers=headers,
@@ -21567,6 +22049,8 @@ class Client(OpenApiClient):
  body['logSample'] = request.log_sample
  if not UtilClient.is_unset(request.processors):
  body['processors'] = request.processors
+ if not UtilClient.is_unset(request.task):
+ body['task'] = request.task
  req = open_api_models.OpenApiRequest(
  host_map=host_map,
  headers=headers,
@@ -23549,7 +24033,9 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> sls_20201230_models.UpsertCollectionPolicyResponse:
  """
- @summary 调用UpsertCollectionPolicy接口创建或更新日志采集规则
+ @summary Creates a log collection policy for a cloud service. This way, logs can be automatically collected from the service.
+
+ @description You must use the Simple Log Service endpoint for the China (Shanghai) or Singapore region to call the operation.

  @param request: UpsertCollectionPolicyRequest
  @param headers: map
@@ -23603,7 +24089,9 @@ class Client(OpenApiClient):
  runtime: util_models.RuntimeOptions,
  ) -> sls_20201230_models.UpsertCollectionPolicyResponse:
  """
- @summary 调用UpsertCollectionPolicy接口创建或更新日志采集规则
+ @summary Creates a log collection policy for a cloud service. This way, logs can be automatically collected from the service.
+
+ @description You must use the Simple Log Service endpoint for the China (Shanghai) or Singapore region to call the operation.

  @param request: UpsertCollectionPolicyRequest
  @param headers: map
@@ -23655,7 +24143,9 @@ class Client(OpenApiClient):
  request: sls_20201230_models.UpsertCollectionPolicyRequest,
  ) -> sls_20201230_models.UpsertCollectionPolicyResponse:
  """
- @summary 调用UpsertCollectionPolicy接口创建或更新日志采集规则
+ @summary Creates a log collection policy for a cloud service. This way, logs can be automatically collected from the service.
+
+ @description You must use the Simple Log Service endpoint for the China (Shanghai) or Singapore region to call the operation.

  @param request: UpsertCollectionPolicyRequest
  @return: UpsertCollectionPolicyResponse
@@ -23669,7 +24159,9 @@ class Client(OpenApiClient):
  request: sls_20201230_models.UpsertCollectionPolicyRequest,
  ) -> sls_20201230_models.UpsertCollectionPolicyResponse:
  """
- @summary 调用UpsertCollectionPolicy接口创建或更新日志采集规则
+ @summary Creates a log collection policy for a cloud service. This way, logs can be automatically collected from the service.
+
+ @description You must use the Simple Log Service endpoint for the China (Shanghai) or Singapore region to call the operation.

  @param request: UpsertCollectionPolicyRequest
  @return: UpsertCollectionPolicyResponse
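
For orientation, here is a minimal, hypothetical usage sketch of the ConsumeProcessor methods added to client.py above. The project name, processor name, endpoint, and credentials are placeholders, and the fields of ConsumeProcessorConfiguration are not part of this diff, so the configuration object is left empty here.

# Hypothetical sketch only; values are placeholders, not from this diff.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_sls20201230.client import Client
from alibabacloud_sls20201230 import models as sls_models

# Assumes the usual Tea OpenAPI bootstrap for this SDK family.
config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    endpoint='cn-hangzhou.log.aliyuncs.com',
)
client = Client(config)

# PUT /consumeprocessors/{processorName}: create or update a consumption processor.
put_request = sls_models.PutConsumeProcessorRequest(
    configuration=sls_models.ConsumeProcessorConfiguration(),  # required; its schema is not shown in this diff
    display_name='demo-processor',                             # required
    description='example consumption processor',
)
client.put_consume_processor('my-project', 'demo-processor', put_request)

# GET /consumeprocessors/{processorName} and GET /consumeprocessors.
print(client.get_consume_processor('my-project', 'demo-processor').body)
listed = client.list_consume_processors(
    'my-project',
    sls_models.ListConsumeProcessorsRequest(offset='0', size='10'),
)
print(listed.body.total)

# DELETE /consumeprocessors/{processorName}.
client.delete_consume_processor('my-project', 'demo-processor')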
@@ -1182,6 +1182,69 @@ class CopilotAction(TeaModel):
  return self

+ class DeleteLogStoreLogsTask(TeaModel):
+ def __init__(
+ self,
+ error_code: int = None,
+ error_message: str = None,
+ from_: int = None,
+ progress: int = None,
+ query: str = None,
+ task_id: str = None,
+ to: int = None,
+ ):
+ self.error_code = error_code
+ self.error_message = error_message
+ self.from_ = from_
+ self.progress = progress
+ self.query = query
+ self.task_id = task_id
+ self.to = to
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.error_code is not None:
+ result['errorCode'] = self.error_code
+ if self.error_message is not None:
+ result['errorMessage'] = self.error_message
+ if self.from_ is not None:
+ result['from'] = self.from_
+ if self.progress is not None:
+ result['progress'] = self.progress
+ if self.query is not None:
+ result['query'] = self.query
+ if self.task_id is not None:
+ result['taskId'] = self.task_id
+ if self.to is not None:
+ result['to'] = self.to
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('errorCode') is not None:
+ self.error_code = m.get('errorCode')
+ if m.get('errorMessage') is not None:
+ self.error_message = m.get('errorMessage')
+ if m.get('from') is not None:
+ self.from_ = m.get('from')
+ if m.get('progress') is not None:
+ self.progress = m.get('progress')
+ if m.get('query') is not None:
+ self.query = m.get('query')
+ if m.get('taskId') is not None:
+ self.task_id = m.get('taskId')
+ if m.get('to') is not None:
+ self.to = m.get('to')
+ return self
+
+
  class ETLConfigurationSink(TeaModel):
  def __init__(
  self,
@@ -2138,6 +2201,7 @@ class LogtailPipelineConfig(TeaModel):
  last_modify_time: int = None,
  log_sample: str = None,
  processors: List[Dict[str, Any]] = None,
+ task: Dict[str, Any] = None,
  ):
  self.aggregators = aggregators
  # This parameter is required.
@@ -2151,6 +2215,7 @@ class LogtailPipelineConfig(TeaModel):
  self.last_modify_time = last_modify_time
  self.log_sample = log_sample
  self.processors = processors
+ self.task = task

  def validate(self):
  pass
@@ -2179,6 +2244,8 @@ class LogtailPipelineConfig(TeaModel):
  result['logSample'] = self.log_sample
  if self.processors is not None:
  result['processors'] = self.processors
+ if self.task is not None:
+ result['task'] = self.task
  return result

  def from_map(self, m: dict = None):
@@ -2201,6 +2268,8 @@ class LogtailPipelineConfig(TeaModel):
  self.log_sample = m.get('logSample')
  if m.get('processors') is not None:
  self.processors = m.get('processors')
+ if m.get('task') is not None:
+ self.task = m.get('task')
  return self

@@ -6736,6 +6805,7 @@ class CreateLogtailPipelineConfigRequest(TeaModel):
  inputs: List[Dict[str, Any]] = None,
  log_sample: str = None,
  processors: List[Dict[str, Any]] = None,
+ task: Dict[str, Any] = None,
  ):
  # The aggregation plug-ins.
  #
@@ -6784,6 +6854,7 @@ class CreateLogtailPipelineConfigRequest(TeaModel):
  # * You must add one of the following Logtail plug-ins for data processing as the first plug-in: Data Parsing (Regex Mode), Data Parsing (Delimiter Mode), Data Parsing (JSON Mode), Data Parsing (NGINX Mode), Data Parsing (Apache Mode), and Data Parsing (IIS Mode).
  # * After you add the first plug-in, you can add one Time Parsing plug-in, one Data Filtering plug-in, and multiple Data Masking plug-ins.
  self.processors = processors
+ self.task = task

  def validate(self):
  pass
@@ -6808,6 +6879,8 @@ class CreateLogtailPipelineConfigRequest(TeaModel):
  result['logSample'] = self.log_sample
  if self.processors is not None:
  result['processors'] = self.processors
+ if self.task is not None:
+ result['task'] = self.task
  return result

  def from_map(self, m: dict = None):
@@ -6826,6 +6899,8 @@ class CreateLogtailPipelineConfigRequest(TeaModel):
  self.log_sample = m.get('logSample')
  if m.get('processors') is not None:
  self.processors = m.get('processors')
+ if m.get('task') is not None:
+ self.task = m.get('task')
  return self

@@ -8384,6 +8459,39 @@ class DeleteConfigResponse(TeaModel):
  return self

+ class DeleteConsumeProcessorResponse(TeaModel):
+ def __init__(
+ self,
+ headers: Dict[str, str] = None,
+ status_code: int = None,
+ ):
+ self.headers = headers
+ self.status_code = status_code
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.headers is not None:
+ result['headers'] = self.headers
+ if self.status_code is not None:
+ result['statusCode'] = self.status_code
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('headers') is not None:
+ self.headers = m.get('headers')
+ if m.get('statusCode') is not None:
+ self.status_code = m.get('statusCode')
+ return self
+
+
  class DeleteConsumerGroupResponse(TeaModel):
  def __init__(
  self,
@@ -10396,6 +10504,47 @@ class GetConfigResponse(TeaModel):
  return self

+ class GetConsumeProcessorResponse(TeaModel):
+ def __init__(
+ self,
+ headers: Dict[str, str] = None,
+ status_code: int = None,
+ body: ConsumeProcessor = None,
+ ):
+ self.headers = headers
+ self.status_code = status_code
+ self.body = body
+
+ def validate(self):
+ if self.body:
+ self.body.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.headers is not None:
+ result['headers'] = self.headers
+ if self.status_code is not None:
+ result['statusCode'] = self.status_code
+ if self.body is not None:
+ result['body'] = self.body.to_map()
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('headers') is not None:
+ self.headers = m.get('headers')
+ if m.get('statusCode') is not None:
+ self.status_code = m.get('statusCode')
+ if m.get('body') is not None:
+ temp_model = ConsumeProcessor()
+ self.body = temp_model.from_map(m['body'])
+ return self
+
+
  class GetContextLogsRequest(TeaModel):
  def __init__(
  self,
@@ -12110,7 +12259,7 @@ class GetLogsV2ResponseBodyMeta(TeaModel):
  self.phrase_query_info = phrase_query_info
  # The number of logs that are processed in the request.
  self.processed_bytes = processed_bytes
- # The number of rows that are processed in the request.
+ # The number of rows that are processed in the query.
  self.processed_rows = processed_rows
  # Indicates whether the query result is complete. Valid values:
  #
@@ -14925,6 +15074,146 @@ class ListConfigResponse(TeaModel):
  return self

+ class ListConsumeProcessorsRequest(TeaModel):
+ def __init__(
+ self,
+ display_name: str = None,
+ offset: str = None,
+ processor_name: str = None,
+ size: str = None,
+ ):
+ # The display name of the consumption processor.
+ self.display_name = display_name
+ # The offset. Default value: 0.
+ self.offset = offset
+ # The identifier of the consumption processor.
+ self.processor_name = processor_name
+ # The number of entries. Default value: 200.
+ self.size = size
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.display_name is not None:
+ result['displayName'] = self.display_name
+ if self.offset is not None:
+ result['offset'] = self.offset
+ if self.processor_name is not None:
+ result['processorName'] = self.processor_name
+ if self.size is not None:
+ result['size'] = self.size
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('displayName') is not None:
+ self.display_name = m.get('displayName')
+ if m.get('offset') is not None:
+ self.offset = m.get('offset')
+ if m.get('processorName') is not None:
+ self.processor_name = m.get('processorName')
+ if m.get('size') is not None:
+ self.size = m.get('size')
+ return self
+
+
+ class ListConsumeProcessorsResponseBody(TeaModel):
+ def __init__(
+ self,
+ count: int = None,
+ processors: List[ConsumeProcessor] = None,
+ total: int = None,
+ ):
+ # The number of consumption processors for offset.
+ self.count = count
+ # The list of consumption processor information.
+ self.processors = processors
+ # The total number of consumption processors that meet the query conditions.
+ self.total = total
+
+ def validate(self):
+ if self.processors:
+ for k in self.processors:
+ if k:
+ k.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.count is not None:
+ result['count'] = self.count
+ result['processors'] = []
+ if self.processors is not None:
+ for k in self.processors:
+ result['processors'].append(k.to_map() if k else None)
+ if self.total is not None:
+ result['total'] = self.total
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('count') is not None:
+ self.count = m.get('count')
+ self.processors = []
+ if m.get('processors') is not None:
+ for k in m.get('processors'):
+ temp_model = ConsumeProcessor()
+ self.processors.append(temp_model.from_map(k))
+ if m.get('total') is not None:
+ self.total = m.get('total')
+ return self
+
+
+ class ListConsumeProcessorsResponse(TeaModel):
+ def __init__(
+ self,
+ headers: Dict[str, str] = None,
+ status_code: int = None,
+ body: ListConsumeProcessorsResponseBody = None,
+ ):
+ self.headers = headers
+ self.status_code = status_code
+ self.body = body
+
+ def validate(self):
+ if self.body:
+ self.body.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.headers is not None:
+ result['headers'] = self.headers
+ if self.status_code is not None:
+ result['statusCode'] = self.status_code
+ if self.body is not None:
+ result['body'] = self.body.to_map()
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('headers') is not None:
+ self.headers = m.get('headers')
+ if m.get('statusCode') is not None:
+ self.status_code = m.get('statusCode')
+ if m.get('body') is not None:
+ temp_model = ListConsumeProcessorsResponseBody()
+ self.body = temp_model.from_map(m['body'])
+ return self
+
+
  class ListConsumerGroupResponse(TeaModel):
  def __init__(
  self,
@@ -16196,12 +16485,14 @@ class ListLogtailPipelineConfigRequest(TeaModel):
  def __init__(
  self,
  config_name: str = None,
+ config_type: str = None,
  logstore_name: str = None,
  offset: int = None,
  size: int = None,
  ):
  # The name of the Logtail pipeline configuration.
  self.config_name = config_name
+ self.config_type = config_type
  # The name of the Logstore.
  self.logstore_name = logstore_name
  # The line from which the query starts.
@@ -16220,6 +16511,8 @@ class ListLogtailPipelineConfigRequest(TeaModel):
  result = dict()
  if self.config_name is not None:
  result['configName'] = self.config_name
+ if self.config_type is not None:
+ result['configType'] = self.config_type
  if self.logstore_name is not None:
  result['logstoreName'] = self.logstore_name
  if self.offset is not None:
@@ -16232,6 +16525,8 @@ class ListLogtailPipelineConfigRequest(TeaModel):
  m = m or dict()
  if m.get('configName') is not None:
  self.config_name = m.get('configName')
+ if m.get('configType') is not None:
+ self.config_type = m.get('configType')
  if m.get('logstoreName') is not None:
  self.logstore_name = m.get('logstoreName')
  if m.get('offset') is not None:
@@ -18379,6 +18674,87 @@ class PutAnnotationDataResponse(TeaModel):
  return self

+ class PutConsumeProcessorRequest(TeaModel):
+ def __init__(
+ self,
+ configuration: ConsumeProcessorConfiguration = None,
+ description: str = None,
+ display_name: str = None,
+ ):
+ # Consumption processor configuration.
+ #
+ # This parameter is required.
+ self.configuration = configuration
+ # The description.
+ self.description = description
+ # The display name.
+ #
+ # This parameter is required.
+ self.display_name = display_name
+
+ def validate(self):
+ if self.configuration:
+ self.configuration.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.configuration is not None:
+ result['configuration'] = self.configuration.to_map()
+ if self.description is not None:
+ result['description'] = self.description
+ if self.display_name is not None:
+ result['displayName'] = self.display_name
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('configuration') is not None:
+ temp_model = ConsumeProcessorConfiguration()
+ self.configuration = temp_model.from_map(m['configuration'])
+ if m.get('description') is not None:
+ self.description = m.get('description')
+ if m.get('displayName') is not None:
+ self.display_name = m.get('displayName')
+ return self
+
+
+ class PutConsumeProcessorResponse(TeaModel):
+ def __init__(
+ self,
+ headers: Dict[str, str] = None,
+ status_code: int = None,
+ ):
+ self.headers = headers
+ self.status_code = status_code
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.headers is not None:
+ result['headers'] = self.headers
+ if self.status_code is not None:
+ result['statusCode'] = self.status_code
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('headers') is not None:
+ self.headers = m.get('headers')
+ if m.get('statusCode') is not None:
+ self.status_code = m.get('statusCode')
+ return self
+
+
  class PutIngestProcessorRequest(TeaModel):
  def __init__(
  self,
@@ -18625,6 +19001,8 @@ class PutProjectTransferAccelerationRequest(TeaModel):
  self,
  enabled: bool = None,
  ):
+ # Whether to enable transfer acceleration.
+ #
  # This parameter is required.
  self.enabled = enabled

@@ -20825,6 +21203,7 @@ class UpdateLogtailPipelineConfigRequest(TeaModel):
  inputs: List[Dict[str, Any]] = None,
  log_sample: str = None,
  processors: List[Dict[str, Any]] = None,
+ task: Dict[str, Any] = None,
  ):
  # The aggregation plug-ins.
  #
@@ -20867,6 +21246,7 @@ class UpdateLogtailPipelineConfigRequest(TeaModel):
  # * You must add one of the following Logtail plug-ins for data processing as the first plug-in: Data Parsing (Regex Mode), Data Parsing (Delimiter Mode), Data Parsing (JSON Mode), Data Parsing (NGINX Mode), Data Parsing (Apache Mode), and Data Parsing (IIS Mode).
  # * After you add the first plug-in, you can add one Time Parsing plug-in, one Data Filtering plug-in, and multiple Data Masking plug-ins.
  self.processors = processors
+ self.task = task

  def validate(self):
  pass
@@ -20891,6 +21271,8 @@ class UpdateLogtailPipelineConfigRequest(TeaModel):
  result['logSample'] = self.log_sample
  if self.processors is not None:
  result['processors'] = self.processors
+ if self.task is not None:
+ result['task'] = self.task
  return result

  def from_map(self, m: dict = None):
@@ -20909,6 +21291,8 @@ class UpdateLogtailPipelineConfigRequest(TeaModel):
  self.log_sample = m.get('logSample')
  if m.get('processors') is not None:
  self.processors = m.get('processors')
+ if m.get('task') is not None:
+ self.task = m.get('task')
  return self

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: alibabacloud-sls20201230
- Version: 5.7.4
+ Version: 5.8.0
  Summary: Alibaba Cloud Log Service (20201230) SDK Library for Python
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
  Author: Alibaba Cloud SDK
@@ -1,4 +1,4 @@
  alibabacloud_gateway_sls<1.0.0,>=0.3.0
  alibabacloud_openapi_util<1.0.0,>=0.2.2
- alibabacloud_tea_openapi<1.0.0,>=0.3.15
+ alibabacloud_tea_openapi<1.0.0,>=0.3.16
  alibabacloud_tea_util<1.0.0,>=0.3.13
@@ -24,7 +24,7 @@ from setuptools import setup, find_packages
  """
  setup module for alibabacloud_sls20201230.

- Created on 10/06/2025
+ Created on 10/09/2025

  @author: Alibaba Cloud SDK
  """
@@ -39,7 +39,7 @@ VERSION = __import__(PACKAGE).__version__
  REQUIRES = [
  "alibabacloud_tea_util>=0.3.13, <1.0.0",
  "alibabacloud_gateway_sls>=0.3.0, <1.0.0",
- "alibabacloud_tea_openapi>=0.3.15, <1.0.0",
+ "alibabacloud_tea_openapi>=0.3.16, <1.0.0",
  "alibabacloud_openapi_util>=0.2.2, <1.0.0"
  ]

@@ -1 +0,0 @@
- __version__ = '5.7.4'
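
Finally, the models.py changes in this release add an optional task field to LogtailPipelineConfig, CreateLogtailPipelineConfigRequest, and UpdateLogtailPipelineConfigRequest, along with a DeleteLogStoreLogsTask model. The hypothetical sketch below shows the field being populated; the accepted schema of task is not documented in this diff, so the dict merely mirrors the DeleteLogStoreLogsTask field names, and the other request fields are placeholders following the existing model.

# Hypothetical sketch only: the server-side contract for `task` is not defined in this diff.
from alibabacloud_sls20201230 import models as sls_models

request = sls_models.CreateLogtailPipelineConfigRequest(
    config_name='example-config',                                          # placeholder
    inputs=[{'Type': 'input_file', 'FilePaths': ['/var/log/app/*.log']}],  # placeholder input plug-in
    flushers=[{'Type': 'flusher_sls', 'Logstore': 'example-logstore'}],    # placeholder flusher plug-in
    task={'query': '*', 'from': 1718000000, 'to': 1718003600},             # assumed shape, mirrors DeleteLogStoreLogsTask keys
)
# client.create_logtail_pipeline_config('my-project', request)  # assuming an existing Client instance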