yandexcloud-0.311.0-py3-none-any.whl → yandexcloud-0.313.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yandex/cloud/ai/translate/v2/translation_service_pb2_grpc.py +6 -0
- yandex/cloud/ai/translate/v2/translation_service_pb2_grpc.pyi +12 -3
- yandex/cloud/audittrails/v1/trail_pb2.py +57 -37
- yandex/cloud/audittrails/v1/trail_pb2.pyi +115 -4
- yandex/cloud/audittrails/v1/trail_service_pb2.py +25 -23
- yandex/cloud/audittrails/v1/trail_service_pb2.pyi +22 -6
- yandex/cloud/backup/v1/resource_pb2.py +9 -9
- yandex/cloud/backup/v1/resource_pb2.pyi +4 -0
- yandex/cloud/compute/v1/disk_pb2.py +12 -11
- yandex/cloud/compute/v1/disk_pb2.pyi +11 -2
- yandex/cloud/compute/v1/disk_service_pb2.py +42 -41
- yandex/cloud/compute/v1/disk_service_pb2.pyi +11 -2
- yandex/cloud/compute/v1/hardware_generation_pb2.py +32 -0
- yandex/cloud/compute/v1/hardware_generation_pb2.pyi +96 -0
- yandex/cloud/compute/v1/hardware_generation_pb2_grpc.py +4 -0
- yandex/cloud/compute/v1/hardware_generation_pb2_grpc.pyi +17 -0
- yandex/cloud/compute/v1/image_pb2.py +12 -11
- yandex/cloud/compute/v1/image_pb2.pyi +11 -2
- yandex/cloud/compute/v1/image_service_pb2.py +32 -31
- yandex/cloud/compute/v1/image_service_pb2.pyi +11 -2
- yandex/cloud/compute/v1/instance_pb2.py +54 -53
- yandex/cloud/compute/v1/instance_pb2.pyi +9 -2
- yandex/cloud/compute/v1/snapshot_pb2.py +8 -7
- yandex/cloud/compute/v1/snapshot_pb2.pyi +11 -2
- yandex/cloud/compute/v1/snapshot_service_pb2.py +30 -29
- yandex/cloud/compute/v1/snapshot_service_pb2.pyi +11 -1
- yandex/cloud/iam/v1/service_account_pb2.py +4 -4
- yandex/cloud/iam/v1/service_account_pb2.pyi +8 -2
- yandex/cloud/iam/v1/workload/__init__.py +0 -0
- yandex/cloud/iam/v1/workload/federated_credential_pb2.py +27 -0
- yandex/cloud/iam/v1/workload/federated_credential_pb2.pyi +49 -0
- yandex/cloud/iam/v1/workload/federated_credential_pb2_grpc.py +4 -0
- yandex/cloud/iam/v1/workload/federated_credential_pb2_grpc.pyi +17 -0
- yandex/cloud/iam/v1/workload/federated_credential_service_pb2.py +69 -0
- yandex/cloud/iam/v1/workload/federated_credential_service_pb2.pyi +170 -0
- yandex/cloud/iam/v1/workload/federated_credential_service_pb2_grpc.py +176 -0
- yandex/cloud/iam/v1/workload/federated_credential_service_pb2_grpc.pyi +121 -0
- yandex/cloud/iam/v1/workload/oidc/__init__.py +0 -0
- yandex/cloud/iam/v1/workload/oidc/federation_pb2.py +31 -0
- yandex/cloud/iam/v1/workload/oidc/federation_pb2.pyi +95 -0
- yandex/cloud/iam/v1/workload/oidc/federation_pb2_grpc.py +4 -0
- yandex/cloud/iam/v1/workload/oidc/federation_pb2_grpc.pyi +17 -0
- yandex/cloud/iam/v1/workload/oidc/federation_service_pb2.py +100 -0
- yandex/cloud/iam/v1/workload/oidc/federation_service_pb2.pyi +304 -0
- yandex/cloud/iam/v1/workload/oidc/federation_service_pb2_grpc.py +210 -0
- yandex/cloud/iam/v1/workload/oidc/federation_service_pb2_grpc.pyi +141 -0
- yandex/cloud/mdb/clickhouse/v1/config/clickhouse_pb2.py +92 -60
- yandex/cloud/mdb/clickhouse/v1/config/clickhouse_pb2.pyi +304 -10
- yandex/cloud/mdb/clickhouse/v1/user_pb2.py +34 -26
- yandex/cloud/mdb/clickhouse/v1/user_pb2.pyi +84 -2
- yandex/cloud/serverless/containers/v1/container_pb2.py +44 -26
- yandex/cloud/serverless/containers/v1/container_pb2.pyi +99 -2
- yandex/cloud/serverless/containers/v1/container_service_pb2.py +18 -18
- yandex/cloud/serverless/containers/v1/container_service_pb2.pyi +8 -2
- yandex/cloud/serverless/functions/v1/function_pb2.py +46 -28
- yandex/cloud/serverless/functions/v1/function_pb2.pyi +98 -1
- yandex/cloud/serverless/functions/v1/function_service_pb2.py +36 -36
- yandex/cloud/serverless/functions/v1/function_service_pb2.pyi +7 -1
- {yandexcloud-0.311.0.dist-info → yandexcloud-0.313.0.dist-info}/METADATA +1 -1
- {yandexcloud-0.311.0.dist-info → yandexcloud-0.313.0.dist-info}/RECORD +64 -42
- {yandexcloud-0.311.0.dist-info → yandexcloud-0.313.0.dist-info}/AUTHORS +0 -0
- {yandexcloud-0.311.0.dist-info → yandexcloud-0.313.0.dist-info}/LICENSE +0 -0
- {yandexcloud-0.311.0.dist-info → yandexcloud-0.313.0.dist-info}/WHEEL +0 -0
- {yandexcloud-0.311.0.dist-info → yandexcloud-0.313.0.dist-info}/top_level.txt +0 -0
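Beyond the ClickHouse config stubs diffed below, the headline addition is the new `yandex.cloud.iam.v1.workload` package (workload identity federation: OIDC federations and federated credentials). A minimal sketch of wiring one of the new services into the SDK — note that `FederationServiceStub` is an assumption based on the usual `<Service>Stub` naming gRPC codegen produces for `federation_service_pb2_grpc`, not something visible in this diff:

```python
import yandexcloud
from yandex.cloud.iam.v1.workload.oidc import federation_service_pb2_grpc

# Token value is a placeholder; yandexcloud.SDK also accepts service account keys.
sdk = yandexcloud.SDK(token="<OAuth-or-IAM-token>")
# sdk.client() instantiates a gRPC stub over the SDK's authenticated channel.
federation_client = sdk.client(federation_service_pb2_grpc.FederationServiceStub)
```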
The rendered diff below is for `yandex/cloud/mdb/clickhouse/v1/config/clickhouse_pb2.pyi` (+304 −10), which adds type stubs for a batch of new ClickHouse cluster settings:

```diff
@@ -81,6 +81,10 @@ class ClickhouseConfig(google.protobuf.message.Message):
         MIN_AGE_TO_FORCE_MERGE_SECONDS_FIELD_NUMBER: builtins.int
         MIN_AGE_TO_FORCE_MERGE_ON_PARTITION_ONLY_FIELD_NUMBER: builtins.int
         MERGE_SELECTING_SLEEP_MS_FIELD_NUMBER: builtins.int
+        MERGE_MAX_BLOCK_SIZE_FIELD_NUMBER: builtins.int
+        CHECK_SAMPLE_COLUMN_IS_CORRECT_FIELD_NUMBER: builtins.int
+        MAX_MERGE_SELECTING_SLEEP_MS_FIELD_NUMBER: builtins.int
+        MAX_CLEANUP_DELAY_PERIOD_FIELD_NUMBER: builtins.int
         @property
         def replicated_deduplication_window(self) -> google.protobuf.wrappers_pb2.Int64Value:
             """Number of blocks of hashes to keep in ZooKeeper."""
```
```diff
@@ -186,6 +190,36 @@ class ClickhouseConfig(google.protobuf.message.Message):
             See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#merge_selecting_sleep_ms)
             """

+        @property
+        def merge_max_block_size(self) -> google.protobuf.wrappers_pb2.Int64Value:
+            """The number of rows that are read from the merged parts into memory.
+            Default: 8192
+            See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#merge_max_block_size)
+            """
+
+        @property
+        def check_sample_column_is_correct(self) -> google.protobuf.wrappers_pb2.BoolValue:
+            """Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned [integer types](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint): UInt8, UInt16, UInt32, UInt64.
+            Default: true
+            See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#check_sample_column_is_correct)
+            """
+
+        @property
+        def max_merge_selecting_sleep_ms(self) -> google.protobuf.wrappers_pb2.Int64Value:
+            """Maximum sleep time for merge selecting, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters.
+            Default: 60000
+            Min_version: 23.6
+            See in-depth description in [ClickHouse GitHub](https://github.com/ClickHouse/ClickHouse/blob/4add9db84859bff7410cf934a3904b0414e36e51/src/Storages/MergeTree/MergeTreeSettings.h#L71)
+            """
+
+        @property
+        def max_cleanup_delay_period(self) -> google.protobuf.wrappers_pb2.Int64Value:
+            """Maximum period to clean old queue logs, blocks hashes and parts.
+            Default: 300
+            Min_version: 23.6
+            See in-depth description in [ClickHouse GitHub](https://github.com/ClickHouse/ClickHouse/blob/4add9db84859bff7410cf934a3904b0414e36e51/src/Storages/MergeTree/MergeTreeSettings.h#L142)
+            """
+
         def __init__(
             self,
             *,
```
```diff
@@ -213,9 +247,13 @@ class ClickhouseConfig(google.protobuf.message.Message):
             min_age_to_force_merge_seconds: google.protobuf.wrappers_pb2.Int64Value | None = ...,
             min_age_to_force_merge_on_partition_only: google.protobuf.wrappers_pb2.BoolValue | None = ...,
             merge_selecting_sleep_ms: google.protobuf.wrappers_pb2.Int64Value | None = ...,
+            merge_max_block_size: google.protobuf.wrappers_pb2.Int64Value | None = ...,
+            check_sample_column_is_correct: google.protobuf.wrappers_pb2.BoolValue | None = ...,
+            max_merge_selecting_sleep_ms: google.protobuf.wrappers_pb2.Int64Value | None = ...,
+            max_cleanup_delay_period: google.protobuf.wrappers_pb2.Int64Value | None = ...,
         ) -> None: ...
-        def HasField(self, field_name: typing.Literal["allow_remote_fs_zero_copy_replication", b"allow_remote_fs_zero_copy_replication", "cleanup_delay_period", b"cleanup_delay_period", "inactive_parts_to_delay_insert", b"inactive_parts_to_delay_insert", "inactive_parts_to_throw_insert", b"inactive_parts_to_throw_insert", "max_avg_part_size_for_too_many_parts", b"max_avg_part_size_for_too_many_parts", "max_bytes_to_merge_at_max_space_in_pool", b"max_bytes_to_merge_at_max_space_in_pool", "max_bytes_to_merge_at_min_space_in_pool", b"max_bytes_to_merge_at_min_space_in_pool", "max_number_of_merges_with_ttl_in_pool", b"max_number_of_merges_with_ttl_in_pool", "max_parts_in_total", b"max_parts_in_total", "max_replicated_merges_in_queue", b"max_replicated_merges_in_queue", "merge_selecting_sleep_ms", b"merge_selecting_sleep_ms", "merge_with_recompression_ttl_timeout", b"merge_with_recompression_ttl_timeout", "merge_with_ttl_timeout", b"merge_with_ttl_timeout", "min_age_to_force_merge_on_partition_only", b"min_age_to_force_merge_on_partition_only", "min_age_to_force_merge_seconds", b"min_age_to_force_merge_seconds", "min_bytes_for_wide_part", b"min_bytes_for_wide_part", "min_rows_for_wide_part", b"min_rows_for_wide_part", "number_of_free_entries_in_pool_to_execute_mutation", b"number_of_free_entries_in_pool_to_execute_mutation", "number_of_free_entries_in_pool_to_lower_max_size_of_merge", b"number_of_free_entries_in_pool_to_lower_max_size_of_merge", "parts_to_delay_insert", b"parts_to_delay_insert", "parts_to_throw_insert", b"parts_to_throw_insert", "replicated_deduplication_window", b"replicated_deduplication_window", "replicated_deduplication_window_seconds", b"replicated_deduplication_window_seconds", "ttl_only_drop_parts", b"ttl_only_drop_parts"]) -> builtins.bool: ...
-        def ClearField(self, field_name: typing.Literal["allow_remote_fs_zero_copy_replication", b"allow_remote_fs_zero_copy_replication", "cleanup_delay_period", b"cleanup_delay_period", "inactive_parts_to_delay_insert", b"inactive_parts_to_delay_insert", "inactive_parts_to_throw_insert", b"inactive_parts_to_throw_insert", "max_avg_part_size_for_too_many_parts", b"max_avg_part_size_for_too_many_parts", "max_bytes_to_merge_at_max_space_in_pool", b"max_bytes_to_merge_at_max_space_in_pool", "max_bytes_to_merge_at_min_space_in_pool", b"max_bytes_to_merge_at_min_space_in_pool", "max_number_of_merges_with_ttl_in_pool", b"max_number_of_merges_with_ttl_in_pool", "max_parts_in_total", b"max_parts_in_total", "max_replicated_merges_in_queue", b"max_replicated_merges_in_queue", "merge_selecting_sleep_ms", b"merge_selecting_sleep_ms", "merge_with_recompression_ttl_timeout", b"merge_with_recompression_ttl_timeout", "merge_with_ttl_timeout", b"merge_with_ttl_timeout", "min_age_to_force_merge_on_partition_only", b"min_age_to_force_merge_on_partition_only", "min_age_to_force_merge_seconds", b"min_age_to_force_merge_seconds", "min_bytes_for_wide_part", b"min_bytes_for_wide_part", "min_rows_for_wide_part", b"min_rows_for_wide_part", "number_of_free_entries_in_pool_to_execute_mutation", b"number_of_free_entries_in_pool_to_execute_mutation", "number_of_free_entries_in_pool_to_lower_max_size_of_merge", b"number_of_free_entries_in_pool_to_lower_max_size_of_merge", "parts_to_delay_insert", b"parts_to_delay_insert", "parts_to_throw_insert", b"parts_to_throw_insert", "replicated_deduplication_window", b"replicated_deduplication_window", "replicated_deduplication_window_seconds", b"replicated_deduplication_window_seconds", "ttl_only_drop_parts", b"ttl_only_drop_parts"]) -> None: ...
+        def HasField(self, field_name: typing.Literal["allow_remote_fs_zero_copy_replication", b"allow_remote_fs_zero_copy_replication", "check_sample_column_is_correct", b"check_sample_column_is_correct", "cleanup_delay_period", b"cleanup_delay_period", "inactive_parts_to_delay_insert", b"inactive_parts_to_delay_insert", "inactive_parts_to_throw_insert", b"inactive_parts_to_throw_insert", "max_avg_part_size_for_too_many_parts", b"max_avg_part_size_for_too_many_parts", "max_bytes_to_merge_at_max_space_in_pool", b"max_bytes_to_merge_at_max_space_in_pool", "max_bytes_to_merge_at_min_space_in_pool", b"max_bytes_to_merge_at_min_space_in_pool", "max_cleanup_delay_period", b"max_cleanup_delay_period", "max_merge_selecting_sleep_ms", b"max_merge_selecting_sleep_ms", "max_number_of_merges_with_ttl_in_pool", b"max_number_of_merges_with_ttl_in_pool", "max_parts_in_total", b"max_parts_in_total", "max_replicated_merges_in_queue", b"max_replicated_merges_in_queue", "merge_max_block_size", b"merge_max_block_size", "merge_selecting_sleep_ms", b"merge_selecting_sleep_ms", "merge_with_recompression_ttl_timeout", b"merge_with_recompression_ttl_timeout", "merge_with_ttl_timeout", b"merge_with_ttl_timeout", "min_age_to_force_merge_on_partition_only", b"min_age_to_force_merge_on_partition_only", "min_age_to_force_merge_seconds", b"min_age_to_force_merge_seconds", "min_bytes_for_wide_part", b"min_bytes_for_wide_part", "min_rows_for_wide_part", b"min_rows_for_wide_part", "number_of_free_entries_in_pool_to_execute_mutation", b"number_of_free_entries_in_pool_to_execute_mutation", "number_of_free_entries_in_pool_to_lower_max_size_of_merge", b"number_of_free_entries_in_pool_to_lower_max_size_of_merge", "parts_to_delay_insert", b"parts_to_delay_insert", "parts_to_throw_insert", b"parts_to_throw_insert", "replicated_deduplication_window", b"replicated_deduplication_window", "replicated_deduplication_window_seconds", b"replicated_deduplication_window_seconds", "ttl_only_drop_parts", b"ttl_only_drop_parts"]) -> builtins.bool: ...
+        def ClearField(self, field_name: typing.Literal["allow_remote_fs_zero_copy_replication", b"allow_remote_fs_zero_copy_replication", "check_sample_column_is_correct", b"check_sample_column_is_correct", "cleanup_delay_period", b"cleanup_delay_period", "inactive_parts_to_delay_insert", b"inactive_parts_to_delay_insert", "inactive_parts_to_throw_insert", b"inactive_parts_to_throw_insert", "max_avg_part_size_for_too_many_parts", b"max_avg_part_size_for_too_many_parts", "max_bytes_to_merge_at_max_space_in_pool", b"max_bytes_to_merge_at_max_space_in_pool", "max_bytes_to_merge_at_min_space_in_pool", b"max_bytes_to_merge_at_min_space_in_pool", "max_cleanup_delay_period", b"max_cleanup_delay_period", "max_merge_selecting_sleep_ms", b"max_merge_selecting_sleep_ms", "max_number_of_merges_with_ttl_in_pool", b"max_number_of_merges_with_ttl_in_pool", "max_parts_in_total", b"max_parts_in_total", "max_replicated_merges_in_queue", b"max_replicated_merges_in_queue", "merge_max_block_size", b"merge_max_block_size", "merge_selecting_sleep_ms", b"merge_selecting_sleep_ms", "merge_with_recompression_ttl_timeout", b"merge_with_recompression_ttl_timeout", "merge_with_ttl_timeout", b"merge_with_ttl_timeout", "min_age_to_force_merge_on_partition_only", b"min_age_to_force_merge_on_partition_only", "min_age_to_force_merge_seconds", b"min_age_to_force_merge_seconds", "min_bytes_for_wide_part", b"min_bytes_for_wide_part", "min_rows_for_wide_part", b"min_rows_for_wide_part", "number_of_free_entries_in_pool_to_execute_mutation", b"number_of_free_entries_in_pool_to_execute_mutation", "number_of_free_entries_in_pool_to_lower_max_size_of_merge", b"number_of_free_entries_in_pool_to_lower_max_size_of_merge", "parts_to_delay_insert", b"parts_to_delay_insert", "parts_to_throw_insert", b"parts_to_throw_insert", "replicated_deduplication_window", b"replicated_deduplication_window", "replicated_deduplication_window_seconds", b"replicated_deduplication_window_seconds", "ttl_only_drop_parts", b"ttl_only_drop_parts"]) -> None: ...

         @typing.final
         class Kafka(google.protobuf.message.Message):
```
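Taken together, these MergeTree stub changes surface four new optional settings, all of them presence-carrying wrapper types. A minimal construction sketch, assuming the nested message is `ClickhouseConfig.MergeTree` (the type of the `merge_tree` field in this API); the values shown are just the documented defaults:

```python
from google.protobuf.wrappers_pb2 import BoolValue, Int64Value
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

merge_tree = ClickhouseConfig.MergeTree(
    merge_max_block_size=Int64Value(value=8192),            # documented default
    check_sample_column_is_correct=BoolValue(value=True),    # documented default
    max_merge_selecting_sleep_ms=Int64Value(value=60000),    # needs ClickHouse >= 23.6
    max_cleanup_delay_period=Int64Value(value=300),          # needs ClickHouse >= 23.6
)
# Wrapper types (Int64Value/BoolValue) carry presence, which is why the stub
# lists these fields in HasField: an unset field is distinct from 0/false.
assert merge_tree.HasField("max_cleanup_delay_period")
```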
```diff
@@ -259,6 +297,84 @@ class ClickhouseConfig(google.protobuf.message.Message):
         SASL_MECHANISM_SCRAM_SHA_256: ClickhouseConfig.Kafka.SaslMechanism.ValueType # 3
         SASL_MECHANISM_SCRAM_SHA_512: ClickhouseConfig.Kafka.SaslMechanism.ValueType # 4

+        class _Debug:
+            ValueType = typing.NewType("ValueType", builtins.int)
+            V: typing_extensions.TypeAlias = ValueType
+
+        class _DebugEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ClickhouseConfig.Kafka._Debug.ValueType], builtins.type):
+            DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+            DEBUG_UNSPECIFIED: ClickhouseConfig.Kafka._Debug.ValueType # 0
+            DEBUG_GENERIC: ClickhouseConfig.Kafka._Debug.ValueType # 1
+            DEBUG_BROKER: ClickhouseConfig.Kafka._Debug.ValueType # 2
+            DEBUG_TOPIC: ClickhouseConfig.Kafka._Debug.ValueType # 3
+            DEBUG_METADATA: ClickhouseConfig.Kafka._Debug.ValueType # 4
+            DEBUG_FEATURE: ClickhouseConfig.Kafka._Debug.ValueType # 5
+            DEBUG_QUEUE: ClickhouseConfig.Kafka._Debug.ValueType # 6
+            DEBUG_MSG: ClickhouseConfig.Kafka._Debug.ValueType # 7
+            DEBUG_PROTOCOL: ClickhouseConfig.Kafka._Debug.ValueType # 8
+            DEBUG_CGRP: ClickhouseConfig.Kafka._Debug.ValueType # 9
+            DEBUG_SECURITY: ClickhouseConfig.Kafka._Debug.ValueType # 10
+            DEBUG_FETCH: ClickhouseConfig.Kafka._Debug.ValueType # 11
+            DEBUG_INTERCEPTOR: ClickhouseConfig.Kafka._Debug.ValueType # 12
+            DEBUG_PLUGIN: ClickhouseConfig.Kafka._Debug.ValueType # 13
+            DEBUG_CONSUMER: ClickhouseConfig.Kafka._Debug.ValueType # 14
+            DEBUG_ADMIN: ClickhouseConfig.Kafka._Debug.ValueType # 15
+            DEBUG_EOS: ClickhouseConfig.Kafka._Debug.ValueType # 16
+            DEBUG_MOCK: ClickhouseConfig.Kafka._Debug.ValueType # 17
+            DEBUG_ASSIGNOR: ClickhouseConfig.Kafka._Debug.ValueType # 18
+            DEBUG_CONF: ClickhouseConfig.Kafka._Debug.ValueType # 19
+            DEBUG_TELEMETRY: ClickhouseConfig.Kafka._Debug.ValueType # 20
+            DEBUG_ALL: ClickhouseConfig.Kafka._Debug.ValueType # 21
+
+        class Debug(_Debug, metaclass=_DebugEnumTypeWrapper): ...
+        DEBUG_UNSPECIFIED: ClickhouseConfig.Kafka.Debug.ValueType # 0
+        DEBUG_GENERIC: ClickhouseConfig.Kafka.Debug.ValueType # 1
+        DEBUG_BROKER: ClickhouseConfig.Kafka.Debug.ValueType # 2
+        DEBUG_TOPIC: ClickhouseConfig.Kafka.Debug.ValueType # 3
+        DEBUG_METADATA: ClickhouseConfig.Kafka.Debug.ValueType # 4
+        DEBUG_FEATURE: ClickhouseConfig.Kafka.Debug.ValueType # 5
+        DEBUG_QUEUE: ClickhouseConfig.Kafka.Debug.ValueType # 6
+        DEBUG_MSG: ClickhouseConfig.Kafka.Debug.ValueType # 7
+        DEBUG_PROTOCOL: ClickhouseConfig.Kafka.Debug.ValueType # 8
+        DEBUG_CGRP: ClickhouseConfig.Kafka.Debug.ValueType # 9
+        DEBUG_SECURITY: ClickhouseConfig.Kafka.Debug.ValueType # 10
+        DEBUG_FETCH: ClickhouseConfig.Kafka.Debug.ValueType # 11
+        DEBUG_INTERCEPTOR: ClickhouseConfig.Kafka.Debug.ValueType # 12
+        DEBUG_PLUGIN: ClickhouseConfig.Kafka.Debug.ValueType # 13
+        DEBUG_CONSUMER: ClickhouseConfig.Kafka.Debug.ValueType # 14
+        DEBUG_ADMIN: ClickhouseConfig.Kafka.Debug.ValueType # 15
+        DEBUG_EOS: ClickhouseConfig.Kafka.Debug.ValueType # 16
+        DEBUG_MOCK: ClickhouseConfig.Kafka.Debug.ValueType # 17
+        DEBUG_ASSIGNOR: ClickhouseConfig.Kafka.Debug.ValueType # 18
+        DEBUG_CONF: ClickhouseConfig.Kafka.Debug.ValueType # 19
+        DEBUG_TELEMETRY: ClickhouseConfig.Kafka.Debug.ValueType # 20
+        DEBUG_ALL: ClickhouseConfig.Kafka.Debug.ValueType # 21
+
+        class _AutoOffsetReset:
+            ValueType = typing.NewType("ValueType", builtins.int)
+            V: typing_extensions.TypeAlias = ValueType
+
+        class _AutoOffsetResetEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ClickhouseConfig.Kafka._AutoOffsetReset.ValueType], builtins.type):
+            DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
+            AUTO_OFFSET_RESET_UNSPECIFIED: ClickhouseConfig.Kafka._AutoOffsetReset.ValueType # 0
+            AUTO_OFFSET_RESET_SMALLEST: ClickhouseConfig.Kafka._AutoOffsetReset.ValueType # 1
+            AUTO_OFFSET_RESET_EARLIEST: ClickhouseConfig.Kafka._AutoOffsetReset.ValueType # 2
+            AUTO_OFFSET_RESET_BEGINNING: ClickhouseConfig.Kafka._AutoOffsetReset.ValueType # 3
+            AUTO_OFFSET_RESET_LARGEST: ClickhouseConfig.Kafka._AutoOffsetReset.ValueType # 4
+            AUTO_OFFSET_RESET_LATEST: ClickhouseConfig.Kafka._AutoOffsetReset.ValueType # 5
+            AUTO_OFFSET_RESET_END: ClickhouseConfig.Kafka._AutoOffsetReset.ValueType # 6
+            AUTO_OFFSET_RESET_ERROR: ClickhouseConfig.Kafka._AutoOffsetReset.ValueType # 7
+
+        class AutoOffsetReset(_AutoOffsetReset, metaclass=_AutoOffsetResetEnumTypeWrapper): ...
+        AUTO_OFFSET_RESET_UNSPECIFIED: ClickhouseConfig.Kafka.AutoOffsetReset.ValueType # 0
+        AUTO_OFFSET_RESET_SMALLEST: ClickhouseConfig.Kafka.AutoOffsetReset.ValueType # 1
+        AUTO_OFFSET_RESET_EARLIEST: ClickhouseConfig.Kafka.AutoOffsetReset.ValueType # 2
+        AUTO_OFFSET_RESET_BEGINNING: ClickhouseConfig.Kafka.AutoOffsetReset.ValueType # 3
+        AUTO_OFFSET_RESET_LARGEST: ClickhouseConfig.Kafka.AutoOffsetReset.ValueType # 4
+        AUTO_OFFSET_RESET_LATEST: ClickhouseConfig.Kafka.AutoOffsetReset.ValueType # 5
+        AUTO_OFFSET_RESET_END: ClickhouseConfig.Kafka.AutoOffsetReset.ValueType # 6
+        AUTO_OFFSET_RESET_ERROR: ClickhouseConfig.Kafka.AutoOffsetReset.ValueType # 7
+
         SECURITY_PROTOCOL_FIELD_NUMBER: builtins.int
         SASL_MECHANISM_FIELD_NUMBER: builtins.int
         SASL_USERNAME_FIELD_NUMBER: builtins.int
```
```diff
@@ -266,10 +382,14 @@ class ClickhouseConfig(google.protobuf.message.Message):
         ENABLE_SSL_CERTIFICATE_VERIFICATION_FIELD_NUMBER: builtins.int
         MAX_POLL_INTERVAL_MS_FIELD_NUMBER: builtins.int
         SESSION_TIMEOUT_MS_FIELD_NUMBER: builtins.int
+        DEBUG_FIELD_NUMBER: builtins.int
+        AUTO_OFFSET_RESET_FIELD_NUMBER: builtins.int
         security_protocol: global___ClickhouseConfig.Kafka.SecurityProtocol.ValueType
         sasl_mechanism: global___ClickhouseConfig.Kafka.SaslMechanism.ValueType
         sasl_username: builtins.str
         sasl_password: builtins.str
+        debug: global___ClickhouseConfig.Kafka.Debug.ValueType
+        auto_offset_reset: global___ClickhouseConfig.Kafka.AutoOffsetReset.ValueType
         @property
         def enable_ssl_certificate_verification(self) -> google.protobuf.wrappers_pb2.BoolValue: ...
         @property
```
```diff
@@ -286,9 +406,11 @@ class ClickhouseConfig(google.protobuf.message.Message):
             enable_ssl_certificate_verification: google.protobuf.wrappers_pb2.BoolValue | None = ...,
             max_poll_interval_ms: google.protobuf.wrappers_pb2.Int64Value | None = ...,
             session_timeout_ms: google.protobuf.wrappers_pb2.Int64Value | None = ...,
+            debug: global___ClickhouseConfig.Kafka.Debug.ValueType = ...,
+            auto_offset_reset: global___ClickhouseConfig.Kafka.AutoOffsetReset.ValueType = ...,
         ) -> None: ...
         def HasField(self, field_name: typing.Literal["enable_ssl_certificate_verification", b"enable_ssl_certificate_verification", "max_poll_interval_ms", b"max_poll_interval_ms", "session_timeout_ms", b"session_timeout_ms"]) -> builtins.bool: ...
-        def ClearField(self, field_name: typing.Literal["enable_ssl_certificate_verification", b"enable_ssl_certificate_verification", "max_poll_interval_ms", b"max_poll_interval_ms", "sasl_mechanism", b"sasl_mechanism", "sasl_password", b"sasl_password", "sasl_username", b"sasl_username", "security_protocol", b"security_protocol", "session_timeout_ms", b"session_timeout_ms"]) -> None: ...
+        def ClearField(self, field_name: typing.Literal["auto_offset_reset", b"auto_offset_reset", "debug", b"debug", "enable_ssl_certificate_verification", b"enable_ssl_certificate_verification", "max_poll_interval_ms", b"max_poll_interval_ms", "sasl_mechanism", b"sasl_mechanism", "sasl_password", b"sasl_password", "sasl_username", b"sasl_username", "security_protocol", b"security_protocol", "session_timeout_ms", b"session_timeout_ms"]) -> None: ...

         @typing.final
         class KafkaTopic(google.protobuf.message.Message):
```
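The two new Kafka fields are plain proto3 enums rather than wrapper messages, so they carry no presence bit — note that they appear in ClearField but not in HasField above. A short sketch of setting them (enum value names are taken directly from the stub):

```python
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

kafka = ClickhouseConfig.Kafka(
    debug=ClickhouseConfig.Kafka.DEBUG_ALL,  # librdkafka debug context to log
    auto_offset_reset=ClickhouseConfig.Kafka.AUTO_OFFSET_RESET_EARLIEST,
)
# Enum fields reset to their *_UNSPECIFIED zero value when cleared:
kafka.ClearField("debug")
assert kafka.debug == ClickhouseConfig.Kafka.DEBUG_UNSPECIFIED
```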
```diff
@@ -384,19 +506,41 @@ class ClickhouseConfig(google.protobuf.message.Message):
         class HttpSource(google.protobuf.message.Message):
             DESCRIPTOR: google.protobuf.descriptor.Descriptor

+            @typing.final
+            class Header(google.protobuf.message.Message):
+                DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+                NAME_FIELD_NUMBER: builtins.int
+                VALUE_FIELD_NUMBER: builtins.int
+                name: builtins.str
+                value: builtins.str
+                def __init__(
+                    self,
+                    *,
+                    name: builtins.str = ...,
+                    value: builtins.str = ...,
+                ) -> None: ...
+                def ClearField(self, field_name: typing.Literal["name", b"name", "value", b"value"]) -> None: ...
+
             URL_FIELD_NUMBER: builtins.int
             FORMAT_FIELD_NUMBER: builtins.int
+            HEADERS_FIELD_NUMBER: builtins.int
             url: builtins.str
             """URL of the source dictionary available over HTTP."""
             format: builtins.str
             """The data format. Valid values are all formats supported by ClickHouse SQL dialect."""
+            @property
+            def headers(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ClickhouseConfig.ExternalDictionary.HttpSource.Header]:
+                """HTTP headers."""
+
             def __init__(
                 self,
                 *,
                 url: builtins.str = ...,
                 format: builtins.str = ...,
+                headers: collections.abc.Iterable[global___ClickhouseConfig.ExternalDictionary.HttpSource.Header] | None = ...,
             ) -> None: ...
-            def ClearField(self, field_name: typing.Literal["format", b"format", "url", b"url"]) -> None: ...
+            def ClearField(self, field_name: typing.Literal["format", b"format", "headers", b"headers", "url", b"url"]) -> None: ...

         @typing.final
         class MysqlSource(google.protobuf.message.Message):
```
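HTTP dictionary sources can now carry custom headers, e.g. for token-authenticated endpoints. A sketch using the names from the stub above (the URL and header values are placeholders):

```python
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

HttpSource = ClickhouseConfig.ExternalDictionary.HttpSource
source = HttpSource(
    url="https://example.com/dict.csv",  # placeholder URL
    format="CSV",
    headers=[HttpSource.Header(name="Authorization", value="Bearer <token>")],
)
# `headers` is a repeated composite field, so it also supports in-place adds:
source.headers.add(name="X-Request-Source", value="clickhouse")
```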
```diff
@@ -444,6 +588,8 @@ class ClickhouseConfig(google.protobuf.message.Message):
             REPLICAS_FIELD_NUMBER: builtins.int
             WHERE_FIELD_NUMBER: builtins.int
             INVALIDATE_QUERY_FIELD_NUMBER: builtins.int
+            CLOSE_CONNECTION_FIELD_NUMBER: builtins.int
+            SHARE_CONNECTION_FIELD_NUMBER: builtins.int
             db: builtins.str
             """Name of the MySQL database to connect to."""
             table: builtins.str
```
```diff
@@ -464,6 +610,14 @@ class ClickhouseConfig(google.protobuf.message.Message):
             def replicas(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ClickhouseConfig.ExternalDictionary.MysqlSource.Replica]:
                 """List of MySQL replicas of the database used as dictionary source."""

+            @property
+            def close_connection(self) -> google.protobuf.wrappers_pb2.BoolValue:
+                """Should the connection be closed after each request."""
+
+            @property
+            def share_connection(self) -> google.protobuf.wrappers_pb2.BoolValue:
+                """Should a connection be shared for some requests."""
+
             def __init__(
                 self,
                 *,
```
```diff
@@ -475,8 +629,11 @@ class ClickhouseConfig(google.protobuf.message.Message):
                 replicas: collections.abc.Iterable[global___ClickhouseConfig.ExternalDictionary.MysqlSource.Replica] | None = ...,
                 where: builtins.str = ...,
                 invalidate_query: builtins.str = ...,
+                close_connection: google.protobuf.wrappers_pb2.BoolValue | None = ...,
+                share_connection: google.protobuf.wrappers_pb2.BoolValue | None = ...,
             ) -> None: ...
-            def
+            def HasField(self, field_name: typing.Literal["close_connection", b"close_connection", "share_connection", b"share_connection"]) -> builtins.bool: ...
+            def ClearField(self, field_name: typing.Literal["close_connection", b"close_connection", "db", b"db", "invalidate_query", b"invalidate_query", "password", b"password", "port", b"port", "replicas", b"replicas", "share_connection", b"share_connection", "table", b"table", "user", b"user", "where", b"where"]) -> None: ...

         @typing.final
         class ClickhouseSource(google.protobuf.message.Message):
```
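For MySQL dictionary sources, the two new connection-behaviour knobs are BoolValue wrappers, so leaving them unset keeps the server-side defaults. A sketch (the database and table names are hypothetical):

```python
from google.protobuf.wrappers_pb2 import BoolValue
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

mysql_source = ClickhouseConfig.ExternalDictionary.MysqlSource(
    db="dict_db",        # hypothetical database name
    table="dict_table",  # hypothetical table name
    close_connection=BoolValue(value=False),
    share_connection=BoolValue(value=True),
)
assert mysql_source.HasField("share_connection")
```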
```diff
@@ -489,6 +646,7 @@ class ClickhouseConfig(google.protobuf.message.Message):
             USER_FIELD_NUMBER: builtins.int
             PASSWORD_FIELD_NUMBER: builtins.int
             WHERE_FIELD_NUMBER: builtins.int
+            SECURE_FIELD_NUMBER: builtins.int
             db: builtins.str
             """Name of the ClickHouse database."""
             table: builtins.str
```
```diff
@@ -503,6 +661,10 @@ class ClickhouseConfig(google.protobuf.message.Message):
             """Password of the ClickHouse database user."""
             where: builtins.str
             """Selection criteria for the data in the specified ClickHouse table."""
+            @property
+            def secure(self) -> google.protobuf.wrappers_pb2.BoolValue:
+                """Use ssl for connection."""
+
             def __init__(
                 self,
                 *,
```
```diff
@@ -513,8 +675,10 @@ class ClickhouseConfig(google.protobuf.message.Message):
                 user: builtins.str = ...,
                 password: builtins.str = ...,
                 where: builtins.str = ...,
+                secure: google.protobuf.wrappers_pb2.BoolValue | None = ...,
             ) -> None: ...
-            def
+            def HasField(self, field_name: typing.Literal["secure", b"secure"]) -> builtins.bool: ...
+            def ClearField(self, field_name: typing.Literal["db", b"db", "host", b"host", "password", b"password", "port", b"port", "secure", b"secure", "table", b"table", "user", b"user", "where", b"where"]) -> None: ...

         @typing.final
         class MongodbSource(google.protobuf.message.Message):
```
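The ClickHouse-to-ClickHouse dictionary source gains an opt-in TLS flag, again as a presence-carrying BoolValue. A sketch (host and table are hypothetical; `host` and `port` are existing fields visible in the ClearField literal above):

```python
from google.protobuf.wrappers_pb2 import BoolValue
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

ch_source = ClickhouseConfig.ExternalDictionary.ClickhouseSource(
    db="default",
    table="dict_source",           # hypothetical table
    host="clickhouse.internal",    # hypothetical host
    secure=BoolValue(value=True),  # connect over SSL/TLS
)
```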
```diff
@@ -822,19 +986,25 @@ class ClickhouseConfig(google.protobuf.message.Message):

             TYPE_FIELD_NUMBER: builtins.int
             SIZE_IN_CELLS_FIELD_NUMBER: builtins.int
+            MAX_ARRAY_SIZE_FIELD_NUMBER: builtins.int
             type: global___ClickhouseConfig.ExternalDictionary.Layout.Type.ValueType
             """Layout type for an external dictionary."""
             size_in_cells: builtins.int
             """Number of cells in the cache. Rounded up to a power of two.
             Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
             """
+            max_array_size: builtins.int
+            """Maximum dictionary key size.
+            Applicable only for FLAT layout type.
+            """
             def __init__(
                 self,
                 *,
                 type: global___ClickhouseConfig.ExternalDictionary.Layout.Type.ValueType = ...,
                 size_in_cells: builtins.int = ...,
+                max_array_size: builtins.int = ...,
             ) -> None: ...
-            def ClearField(self, field_name: typing.Literal["size_in_cells", b"size_in_cells", "type", b"type"]) -> None: ...
+            def ClearField(self, field_name: typing.Literal["max_array_size", b"max_array_size", "size_in_cells", b"size_in_cells", "type", b"type"]) -> None: ...

         @typing.final
         class Range(google.protobuf.message.Message):
```
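Unlike the wrapper-typed settings, `max_array_size` is a plain int64, so zero simply means "not set". A sketch, assuming the flat-layout enum value is named `FLAT` as in the Yandex Cloud API reference (the value name is not shown in this diff):

```python
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

Layout = ClickhouseConfig.ExternalDictionary.Layout
layout = Layout(
    type=Layout.FLAT,        # assumption: FLAT is the flat-layout Type value
    max_array_size=500_000,  # only meaningful for FLAT, per the docstring
)
```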
```diff
@@ -975,8 +1145,32 @@ class ClickhouseConfig(google.protobuf.message.Message):

     NAME_FIELD_NUMBER: builtins.int
     PATTERNS_FIELD_NUMBER: builtins.int
+    PATH_COLUMN_NAME_FIELD_NUMBER: builtins.int
+    TIME_COLUMN_NAME_FIELD_NUMBER: builtins.int
+    VALUE_COLUMN_NAME_FIELD_NUMBER: builtins.int
+    VERSION_COLUMN_NAME_FIELD_NUMBER: builtins.int
     name: builtins.str
     """Name for the specified combination of settings for Graphite rollup."""
+    path_column_name: builtins.str
+    """The name of the column storing the metric name (Graphite sensor).
+    Default: Path
+    See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/ru/engines/table-engines/mergetree-family/graphitemergetree#required-columns)
+    """
+    time_column_name: builtins.str
+    """The name of the column storing the time of measuring the metric.
+    Default: Time
+    See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/ru/engines/table-engines/mergetree-family/graphitemergetree#required-columns)
+    """
+    value_column_name: builtins.str
+    """The name of the column storing the value of the metric at the time set in time_column_name.
+    Default: Value
+    See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/ru/engines/table-engines/mergetree-family/graphitemergetree#required-columns)
+    """
+    version_column_name: builtins.str
+    """The name of the column storing the version of the metric.
+    Default: Timestamp
+    See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/ru/engines/table-engines/mergetree-family/graphitemergetree#required-columns)
+    """
     @property
     def patterns(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ClickhouseConfig.GraphiteRollup.Pattern]:
         """Pattern to use for the rollup."""
```
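These four column names previously had to match the GraphiteMergeTree defaults; now they can be overridden per rollup. A sketch that simply spells out the documented defaults (the rollup name is a placeholder and `patterns` is omitted):

```python
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

rollup = ClickhouseConfig.GraphiteRollup(
    name="default_rollup",     # placeholder rollup name
    path_column_name="Path",   # documented defaults follow
    time_column_name="Time",
    value_column_name="Value",
    version_column_name="Timestamp",
)
```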
```diff
@@ -986,8 +1180,81 @@ class ClickhouseConfig(google.protobuf.message.Message):
         *,
         name: builtins.str = ...,
         patterns: collections.abc.Iterable[global___ClickhouseConfig.GraphiteRollup.Pattern] | None = ...,
+        path_column_name: builtins.str = ...,
+        time_column_name: builtins.str = ...,
+        value_column_name: builtins.str = ...,
+        version_column_name: builtins.str = ...,
+    ) -> None: ...
+    def ClearField(self, field_name: typing.Literal["name", b"name", "path_column_name", b"path_column_name", "patterns", b"patterns", "time_column_name", b"time_column_name", "value_column_name", b"value_column_name", "version_column_name", b"version_column_name"]) -> None: ...
+
+@typing.final
+class QueryMaskingRule(google.protobuf.message.Message):
+    DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+    NAME_FIELD_NUMBER: builtins.int
+    REGEXP_FIELD_NUMBER: builtins.int
+    REPLACE_FIELD_NUMBER: builtins.int
+    name: builtins.str
+    """Name for the rule."""
+    regexp: builtins.str
+    """RE2 compatible regular expression.
+    Required.
+    """
+    replace: builtins.str
+    """Substitution string for sensitive data.
+    Default: six asterisks
+    """
+    def __init__(
+        self,
+        *,
+        name: builtins.str = ...,
+        regexp: builtins.str = ...,
+        replace: builtins.str = ...,
     ) -> None: ...
-    def ClearField(self, field_name: typing.Literal["name", b"name", "
+    def ClearField(self, field_name: typing.Literal["name", b"name", "regexp", b"regexp", "replace", b"replace"]) -> None: ...
+
+@typing.final
+class QueryCache(google.protobuf.message.Message):
+    DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+    MAX_SIZE_IN_BYTES_FIELD_NUMBER: builtins.int
+    MAX_ENTRIES_FIELD_NUMBER: builtins.int
+    MAX_ENTRY_SIZE_IN_BYTES_FIELD_NUMBER: builtins.int
+    MAX_ENTRY_SIZE_IN_ROWS_FIELD_NUMBER: builtins.int
+    @property
+    def max_size_in_bytes(self) -> google.protobuf.wrappers_pb2.Int64Value:
+        """The maximum cache size in bytes.
+        Default: 1073741824 (1 GiB)
+        """
+
+    @property
+    def max_entries(self) -> google.protobuf.wrappers_pb2.Int64Value:
+        """The maximum number of SELECT query results stored in the cache.
+        Default: 1024
+        """
+
+    @property
+    def max_entry_size_in_bytes(self) -> google.protobuf.wrappers_pb2.Int64Value:
+        """The maximum size in bytes SELECT query results may have to be saved in the cache.
+        Dafault: 1048576 (1 MiB)
+        """
+
+    @property
+    def max_entry_size_in_rows(self) -> google.protobuf.wrappers_pb2.Int64Value:
+        """The maximum number of rows SELECT query results may have to be saved in the cache.
+        Default: 30000000 (30 mil)
+        """
+
+    def __init__(
+        self,
+        *,
+        max_size_in_bytes: google.protobuf.wrappers_pb2.Int64Value | None = ...,
+        max_entries: google.protobuf.wrappers_pb2.Int64Value | None = ...,
+        max_entry_size_in_bytes: google.protobuf.wrappers_pb2.Int64Value | None = ...,
+        max_entry_size_in_rows: google.protobuf.wrappers_pb2.Int64Value | None = ...,
+    ) -> None: ...
+    def HasField(self, field_name: typing.Literal["max_entries", b"max_entries", "max_entry_size_in_bytes", b"max_entry_size_in_bytes", "max_entry_size_in_rows", b"max_entry_size_in_rows", "max_size_in_bytes", b"max_size_in_bytes"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing.Literal["max_entries", b"max_entries", "max_entry_size_in_bytes", b"max_entry_size_in_bytes", "max_entry_size_in_rows", b"max_entry_size_in_rows", "max_size_in_bytes", b"max_size_in_bytes"]) -> None: ...

     LOG_LEVEL_FIELD_NUMBER: builtins.int
     MERGE_TREE_FIELD_NUMBER: builtins.int
```
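QueryMaskingRule and QueryCache are new nested messages on ClickhouseConfig. A construction sketch; the masking rule below is illustrative, not taken from the package, and the cache values are just the documented defaults:

```python
from google.protobuf.wrappers_pb2 import Int64Value
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

mask_emails = ClickhouseConfig.QueryMaskingRule(
    name="hide emails",                 # illustrative rule name
    regexp=r"[\w.+-]+@[\w-]+\.[\w.]+",  # RE2-compatible; required
    # `replace` left unset falls back to six asterisks, per the docstring.
)

query_cache = ClickhouseConfig.QueryCache(
    max_size_in_bytes=Int64Value(value=1 << 30),  # 1 GiB, the documented default
    max_entries=Int64Value(value=1024),           # documented default
)
```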
```diff
@@ -1055,6 +1322,9 @@ class ClickhouseConfig(google.protobuf.message.Message):
     DEFAULT_DATABASE_FIELD_NUMBER: builtins.int
     TOTAL_MEMORY_PROFILER_STEP_FIELD_NUMBER: builtins.int
     TOTAL_MEMORY_TRACKER_SAMPLE_PROBABILITY_FIELD_NUMBER: builtins.int
+    QUERY_MASKING_RULES_FIELD_NUMBER: builtins.int
+    DICTIONARIES_LAZY_LOAD_FIELD_NUMBER: builtins.int
+    QUERY_CACHE_FIELD_NUMBER: builtins.int
     log_level: global___ClickhouseConfig.LogLevel.ValueType
     """Logging level for the ClickHouse cluster. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR."""
     timezone: builtins.str
```
```diff
@@ -1364,6 +1634,27 @@ class ClickhouseConfig(google.protobuf.message.Message):

     @property
     def total_memory_tracker_sample_probability(self) -> google.protobuf.wrappers_pb2.DoubleValue: ...
+    @property
+    def query_masking_rules(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ClickhouseConfig.QueryMaskingRule]:
+        """Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs, system.query_log, system.text_log, system.processes tables, and in logs sent to the client. That allows preventing sensitive data leakage from SQL queries (like names, emails, personal identifiers or credit card numbers) to logs.
+        Change of these settings is applied with ClickHouse restart
+        See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#query-masking-rules)
+        """
+
+    @property
+    def dictionaries_lazy_load(self) -> google.protobuf.wrappers_pb2.BoolValue:
+        """Lazy loading of dictionaries.
+        Default: true
+        See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#dictionaries_lazy_load)
+        """
+
+    @property
+    def query_cache(self) -> global___ClickhouseConfig.QueryCache:
+        """[Query cache](https://clickhouse.com/docs/en/operations/query-cache) configuration.
+        Min version: 23.5
+        See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#query_cache)
+        """
+
     def __init__(
         self,
         *,
```
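At the top level, the three new fields compose like any other ClickhouseConfig members. A minimal sketch tying the earlier pieces together (the rule is illustrative):

```python
from google.protobuf.wrappers_pb2 import BoolValue
from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

config = ClickhouseConfig(
    query_masking_rules=[
        ClickhouseConfig.QueryMaskingRule(
            name="hide emails", regexp=r"[\w.+-]+@[\w-]+\.[\w.]+"
        ),
    ],
    dictionaries_lazy_load=BoolValue(value=True),
    query_cache=ClickhouseConfig.QueryCache(),  # all defaults; needs ClickHouse >= 23.5
)
# Submessages and wrappers carry presence; the repeated field does not:
assert config.HasField("query_cache") and len(config.query_masking_rules) == 1
```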
```diff
@@ -1433,9 +1724,12 @@ class ClickhouseConfig(google.protobuf.message.Message):
         default_database: google.protobuf.wrappers_pb2.StringValue | None = ...,
         total_memory_profiler_step: google.protobuf.wrappers_pb2.Int64Value | None = ...,
         total_memory_tracker_sample_probability: google.protobuf.wrappers_pb2.DoubleValue | None = ...,
+        query_masking_rules: collections.abc.Iterable[global___ClickhouseConfig.QueryMaskingRule] | None = ...,
+        dictionaries_lazy_load: google.protobuf.wrappers_pb2.BoolValue | None = ...,
+        query_cache: global___ClickhouseConfig.QueryCache | None = ...,
     ) -> None: ...
-    def HasField(self, field_name: typing.Literal["asynchronous_insert_log_enabled", b"asynchronous_insert_log_enabled", "asynchronous_insert_log_retention_size", b"asynchronous_insert_log_retention_size", "asynchronous_insert_log_retention_time", b"asynchronous_insert_log_retention_time", "asynchronous_metric_log_enabled", b"asynchronous_metric_log_enabled", "asynchronous_metric_log_retention_size", b"asynchronous_metric_log_retention_size", "asynchronous_metric_log_retention_time", b"asynchronous_metric_log_retention_time", "background_buffer_flush_schedule_pool_size", b"background_buffer_flush_schedule_pool_size", "background_common_pool_size", b"background_common_pool_size", "background_distributed_schedule_pool_size", b"background_distributed_schedule_pool_size", "background_fetches_pool_size", b"background_fetches_pool_size", "background_merges_mutations_concurrency_ratio", b"background_merges_mutations_concurrency_ratio", "background_message_broker_schedule_pool_size", b"background_message_broker_schedule_pool_size", "background_move_pool_size", b"background_move_pool_size", "background_pool_size", b"background_pool_size", "background_schedule_pool_size", b"background_schedule_pool_size", "builtin_dictionaries_reload_interval", b"builtin_dictionaries_reload_interval", "default_database", b"default_database", "geobase_enabled", b"geobase_enabled", "kafka", b"kafka", "keep_alive_timeout", b"keep_alive_timeout", "mark_cache_size", b"mark_cache_size", "max_concurrent_queries", b"max_concurrent_queries", "max_connections", b"max_connections", "max_partition_size_to_drop", b"max_partition_size_to_drop", "max_table_size_to_drop", b"max_table_size_to_drop", "merge_tree", b"merge_tree", "metric_log_enabled", b"metric_log_enabled", "metric_log_retention_size", b"metric_log_retention_size", "metric_log_retention_time", b"metric_log_retention_time", "opentelemetry_span_log_enabled", b"opentelemetry_span_log_enabled", "opentelemetry_span_log_retention_size", b"opentelemetry_span_log_retention_size", "opentelemetry_span_log_retention_time", b"opentelemetry_span_log_retention_time", "part_log_retention_size", b"part_log_retention_size", "part_log_retention_time", b"part_log_retention_time", "query_log_retention_size", b"query_log_retention_size", "query_log_retention_time", b"query_log_retention_time", "query_thread_log_enabled", b"query_thread_log_enabled", "query_thread_log_retention_size", b"query_thread_log_retention_size", "query_thread_log_retention_time", b"query_thread_log_retention_time", "query_views_log_enabled", b"query_views_log_enabled", "query_views_log_retention_size", b"query_views_log_retention_size", "query_views_log_retention_time", b"query_views_log_retention_time", "rabbitmq", b"rabbitmq", "session_log_enabled", b"session_log_enabled", "session_log_retention_size", b"session_log_retention_size", "session_log_retention_time", b"session_log_retention_time", "text_log_enabled", b"text_log_enabled", "text_log_retention_size", b"text_log_retention_size", "text_log_retention_time", b"text_log_retention_time", "total_memory_profiler_step", b"total_memory_profiler_step", "total_memory_tracker_sample_probability", b"total_memory_tracker_sample_probability", "trace_log_enabled", b"trace_log_enabled", "trace_log_retention_size", b"trace_log_retention_size", "trace_log_retention_time", b"trace_log_retention_time", "uncompressed_cache_size", b"uncompressed_cache_size", "zookeeper_log_enabled", b"zookeeper_log_enabled", "zookeeper_log_retention_size", b"zookeeper_log_retention_size", "zookeeper_log_retention_time", b"zookeeper_log_retention_time"]) -> builtins.bool: ...
-    def ClearField(self, field_name: typing.Literal["asynchronous_insert_log_enabled", b"asynchronous_insert_log_enabled", "asynchronous_insert_log_retention_size", b"asynchronous_insert_log_retention_size", "asynchronous_insert_log_retention_time", b"asynchronous_insert_log_retention_time", "asynchronous_metric_log_enabled", b"asynchronous_metric_log_enabled", "asynchronous_metric_log_retention_size", b"asynchronous_metric_log_retention_size", "asynchronous_metric_log_retention_time", b"asynchronous_metric_log_retention_time", "background_buffer_flush_schedule_pool_size", b"background_buffer_flush_schedule_pool_size", "background_common_pool_size", b"background_common_pool_size", "background_distributed_schedule_pool_size", b"background_distributed_schedule_pool_size", "background_fetches_pool_size", b"background_fetches_pool_size", "background_merges_mutations_concurrency_ratio", b"background_merges_mutations_concurrency_ratio", "background_message_broker_schedule_pool_size", b"background_message_broker_schedule_pool_size", "background_move_pool_size", b"background_move_pool_size", "background_pool_size", b"background_pool_size", "background_schedule_pool_size", b"background_schedule_pool_size", "builtin_dictionaries_reload_interval", b"builtin_dictionaries_reload_interval", "compression", b"compression", "default_database", b"default_database", "dictionaries", b"dictionaries", "geobase_enabled", b"geobase_enabled", "geobase_uri", b"geobase_uri", "graphite_rollup", b"graphite_rollup", "kafka", b"kafka", "kafka_topics", b"kafka_topics", "keep_alive_timeout", b"keep_alive_timeout", "log_level", b"log_level", "mark_cache_size", b"mark_cache_size", "max_concurrent_queries", b"max_concurrent_queries", "max_connections", b"max_connections", "max_partition_size_to_drop", b"max_partition_size_to_drop", "max_table_size_to_drop", b"max_table_size_to_drop", "merge_tree", b"merge_tree", "metric_log_enabled", b"metric_log_enabled", "metric_log_retention_size", b"metric_log_retention_size", "metric_log_retention_time", b"metric_log_retention_time", "opentelemetry_span_log_enabled", b"opentelemetry_span_log_enabled", "opentelemetry_span_log_retention_size", b"opentelemetry_span_log_retention_size", "opentelemetry_span_log_retention_time", b"opentelemetry_span_log_retention_time", "part_log_retention_size", b"part_log_retention_size", "part_log_retention_time", b"part_log_retention_time", "query_log_retention_size", b"query_log_retention_size", "query_log_retention_time", b"query_log_retention_time", "query_thread_log_enabled", b"query_thread_log_enabled", "query_thread_log_retention_size", b"query_thread_log_retention_size", "query_thread_log_retention_time", b"query_thread_log_retention_time", "query_views_log_enabled", b"query_views_log_enabled", "query_views_log_retention_size", b"query_views_log_retention_size", "query_views_log_retention_time", b"query_views_log_retention_time", "rabbitmq", b"rabbitmq", "session_log_enabled", b"session_log_enabled", "session_log_retention_size", b"session_log_retention_size", "session_log_retention_time", b"session_log_retention_time", "text_log_enabled", b"text_log_enabled", "text_log_level", b"text_log_level", "text_log_retention_size", b"text_log_retention_size", "text_log_retention_time", b"text_log_retention_time", "timezone", b"timezone", "total_memory_profiler_step", b"total_memory_profiler_step", "total_memory_tracker_sample_probability", b"total_memory_tracker_sample_probability", "trace_log_enabled", b"trace_log_enabled", "trace_log_retention_size", b"trace_log_retention_size", "trace_log_retention_time", b"trace_log_retention_time", "uncompressed_cache_size", b"uncompressed_cache_size", "zookeeper_log_enabled", b"zookeeper_log_enabled", "zookeeper_log_retention_size", b"zookeeper_log_retention_size", "zookeeper_log_retention_time", b"zookeeper_log_retention_time"]) -> None: ...
+    def HasField(self, field_name: typing.Literal["asynchronous_insert_log_enabled", b"asynchronous_insert_log_enabled", "asynchronous_insert_log_retention_size", b"asynchronous_insert_log_retention_size", "asynchronous_insert_log_retention_time", b"asynchronous_insert_log_retention_time", "asynchronous_metric_log_enabled", b"asynchronous_metric_log_enabled", "asynchronous_metric_log_retention_size", b"asynchronous_metric_log_retention_size", "asynchronous_metric_log_retention_time", b"asynchronous_metric_log_retention_time", "background_buffer_flush_schedule_pool_size", b"background_buffer_flush_schedule_pool_size", "background_common_pool_size", b"background_common_pool_size", "background_distributed_schedule_pool_size", b"background_distributed_schedule_pool_size", "background_fetches_pool_size", b"background_fetches_pool_size", "background_merges_mutations_concurrency_ratio", b"background_merges_mutations_concurrency_ratio", "background_message_broker_schedule_pool_size", b"background_message_broker_schedule_pool_size", "background_move_pool_size", b"background_move_pool_size", "background_pool_size", b"background_pool_size", "background_schedule_pool_size", b"background_schedule_pool_size", "builtin_dictionaries_reload_interval", b"builtin_dictionaries_reload_interval", "default_database", b"default_database", "dictionaries_lazy_load", b"dictionaries_lazy_load", "geobase_enabled", b"geobase_enabled", "kafka", b"kafka", "keep_alive_timeout", b"keep_alive_timeout", "mark_cache_size", b"mark_cache_size", "max_concurrent_queries", b"max_concurrent_queries", "max_connections", b"max_connections", "max_partition_size_to_drop", b"max_partition_size_to_drop", "max_table_size_to_drop", b"max_table_size_to_drop", "merge_tree", b"merge_tree", "metric_log_enabled", b"metric_log_enabled", "metric_log_retention_size", b"metric_log_retention_size", "metric_log_retention_time", b"metric_log_retention_time", "opentelemetry_span_log_enabled", b"opentelemetry_span_log_enabled", "opentelemetry_span_log_retention_size", b"opentelemetry_span_log_retention_size", "opentelemetry_span_log_retention_time", b"opentelemetry_span_log_retention_time", "part_log_retention_size", b"part_log_retention_size", "part_log_retention_time", b"part_log_retention_time", "query_cache", b"query_cache", "query_log_retention_size", b"query_log_retention_size", "query_log_retention_time", b"query_log_retention_time", "query_thread_log_enabled", b"query_thread_log_enabled", "query_thread_log_retention_size", b"query_thread_log_retention_size", "query_thread_log_retention_time", b"query_thread_log_retention_time", "query_views_log_enabled", b"query_views_log_enabled", "query_views_log_retention_size", b"query_views_log_retention_size", "query_views_log_retention_time", b"query_views_log_retention_time", "rabbitmq", b"rabbitmq", "session_log_enabled", b"session_log_enabled", "session_log_retention_size", b"session_log_retention_size", "session_log_retention_time", b"session_log_retention_time", "text_log_enabled", b"text_log_enabled", "text_log_retention_size", b"text_log_retention_size", "text_log_retention_time", b"text_log_retention_time", "total_memory_profiler_step", b"total_memory_profiler_step", "total_memory_tracker_sample_probability", b"total_memory_tracker_sample_probability", "trace_log_enabled", b"trace_log_enabled", "trace_log_retention_size", b"trace_log_retention_size", "trace_log_retention_time", b"trace_log_retention_time", "uncompressed_cache_size", b"uncompressed_cache_size", "zookeeper_log_enabled", b"zookeeper_log_enabled", "zookeeper_log_retention_size", b"zookeeper_log_retention_size", "zookeeper_log_retention_time", b"zookeeper_log_retention_time"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing.Literal["asynchronous_insert_log_enabled", b"asynchronous_insert_log_enabled", "asynchronous_insert_log_retention_size", b"asynchronous_insert_log_retention_size", "asynchronous_insert_log_retention_time", b"asynchronous_insert_log_retention_time", "asynchronous_metric_log_enabled", b"asynchronous_metric_log_enabled", "asynchronous_metric_log_retention_size", b"asynchronous_metric_log_retention_size", "asynchronous_metric_log_retention_time", b"asynchronous_metric_log_retention_time", "background_buffer_flush_schedule_pool_size", b"background_buffer_flush_schedule_pool_size", "background_common_pool_size", b"background_common_pool_size", "background_distributed_schedule_pool_size", b"background_distributed_schedule_pool_size", "background_fetches_pool_size", b"background_fetches_pool_size", "background_merges_mutations_concurrency_ratio", b"background_merges_mutations_concurrency_ratio", "background_message_broker_schedule_pool_size", b"background_message_broker_schedule_pool_size", "background_move_pool_size", b"background_move_pool_size", "background_pool_size", b"background_pool_size", "background_schedule_pool_size", b"background_schedule_pool_size", "builtin_dictionaries_reload_interval", b"builtin_dictionaries_reload_interval", "compression", b"compression", "default_database", b"default_database", "dictionaries", b"dictionaries", "dictionaries_lazy_load", b"dictionaries_lazy_load", "geobase_enabled", b"geobase_enabled", "geobase_uri", b"geobase_uri", "graphite_rollup", b"graphite_rollup", "kafka", b"kafka", "kafka_topics", b"kafka_topics", "keep_alive_timeout", b"keep_alive_timeout", "log_level", b"log_level", "mark_cache_size", b"mark_cache_size", "max_concurrent_queries", b"max_concurrent_queries", "max_connections", b"max_connections", "max_partition_size_to_drop", b"max_partition_size_to_drop", "max_table_size_to_drop", b"max_table_size_to_drop", "merge_tree", b"merge_tree", "metric_log_enabled", b"metric_log_enabled", "metric_log_retention_size", b"metric_log_retention_size", "metric_log_retention_time", b"metric_log_retention_time", "opentelemetry_span_log_enabled", b"opentelemetry_span_log_enabled", "opentelemetry_span_log_retention_size", b"opentelemetry_span_log_retention_size", "opentelemetry_span_log_retention_time", b"opentelemetry_span_log_retention_time", "part_log_retention_size", b"part_log_retention_size", "part_log_retention_time", b"part_log_retention_time", "query_cache", b"query_cache", "query_log_retention_size", b"query_log_retention_size", "query_log_retention_time", b"query_log_retention_time", "query_masking_rules", b"query_masking_rules", "query_thread_log_enabled", b"query_thread_log_enabled", "query_thread_log_retention_size", b"query_thread_log_retention_size", "query_thread_log_retention_time", b"query_thread_log_retention_time", "query_views_log_enabled", b"query_views_log_enabled", "query_views_log_retention_size", b"query_views_log_retention_size", "query_views_log_retention_time", b"query_views_log_retention_time", "rabbitmq", b"rabbitmq", "session_log_enabled", b"session_log_enabled", "session_log_retention_size", b"session_log_retention_size", "session_log_retention_time", b"session_log_retention_time", "text_log_enabled", b"text_log_enabled", "text_log_level", b"text_log_level", "text_log_retention_size", b"text_log_retention_size", "text_log_retention_time", b"text_log_retention_time", "timezone", b"timezone", "total_memory_profiler_step", b"total_memory_profiler_step", "total_memory_tracker_sample_probability", b"total_memory_tracker_sample_probability", "trace_log_enabled", b"trace_log_enabled", "trace_log_retention_size", b"trace_log_retention_size", "trace_log_retention_time", b"trace_log_retention_time", "uncompressed_cache_size", b"uncompressed_cache_size", "zookeeper_log_enabled", b"zookeeper_log_enabled", "zookeeper_log_retention_size", b"zookeeper_log_retention_size", "zookeeper_log_retention_time", b"zookeeper_log_retention_time"]) -> None: ...

 global___ClickhouseConfig = ClickhouseConfig
```