cribl-control-plane 0.2.1rc4__py3-none-any.whl → 0.2.1rc5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/groups_sdk.py +3 -0
- cribl_control_plane/mappings.py +1185 -0
- cribl_control_plane/models/__init__.py +149 -105
- cribl_control_plane/models/createadminproductsmappingsactivatebyproductop.py +52 -0
- cribl_control_plane/models/createadminproductsmappingsbyproductop.py +53 -0
- cribl_control_plane/models/deleteadminproductsmappingsbyproductandidop.py +51 -0
- cribl_control_plane/models/getadminproductsmappingsbyproductandidop.py +51 -0
- cribl_control_plane/models/getadminproductsmappingsbyproductop.py +44 -0
- cribl_control_plane/models/input.py +14 -14
- cribl_control_plane/models/inputappscope.py +16 -20
- cribl_control_plane/models/inputconfluentcloud.py +0 -110
- cribl_control_plane/models/inputcriblhttp.py +16 -20
- cribl_control_plane/models/inputcribllakehttp.py +16 -20
- cribl_control_plane/models/inputcribltcp.py +16 -20
- cribl_control_plane/models/inputdatadogagent.py +16 -20
- cribl_control_plane/models/inputedgeprometheus.py +36 -44
- cribl_control_plane/models/inputelastic.py +27 -44
- cribl_control_plane/models/inputeventhub.py +0 -118
- cribl_control_plane/models/inputfirehose.py +16 -20
- cribl_control_plane/models/inputgrafana.py +31 -39
- cribl_control_plane/models/inputhttp.py +16 -20
- cribl_control_plane/models/inputhttpraw.py +16 -20
- cribl_control_plane/models/inputkafka.py +0 -108
- cribl_control_plane/models/inputloki.py +16 -20
- cribl_control_plane/models/inputmetrics.py +16 -20
- cribl_control_plane/models/inputmodeldriventelemetry.py +16 -20
- cribl_control_plane/models/inputopentelemetry.py +15 -19
- cribl_control_plane/models/inputprometheus.py +36 -44
- cribl_control_plane/models/inputprometheusrw.py +16 -20
- cribl_control_plane/models/inputsplunk.py +16 -20
- cribl_control_plane/models/inputsplunkhec.py +15 -19
- cribl_control_plane/models/inputsyslog.py +31 -39
- cribl_control_plane/models/inputsystemmetrics.py +10 -20
- cribl_control_plane/models/inputtcp.py +16 -30
- cribl_control_plane/models/inputtcpjson.py +16 -20
- cribl_control_plane/models/inputwindowsmetrics.py +10 -20
- cribl_control_plane/models/inputwineventlogs.py +0 -14
- cribl_control_plane/models/inputwizwebhook.py +16 -20
- cribl_control_plane/models/inputzscalerhec.py +15 -19
- cribl_control_plane/models/mappingruleset.py +53 -0
- cribl_control_plane/models/mappingrulesetevalmappingfunction.py +71 -0
- cribl_control_plane/models/mappingrulesetgenericmappingfunction.py +29 -0
- cribl_control_plane/models/output.py +22 -22
- cribl_control_plane/models/outputazureblob.py +0 -7
- cribl_control_plane/models/outputazuredataexplorer.py +93 -283
- cribl_control_plane/models/outputazureeventhub.py +21 -169
- cribl_control_plane/models/outputazurelogs.py +21 -49
- cribl_control_plane/models/outputchronicle.py +21 -49
- cribl_control_plane/models/outputclickhouse.py +21 -49
- cribl_control_plane/models/outputcloudwatch.py +21 -49
- cribl_control_plane/models/outputconfluentcloud.py +22 -167
- cribl_control_plane/models/outputcriblhttp.py +21 -49
- cribl_control_plane/models/outputcribltcp.py +21 -49
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +22 -50
- cribl_control_plane/models/outputdatabricks.py +0 -7
- cribl_control_plane/models/outputdatadog.py +21 -49
- cribl_control_plane/models/outputdataset.py +21 -49
- cribl_control_plane/models/outputdls3.py +0 -7
- cribl_control_plane/models/outputdynatracehttp.py +21 -49
- cribl_control_plane/models/outputdynatraceotlp.py +21 -49
- cribl_control_plane/models/outputelastic.py +21 -74
- cribl_control_plane/models/outputelasticcloud.py +21 -74
- cribl_control_plane/models/outputfilesystem.py +0 -7
- cribl_control_plane/models/outputgooglechronicle.py +22 -65
- cribl_control_plane/models/outputgooglecloudlogging.py +22 -50
- cribl_control_plane/models/outputgooglecloudstorage.py +0 -7
- cribl_control_plane/models/outputgooglepubsub.py +21 -49
- cribl_control_plane/models/outputgrafanacloud.py +42 -98
- cribl_control_plane/models/outputgraphite.py +21 -49
- cribl_control_plane/models/outputhoneycomb.py +21 -49
- cribl_control_plane/models/outputhumiohec.py +21 -49
- cribl_control_plane/models/outputinfluxdb.py +21 -49
- cribl_control_plane/models/outputkafka.py +19 -162
- cribl_control_plane/models/outputkinesis.py +21 -56
- cribl_control_plane/models/outputloki.py +19 -47
- cribl_control_plane/models/outputminio.py +0 -7
- cribl_control_plane/models/outputmsk.py +19 -54
- cribl_control_plane/models/outputnewrelic.py +21 -49
- cribl_control_plane/models/outputnewrelicevents.py +22 -50
- cribl_control_plane/models/outputopentelemetry.py +21 -49
- cribl_control_plane/models/outputprometheus.py +21 -49
- cribl_control_plane/models/outputs3.py +0 -7
- cribl_control_plane/models/outputsentinel.py +21 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +22 -50
- cribl_control_plane/models/outputservicenow.py +21 -49
- cribl_control_plane/models/outputsignalfx.py +21 -49
- cribl_control_plane/models/outputsns.py +19 -47
- cribl_control_plane/models/outputsplunk.py +21 -49
- cribl_control_plane/models/outputsplunkhec.py +21 -49
- cribl_control_plane/models/outputsplunklb.py +21 -49
- cribl_control_plane/models/outputsqs.py +19 -47
- cribl_control_plane/models/outputstatsd.py +21 -49
- cribl_control_plane/models/outputstatsdext.py +21 -49
- cribl_control_plane/models/outputsumologic.py +21 -49
- cribl_control_plane/models/outputsyslog.py +99 -129
- cribl_control_plane/models/outputtcpjson.py +21 -49
- cribl_control_plane/models/outputwavefront.py +21 -49
- cribl_control_plane/models/outputwebhook.py +21 -49
- cribl_control_plane/models/outputxsiam.py +19 -47
- cribl_control_plane/models/pipeline.py +4 -4
- cribl_control_plane/models/rulesetid.py +13 -0
- cribl_control_plane/models/runnablejobcollection.py +8 -12
- cribl_control_plane/models/runnablejobexecutor.py +8 -12
- cribl_control_plane/models/runnablejobscheduledsearch.py +8 -12
- cribl_control_plane/models/updateadminproductsmappingsbyproductandidop.py +63 -0
- cribl_control_plane/pipelines.py +8 -8
- {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/METADATA +11 -2
- {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/RECORD +110 -99
- {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/WHEEL +0 -0
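The file list shows a new `cribl_control_plane/mappings.py` module plus request/response models for the admin products mappings endpoints (`mappingruleset.py`, `rulesetid.py`, and the `*adminproductsmappings*op.py` files). A quick way to see what those new modules actually export, without assuming any particular SDK method names (the snippet below is a sketch; it only imports modules named in the file list and prints whatever they define):

# Inspect the new mappings surface added in 0.2.1rc5. Module paths come from
# the file list above; the names they export are not assumed here.
import importlib

for mod_name in (
    "cribl_control_plane.mappings",
    "cribl_control_plane.models.mappingruleset",
    "cribl_control_plane.models.rulesetid",
):
    mod = importlib.import_module(mod_name)
    public = [name for name in dir(mod) if not name.startswith("_")]
    print(mod_name, "->", public)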
--- cribl_control_plane/models/outputazureeventhub.py
+++ cribl_control_plane/models/outputazureeventhub.py
@@ -35,15 +35,6 @@ class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     RAW = "raw"


-class OutputAzureEventhubAuthTypeAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
-    r"""Enter password directly, or select a stored secret"""
-
-    MANUAL = "manual"
-    SECRET = "secret"
-
-
 class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     # PLAIN
     PLAIN = "plain"
@@ -51,59 +42,11 @@ class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     OAUTHBEARER = "oauthbearer"


-class OutputAzureEventhubClientSecretAuthTypeAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
-    MANUAL = "manual"
-    SECRET = "secret"
-    CERTIFICATE = "certificate"
-
-
-class OutputAzureEventhubMicrosoftEntraIDAuthenticationEndpoint(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
-    r"""Endpoint used to acquire authentication tokens from Azure"""
-
-    HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
-    HTTPS_LOGIN_MICROSOFTONLINE_US = "https://login.microsoftonline.us"
-    HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"
-
-
 class OutputAzureEventhubAuthenticationTypedDict(TypedDict):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

     disabled: NotRequired[bool]
-    auth_type: NotRequired[OutputAzureEventhubAuthTypeAuthenticationMethod]
-    r"""Enter password directly, or select a stored secret"""
-    password: NotRequired[str]
-    r"""Connection-string primary key, or connection-string secondary key, from the Event Hubs workspace"""
-    text_secret: NotRequired[str]
-    r"""Select or create a stored text secret"""
     mechanism: NotRequired[OutputAzureEventhubSASLMechanism]
-    username: NotRequired[str]
-    r"""The username for authentication. For Event Hubs, this should always be $ConnectionString."""
-    client_secret_auth_type: NotRequired[
-        OutputAzureEventhubClientSecretAuthTypeAuthenticationMethod
-    ]
-    client_secret: NotRequired[str]
-    r"""client_secret to pass in the OAuth request parameter"""
-    client_text_secret: NotRequired[str]
-    r"""Select or create a stored text secret"""
-    certificate_name: NotRequired[str]
-    r"""Select or create a stored certificate"""
-    cert_path: NotRequired[str]
-    priv_key_path: NotRequired[str]
-    passphrase: NotRequired[str]
-    oauth_endpoint: NotRequired[
-        OutputAzureEventhubMicrosoftEntraIDAuthenticationEndpoint
-    ]
-    r"""Endpoint used to acquire authentication tokens from Azure"""
-    client_id: NotRequired[str]
-    r"""client_id to pass in the OAuth request parameter"""
-    tenant_id: NotRequired[str]
-    r"""Directory ID (tenant identifier) in Azure Active Directory"""
-    scope: NotRequired[str]
-    r"""Scope to pass in the OAuth request parameter"""


 class OutputAzureEventhubAuthentication(BaseModel):
@@ -111,74 +54,11 @@ class OutputAzureEventhubAuthentication(BaseModel):

     disabled: Optional[bool] = False

-    auth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubAuthTypeAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
-    ] = OutputAzureEventhubAuthTypeAuthenticationMethod.MANUAL
-    r"""Enter password directly, or select a stored secret"""
-
-    password: Optional[str] = None
-    r"""Connection-string primary key, or connection-string secondary key, from the Event Hubs workspace"""
-
-    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
-    r"""Select or create a stored text secret"""
-
     mechanism: Annotated[
         Optional[OutputAzureEventhubSASLMechanism],
         PlainValidator(validate_open_enum(False)),
     ] = OutputAzureEventhubSASLMechanism.PLAIN

-    username: Optional[str] = "$ConnectionString"
-    r"""The username for authentication. For Event Hubs, this should always be $ConnectionString."""
-
-    client_secret_auth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubClientSecretAuthTypeAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="clientSecretAuthType"),
-    ] = OutputAzureEventhubClientSecretAuthTypeAuthenticationMethod.MANUAL
-
-    client_secret: Annotated[Optional[str], pydantic.Field(alias="clientSecret")] = None
-    r"""client_secret to pass in the OAuth request parameter"""
-
-    client_text_secret: Annotated[
-        Optional[str], pydantic.Field(alias="clientTextSecret")
-    ] = None
-    r"""Select or create a stored text secret"""
-
-    certificate_name: Annotated[
-        Optional[str], pydantic.Field(alias="certificateName")
-    ] = None
-    r"""Select or create a stored certificate"""
-
-    cert_path: Annotated[Optional[str], pydantic.Field(alias="certPath")] = None
-
-    priv_key_path: Annotated[Optional[str], pydantic.Field(alias="privKeyPath")] = None
-
-    passphrase: Optional[str] = None
-
-    oauth_endpoint: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubMicrosoftEntraIDAuthenticationEndpoint],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="oauthEndpoint"),
-    ] = OutputAzureEventhubMicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
-    r"""Endpoint used to acquire authentication tokens from Azure"""
-
-    client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
-    r"""client_id to pass in the OAuth request parameter"""
-
-    tenant_id: Annotated[Optional[str], pydantic.Field(alias="tenantId")] = None
-    r"""Directory ID (tenant identifier) in Azure Active Directory"""
-
-    scope: Optional[str] = None
-    r"""Scope to pass in the OAuth request parameter"""
-

 class OutputAzureEventhubTLSSettingsClientSideTypedDict(TypedDict):
     disabled: NotRequired[bool]
@@ -206,17 +86,6 @@ class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -235,6 +104,17 @@ class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


+class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputAzureEventhubPqControlsTypedDict(TypedDict):
     pass

@@ -291,16 +171,6 @@ class OutputAzureEventhubTypedDict(TypedDict):
     on_backpressure: NotRequired[OutputAzureEventhubBackpressureBehavior]
     r"""How to handle events when all receivers are exerting backpressure"""
     description: NotRequired[str]
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputAzureEventhubMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -311,6 +181,8 @@ class OutputAzureEventhubTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputAzureEventhubQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputAzureEventhubMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputAzureEventhubPqControlsTypedDict]


@@ -420,34 +292,6 @@ class OutputAzureEventhub(BaseModel):

     description: Optional[str] = None

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputAzureEventhubMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -479,6 +323,14 @@ class OutputAzureEventhub(BaseModel):
     ] = OutputAzureEventhubQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputAzureEventhubMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputAzureEventhubPqControls], pydantic.Field(alias="pqControls")
     ] = None
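Net effect on the Event Hubs Destination model: the Microsoft Entra ID, client-secret, and certificate authentication fields are removed, and OutputAzureEventhubAuthentication now carries only disabled and mechanism. A minimal sketch against the new shape, using only names that appear in the diff above (whether these classes are also re-exported from cribl_control_plane.models is not assumed here):

# Sketch: build the slimmed-down 0.2.1rc5 auth model. Values are illustrative.
from cribl_control_plane.models.outputazureeventhub import (
    OutputAzureEventhubAuthentication,
    OutputAzureEventhubSASLMechanism,
)

auth = OutputAzureEventhubAuthentication(
    disabled=False,
    mechanism=OutputAzureEventhubSASLMechanism.OAUTHBEARER,
)
# Fields removed in this release (auth_type, password, text_secret, username,
# client_secret*, certificate_name, oauth_endpoint, client_id, tenant_id, scope)
# are no longer defined on this model.
print(auth.model_dump(by_alias=True, exclude_none=True))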
--- cribl_control_plane/models/outputazurelogs.py
+++ cribl_control_plane/models/outputazurelogs.py
@@ -109,17 +109,6 @@ class OutputAzureLogsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"


-class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputAzureLogsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -138,6 +127,17 @@ class OutputAzureLogsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


+class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputAzureLogsPqControlsTypedDict(TypedDict):
     pass

@@ -200,16 +200,6 @@ class OutputAzureLogsTypedDict(TypedDict):
     auth_type: NotRequired[OutputAzureLogsAuthenticationMethod]
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""
     description: NotRequired[str]
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputAzureLogsMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -220,6 +210,8 @@ class OutputAzureLogsTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputAzureLogsQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputAzureLogsMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputAzureLogsPqControlsTypedDict]
     workspace_id: NotRequired[str]
     r"""Azure Log Analytics Workspace ID. See Azure Dashboard Workspace > Advanced settings."""
@@ -352,34 +344,6 @@ class OutputAzureLogs(BaseModel):

     description: Optional[str] = None

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputAzureLogsMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -411,6 +375,14 @@ class OutputAzureLogs(BaseModel):
     ] = OutputAzureLogsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputAzureLogsMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputAzureLogsMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputAzureLogsPqControls], pydantic.Field(alias="pqControls")
     ] = None
--- cribl_control_plane/models/outputchronicle.py
+++ cribl_control_plane/models/outputchronicle.py
@@ -118,17 +118,6 @@ class OutputChronicleCustomLabel(BaseModel):
     value: str


-class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -147,6 +136,17 @@ class OutputChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


+class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputChroniclePqControlsTypedDict(TypedDict):
     pass

@@ -225,16 +225,6 @@ class OutputChronicleTypedDict(TypedDict):
     r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
     service_account_credentials_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputChronicleMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -245,6 +235,8 @@ class OutputChronicleTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputChronicleQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputChronicleMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputChroniclePqControlsTypedDict]


@@ -408,34 +400,6 @@ class OutputChronicle(BaseModel):
     ] = None
     r"""Select or create a stored text secret"""

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputChronicleMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -467,6 +431,14 @@ class OutputChronicle(BaseModel):
     ] = OutputChronicleQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputChronicleMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputChroniclePqControls], pydantic.Field(alias="pqControls")
     ] = None
--- cribl_control_plane/models/outputclickhouse.py
+++ cribl_control_plane/models/outputclickhouse.py
@@ -254,17 +254,6 @@ class ColumnMapping(BaseModel):
     r"""Type of the column in the ClickHouse database"""


-class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -283,6 +272,17 @@ class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


+class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputClickHousePqControlsTypedDict(TypedDict):
     pass

@@ -386,16 +386,6 @@ class OutputClickHouseTypedDict(TypedDict):
     describe_table: NotRequired[str]
     r"""Retrieves the table schema from ClickHouse and populates the Column Mapping table"""
     column_mappings: NotRequired[List[ColumnMappingTypedDict]]
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputClickHouseMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -406,6 +396,8 @@ class OutputClickHouseTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputClickHouseQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputClickHouseMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputClickHousePqControlsTypedDict]


@@ -629,34 +621,6 @@ class OutputClickHouse(BaseModel):
         Optional[List[ColumnMapping]], pydantic.Field(alias="columnMappings")
     ] = None

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputClickHouseMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -688,6 +652,14 @@ class OutputClickHouse(BaseModel):
     ] = OutputClickHouseQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputClickHouseMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputClickHousePqControls], pydantic.Field(alias="pqControls")
     ] = None
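The same pattern repeats across the Azure Event Hubs, Azure Logs, Chronicle, and ClickHouse Destinations shown above: pq_strict_ordering, pq_rate_per_sec, pq_max_buffer_size, and pq_max_backpressure_sec are removed, while pq_mode is kept but now sits after pq_on_backpressure. A small probe, assuming only pydantic v2's model_fields on these generated models and the module path from the file list, shows which persistent-queue fields the installed build still defines:

# Compatibility probe: which pq_* fields does the installed SDK still expose?
from cribl_control_plane.models.outputazurelogs import OutputAzureLogs

pq_fields = sorted(name for name in OutputAzureLogs.model_fields if name.startswith("pq_"))
print(pq_fields)
# Against 0.2.1rc5 this list should no longer include pq_strict_ordering,
# pq_rate_per_sec, pq_max_buffer_size, or pq_max_backpressure_sec, while
# pq_mode, pq_on_backpressure, pq_max_file_size, pq_max_size, and pq_controls
# remain (per the diff above).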