cribl-control-plane 0.3.0b2__py3-none-any.whl → 0.3.0b4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/__init__.py +8 -5
- cribl_control_plane/errors/{healthstatus_error.py → healthserverstatus_error.py} +10 -9
- cribl_control_plane/groups_sdk.py +48 -24
- cribl_control_plane/health.py +22 -16
- cribl_control_plane/models/__init__.py +152 -29
- cribl_control_plane/models/authtoken.py +4 -7
- cribl_control_plane/models/configgroup.py +8 -7
- cribl_control_plane/models/createconfiggroupbyproductop.py +6 -5
- cribl_control_plane/models/createroutesappendbyidop.py +2 -2
- cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
- cribl_control_plane/models/groupcreaterequest.py +152 -0
- cribl_control_plane/models/{healthstatus.py → healthserverstatus.py} +7 -7
- cribl_control_plane/models/input.py +15 -15
- cribl_control_plane/models/inputappscope.py +20 -16
- cribl_control_plane/models/inputconfluentcloud.py +110 -0
- cribl_control_plane/models/inputcriblhttp.py +20 -16
- cribl_control_plane/models/inputcribllakehttp.py +20 -16
- cribl_control_plane/models/inputcribltcp.py +20 -16
- cribl_control_plane/models/inputdatadogagent.py +20 -16
- cribl_control_plane/models/inputedgeprometheus.py +44 -36
- cribl_control_plane/models/inputelastic.py +44 -27
- cribl_control_plane/models/inputeventhub.py +118 -0
- cribl_control_plane/models/inputfile.py +7 -2
- cribl_control_plane/models/inputfirehose.py +20 -16
- cribl_control_plane/models/inputgrafana.py +39 -31
- cribl_control_plane/models/inputhttp.py +20 -16
- cribl_control_plane/models/inputhttpraw.py +20 -16
- cribl_control_plane/models/inputkafka.py +108 -0
- cribl_control_plane/models/inputloki.py +20 -16
- cribl_control_plane/models/inputmetrics.py +20 -16
- cribl_control_plane/models/inputmodeldriventelemetry.py +20 -16
- cribl_control_plane/models/inputopentelemetry.py +19 -15
- cribl_control_plane/models/inputprometheus.py +44 -36
- cribl_control_plane/models/inputprometheusrw.py +20 -16
- cribl_control_plane/models/inputsplunk.py +20 -16
- cribl_control_plane/models/inputsplunkhec.py +19 -15
- cribl_control_plane/models/inputsyslog.py +39 -31
- cribl_control_plane/models/inputsystemmetrics.py +20 -10
- cribl_control_plane/models/inputtcp.py +30 -16
- cribl_control_plane/models/inputtcpjson.py +20 -16
- cribl_control_plane/models/inputwindowsmetrics.py +20 -10
- cribl_control_plane/models/inputwineventlogs.py +14 -0
- cribl_control_plane/models/inputwizwebhook.py +20 -16
- cribl_control_plane/models/inputzscalerhec.py +19 -15
- cribl_control_plane/models/logininfo.py +3 -3
- cribl_control_plane/models/output.py +21 -21
- cribl_control_plane/models/outputazureblob.py +7 -0
- cribl_control_plane/models/outputazuredataexplorer.py +283 -93
- cribl_control_plane/models/outputazureeventhub.py +169 -21
- cribl_control_plane/models/outputazurelogs.py +49 -21
- cribl_control_plane/models/outputchronicle.py +49 -21
- cribl_control_plane/models/outputclickhouse.py +49 -21
- cribl_control_plane/models/outputcloudwatch.py +49 -21
- cribl_control_plane/models/outputconfluentcloud.py +167 -22
- cribl_control_plane/models/outputcriblhttp.py +49 -21
- cribl_control_plane/models/outputcribltcp.py +49 -21
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -22
- cribl_control_plane/models/outputdatabricks.py +7 -0
- cribl_control_plane/models/outputdatadog.py +49 -21
- cribl_control_plane/models/outputdataset.py +49 -21
- cribl_control_plane/models/outputdls3.py +7 -0
- cribl_control_plane/models/outputdynatracehttp.py +49 -21
- cribl_control_plane/models/outputdynatraceotlp.py +49 -21
- cribl_control_plane/models/outputelastic.py +74 -21
- cribl_control_plane/models/outputelasticcloud.py +74 -21
- cribl_control_plane/models/outputfilesystem.py +7 -0
- cribl_control_plane/models/outputgooglechronicle.py +65 -22
- cribl_control_plane/models/outputgooglecloudlogging.py +50 -22
- cribl_control_plane/models/outputgooglecloudstorage.py +7 -0
- cribl_control_plane/models/outputgooglepubsub.py +49 -21
- cribl_control_plane/models/outputgrafanacloud.py +98 -42
- cribl_control_plane/models/outputgraphite.py +49 -21
- cribl_control_plane/models/outputhoneycomb.py +49 -21
- cribl_control_plane/models/outputhumiohec.py +49 -21
- cribl_control_plane/models/outputinfluxdb.py +49 -21
- cribl_control_plane/models/outputkafka.py +162 -19
- cribl_control_plane/models/outputkinesis.py +56 -21
- cribl_control_plane/models/outputloki.py +47 -19
- cribl_control_plane/models/outputminio.py +7 -0
- cribl_control_plane/models/outputmsk.py +54 -19
- cribl_control_plane/models/outputnewrelic.py +49 -21
- cribl_control_plane/models/outputnewrelicevents.py +50 -22
- cribl_control_plane/models/outputopentelemetry.py +49 -21
- cribl_control_plane/models/outputprometheus.py +49 -21
- cribl_control_plane/models/outputs3.py +7 -0
- cribl_control_plane/models/outputsentinel.py +49 -21
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -22
- cribl_control_plane/models/outputservicenow.py +49 -21
- cribl_control_plane/models/outputsignalfx.py +49 -21
- cribl_control_plane/models/outputsns.py +47 -19
- cribl_control_plane/models/outputsplunk.py +49 -21
- cribl_control_plane/models/outputsplunkhec.py +124 -21
- cribl_control_plane/models/outputsplunklb.py +49 -21
- cribl_control_plane/models/outputsqs.py +47 -19
- cribl_control_plane/models/outputstatsd.py +49 -21
- cribl_control_plane/models/outputstatsdext.py +49 -21
- cribl_control_plane/models/outputsumologic.py +49 -21
- cribl_control_plane/models/outputsyslog.py +129 -99
- cribl_control_plane/models/outputtcpjson.py +49 -21
- cribl_control_plane/models/outputwavefront.py +49 -21
- cribl_control_plane/models/outputwebhook.py +49 -21
- cribl_control_plane/models/outputxsiam.py +47 -19
- cribl_control_plane/models/runnablejobcollection.py +12 -8
- cribl_control_plane/models/runnablejobexecutor.py +12 -8
- cribl_control_plane/models/runnablejobscheduledsearch.py +12 -8
- cribl_control_plane/routes_sdk.py +6 -6
- cribl_control_plane/tokens.py +23 -15
- {cribl_control_plane-0.3.0b2.dist-info → cribl_control_plane-0.3.0b4.dist-info}/METADATA +4 -4
- {cribl_control_plane-0.3.0b2.dist-info → cribl_control_plane-0.3.0b4.dist-info}/RECORD +111 -112
- cribl_control_plane/models/error.py +0 -16
- cribl_control_plane/models/gethealthinfoop.py +0 -17
- {cribl_control_plane-0.3.0b2.dist-info → cribl_control_plane-0.3.0b4.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputazureeventhub.py

@@ -35,6 +35,15 @@ class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     RAW = "raw"


+class OutputAzureEventhubAuthTypeAuthenticationMethod(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
+    r"""Enter password directly, or select a stored secret"""
+
+    MANUAL = "manual"
+    SECRET = "secret"
+
+
 class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     # PLAIN
     PLAIN = "plain"

@@ -42,11 +51,59 @@ class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     OAUTHBEARER = "oauthbearer"


+class OutputAzureEventhubClientSecretAuthTypeAuthenticationMethod(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
+    MANUAL = "manual"
+    SECRET = "secret"
+    CERTIFICATE = "certificate"
+
+
+class OutputAzureEventhubMicrosoftEntraIDAuthenticationEndpoint(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
+    r"""Endpoint used to acquire authentication tokens from Azure"""
+
+    HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
+    HTTPS_LOGIN_MICROSOFTONLINE_US = "https://login.microsoftonline.us"
+    HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"
+
+
 class OutputAzureEventhubAuthenticationTypedDict(TypedDict):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

     disabled: NotRequired[bool]
+    auth_type: NotRequired[OutputAzureEventhubAuthTypeAuthenticationMethod]
+    r"""Enter password directly, or select a stored secret"""
+    password: NotRequired[str]
+    r"""Connection-string primary key, or connection-string secondary key, from the Event Hubs workspace"""
+    text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
     mechanism: NotRequired[OutputAzureEventhubSASLMechanism]
+    username: NotRequired[str]
+    r"""The username for authentication. For Event Hubs, this should always be $ConnectionString."""
+    client_secret_auth_type: NotRequired[
+        OutputAzureEventhubClientSecretAuthTypeAuthenticationMethod
+    ]
+    client_secret: NotRequired[str]
+    r"""client_secret to pass in the OAuth request parameter"""
+    client_text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    certificate_name: NotRequired[str]
+    r"""Select or create a stored certificate"""
+    cert_path: NotRequired[str]
+    priv_key_path: NotRequired[str]
+    passphrase: NotRequired[str]
+    oauth_endpoint: NotRequired[
+        OutputAzureEventhubMicrosoftEntraIDAuthenticationEndpoint
+    ]
+    r"""Endpoint used to acquire authentication tokens from Azure"""
+    client_id: NotRequired[str]
+    r"""client_id to pass in the OAuth request parameter"""
+    tenant_id: NotRequired[str]
+    r"""Directory ID (tenant identifier) in Azure Active Directory"""
+    scope: NotRequired[str]
+    r"""Scope to pass in the OAuth request parameter"""


 class OutputAzureEventhubAuthentication(BaseModel):

@@ -54,11 +111,74 @@ class OutputAzureEventhubAuthentication(BaseModel):

     disabled: Optional[bool] = False

+    auth_type: Annotated[
+        Annotated[
+            Optional[OutputAzureEventhubAuthTypeAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
+    ] = OutputAzureEventhubAuthTypeAuthenticationMethod.MANUAL
+    r"""Enter password directly, or select a stored secret"""
+
+    password: Optional[str] = None
+    r"""Connection-string primary key, or connection-string secondary key, from the Event Hubs workspace"""
+
+    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
+    r"""Select or create a stored text secret"""
+
     mechanism: Annotated[
         Optional[OutputAzureEventhubSASLMechanism],
         PlainValidator(validate_open_enum(False)),
     ] = OutputAzureEventhubSASLMechanism.PLAIN

+    username: Optional[str] = "$ConnectionString"
+    r"""The username for authentication. For Event Hubs, this should always be $ConnectionString."""
+
+    client_secret_auth_type: Annotated[
+        Annotated[
+            Optional[OutputAzureEventhubClientSecretAuthTypeAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="clientSecretAuthType"),
+    ] = OutputAzureEventhubClientSecretAuthTypeAuthenticationMethod.MANUAL
+
+    client_secret: Annotated[Optional[str], pydantic.Field(alias="clientSecret")] = None
+    r"""client_secret to pass in the OAuth request parameter"""
+
+    client_text_secret: Annotated[
+        Optional[str], pydantic.Field(alias="clientTextSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
+
+    certificate_name: Annotated[
+        Optional[str], pydantic.Field(alias="certificateName")
+    ] = None
+    r"""Select or create a stored certificate"""
+
+    cert_path: Annotated[Optional[str], pydantic.Field(alias="certPath")] = None
+
+    priv_key_path: Annotated[Optional[str], pydantic.Field(alias="privKeyPath")] = None
+
+    passphrase: Optional[str] = None
+
+    oauth_endpoint: Annotated[
+        Annotated[
+            Optional[OutputAzureEventhubMicrosoftEntraIDAuthenticationEndpoint],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="oauthEndpoint"),
+    ] = OutputAzureEventhubMicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
+    r"""Endpoint used to acquire authentication tokens from Azure"""
+
+    client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
+    r"""client_id to pass in the OAuth request parameter"""
+
+    tenant_id: Annotated[Optional[str], pydantic.Field(alias="tenantId")] = None
+    r"""Directory ID (tenant identifier) in Azure Active Directory"""
+
+    scope: Optional[str] = None
+    r"""Scope to pass in the OAuth request parameter"""
+

 class OutputAzureEventhubTLSSettingsClientSideTypedDict(TypedDict):
     disabled: NotRequired[bool]

@@ -86,6 +206,17 @@ class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


+class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""


@@ -104,17 +235,6 @@ class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


-class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputAzureEventhubPqControlsTypedDict(TypedDict):
     pass


@@ -171,6 +291,16 @@ class OutputAzureEventhubTypedDict(TypedDict):
     on_backpressure: NotRequired[OutputAzureEventhubBackpressureBehavior]
     r"""How to handle events when all receivers are exerting backpressure"""
     description: NotRequired[str]
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputAzureEventhubMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]

@@ -181,8 +311,6 @@ class OutputAzureEventhubTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputAzureEventhubQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputAzureEventhubMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputAzureEventhubPqControlsTypedDict]


@@ -292,6 +420,34 @@ class OutputAzureEventhub(BaseModel):

     description: Optional[str] = None

+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputAzureEventhubMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"

@@ -323,14 +479,6 @@ class OutputAzureEventhub(BaseModel):
     ] = OutputAzureEventhubQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputAzureEventhubMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputAzureEventhubPqControls], pydantic.Field(alias="pqControls")
     ] = None
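For orientation, a minimal usage sketch (not part of the package) of the new Event Hubs authentication fields shown above. The module, class, and alias names are taken from the outputazureeventhub.py diff; the GUIDs, secret name, and scope are placeholders, and the availability of pydantic v2's model_validate/model_dump on the generated base model is an assumption.

# Hypothetical sketch only. Class and alias names come from the diff above;
# the IDs and secret name below are placeholders.
from cribl_control_plane.models.outputazureeventhub import (
    OutputAzureEventhubAuthentication,
)

# Client-secret (Microsoft Entra ID) authentication via the new fields.
# The camelCase keys match the pydantic Field aliases, so model_validate
# accepts them directly; omitted fields keep the defaults visible in the diff
# (authType="manual", mechanism="plain", username="$ConnectionString",
# oauthEndpoint="https://login.microsoftonline.com").
auth = OutputAzureEventhubAuthentication.model_validate(
    {
        "disabled": False,
        "clientSecretAuthType": "secret",
        "clientTextSecret": "my-stored-secret",                  # placeholder secret ID
        "clientId": "00000000-0000-0000-0000-000000000000",      # placeholder
        "tenantId": "11111111-1111-1111-1111-111111111111",      # placeholder
        "scope": "https://eventhubs.azure.net/.default",         # placeholder
    }
)

# Serialize back to the wire shape using the same aliases.
print(auth.model_dump(by_alias=True, exclude_none=True))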
cribl_control_plane/models/outputazurelogs.py

@@ -109,6 +109,17 @@ class OutputAzureLogsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"


+class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputAzureLogsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""


@@ -127,17 +138,6 @@ class OutputAzureLogsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


-class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputAzureLogsPqControlsTypedDict(TypedDict):
     pass


@@ -200,6 +200,16 @@ class OutputAzureLogsTypedDict(TypedDict):
     auth_type: NotRequired[OutputAzureLogsAuthenticationMethod]
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""
     description: NotRequired[str]
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputAzureLogsMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]

@@ -210,8 +220,6 @@ class OutputAzureLogsTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputAzureLogsQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputAzureLogsMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputAzureLogsPqControlsTypedDict]
     workspace_id: NotRequired[str]
     r"""Azure Log Analytics Workspace ID. See Azure Dashboard Workspace > Advanced settings."""

@@ -344,6 +352,34 @@ class OutputAzureLogs(BaseModel):

     description: Optional[str] = None

+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputAzureLogsMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputAzureLogsMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"

@@ -375,14 +411,6 @@ class OutputAzureLogs(BaseModel):
     ] = OutputAzureLogsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputAzureLogsMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
    pq_controls: Annotated[
        Optional[OutputAzureLogsPqControls], pydantic.Field(alias="pqControls")
    ] = None
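This persistent-queue (PQ) change is not specific to Azure Logs: the release adds pq_strict_ordering, pq_rate_per_sec, pq_max_buffer_size, and pq_max_backpressure_sec and relocates pq_mode in the same way across most output models in the file list, including the Chronicle and ClickHouse diffs below. As a hedged sketch, the wire-level keys and the defaults read from the diff above are:

# Hypothetical illustration only; the keys are the pydantic Field aliases and
# the values are the defaults shown in the OutputAzureLogs diff above.
from cribl_control_plane.models.outputazurelogs import OutputAzureLogsMode

pq_defaults = {
    "pqStrictOrdering": True,                   # FIFO processing; disable to forward new events before the queue flushes
    "pqRatePerSec": 0,                          # events/sec throttle while draining PQ; 0 disables throttling
    "pqMode": OutputAzureLogsMode.ERROR.value,  # "error"; "backpressure" and "always" are the other members
    "pqMaxBufferSize": 42,                      # events held in memory before spilling to disk
    "pqMaxBackpressureSec": 30,                 # seconds to wait for backpressure to resolve before engaging the queue
}
print(pq_defaults)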
cribl_control_plane/models/outputchronicle.py

@@ -118,6 +118,17 @@ class OutputChronicleCustomLabel(BaseModel):
     value: str


+class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""


@@ -136,17 +147,6 @@ class OutputChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


-class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputChroniclePqControlsTypedDict(TypedDict):
     pass


@@ -225,6 +225,16 @@ class OutputChronicleTypedDict(TypedDict):
     r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
     service_account_credentials_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputChronicleMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]

@@ -235,8 +245,6 @@ class OutputChronicleTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputChronicleQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputChronicleMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputChroniclePqControlsTypedDict]


@@ -400,6 +408,34 @@ class OutputChronicle(BaseModel):
     ] = None
     r"""Select or create a stored text secret"""

+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputChronicleMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"

@@ -431,14 +467,6 @@ class OutputChronicle(BaseModel):
     ] = OutputChronicleQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputChronicleMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputChroniclePqControls], pydantic.Field(alias="pqControls")
     ] = None
cribl_control_plane/models/outputclickhouse.py

@@ -254,6 +254,17 @@ class ColumnMapping(BaseModel):
     r"""Type of the column in the ClickHouse database"""


+class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""


@@ -272,17 +283,6 @@ class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


-class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputClickHousePqControlsTypedDict(TypedDict):
     pass


@@ -386,6 +386,16 @@ class OutputClickHouseTypedDict(TypedDict):
     describe_table: NotRequired[str]
     r"""Retrieves the table schema from ClickHouse and populates the Column Mapping table"""
     column_mappings: NotRequired[List[ColumnMappingTypedDict]]
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputClickHouseMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]

@@ -396,8 +406,6 @@ class OutputClickHouseTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputClickHouseQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputClickHouseMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputClickHousePqControlsTypedDict]


@@ -621,6 +629,34 @@ class OutputClickHouse(BaseModel):
         Optional[List[ColumnMapping]], pydantic.Field(alias="columnMappings")
     ] = None

+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputClickHouseMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"

@@ -652,14 +688,6 @@ class OutputClickHouse(BaseModel):
     ] = OutputClickHouseQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputClickHouseMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputClickHousePqControls], pydantic.Field(alias="pqControls")
     ] = None