cribl-control-plane 0.0.44a1__py3-none-any.whl → 0.0.44a2__py3-none-any.whl
- cribl_control_plane/_hooks/clientcredentials.py +1 -1
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/acl.py +5 -3
- cribl_control_plane/auth_sdk.py +6 -3
- cribl_control_plane/basesdk.py +11 -1
- cribl_control_plane/commits.py +5 -3
- cribl_control_plane/destinations.py +6 -4
- cribl_control_plane/errors/__init__.py +15 -3
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/groups_configs.py +8 -3
- cribl_control_plane/groups_sdk.py +6 -4
- cribl_control_plane/models/__init__.py +17 -4
- cribl_control_plane/models/appmode.py +2 -1
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/hbcriblinfo.py +6 -1
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +47 -20
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +46 -16
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +27 -10
- cribl_control_plane/models/inputmsk.py +53 -18
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +74 -29
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +36 -13
- cribl_control_plane/models/outputclickhouse.py +56 -21
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +77 -32
- cribl_control_plane/models/outputcriblhttp.py +46 -18
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -15
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +54 -21
- cribl_control_plane/models/outputdynatraceotlp.py +56 -23
- cribl_control_plane/models/outputelastic.py +44 -19
- cribl_control_plane/models/outputelasticcloud.py +37 -13
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +99 -34
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +36 -13
- cribl_control_plane/models/outputhumiohec.py +44 -17
- cribl_control_plane/models/outputinfluxdb.py +43 -17
- cribl_control_plane/models/outputkafka.py +74 -28
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +82 -30
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +42 -15
- cribl_control_plane/models/outputopentelemetry.py +68 -27
- cribl_control_plane/models/outputprometheus.py +36 -13
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +51 -19
- cribl_control_plane/models/outputservicenow.py +61 -25
- cribl_control_plane/models/outputsignalfx.py +38 -15
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +36 -13
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +36 -13
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +36 -15
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/workertypes.py +2 -1
- cribl_control_plane/nodes.py +5 -3
- cribl_control_plane/sdk.py +15 -2
- cribl_control_plane/sources.py +5 -3
- cribl_control_plane/utils/__init__.py +15 -3
- cribl_control_plane/utils/eventstreaming.py +10 -0
- cribl_control_plane/versions.py +11 -6
- {cribl_control_plane-0.0.44a1.dist-info → cribl_control_plane-0.0.44a2.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.44a2.dist-info/RECORD +325 -0
- cribl_control_plane-0.0.44a1.dist-info/RECORD +0 -325
- {cribl_control_plane-0.0.44a1.dist-info → cribl_control_plane-0.0.44a2.dist-info}/WHEEL +0 -0

--- a/cribl_control_plane/models/outputconfluentcloud.py
+++ b/cribl_control_plane/models/outputconfluentcloud.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,14 +15,14 @@ class OutputConfluentCloudType(str, Enum):
     CONFLUENT_CLOUD = "confluent_cloud"
 
 
-class OutputConfluentCloudMinimumTLSVersion(str, Enum):
+class OutputConfluentCloudMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputConfluentCloudMaximumTLSVersion(str, Enum):
+class OutputConfluentCloudMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -79,17 +82,23 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputConfluentCloudMinimumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputConfluentCloudMaximumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class OutputConfluentCloudAcknowledgments(int, Enum):
+class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""
 
     ONE = 1
@@ -97,7 +106,7 @@ class OutputConfluentCloudAcknowledgments(int, Enum):
     MINUS_1 = -1
 
 
-class OutputConfluentCloudRecordDataFormat(str, Enum):
+class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""
 
     JSON = "json"
@@ -105,7 +114,7 @@ class OutputConfluentCloudRecordDataFormat(str, Enum):
     PROTOBUF = "protobuf"
 
 
-class OutputConfluentCloudCompression(str, Enum):
+class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""
 
     NONE = "none"
@@ -114,7 +123,7 @@ class OutputConfluentCloudCompression(str, Enum):
     LZ4 = "lz4"
 
 
-class OutputConfluentCloudSchemaType(str, Enum):
+class OutputConfluentCloudSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The schema format used to encode and decode event data"""
 
     AVRO = "avro"
@@ -140,14 +149,18 @@ class OutputConfluentCloudAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -207,12 +220,18 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -249,7 +268,11 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
     schema_type: Annotated[
-        Optional[OutputConfluentCloudSchemaType], pydantic.Field(alias="schemaType")
+        Annotated[
+            Optional[OutputConfluentCloudSchemaType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="schemaType"),
     ] = OutputConfluentCloudSchemaType.AVRO
     r"""The schema format used to encode and decode event data"""
 
@@ -282,7 +305,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputConfluentCloudSASLMechanism(str, Enum):
+class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -303,9 +326,10 @@ class OutputConfluentCloudAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    mechanism: Optional[OutputConfluentCloudSASLMechanism] = (
-        OutputConfluentCloudSASLMechanism.PLAIN
-    )
+    mechanism: Annotated[
+        Optional[OutputConfluentCloudSASLMechanism],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputConfluentCloudSASLMechanism.PLAIN
 
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -313,7 +337,7 @@ class OutputConfluentCloudAuthentication(BaseModel):
     r"""Enable OAuth authentication"""
 
 
-class OutputConfluentCloudBackpressureBehavior(str, Enum):
+class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -321,21 +345,23 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputConfluentCloudPqCompressCompression(str, Enum):
+class OutputConfluentCloudPqCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputConfluentCloudQueueFullBehavior(str, Enum):
+class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputConfluentCloudMode(str, Enum):
+class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -449,19 +475,25 @@ class OutputConfluentCloud(BaseModel):
 
     tls: Optional[OutputConfluentCloudTLSSettingsClientSide] = None
 
-    ack: Optional[OutputConfluentCloudAcknowledgments] = (
-        OutputConfluentCloudAcknowledgments.ONE
-    )
+    ack: Annotated[
+        Optional[OutputConfluentCloudAcknowledgments],
+        PlainValidator(validate_open_enum(True)),
+    ] = OutputConfluentCloudAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""
 
     format_: Annotated[
-        Optional[OutputConfluentCloudRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputConfluentCloudRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputConfluentCloudRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""
 
-    compression: Optional[OutputConfluentCloudCompression] = (
-        OutputConfluentCloudCompression.GZIP
-    )
+    compression: Annotated[
+        Optional[OutputConfluentCloudCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputConfluentCloudCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""
 
     max_record_size_kb: Annotated[
@@ -522,7 +554,10 @@ class OutputConfluentCloud(BaseModel):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
 
     on_backpressure: Annotated[
-        Optional[OutputConfluentCloudBackpressureBehavior],
+        Annotated[
+            Optional[OutputConfluentCloudBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputConfluentCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -548,19 +583,29 @@ class OutputConfluentCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputConfluentCloudPqCompressCompression],
+        Annotated[
+            Optional[OutputConfluentCloudPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputConfluentCloudPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputConfluentCloudQueueFullBehavior],
+        Annotated[
+            Optional[OutputConfluentCloudQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputConfluentCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputConfluentCloudMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputConfluentCloudMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
    ] = OutputConfluentCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
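
Every hunk above applies the same two-part change: each generated enum gains metaclass=utils.OpenEnumMeta, and each enum-typed field is wrapped in PlainValidator(validate_open_enum(...)), so values outside the member list no longer fail pydantic validation. The boolean argument appears to distinguish int-valued enums from string-valued ones: the int-valued ack field uses validate_open_enum(True) while everything else uses False, though that reading is an inference from the generated code, not a documented contract. Below is a minimal self-contained sketch of the pattern; this OpenEnumMeta is a stand-in, not the package's actual implementation, and PlainValidator(Compression) plays the role that PlainValidator(validate_open_enum(False)) plays in the generated code.

# Illustrative sketch only, assuming pydantic v2.
from enum import Enum, EnumMeta
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class OpenEnumMeta(EnumMeta):
    """Enum lookup that falls back to the raw value instead of raising."""

    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # unknown member: pass the raw value through


class Compression(str, Enum, metaclass=OpenEnumMeta):
    NONE = "none"
    GZIP = "gzip"


class Output(pydantic.BaseModel):
    # The plain validator replaces pydantic's strict enum check with the
    # forgiving lookup above, so unlisted codecs survive validation.
    compression: Annotated[
        Optional[Compression], PlainValidator(Compression)
    ] = Compression.GZIP


print(Output(compression="gzip").compression)  # Compression.GZIP
print(Output(compression="zstd").compression)  # "zstd", no ValidationError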

--- a/cribl_control_plane/models/outputcriblhttp.py
+++ b/cribl_control_plane/models/outputcriblhttp.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,14 +15,14 @@ class OutputCriblHTTPType(str, Enum):
     CRIBL_HTTP = "cribl_http"
 
 
-class OutputCriblHTTPMinimumTLSVersion(str, Enum):
+class OutputCriblHTTPMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputCriblHTTPMaximumTLSVersion(str, Enum):
+class OutputCriblHTTPMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -79,15 +82,23 @@ class OutputCriblHTTPTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputCriblHTTPMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputCriblHTTPMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputCriblHTTPMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputCriblHTTPMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class OutputCriblHTTPCompression(str, Enum):
+class OutputCriblHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending"""
 
     NONE = "none"
@@ -105,7 +116,7 @@ class OutputCriblHTTPExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputCriblHTTPFailedRequestLoggingMode(str, Enum):
+class OutputCriblHTTPFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -152,7 +163,7 @@ class OutputCriblHTTPTimeoutRetrySettingsTypedDict(TypedDict):
 
 class OutputCriblHTTPTimeoutRetrySettings(BaseModel):
     timeout_retry: Annotated[Optional[bool], pydantic.Field(alias="timeoutRetry")] = (
-
+        True
     )
 
     initial_backoff: Annotated[
@@ -167,7 +178,7 @@ class OutputCriblHTTPTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputCriblHTTPBackpressureBehavior(str, Enum):
+class OutputCriblHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -190,21 +201,21 @@ class OutputCriblHTTPURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
 
 
-class OutputCriblHTTPPqCompressCompression(str, Enum):
+class OutputCriblHTTPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputCriblHTTPQueueFullBehavior(str, Enum):
+class OutputCriblHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputCriblHTTPMode(str, Enum):
+class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -335,7 +346,9 @@ class OutputCriblHTTP(BaseModel):
     ] = None
     r"""Fields to exclude from the event. By default, all internal fields except `__output` are sent. Example: `cribl_pipe`, `c*`. Wildcards supported."""
 
-    compression: Optional[OutputCriblHTTPCompression] = OutputCriblHTTPCompression.GZIP
+    compression: Annotated[
+        Optional[OutputCriblHTTPCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputCriblHTTPCompression.GZIP
     r"""Codec to use to compress the data before sending"""
 
     concurrency: Optional[float] = 5
@@ -374,7 +387,10 @@ class OutputCriblHTTP(BaseModel):
     r"""Headers to add to all events"""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputCriblHTTPFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputCriblHTTPFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputCriblHTTPFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -397,11 +413,14 @@ class OutputCriblHTTP(BaseModel):
 
     response_honor_retry_after_header: Annotated[
         Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
-    ] =
+    ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputCriblHTTPBackpressureBehavior],
+        Annotated[
+            Optional[OutputCriblHTTPBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCriblHTTPBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -445,19 +464,28 @@ class OutputCriblHTTP(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputCriblHTTPPqCompressCompression],
+        Annotated[
+            Optional[OutputCriblHTTPPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputCriblHTTPPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputCriblHTTPQueueFullBehavior],
+        Annotated[
+            Optional[OutputCriblHTTPQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCriblHTTPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputCriblHTTPMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputCriblHTTPMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
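
Note the shape of the rewritten fields in this file: two Annotated layers, the inner one binding the open-enum validator to the type and the outer one carrying the pydantic.Field wire alias (typing flattens the nesting into a single metadata list). A hedged sketch of the same shape with stand-in names, showing that an unlisted value both validates and round-trips back out under the alias:

# Sketch of the nested Annotated pattern; Mode, keep_unknown, and Dest
# are illustrative stand-ins, not the SDK's own names.
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"


def keep_unknown(value):
    """Stand-in for validate_open_enum(False): resolve known members,
    let anything else through untouched."""
    try:
        return Mode(value)
    except ValueError:
        return value


class Dest(pydantic.BaseModel):
    pq_mode: Annotated[
        Annotated[Optional[Mode], PlainValidator(keep_unknown)],
        pydantic.Field(alias="pqMode"),
    ] = Mode.ERROR


d = Dest.model_validate({"pqMode": "mirror"})  # unlisted value accepted
print(d.pq_mode)                    # mirror
print(d.model_dump(by_alias=True))  # {'pqMode': 'mirror'}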

--- a/cribl_control_plane/models/outputcribllake.py
+++ b/cribl_control_plane/models/outputcribllake.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,14 +15,14 @@ class OutputCriblLakeType(str, Enum):
     CRIBL_LAKE = "cribl_lake"
 
 
-class OutputCriblLakeSignatureVersion(str, Enum):
+class OutputCriblLakeSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing S3 requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputCriblLakeObjectACL(str, Enum):
+class OutputCriblLakeObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""
 
     PRIVATE = "private"
@@ -31,7 +34,7 @@ class OutputCriblLakeObjectACL(str, Enum):
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
-class OutputCriblLakeStorageClass(str, Enum):
+class OutputCriblLakeStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""
 
     STANDARD = "STANDARD"
@@ -44,32 +47,34 @@ class OutputCriblLakeStorageClass(str, Enum):
     DEEP_ARCHIVE = "DEEP_ARCHIVE"
 
 
-class OutputCriblLakeServerSideEncryptionForUploadedObjects(str, Enum):
+class OutputCriblLakeServerSideEncryptionForUploadedObjects(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     AES256 = "AES256"
     AWS_KMS = "aws:kms"
 
 
-class OutputCriblLakeBackpressureBehavior(str, Enum):
+class OutputCriblLakeBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputCriblLakeDiskSpaceProtection(str, Enum):
+class OutputCriblLakeDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class AwsAuthenticationMethod(str, Enum):
+class AwsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     AUTO = "auto"
     AUTO_RPC = "auto_rpc"
     MANUAL = "manual"
 
 
-class OutputCriblLakeFormat(str, Enum):
+class OutputCriblLakeFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     JSON = "json"
     PARQUET = "parquet"
     DDSS = "ddss"
@@ -200,7 +205,10 @@ class OutputCriblLake(BaseModel):
     r"""S3 service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to S3-compatible endpoint."""
 
     signature_version: Annotated[
-        Optional[OutputCriblLakeSignatureVersion],
+        Annotated[
+            Optional[OutputCriblLakeSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputCriblLakeSignatureVersion.V4
     r"""Signature version to use for signing S3 requests"""
@@ -249,17 +257,28 @@ class OutputCriblLake(BaseModel):
     r"""Lake dataset to send the data to."""
 
     object_acl: Annotated[
-        Optional[OutputCriblLakeObjectACL], pydantic.Field(alias="objectACL")
+        Annotated[
+            Optional[OutputCriblLakeObjectACL],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="objectACL"),
     ] = OutputCriblLakeObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""
 
     storage_class: Annotated[
-        Optional[OutputCriblLakeStorageClass], pydantic.Field(alias="storageClass")
+        Annotated[
+            Optional[OutputCriblLakeStorageClass],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="storageClass"),
     ] = None
     r"""Storage class to select for uploaded objects"""
 
     server_side_encryption: Annotated[
-        Optional[OutputCriblLakeServerSideEncryptionForUploadedObjects],
+        Annotated[
+            Optional[OutputCriblLakeServerSideEncryptionForUploadedObjects],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="serverSideEncryption"),
     ] = None
 
@@ -300,7 +319,10 @@ class OutputCriblLake(BaseModel):
     r"""Buffer size used to write to a file"""
 
     on_backpressure: Annotated[
-        Optional[OutputCriblLakeBackpressureBehavior],
+        Annotated[
+            Optional[OutputCriblLakeBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCriblLakeBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -311,7 +333,10 @@ class OutputCriblLake(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
 
     on_disk_full_backpressure: Annotated[
-        Optional[OutputCriblLakeDiskSpaceProtection],
+        Annotated[
+            Optional[OutputCriblLakeDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputCriblLakeDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -337,12 +362,17 @@ class OutputCriblLake(BaseModel):
     r"""Maximum number of files that can be waiting for upload before backpressure is applied"""
 
     aws_authentication_method: Annotated[
-        Optional[AwsAuthenticationMethod],
+        Annotated[
+            Optional[AwsAuthenticationMethod], PlainValidator(validate_open_enum(False))
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = AwsAuthenticationMethod.AUTO
 
     format_: Annotated[
-        Optional[OutputCriblLakeFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputCriblLakeFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="format"),
    ] = None
 
     max_concurrent_file_parts: Annotated[