cribl-control-plane 0.0.50rc1__py3-none-any.whl → 0.0.51__py3-none-any.whl
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +6 -4
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/health.py +2 -6
- cribl_control_plane/httpclient.py +0 -1
- cribl_control_plane/models/__init__.py +4 -21
- cribl_control_plane/models/appmode.py +1 -2
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +2 -7
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/getversionshowop.py +5 -6
- cribl_control_plane/models/gitinfo.py +3 -14
- cribl_control_plane/models/hbcriblinfo.py +1 -11
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +20 -47
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +16 -46
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
- cribl_control_plane/models/inputmsk.py +18 -53
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeprovidedinfo.py +1 -4
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +32 -77
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +28 -74
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +30 -82
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/packinfo.py +0 -3
- cribl_control_plane/models/packinstallinfo.py +0 -3
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
- cribl_control_plane/models/workertypes.py +1 -2
- {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/METADATA +14 -5
- cribl_control_plane-0.0.51.dist-info/RECORD +325 -0
- cribl_control_plane/models/error.py +0 -16
- cribl_control_plane/models/gethealthinfoop.py +0 -17
- cribl_control_plane/models/gitshowresult.py +0 -19
- cribl_control_plane-0.0.50rc1.dist-info/RECORD +0 -328
- {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/WHEEL +0 -0
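The pattern repeated across nearly every file above is mechanical: the generated enums drop `metaclass=utils.OpenEnumMeta`, and enum-typed fields drop their `PlainValidator(validate_open_enum(False))` wrapper, leaving plain `enum.Enum` subclasses that pydantic validates directly. Assuming the removed open-enum machinery tolerated values outside the declared members (its internals are not shown in this diff), the user-visible effect is that unrecognized values now fail validation instead of passing through. A minimal sketch of closed-enum behavior, using a hypothetical `Codec` enum and `Dest` model rather than the generated classes:

from enum import Enum
from typing import Optional

from pydantic import BaseModel, ValidationError


class Codec(str, Enum):
    # Hypothetical stand-in for a generated enum such as OutputConfluentCloudCompression
    NONE = "none"
    GZIP = "gzip"


class Dest(BaseModel):
    # Hypothetical stand-in for a generated output model
    compression: Optional[Codec] = Codec.GZIP


print(Dest().compression)                    # Codec.GZIP -- the default still applies
print(Dest(compression="none").compression)  # Codec.NONE -- members coerce from strings
try:
    Dest(compression="zstd")                 # not a declared member
except ValidationError:
    print("rejected")                        # a closed Enum field rejects unknown values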
--- cribl_control_plane/models/outputconfluentcloud.py (0.0.50rc1)
+++ cribl_control_plane/models/outputconfluentcloud.py (0.0.51)
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,14 +12,14 @@ class OutputConfluentCloudType(str, Enum):
     CONFLUENT_CLOUD = "confluent_cloud"
 
 
-class OutputConfluentCloudMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputConfluentCloudMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -82,23 +79,17 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments."""
 
     ONE = 1
@@ -106,7 +97,7 @@ class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     MINUS_1 = -1
 
 
-class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to Kafka."""
 
     JSON = "json"
@@ -114,7 +105,7 @@ class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PROTOBUF = "protobuf"
 
 
-class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudCompression(str, Enum):
     r"""Codec to use to compress the data before sending to Kafka"""
 
     NONE = "none"
@@ -123,7 +114,7 @@ class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     LZ4 = "lz4"
 
 
-class OutputConfluentCloudSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudSchemaType(str, Enum):
     r"""The schema format used to encode and decode event data"""
 
     AVRO = "avro"
@@ -149,18 +140,14 @@ class OutputConfluentCloudAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -220,18 +207,12 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -268,11 +249,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
     schema_type: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudSchemaType],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="schemaType"),
+        Optional[OutputConfluentCloudSchemaType], pydantic.Field(alias="schemaType")
     ] = OutputConfluentCloudSchemaType.AVRO
     r"""The schema format used to encode and decode event data"""
 
@@ -305,7 +282,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudSASLMechanism(str, Enum):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -326,10 +303,9 @@ class OutputConfluentCloudAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    mechanism: Annotated[
-        Optional[OutputConfluentCloudSASLMechanism],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputConfluentCloudSASLMechanism.PLAIN
+    mechanism: Optional[OutputConfluentCloudSASLMechanism] = (
+        OutputConfluentCloudSASLMechanism.PLAIN
+    )
 
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -337,7 +313,7 @@ class OutputConfluentCloudAuthentication(BaseModel):
     r"""Enable OAuth authentication"""
 
 
-class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -345,23 +321,21 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputConfluentCloudPqCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -475,25 +449,19 @@ class OutputConfluentCloud(BaseModel):
 
     tls: Optional[OutputConfluentCloudTLSSettingsClientSide] = None
 
-    ack: Annotated[
-        Optional[OutputConfluentCloudAcknowledgments],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputConfluentCloudAcknowledgments.ONE
+    ack: Optional[OutputConfluentCloudAcknowledgments] = (
+        OutputConfluentCloudAcknowledgments.ONE
+    )
     r"""Control the number of required acknowledgments."""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputConfluentCloudRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputConfluentCloudRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""
 
-    compression: Annotated[
-        Optional[OutputConfluentCloudCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputConfluentCloudCompression.GZIP
+    compression: Optional[OutputConfluentCloudCompression] = (
+        OutputConfluentCloudCompression.GZIP
+    )
     r"""Codec to use to compress the data before sending to Kafka"""
 
     max_record_size_kb: Annotated[
@@ -554,10 +522,7 @@ class OutputConfluentCloud(BaseModel):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputConfluentCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -583,29 +548,19 @@ class OutputConfluentCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputConfluentCloudPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputConfluentCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputConfluentCloudMode], pydantic.Field(alias="pqMode")
     ] = OutputConfluentCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
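A second mechanical consequence, visible in the hunks above: with the `PlainValidator` gone, the nested `Annotated[Annotated[...], pydantic.Field(...)]` wrapper collapses to a single `Annotated[...]` carrying only the camelCase alias, and several fields shrink to a bare `Optional[...] = default`. The alias still controls the wire name. A self-contained sketch of the resulting field shape, using a hypothetical `PqConfig` model (pydantic v2 API):

import pydantic
from enum import Enum
from typing import Optional
from typing_extensions import Annotated


class Mode(str, Enum):
    # Hypothetical stand-in for e.g. OutputConfluentCloudMode
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class PqConfig(pydantic.BaseModel):
    # Mirrors the post-change field shape: one Annotated layer, alias only
    pq_mode: Annotated[Optional[Mode], pydantic.Field(alias="pqMode")] = Mode.ERROR


cfg = PqConfig.model_validate({"pqMode": "backpressure"})  # alias maps the wire name
print(cfg.pq_mode)         # Mode.BACKPRESSURE
print(PqConfig().pq_mode)  # Mode.ERROR -- default applies when the key is absent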
--- cribl_control_plane/models/outputcriblhttp.py (0.0.50rc1)
+++ cribl_control_plane/models/outputcriblhttp.py (0.0.51)
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,14 +12,14 @@ class OutputCriblHTTPType(str, Enum):
     CRIBL_HTTP = "cribl_http"
 
 
-class OutputCriblHTTPMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblHTTPMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputCriblHTTPMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblHTTPMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -82,23 +79,15 @@ class OutputCriblHTTPTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputCriblHTTPMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputCriblHTTPMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputCriblHTTPMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputCriblHTTPMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None
 
 
-class OutputCriblHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblHTTPCompression(str, Enum):
     r"""Codec to use to compress the data before sending"""
 
     NONE = "none"
@@ -116,7 +105,7 @@ class OutputCriblHTTPExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputCriblHTTPFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblHTTPFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -178,7 +167,7 @@ class OutputCriblHTTPTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputCriblHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblHTTPBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -201,21 +190,21 @@ class OutputCriblHTTPURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
 
 
-class OutputCriblHTTPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblHTTPPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputCriblHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblHTTPQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblHTTPMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -346,9 +335,7 @@ class OutputCriblHTTP(BaseModel):
     ] = None
     r"""Fields to exclude from the event. By default, all internal fields except `__output` are sent. Example: `cribl_pipe`, `c*`. Wildcards supported."""
 
-    compression: Annotated[
-        Optional[OutputCriblHTTPCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputCriblHTTPCompression.GZIP
+    compression: Optional[OutputCriblHTTPCompression] = OutputCriblHTTPCompression.GZIP
     r"""Codec to use to compress the data before sending"""
 
     concurrency: Optional[float] = 5
@@ -387,10 +374,7 @@ class OutputCriblHTTP(BaseModel):
     r"""Headers to add to all events"""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputCriblHTTPFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblHTTPFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputCriblHTTPFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -417,10 +401,7 @@ class OutputCriblHTTP(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCriblHTTPBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblHTTPBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCriblHTTPBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -464,28 +445,19 @@ class OutputCriblHTTP(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputCriblHTTPPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblHTTPPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputCriblHTTPPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCriblHTTPQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblHTTPQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCriblHTTPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputCriblHTTPMode], pydantic.Field(alias="pqMode")
     ] = OutputCriblHTTPMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
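The aliases matter for round-tripping: the models accept camelCase keys from the API and expose snake_case attributes in Python, and pydantic v2's `model_dump(by_alias=True)` restores the wire names on the way out. A short usage sketch with a hypothetical `HTTPDest` model mirroring the generated alias pattern:

import pydantic
from enum import Enum
from typing import Optional
from typing_extensions import Annotated


class Behavior(str, Enum):
    # Hypothetical stand-in for e.g. OutputCriblHTTPBackpressureBehavior
    BLOCK = "block"
    QUEUE = "queue"


class HTTPDest(pydantic.BaseModel):
    # Hypothetical model mirroring the generated alias pattern
    on_backpressure: Annotated[
        Optional[Behavior], pydantic.Field(alias="onBackpressure")
    ] = Behavior.BLOCK


dest = HTTPDest.model_validate({"onBackpressure": "queue"})
print(dest.model_dump(by_alias=True, mode="json"))  # {'onBackpressure': 'queue'}
print(dest.model_dump(mode="json"))                 # {'on_backpressure': 'queue'}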
--- cribl_control_plane/models/outputcribllake.py (0.0.50rc1)
+++ cribl_control_plane/models/outputcribllake.py (0.0.51)
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,14 +12,14 @@ class OutputCriblLakeType(str, Enum):
     CRIBL_LAKE = "cribl_lake"
 
 
-class OutputCriblLakeSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblLakeSignatureVersion(str, Enum):
     r"""Signature version to use for signing S3 requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputCriblLakeObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblLakeObjectACL(str, Enum):
     r"""Object ACL to assign to uploaded objects"""
 
     PRIVATE = "private"
@@ -34,7 +31,7 @@ class OutputCriblLakeObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
-class OutputCriblLakeStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblLakeStorageClass(str, Enum):
     r"""Storage class to select for uploaded objects"""
 
     STANDARD = "STANDARD"
@@ -47,34 +44,32 @@ class OutputCriblLakeStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     DEEP_ARCHIVE = "DEEP_ARCHIVE"
 
 
-class OutputCriblLakeServerSideEncryptionForUploadedObjects(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputCriblLakeServerSideEncryptionForUploadedObjects(str, Enum):
     AES256 = "AES256"
     AWS_KMS = "aws:kms"
 
 
-class OutputCriblLakeBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblLakeBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputCriblLakeDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblLakeDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class AwsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class AwsAuthenticationMethod(str, Enum):
     AUTO = "auto"
     AUTO_RPC = "auto_rpc"
     MANUAL = "manual"
 
 
-class OutputCriblLakeFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblLakeFormat(str, Enum):
     JSON = "json"
     PARQUET = "parquet"
     DDSS = "ddss"
@@ -205,10 +200,7 @@ class OutputCriblLake(BaseModel):
     r"""S3 service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to S3-compatible endpoint."""
 
     signature_version: Annotated[
-        Annotated[
-            Optional[OutputCriblLakeSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblLakeSignatureVersion],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputCriblLakeSignatureVersion.V4
     r"""Signature version to use for signing S3 requests"""
@@ -257,28 +249,17 @@ class OutputCriblLake(BaseModel):
     r"""Lake dataset to send the data to."""
 
     object_acl: Annotated[
-        Annotated[
-            Optional[OutputCriblLakeObjectACL],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="objectACL"),
+        Optional[OutputCriblLakeObjectACL], pydantic.Field(alias="objectACL")
     ] = OutputCriblLakeObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""
 
     storage_class: Annotated[
-        Annotated[
-            Optional[OutputCriblLakeStorageClass],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="storageClass"),
+        Optional[OutputCriblLakeStorageClass], pydantic.Field(alias="storageClass")
     ] = None
     r"""Storage class to select for uploaded objects"""
 
     server_side_encryption: Annotated[
-        Annotated[
-            Optional[OutputCriblLakeServerSideEncryptionForUploadedObjects],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblLakeServerSideEncryptionForUploadedObjects],
         pydantic.Field(alias="serverSideEncryption"),
     ] = None
 
@@ -319,10 +300,7 @@ class OutputCriblLake(BaseModel):
     r"""Buffer size used to write to a file"""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCriblLakeBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblLakeBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCriblLakeBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -333,10 +311,7 @@ class OutputCriblLake(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
 
     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCriblLakeDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblLakeDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputCriblLakeDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -362,17 +337,12 @@ class OutputCriblLake(BaseModel):
     r"""Maximum number of files that can be waiting for upload before backpressure is applied"""
 
     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[AwsAuthenticationMethod], PlainValidator(validate_open_enum(False))
-        ],
+        Optional[AwsAuthenticationMethod],
        pydantic.Field(alias="awsAuthenticationMethod"),
     ] = AwsAuthenticationMethod.AUTO
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputCriblLakeFormat], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputCriblLakeFormat], pydantic.Field(alias="format")
     ] = None
 
     max_concurrent_file_parts: Annotated[