cribl-control-plane 0.0.50rc1__py3-none-any.whl → 0.0.51__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +6 -4
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/health.py +2 -6
- cribl_control_plane/httpclient.py +0 -1
- cribl_control_plane/models/__init__.py +4 -21
- cribl_control_plane/models/appmode.py +1 -2
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +2 -7
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/getversionshowop.py +5 -6
- cribl_control_plane/models/gitinfo.py +3 -14
- cribl_control_plane/models/hbcriblinfo.py +1 -11
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +20 -47
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +16 -46
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
- cribl_control_plane/models/inputmsk.py +18 -53
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeprovidedinfo.py +1 -4
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +32 -77
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +28 -74
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +30 -82
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/packinfo.py +0 -3
- cribl_control_plane/models/packinstallinfo.py +0 -3
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
- cribl_control_plane/models/workertypes.py +1 -2
- {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/METADATA +14 -5
- cribl_control_plane-0.0.51.dist-info/RECORD +325 -0
- cribl_control_plane/models/error.py +0 -16
- cribl_control_plane/models/gethealthinfoop.py +0 -17
- cribl_control_plane/models/gitshowresult.py +0 -19
- cribl_control_plane-0.0.50rc1.dist-info/RECORD +0 -328
- {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/WHEEL +0 -0
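Most of the churn in the model files listed above comes from a single generator-level change: enum types that 0.0.50rc1 generated as Speakeasy "open" enums (via utils.OpenEnumMeta and a PlainValidator(validate_open_enum(False)) wrapper) are plain closed str/Enum classes in 0.0.51, and the corresponding Annotated field declarations drop the nested validator layer. The sketch below is editorial, not part of the diff: it restates the recurring before/after pattern using the OutputElasticCompression and pq_compress names from the outputelastic.py hunks shown further down, with plain pydantic.BaseModel standing in for the SDK's own BaseModel.

    # Editorial sketch (not part of the diff): the recurring enum change in 0.0.51.
    #
    # 0.0.50rc1 generated "open" enums; shown as comments because utils.OpenEnumMeta
    # and validate_open_enum no longer ship in 0.0.51:
    #
    #   class OutputElasticCompression(str, Enum, metaclass=utils.OpenEnumMeta):
    #       NONE = "none"
    #       GZIP = "gzip"
    #
    #   pq_compress: Annotated[
    #       Annotated[
    #           Optional[OutputElasticCompression],
    #           PlainValidator(validate_open_enum(False)),
    #       ],
    #       pydantic.Field(alias="pqCompress"),
    #   ] = OutputElasticCompression.NONE
    #
    # 0.0.51 generates plain closed enums and single-level Annotated fields:
    from enum import Enum
    from typing import Optional

    import pydantic
    from typing_extensions import Annotated


    class OutputElasticCompression(str, Enum):
        NONE = "none"
        GZIP = "gzip"


    class Example(pydantic.BaseModel):  # stand-in for the SDK's BaseModel
        pq_compress: Annotated[
            Optional[OutputElasticCompression], pydantic.Field(alias="pqCompress")
        ] = OutputElasticCompression.NONE

The same pattern repeats across the input* and output* model files above, which is what most of the line-count deltas reflect.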
cribl_control_plane/models/outputdynatraceotlp.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,19 +12,19 @@ class OutputDynatraceOtlpType(str, Enum):
     DYNATRACE_OTLP = "dynatrace_otlp"


-class OutputDynatraceOtlpProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpProtocol(str, Enum):
     r"""Select a transport option for Dynatrace"""

     HTTP = "http"


-class OutputDynatraceOTLPOTLPVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOTLPOTLPVersion(str, Enum):
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

     ONE_DOT_3_DOT_1 = "1.3.1"


-class OutputDynatraceOtlpCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     NONE = "none"
@@ -35,9 +32,7 @@ class OutputDynatraceOtlpCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     GZIP = "gzip"


-class OutputDynatraceOtlpHTTPCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputDynatraceOtlpHTTPCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     NONE = "none"
@@ -55,9 +50,7 @@ class OutputDynatraceOtlpMetadatum(BaseModel):
     key: Optional[str] = ""


-class OutputDynatraceOtlpFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputDynatraceOtlpFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -65,14 +58,14 @@ class OutputDynatraceOtlpFailedRequestLoggingMode(
     NONE = "none"


-class EndpointType(str, Enum, metaclass=utils.OpenEnumMeta):
+class EndpointType(str, Enum):
     r"""Select the type of Dynatrace endpoint configured"""

     SAAS = "saas"
     AG = "ag"


-class OutputDynatraceOtlpBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -145,21 +138,21 @@ class OutputDynatraceOtlpTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputDynatraceOtlpPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDynatraceOtlpQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDynatraceOtlpMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -289,9 +282,7 @@ class OutputDynatraceOtlp(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    protocol: Annotated[
-        Optional[OutputDynatraceOtlpProtocol], PlainValidator(validate_open_enum(False))
-    ] = OutputDynatraceOtlpProtocol.HTTP
+    protocol: Optional[OutputDynatraceOtlpProtocol] = OutputDynatraceOtlpProtocol.HTTP
     r"""Select a transport option for Dynatrace"""

     endpoint: Optional[str] = (
@@ -300,25 +291,17 @@ class OutputDynatraceOtlp(BaseModel):
     r"""The endpoint where Dynatrace events will be sent. Enter any valid URL or an IP address (IPv4 or IPv6; enclose IPv6 addresses in square brackets)"""

     otlp_version: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOTLPOTLPVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="otlpVersion"),
+        Optional[OutputDynatraceOTLPOTLPVersion], pydantic.Field(alias="otlpVersion")
     ] = OutputDynatraceOTLPOTLPVersion.ONE_DOT_3_DOT_1
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

-    compress: Annotated[
-        Optional[OutputDynatraceOtlpCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputDynatraceOtlpCompressCompression.GZIP
+    compress: Optional[OutputDynatraceOtlpCompressCompression] = (
+        OutputDynatraceOtlpCompressCompression.GZIP
+    )
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     http_compress: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpHTTPCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpHTTPCompressCompression],
         pydantic.Field(alias="httpCompress"),
     ] = OutputDynatraceOtlpHTTPCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
@@ -358,10 +341,7 @@ class OutputDynatraceOtlp(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDynatraceOtlpFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -380,8 +360,7 @@ class OutputDynatraceOtlp(BaseModel):
     r"""Disable to close the connection immediately after sending the outgoing request"""

     endpoint_type: Annotated[
-        Annotated[Optional[EndpointType], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="endpointType"),
+        Optional[EndpointType], pydantic.Field(alias="endpointType")
     ] = EndpointType.SAAS
     r"""Select the type of Dynatrace endpoint configured"""

@@ -390,10 +369,7 @@ class OutputDynatraceOtlp(BaseModel):
     )

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDynatraceOtlpBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -454,28 +430,19 @@ class OutputDynatraceOtlp(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputDynatraceOtlpPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDynatraceOtlpQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputDynatraceOtlpMode], pydantic.Field(alias="pqMode")
     ] = OutputDynatraceOtlpMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

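As an editorial aside between the file diffs: the simplified field declarations keep both the wire alias and the default value. The following minimal, self-contained check uses plain pydantic v2 (the generated models actually derive from cribl_control_plane.types.BaseModel); EndpointType and endpoint_type mirror the outputdynatraceotlp.py hunk above.

    # Editorial sketch: alias and default survive the move to single-level Annotated.
    from enum import Enum
    from typing import Optional

    import pydantic
    from typing_extensions import Annotated


    class EndpointType(str, Enum):
        SAAS = "saas"
        AG = "ag"


    class Demo(pydantic.BaseModel):  # stand-in for the SDK's BaseModel
        endpoint_type: Annotated[
            Optional[EndpointType], pydantic.Field(alias="endpointType")
        ] = EndpointType.SAAS


    assert Demo().endpoint_type is EndpointType.SAAS  # default preserved
    assert Demo.model_validate({"endpointType": "ag"}).endpoint_type is EndpointType.AG
    assert Demo().model_dump(by_alias=True) == {"endpointType": EndpointType.SAAS}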
cribl_control_plane/models/outputelastic.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -26,7 +23,7 @@ class OutputElasticExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputElasticFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -99,7 +96,7 @@ class OutputElasticExtraParam(BaseModel):
     value: str


-class OutputElasticAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticAuthenticationMethod(str, Enum):
     r"""Enter credentials directly, or select a stored secret"""

     MANUAL = "manual"
@@ -118,16 +115,12 @@ class OutputElasticAuth(BaseModel):
     disabled: Optional[bool] = True

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputElasticAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputElasticAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputElasticAuthenticationMethod.MANUAL
     r"""Enter credentials directly, or select a stored secret"""


-class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class ElasticVersion(str, Enum):
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""

     AUTO = "auto"
@@ -135,14 +128,14 @@ class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     SEVEN = "7"


-class WriteAction(str, Enum, metaclass=utils.OpenEnumMeta):
+class WriteAction(str, Enum):
     r"""Action to use when writing events. Must be set to `Create` when writing to a data stream."""

     INDEX = "index"
     CREATE = "create"


-class OutputElasticBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -165,21 +158,21 @@ class OutputElasticURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputElasticCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputElasticQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -354,10 +347,7 @@ class OutputElastic(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputElasticFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -390,8 +380,7 @@ class OutputElastic(BaseModel):
     auth: Optional[OutputElasticAuth] = None

     elastic_version: Annotated[
-        Annotated[Optional[ElasticVersion], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="elasticVersion"),
+        Optional[ElasticVersion], pydantic.Field(alias="elasticVersion")
     ] = ElasticVersion.AUTO
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""

@@ -406,8 +395,7 @@ class OutputElastic(BaseModel):
     r"""Include the `document_id` field when sending events to an Elastic TSDS (time series data stream)"""

     write_action: Annotated[
-        Annotated[Optional[WriteAction], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="writeAction"),
+        Optional[WriteAction], pydantic.Field(alias="writeAction")
     ] = WriteAction.CREATE
     r"""Action to use when writing events. Must be set to `Create` when writing to a data stream."""

@@ -417,10 +405,7 @@ class OutputElastic(BaseModel):
     r"""Retry failed events when a bulk request to Elastic is successful, but the response body returns an error for one or more events in the batch"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputElasticBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputElasticBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -464,29 +449,19 @@ class OutputElastic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputElasticCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputElasticCompression], pydantic.Field(alias="pqCompress")
     ] = OutputElasticCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputElasticQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputElasticQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputElasticMode.ERROR
+    pq_mode: Annotated[Optional[OutputElasticMode], pydantic.Field(alias="pqMode")] = (
+        OutputElasticMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputelasticcloud.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -26,9 +23,7 @@ class OutputElasticCloudExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputElasticCloudFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputElasticCloudFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -47,7 +42,7 @@ class OutputElasticCloudExtraParam(BaseModel):
     value: str


-class OutputElasticCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudAuthenticationMethod(str, Enum):
     r"""Enter credentials directly, or select a stored secret"""

     MANUAL = "manual"
@@ -66,10 +61,7 @@ class OutputElasticCloudAuth(BaseModel):
     disabled: Optional[bool] = False

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticCloudAuthenticationMethod],
         pydantic.Field(alias="authType"),
     ] = OutputElasticCloudAuthenticationMethod.MANUAL
     r"""Enter credentials directly, or select a stored secret"""
@@ -129,7 +121,7 @@ class OutputElasticCloudTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -137,21 +129,21 @@ class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputElasticCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputElasticCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -303,10 +295,7 @@ class OutputElasticCloud(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticCloudFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputElasticCloudFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -351,10 +340,7 @@ class OutputElasticCloud(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticCloudBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputElasticCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -375,28 +361,18 @@ class OutputElasticCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputElasticCloudCompression], pydantic.Field(alias="pqCompress")
     ] = OutputElasticCloudCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticCloudQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputElasticCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputElasticCloudMode], pydantic.Field(alias="pqMode")
     ] = OutputElasticCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

cribl_control_plane/models/outputexabeam.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,14 +12,14 @@ class OutputExabeamType(str, Enum):
     EXABEAM = "exabeam"


-class OutputExabeamSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamSignatureVersion(str, Enum):
     r"""Signature version to use for signing Google Cloud Storage requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputExabeamObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamObjectACL(str, Enum):
     r"""Object ACL to assign to uploaded objects"""

     PRIVATE = "private"
@@ -33,7 +30,7 @@ class OutputExabeamObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     PUBLIC_READ = "public-read"


-class OutputExabeamStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamStorageClass(str, Enum):
     r"""Storage class to select for uploaded objects"""

     STANDARD = "STANDARD"
@@ -42,14 +39,14 @@ class OutputExabeamStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     ARCHIVE = "ARCHIVE"


-class OutputExabeamBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputExabeamDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
@@ -168,28 +165,18 @@ class OutputExabeam(BaseModel):
     r"""Google Cloud Storage service endpoint"""

     signature_version: Annotated[
-        Annotated[
-            Optional[OutputExabeamSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputExabeamSignatureVersion],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputExabeamSignatureVersion.V4
     r"""Signature version to use for signing Google Cloud Storage requests"""

     object_acl: Annotated[
-        Annotated[
-            Optional[OutputExabeamObjectACL], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="objectACL"),
+        Optional[OutputExabeamObjectACL], pydantic.Field(alias="objectACL")
     ] = OutputExabeamObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""

     storage_class: Annotated[
-        Annotated[
-            Optional[OutputExabeamStorageClass],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="storageClass"),
+        Optional[OutputExabeamStorageClass], pydantic.Field(alias="storageClass")
     ] = None
     r"""Storage class to select for uploaded objects"""

@@ -229,10 +216,7 @@ class OutputExabeam(BaseModel):
     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputExabeamBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputExabeamBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputExabeamBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -243,10 +227,7 @@ class OutputExabeam(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputExabeamDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputExabeamDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputExabeamDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""