cribl-control-plane 0.0.44__py3-none-any.whl → 0.0.44a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/models/__init__.py +3 -3
- cribl_control_plane/models/appmode.py +2 -1
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/hbcriblinfo.py +6 -1
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +47 -20
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +46 -16
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +27 -10
- cribl_control_plane/models/inputmsk.py +53 -18
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +74 -29
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +36 -13
- cribl_control_plane/models/outputclickhouse.py +56 -21
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +77 -32
- cribl_control_plane/models/outputcriblhttp.py +46 -18
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -15
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +54 -21
- cribl_control_plane/models/outputdynatraceotlp.py +56 -23
- cribl_control_plane/models/outputelastic.py +44 -19
- cribl_control_plane/models/outputelasticcloud.py +37 -13
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +99 -34
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +36 -13
- cribl_control_plane/models/outputhumiohec.py +44 -17
- cribl_control_plane/models/outputinfluxdb.py +43 -17
- cribl_control_plane/models/outputkafka.py +74 -28
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +82 -30
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +42 -15
- cribl_control_plane/models/outputopentelemetry.py +68 -27
- cribl_control_plane/models/outputprometheus.py +36 -13
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +51 -19
- cribl_control_plane/models/outputservicenow.py +61 -25
- cribl_control_plane/models/outputsignalfx.py +38 -15
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +36 -13
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +36 -13
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +36 -15
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/RECORD +158 -158
- {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/WHEEL +0 -0
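
Nearly every hunk in the diffs below follows the same pattern: a generated enum class gains `metaclass=utils.OpenEnumMeta`, and the model field typed with that enum is wrapped in `Annotated[..., PlainValidator(validate_open_enum(False))]`. This appears to be the Speakeasy open-enum convention, under which values outside the declared members are accepted during validation rather than rejected. The sketch below illustrates the general idea with plain Pydantic; the `Compression` enum, `lenient_compression` validator, and `Output` model are hypothetical stand-ins, not the SDK's actual implementation.

```python
# A minimal sketch of the open-enum pattern applied throughout this diff,
# using plain Pydantic v2. `Compression`, `lenient_compression`, and `Output`
# are hypothetical stand-ins, not types from cribl_control_plane.
from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


def lenient_compression(value: object) -> Union[Compression, str, None]:
    """Coerce to a known member when possible; pass unknown values through."""
    if value is None:
        return None
    try:
        return Compression(value)
    except ValueError:
        return str(value)


class Output(BaseModel):
    # Closed enum (old behavior): an unlisted value such as "zstd" would raise
    # a ValidationError:
    #     compress: Optional[Compression] = Compression.NONE
    #
    # Open enum (new behavior): the validator runs instead of strict membership
    # checks, so unlisted values survive deserialization as plain strings.
    compress: Annotated[
        Optional[Union[Compression, str]], PlainValidator(lenient_compression)
    ] = Compression.NONE


print(Output(compress="gzip").compress)   # Compression.GZIP
print(Output(compress="zstd").compress)   # zstd (accepted, not rejected)
```

Under this pattern, consuming code should be prepared for such a field to hold either an enum member or a raw string when the API returns a value the SDK does not yet know about.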
cribl_control_plane/models/outputdynatracehttp.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputDynatraceHTTPType(str, Enum):
     DYNATRACE_HTTP = "dynatrace_http"


-class OutputDynatraceHTTPMethod(str, Enum):
+class OutputDynatraceHTTPMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The method to use when sending events"""

     POST = "POST"
@@ -31,7 +34,9 @@ class OutputDynatraceHTTPExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputDynatraceHTTPFailedRequestLoggingMode(
+class OutputDynatraceHTTPFailedRequestLoggingMode(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -93,7 +98,7 @@ class OutputDynatraceHTTPTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputDynatraceHTTPBackpressureBehavior(str, Enum):
+class OutputDynatraceHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -101,44 +106,44 @@ class OutputDynatraceHTTPBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputDynatraceHTTPAuthenticationType(str, Enum):
+class OutputDynatraceHTTPAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     TOKEN = "token"
     TEXT_SECRET = "textSecret"


-class OutputDynatraceHTTPFormat(str, Enum):
+class OutputDynatraceHTTPFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to format events before sending. Defaults to JSON. Plaintext is not currently supported."""

     JSON_ARRAY = "json_array"
     PLAINTEXT = "plaintext"


-class Endpoint(str, Enum):
+class Endpoint(str, Enum, metaclass=utils.OpenEnumMeta):
     CLOUD = "cloud"
     ACTIVE_GATE = "activeGate"
     MANUAL = "manual"


-class TelemetryType(str, Enum):
+class TelemetryType(str, Enum, metaclass=utils.OpenEnumMeta):
     LOGS = "logs"
     METRICS = "metrics"


-class OutputDynatraceHTTPCompression(str, Enum):
+class OutputDynatraceHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDynatraceHTTPQueueFullBehavior(str, Enum):
+class OutputDynatraceHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDynatraceHTTPMode(str, Enum):
+class OutputDynatraceHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -261,7 +266,9 @@ class OutputDynatraceHTTP(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    method:
+    method: Annotated[
+        Optional[OutputDynatraceHTTPMethod], PlainValidator(validate_open_enum(False))
+    ] = OutputDynatraceHTTPMethod.POST
     r"""The method to use when sending events"""

     keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
@@ -311,7 +318,10 @@ class OutputDynatraceHTTP(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceHTTPFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDynatraceHTTPFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -334,29 +344,42 @@ class OutputDynatraceHTTP(BaseModel):

     response_honor_retry_after_header: Annotated[
         Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
-    ] =
+    ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceHTTPBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDynatraceHTTPBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceHTTPAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="authType"),
     ] = OutputDynatraceHTTPAuthenticationType.TOKEN

     format_: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceHTTPFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputDynatraceHTTPFormat.JSON_ARRAY
     r"""How to format events before sending. Defaults to JSON. Plaintext is not currently supported."""

-    endpoint:
+    endpoint: Annotated[
+        Optional[Endpoint], PlainValidator(validate_open_enum(False))
+    ] = Endpoint.CLOUD

     telemetry_type: Annotated[
-        Optional[TelemetryType],
+        Annotated[Optional[TelemetryType], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="telemetryType"),
     ] = TelemetryType.LOGS

     total_memory_limit_kb: Annotated[
@@ -380,18 +403,28 @@ class OutputDynatraceHTTP(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceHTTPCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputDynatraceHTTPCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceHTTPQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDynatraceHTTPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceHTTPMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputDynatraceHTTPMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

cribl_control_plane/models/outputdynatraceotlp.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,19 +15,19 @@ class OutputDynatraceOtlpType(str, Enum):
     DYNATRACE_OTLP = "dynatrace_otlp"


-class OutputDynatraceOtlpProtocol(str, Enum):
+class OutputDynatraceOtlpProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select a transport option for Dynatrace"""

     HTTP = "http"


-class OutputDynatraceOTLPOTLPVersion(str, Enum):
+class OutputDynatraceOTLPOTLPVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

     ONE_DOT_3_DOT_1 = "1.3.1"


-class OutputDynatraceOtlpCompressCompression(str, Enum):
+class OutputDynatraceOtlpCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     NONE = "none"
@@ -32,7 +35,9 @@ class OutputDynatraceOtlpCompressCompression(str, Enum):
     GZIP = "gzip"


-class OutputDynatraceOtlpHTTPCompressCompression(
+class OutputDynatraceOtlpHTTPCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     NONE = "none"
@@ -50,7 +55,9 @@ class OutputDynatraceOtlpMetadatum(BaseModel):
     key: Optional[str] = ""


-class OutputDynatraceOtlpFailedRequestLoggingMode(
+class OutputDynatraceOtlpFailedRequestLoggingMode(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -58,14 +65,14 @@ class OutputDynatraceOtlpFailedRequestLoggingMode(str, Enum):
     NONE = "none"


-class EndpointType(str, Enum):
+class EndpointType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select the type of Dynatrace endpoint configured"""

     SAAS = "saas"
     AG = "ag"


-class OutputDynatraceOtlpBackpressureBehavior(str, Enum):
+class OutputDynatraceOtlpBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -138,21 +145,21 @@ class OutputDynatraceOtlpTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputDynatraceOtlpPqCompressCompression(str, Enum):
+class OutputDynatraceOtlpPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDynatraceOtlpQueueFullBehavior(str, Enum):
+class OutputDynatraceOtlpQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDynatraceOtlpMode(str, Enum):
+class OutputDynatraceOtlpMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -282,7 +289,9 @@ class OutputDynatraceOtlp(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    protocol:
+    protocol: Annotated[
+        Optional[OutputDynatraceOtlpProtocol], PlainValidator(validate_open_enum(False))
+    ] = OutputDynatraceOtlpProtocol.HTTP
     r"""Select a transport option for Dynatrace"""

     endpoint: Optional[str] = (
@@ -291,17 +300,25 @@ class OutputDynatraceOtlp(BaseModel):
     r"""The endpoint where Dynatrace events will be sent. Enter any valid URL or an IP address (IPv4 or IPv6; enclose IPv6 addresses in square brackets)"""

     otlp_version: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceOTLPOTLPVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="otlpVersion"),
     ] = OutputDynatraceOTLPOTLPVersion.ONE_DOT_3_DOT_1
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

-    compress:
-        OutputDynatraceOtlpCompressCompression
-
+    compress: Annotated[
+        Optional[OutputDynatraceOtlpCompressCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputDynatraceOtlpCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     http_compress: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceOtlpHTTPCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="httpCompress"),
     ] = OutputDynatraceOtlpHTTPCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
@@ -341,7 +358,10 @@ class OutputDynatraceOtlp(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""

     failed_request_logging_mode: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceOtlpFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDynatraceOtlpFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -360,7 +380,8 @@ class OutputDynatraceOtlp(BaseModel):
     r"""Disable to close the connection immediately after sending the outgoing request"""

     endpoint_type: Annotated[
-        Optional[EndpointType],
+        Annotated[Optional[EndpointType], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="endpointType"),
     ] = EndpointType.SAAS
     r"""Select the type of Dynatrace endpoint configured"""

@@ -369,7 +390,10 @@ class OutputDynatraceOtlp(BaseModel):
     )

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceOtlpBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDynatraceOtlpBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -413,7 +437,7 @@ class OutputDynatraceOtlp(BaseModel):

     response_honor_retry_after_header: Annotated[
         Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
-    ] =
+    ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     pq_max_file_size: Annotated[
@@ -430,19 +454,28 @@ class OutputDynatraceOtlp(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceOtlpPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputDynatraceOtlpPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceOtlpQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDynatraceOtlpQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-
+        Annotated[
+            Optional[OutputDynatraceOtlpMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputDynatraceOtlpMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

cribl_control_plane/models/outputelastic.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -23,7 +26,7 @@ class OutputElasticExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputElasticFailedRequestLoggingMode(str, Enum):
+class OutputElasticFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -96,7 +99,7 @@ class OutputElasticExtraParam(BaseModel):
     value: str


-class OutputElasticAuthenticationMethod(str, Enum):
+class OutputElasticAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter credentials directly, or select a stored secret"""

     MANUAL = "manual"
@@ -115,12 +118,16 @@ class OutputElasticAuth(BaseModel):
     disabled: Optional[bool] = True

     auth_type: Annotated[
-
+        Annotated[
+            Optional[OutputElasticAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputElasticAuthenticationMethod.MANUAL
     r"""Enter credentials directly, or select a stored secret"""


-class ElasticVersion(str, Enum):
+class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""

     AUTO = "auto"
@@ -128,14 +135,14 @@ class ElasticVersion(str, Enum):
     SEVEN = "7"


-class WriteAction(str, Enum):
+class WriteAction(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Action to use when writing events. Must be set to `Create` when writing to a data stream."""

     INDEX = "index"
     CREATE = "create"


-class OutputElasticBackpressureBehavior(str, Enum):
+class OutputElasticBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -158,21 +165,21 @@ class OutputElasticURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputElasticCompression(str, Enum):
+class OutputElasticCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputElasticQueueFullBehavior(str, Enum):
+class OutputElasticQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputElasticMode(str, Enum):
+class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -347,7 +354,10 @@ class OutputElastic(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-
+        Annotated[
+            Optional[OutputElasticFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputElasticFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -370,7 +380,7 @@ class OutputElastic(BaseModel):

     response_honor_retry_after_header: Annotated[
         Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
-    ] =
+    ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     extra_params: Annotated[
@@ -380,7 +390,8 @@ class OutputElastic(BaseModel):
     auth: Optional[OutputElasticAuth] = None

     elastic_version: Annotated[
-        Optional[ElasticVersion],
+        Annotated[Optional[ElasticVersion], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="elasticVersion"),
     ] = ElasticVersion.AUTO
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""

@@ -395,7 +406,8 @@ class OutputElastic(BaseModel):
     r"""Include the `document_id` field when sending events to an Elastic TSDS (time series data stream)"""

     write_action: Annotated[
-        Optional[WriteAction],
+        Annotated[Optional[WriteAction], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="writeAction"),
     ] = WriteAction.CREATE
     r"""Action to use when writing events. Must be set to `Create` when writing to a data stream."""

@@ -405,7 +417,10 @@ class OutputElastic(BaseModel):
     r"""Retry failed events when a bulk request to Elastic is successful, but the response body returns an error for one or more events in the batch"""

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputElasticBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputElasticBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -449,19 +464,29 @@ class OutputElastic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputElasticCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputElasticCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputElasticQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputElasticQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-
-
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputElasticMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[