cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl
This diff compares the contents of publicly available package versions as published to their public registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the registry.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/groups_sdk.py +2 -2
- cribl_control_plane/lakedatasets.py +28 -0
- cribl_control_plane/models/__init__.py +124 -5
- cribl_control_plane/models/cacheconnection.py +20 -0
- cribl_control_plane/models/configgroup.py +20 -1
- cribl_control_plane/models/configgroupcloud.py +11 -1
- cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
- cribl_control_plane/models/cribllakedataset.py +15 -1
- cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
- cribl_control_plane/models/datasetmetadata.py +11 -1
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
- cribl_control_plane/models/getsummaryop.py +11 -0
- cribl_control_plane/models/groupcreaterequest.py +20 -1
- cribl_control_plane/models/hbcriblinfo.py +11 -1
- cribl_control_plane/models/healthserverstatus.py +20 -1
- cribl_control_plane/models/input.py +15 -15
- cribl_control_plane/models/inputappscope.py +76 -17
- cribl_control_plane/models/inputazureblob.py +29 -1
- cribl_control_plane/models/inputcollection.py +20 -1
- cribl_control_plane/models/inputconfluentcloud.py +188 -1
- cribl_control_plane/models/inputcribl.py +20 -1
- cribl_control_plane/models/inputcriblhttp.py +58 -17
- cribl_control_plane/models/inputcribllakehttp.py +58 -17
- cribl_control_plane/models/inputcriblmetrics.py +20 -1
- cribl_control_plane/models/inputcribltcp.py +58 -17
- cribl_control_plane/models/inputcrowdstrike.py +47 -1
- cribl_control_plane/models/inputdatadogagent.py +58 -17
- cribl_control_plane/models/inputdatagen.py +20 -1
- cribl_control_plane/models/inputedgeprometheus.py +138 -37
- cribl_control_plane/models/inputelastic.py +108 -27
- cribl_control_plane/models/inputeventhub.py +176 -1
- cribl_control_plane/models/inputexec.py +29 -1
- cribl_control_plane/models/inputfile.py +40 -7
- cribl_control_plane/models/inputfirehose.py +58 -17
- cribl_control_plane/models/inputgooglepubsub.py +29 -1
- cribl_control_plane/models/inputgrafana.py +149 -32
- cribl_control_plane/models/inputhttp.py +58 -17
- cribl_control_plane/models/inputhttpraw.py +58 -17
- cribl_control_plane/models/inputjournalfiles.py +20 -1
- cribl_control_plane/models/inputkafka.py +182 -1
- cribl_control_plane/models/inputkinesis.py +65 -1
- cribl_control_plane/models/inputkubeevents.py +20 -1
- cribl_control_plane/models/inputkubelogs.py +29 -1
- cribl_control_plane/models/inputkubemetrics.py +29 -1
- cribl_control_plane/models/inputloki.py +67 -17
- cribl_control_plane/models/inputmetrics.py +58 -17
- cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
- cribl_control_plane/models/inputmsk.py +74 -1
- cribl_control_plane/models/inputnetflow.py +20 -1
- cribl_control_plane/models/inputoffice365mgmt.py +56 -1
- cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
- cribl_control_plane/models/inputoffice365service.py +56 -1
- cribl_control_plane/models/inputopentelemetry.py +84 -16
- cribl_control_plane/models/inputprometheus.py +131 -37
- cribl_control_plane/models/inputprometheusrw.py +67 -17
- cribl_control_plane/models/inputrawudp.py +20 -1
- cribl_control_plane/models/inputs3.py +38 -1
- cribl_control_plane/models/inputs3inventory.py +47 -1
- cribl_control_plane/models/inputsecuritylake.py +47 -1
- cribl_control_plane/models/inputsnmp.py +29 -1
- cribl_control_plane/models/inputsplunk.py +76 -17
- cribl_control_plane/models/inputsplunkhec.py +66 -16
- cribl_control_plane/models/inputsplunksearch.py +56 -1
- cribl_control_plane/models/inputsqs.py +47 -1
- cribl_control_plane/models/inputsyslog.py +113 -32
- cribl_control_plane/models/inputsystemmetrics.py +110 -9
- cribl_control_plane/models/inputsystemstate.py +29 -1
- cribl_control_plane/models/inputtcp.py +77 -17
- cribl_control_plane/models/inputtcpjson.py +67 -17
- cribl_control_plane/models/inputwef.py +65 -1
- cribl_control_plane/models/inputwindowsmetrics.py +101 -9
- cribl_control_plane/models/inputwineventlogs.py +52 -1
- cribl_control_plane/models/inputwiz.py +38 -1
- cribl_control_plane/models/inputwizwebhook.py +58 -17
- cribl_control_plane/models/inputzscalerhec.py +66 -16
- cribl_control_plane/models/jobinfo.py +10 -4
- cribl_control_plane/models/jobstatus.py +34 -3
- cribl_control_plane/models/lakedatasetmetrics.py +17 -0
- cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
- cribl_control_plane/models/masterworkerentry.py +11 -1
- cribl_control_plane/models/nodeupgradestatus.py +38 -0
- cribl_control_plane/models/output.py +21 -21
- cribl_control_plane/models/outputazureblob.py +90 -1
- cribl_control_plane/models/outputazuredataexplorer.py +430 -93
- cribl_control_plane/models/outputazureeventhub.py +267 -22
- cribl_control_plane/models/outputazurelogs.py +105 -22
- cribl_control_plane/models/outputchronicle.py +105 -22
- cribl_control_plane/models/outputclickhouse.py +141 -22
- cribl_control_plane/models/outputcloudwatch.py +96 -22
- cribl_control_plane/models/outputconfluentcloud.py +292 -23
- cribl_control_plane/models/outputcriblhttp.py +123 -22
- cribl_control_plane/models/outputcribllake.py +76 -1
- cribl_control_plane/models/outputcribltcp.py +123 -22
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
- cribl_control_plane/models/outputdatabricks.py +76 -5
- cribl_control_plane/models/outputdatadog.py +132 -22
- cribl_control_plane/models/outputdataset.py +123 -22
- cribl_control_plane/models/outputdiskspool.py +11 -1
- cribl_control_plane/models/outputdls3.py +117 -1
- cribl_control_plane/models/outputdynatracehttp.py +141 -22
- cribl_control_plane/models/outputdynatraceotlp.py +141 -22
- cribl_control_plane/models/outputelastic.py +148 -22
- cribl_control_plane/models/outputelasticcloud.py +130 -22
- cribl_control_plane/models/outputexabeam.py +47 -1
- cribl_control_plane/models/outputfilesystem.py +72 -1
- cribl_control_plane/models/outputgooglechronicle.py +148 -23
- cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
- cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
- cribl_control_plane/models/outputgooglepubsub.py +96 -22
- cribl_control_plane/models/outputgrafanacloud.py +244 -43
- cribl_control_plane/models/outputgraphite.py +96 -22
- cribl_control_plane/models/outputhoneycomb.py +105 -22
- cribl_control_plane/models/outputhumiohec.py +114 -22
- cribl_control_plane/models/outputinfluxdb.py +114 -22
- cribl_control_plane/models/outputkafka.py +283 -20
- cribl_control_plane/models/outputkinesis.py +121 -22
- cribl_control_plane/models/outputloki.py +112 -20
- cribl_control_plane/models/outputminio.py +117 -1
- cribl_control_plane/models/outputmsk.py +175 -20
- cribl_control_plane/models/outputnewrelic.py +123 -22
- cribl_control_plane/models/outputnewrelicevents.py +115 -23
- cribl_control_plane/models/outputopentelemetry.py +159 -22
- cribl_control_plane/models/outputprometheus.py +105 -22
- cribl_control_plane/models/outputring.py +29 -1
- cribl_control_plane/models/outputs3.py +117 -1
- cribl_control_plane/models/outputsecuritylake.py +85 -1
- cribl_control_plane/models/outputsentinel.py +123 -22
- cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
- cribl_control_plane/models/outputservicenow.py +150 -22
- cribl_control_plane/models/outputsignalfx.py +105 -22
- cribl_control_plane/models/outputsns.py +103 -20
- cribl_control_plane/models/outputsplunk.py +141 -22
- cribl_control_plane/models/outputsplunkhec.py +198 -22
- cribl_control_plane/models/outputsplunklb.py +170 -22
- cribl_control_plane/models/outputsqs.py +112 -20
- cribl_control_plane/models/outputstatsd.py +96 -22
- cribl_control_plane/models/outputstatsdext.py +96 -22
- cribl_control_plane/models/outputsumologic.py +105 -22
- cribl_control_plane/models/outputsyslog.py +238 -99
- cribl_control_plane/models/outputtcpjson.py +132 -22
- cribl_control_plane/models/outputwavefront.py +105 -22
- cribl_control_plane/models/outputwebhook.py +141 -22
- cribl_control_plane/models/outputxsiam.py +103 -20
- cribl_control_plane/models/resourcepolicy.py +11 -0
- cribl_control_plane/models/runnablejobcollection.py +68 -9
- cribl_control_plane/models/runnablejobexecutor.py +32 -9
- cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
- cribl_control_plane/sdk.py +2 -2
- {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
- {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
- {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputdynatracehttp.py
@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -144,6 +145,17 @@ class TelemetryType(str, Enum, metaclass=utils.OpenEnumMeta):
     METRICS = "metrics"


+class OutputDynatraceHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputDynatraceHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -162,17 +174,6 @@ class OutputDynatraceHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMe
     DROP = "drop"


-class OutputDynatraceHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputDynatraceHTTPPqControlsTypedDict(TypedDict):
     pass

@@ -243,6 +244,16 @@ class OutputDynatraceHTTPTypedDict(TypedDict):
     total_memory_limit_kb: NotRequired[float]
     r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
     description: NotRequired[str]
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputDynatraceHTTPMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -253,8 +264,6 @@ class OutputDynatraceHTTPTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputDynatraceHTTPQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputDynatraceHTTPMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputDynatraceHTTPPqControlsTypedDict]
     token: NotRequired[str]
     r"""Bearer token to include in the authorization header"""
@@ -411,6 +420,34 @@ class OutputDynatraceHTTP(BaseModel):

     description: Optional[str] = None

+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputDynatraceHTTPMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputDynatraceHTTPMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -442,14 +479,6 @@ class OutputDynatraceHTTP(BaseModel):
     ] = OutputDynatraceHTTPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputDynatraceHTTPMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputDynatraceHTTPMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputDynatraceHTTPPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -472,3 +501,93 @@ class OutputDynatraceHTTP(BaseModel):

     url: Optional[str] = None
     r"""URL to send events to. Can be overwritten by an event's __url field."""
+
+    @field_serializer("method")
+    def serialize_method(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceHTTPMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("failed_request_logging_mode")
+    def serialize_failed_request_logging_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceHTTPFailedRequestLoggingMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceHTTPBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("auth_type")
+    def serialize_auth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceHTTPAuthenticationType(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("format_")
+    def serialize_format_(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceHTTPFormat(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("endpoint")
+    def serialize_endpoint(self, value):
+        if isinstance(value, str):
+            try:
+                return models.Endpoint(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("telemetry_type")
+    def serialize_telemetry_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.TelemetryType(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceHTTPMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceHTTPCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceHTTPQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
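The pattern repeated across the generated models in this release is visible above: each file now imports models and pydantic's field_serializer, and every open-enum field gets a serializer that coerces a plain string back into the matching enum member when it is recognized and otherwise passes the unknown value through unchanged, which presumably lets values from a newer Cribl release round-trip even when the SDK's enums do not list them yet. A minimal, self-contained sketch of that behavior (Mode and Queue are stand-ins, not classes from the SDK):

# Self-contained sketch of the open-enum serializer pattern used by the
# generated models; Mode and Queue are stand-ins, not SDK classes.
from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel, field_serializer


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class Queue(BaseModel):
    # Accept either a known Mode member or an arbitrary string ("open" enum).
    pq_mode: Optional[Union[Mode, str]] = Mode.ERROR

    @field_serializer("pq_mode")
    def serialize_pq_mode(self, value):
        # Coerce recognized strings back to the enum; leave unknown values as-is.
        if isinstance(value, str):
            try:
                return Mode(value)
            except ValueError:
                return value
        return value


Queue(pq_mode="backpressure").model_dump()   # known value is dumped as the Mode member
Queue(pq_mode="not-yet-known").model_dump()  # unknown string survives the round trip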
cribl_control_plane/models/outputdynatraceotlp.py
@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -160,6 +161,17 @@ class OutputDynatraceOtlpTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


+class OutputDynatraceOtlpMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputDynatraceOtlpPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -178,17 +190,6 @@ class OutputDynatraceOtlpQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMe
     DROP = "drop"


-class OutputDynatraceOtlpMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputDynatraceOtlpPqControlsTypedDict(TypedDict):
     pass

@@ -273,6 +274,16 @@ class OutputDynatraceOtlpTypedDict(TypedDict):
     ]
     response_honor_retry_after_header: NotRequired[bool]
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputDynatraceOtlpMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -283,8 +294,6 @@ class OutputDynatraceOtlpTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputDynatraceOtlpQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputDynatraceOtlpMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputDynatraceOtlpPqControlsTypedDict]


@@ -462,6 +471,34 @@ class OutputDynatraceOtlp(BaseModel):
     ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputDynatraceOtlpMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputDynatraceOtlpMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -493,14 +530,96 @@ class OutputDynatraceOtlp(BaseModel):
     ] = OutputDynatraceOtlpQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputDynatraceOtlpMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputDynatraceOtlpPqControls], pydantic.Field(alias="pqControls")
     ] = None
+
+    @field_serializer("protocol")
+    def serialize_protocol(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOtlpProtocol(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("otlp_version")
+    def serialize_otlp_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOTLPOTLPVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("compress")
+    def serialize_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOtlpCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("http_compress")
+    def serialize_http_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOtlpHTTPCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("failed_request_logging_mode")
+    def serialize_failed_request_logging_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOtlpFailedRequestLoggingMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("endpoint_type")
+    def serialize_endpoint_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.EndpointType(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOtlpBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOtlpMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOtlpPqCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputDynatraceOtlpQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
cribl_control_plane/models/outputelastic.py
@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -113,13 +114,25 @@ class OutputElasticAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)

 class OutputElasticAuthTypedDict(TypedDict):
     disabled: NotRequired[bool]
+    username: NotRequired[str]
+    password: NotRequired[str]
     auth_type: NotRequired[OutputElasticAuthenticationMethod]
     r"""Enter credentials directly, or select a stored secret"""
+    credentials_secret: NotRequired[str]
+    r"""Select or create a secret that references your credentials"""
+    manual_api_key: NotRequired[str]
+    r"""Enter API key directly"""
+    text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""


 class OutputElasticAuth(BaseModel):
     disabled: Optional[bool] = True

+    username: Optional[str] = None
+
+    password: Optional[str] = None
+
     auth_type: Annotated[
         Annotated[
             Optional[OutputElasticAuthenticationMethod],
@@ -129,6 +142,28 @@ class OutputElasticAuth(BaseModel):
     ] = OutputElasticAuthenticationMethod.MANUAL
     r"""Enter credentials directly, or select a stored secret"""

+    credentials_secret: Annotated[
+        Optional[str], pydantic.Field(alias="credentialsSecret")
+    ] = None
+    r"""Select or create a secret that references your credentials"""
+
+    manual_api_key: Annotated[Optional[str], pydantic.Field(alias="manualAPIKey")] = (
+        None
+    )
+    r"""Enter API key directly"""
+
+    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
+    r"""Select or create a stored text secret"""
+
+    @field_serializer("auth_type")
+    def serialize_auth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputElasticAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+

 class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""
@@ -176,6 +211,17 @@ class OutputElasticURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


+class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputElasticCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -194,17 +240,6 @@ class OutputElasticQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


-class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputElasticPqControlsTypedDict(TypedDict):
     pass

@@ -287,6 +322,16 @@ class OutputElasticTypedDict(TypedDict):
     r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
     load_balance_stats_period_sec: NotRequired[float]
     r"""How far back in time to keep traffic stats for load balancing purposes"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputElasticMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -297,8 +342,6 @@ class OutputElasticTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputElasticQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputElasticMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputElasticPqControlsTypedDict]


@@ -468,6 +511,34 @@ class OutputElastic(BaseModel):
     ] = 300
     r"""How far back in time to keep traffic stats for load balancing purposes"""

+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputElasticMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -499,14 +570,69 @@ class OutputElastic(BaseModel):
     ] = OutputElasticQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputElasticMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputElasticPqControls], pydantic.Field(alias="pqControls")
     ] = None
+
+    @field_serializer("failed_request_logging_mode")
+    def serialize_failed_request_logging_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputElasticFailedRequestLoggingMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("elastic_version")
+    def serialize_elastic_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.ElasticVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("write_action")
+    def serialize_write_action(self, value):
+        if isinstance(value, str):
+            try:
+                return models.WriteAction(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputElasticBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputElasticMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputElasticCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputElasticQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value