cribl-control-plane 0.2.1rc7__py3-none-any.whl → 0.3.0a1__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of cribl-control-plane has been flagged as potentially problematic.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/__init__.py +5 -8
- cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
- cribl_control_plane/groups_sdk.py +28 -52
- cribl_control_plane/health.py +16 -22
- cribl_control_plane/models/__init__.py +54 -217
- cribl_control_plane/models/appmode.py +14 -0
- cribl_control_plane/models/authtoken.py +1 -5
- cribl_control_plane/models/cacheconnection.py +0 -20
- cribl_control_plane/models/configgroup.py +7 -55
- cribl_control_plane/models/configgroupcloud.py +1 -11
- cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
- cribl_control_plane/models/createroutesappendbyidop.py +2 -2
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +1 -11
- cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
- cribl_control_plane/models/datasetmetadata.py +1 -11
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
- cribl_control_plane/models/distributedsummary.py +0 -6
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +0 -11
- cribl_control_plane/models/hbcriblinfo.py +3 -24
- cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/input.py +78 -80
- cribl_control_plane/models/inputappscope.py +17 -80
- cribl_control_plane/models/inputazureblob.py +1 -33
- cribl_control_plane/models/inputcollection.py +1 -24
- cribl_control_plane/models/inputconfluentcloud.py +18 -195
- cribl_control_plane/models/inputcribl.py +1 -24
- cribl_control_plane/models/inputcriblhttp.py +17 -62
- cribl_control_plane/models/inputcribllakehttp.py +17 -62
- cribl_control_plane/models/inputcriblmetrics.py +1 -24
- cribl_control_plane/models/inputcribltcp.py +17 -62
- cribl_control_plane/models/inputcrowdstrike.py +1 -54
- cribl_control_plane/models/inputdatadogagent.py +17 -62
- cribl_control_plane/models/inputdatagen.py +1 -24
- cribl_control_plane/models/inputedgeprometheus.py +34 -147
- cribl_control_plane/models/inputelastic.py +27 -119
- cribl_control_plane/models/inputeventhub.py +1 -182
- cribl_control_plane/models/inputexec.py +1 -33
- cribl_control_plane/models/inputfile.py +3 -42
- cribl_control_plane/models/inputfirehose.py +17 -62
- cribl_control_plane/models/inputgooglepubsub.py +1 -36
- cribl_control_plane/models/inputgrafana.py +32 -157
- cribl_control_plane/models/inputhttp.py +17 -62
- cribl_control_plane/models/inputhttpraw.py +17 -62
- cribl_control_plane/models/inputjournalfiles.py +1 -24
- cribl_control_plane/models/inputkafka.py +17 -189
- cribl_control_plane/models/inputkinesis.py +1 -80
- cribl_control_plane/models/inputkubeevents.py +1 -24
- cribl_control_plane/models/inputkubelogs.py +1 -33
- cribl_control_plane/models/inputkubemetrics.py +1 -33
- cribl_control_plane/models/inputloki.py +17 -71
- cribl_control_plane/models/inputmetrics.py +17 -62
- cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
- cribl_control_plane/models/inputmsk.py +18 -81
- cribl_control_plane/models/inputnetflow.py +1 -24
- cribl_control_plane/models/inputoffice365mgmt.py +1 -67
- cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
- cribl_control_plane/models/inputoffice365service.py +1 -67
- cribl_control_plane/models/inputopentelemetry.py +16 -92
- cribl_control_plane/models/inputprometheus.py +34 -138
- cribl_control_plane/models/inputprometheusrw.py +17 -71
- cribl_control_plane/models/inputrawudp.py +1 -24
- cribl_control_plane/models/inputs3.py +1 -45
- cribl_control_plane/models/inputs3inventory.py +1 -54
- cribl_control_plane/models/inputsecuritylake.py +1 -54
- cribl_control_plane/models/inputsnmp.py +1 -40
- cribl_control_plane/models/inputsplunk.py +17 -85
- cribl_control_plane/models/inputsplunkhec.py +16 -70
- cribl_control_plane/models/inputsplunksearch.py +1 -63
- cribl_control_plane/models/inputsqs.py +1 -56
- cribl_control_plane/models/inputsyslog.py +32 -121
- cribl_control_plane/models/inputsystemmetrics.py +9 -142
- cribl_control_plane/models/inputsystemstate.py +1 -33
- cribl_control_plane/models/inputtcp.py +17 -81
- cribl_control_plane/models/inputtcpjson.py +17 -71
- cribl_control_plane/models/inputwef.py +1 -71
- cribl_control_plane/models/inputwindowsmetrics.py +9 -129
- cribl_control_plane/models/inputwineventlogs.py +1 -60
- cribl_control_plane/models/inputwiz.py +1 -45
- cribl_control_plane/models/inputwizwebhook.py +17 -62
- cribl_control_plane/models/inputzscalerhec.py +16 -70
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/jobstatus.py +3 -34
- cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
- cribl_control_plane/models/logininfo.py +3 -3
- cribl_control_plane/models/masterworkerentry.py +1 -11
- cribl_control_plane/models/nodeprovidedinfo.py +1 -11
- cribl_control_plane/models/nodeupgradestatus.py +0 -38
- cribl_control_plane/models/output.py +88 -93
- cribl_control_plane/models/outputazureblob.py +1 -110
- cribl_control_plane/models/outputazuredataexplorer.py +87 -452
- cribl_control_plane/models/outputazureeventhub.py +19 -281
- cribl_control_plane/models/outputazurelogs.py +19 -115
- cribl_control_plane/models/outputchronicle.py +19 -115
- cribl_control_plane/models/outputclickhouse.py +19 -155
- cribl_control_plane/models/outputcloudwatch.py +19 -106
- cribl_control_plane/models/outputconfluentcloud.py +38 -311
- cribl_control_plane/models/outputcriblhttp.py +19 -135
- cribl_control_plane/models/outputcribllake.py +1 -97
- cribl_control_plane/models/outputcribltcp.py +19 -132
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
- cribl_control_plane/models/outputdatadog.py +19 -159
- cribl_control_plane/models/outputdataset.py +19 -143
- cribl_control_plane/models/outputdiskspool.py +1 -11
- cribl_control_plane/models/outputdls3.py +1 -152
- cribl_control_plane/models/outputdynatracehttp.py +19 -160
- cribl_control_plane/models/outputdynatraceotlp.py +19 -160
- cribl_control_plane/models/outputelastic.py +19 -163
- cribl_control_plane/models/outputelasticcloud.py +19 -140
- cribl_control_plane/models/outputexabeam.py +1 -61
- cribl_control_plane/models/outputfilesystem.py +1 -87
- cribl_control_plane/models/outputgooglechronicle.py +20 -166
- cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
- cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
- cribl_control_plane/models/outputgooglepubsub.py +19 -106
- cribl_control_plane/models/outputgrafanacloud.py +37 -288
- cribl_control_plane/models/outputgraphite.py +19 -105
- cribl_control_plane/models/outputhoneycomb.py +19 -115
- cribl_control_plane/models/outputhumiohec.py +19 -126
- cribl_control_plane/models/outputinfluxdb.py +19 -130
- cribl_control_plane/models/outputkafka.py +34 -302
- cribl_control_plane/models/outputkinesis.py +19 -133
- cribl_control_plane/models/outputloki.py +17 -129
- cribl_control_plane/models/outputminio.py +1 -145
- cribl_control_plane/models/outputmsk.py +34 -193
- cribl_control_plane/models/outputnewrelic.py +19 -136
- cribl_control_plane/models/outputnewrelicevents.py +20 -128
- cribl_control_plane/models/outputopentelemetry.py +19 -178
- cribl_control_plane/models/outputprometheus.py +19 -115
- cribl_control_plane/models/outputring.py +1 -31
- cribl_control_plane/models/outputs3.py +1 -152
- cribl_control_plane/models/outputsecuritylake.py +1 -114
- cribl_control_plane/models/outputsentinel.py +19 -135
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
- cribl_control_plane/models/outputservicenow.py +19 -168
- cribl_control_plane/models/outputsignalfx.py +19 -115
- cribl_control_plane/models/outputsns.py +17 -113
- cribl_control_plane/models/outputsplunk.py +19 -153
- cribl_control_plane/models/outputsplunkhec.py +19 -208
- cribl_control_plane/models/outputsplunklb.py +19 -182
- cribl_control_plane/models/outputsqs.py +17 -124
- cribl_control_plane/models/outputstatsd.py +19 -105
- cribl_control_plane/models/outputstatsdext.py +19 -105
- cribl_control_plane/models/outputsumologic.py +19 -117
- cribl_control_plane/models/outputsyslog.py +96 -259
- cribl_control_plane/models/outputtcpjson.py +19 -141
- cribl_control_plane/models/outputwavefront.py +19 -115
- cribl_control_plane/models/outputwebhook.py +19 -161
- cribl_control_plane/models/outputxsiam.py +17 -113
- cribl_control_plane/models/packinfo.py +5 -8
- cribl_control_plane/models/packinstallinfo.py +5 -8
- cribl_control_plane/models/resourcepolicy.py +0 -11
- cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +9 -72
- cribl_control_plane/models/runnablejobexecutor.py +9 -32
- cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
- cribl_control_plane/packs.py +7 -202
- cribl_control_plane/routes_sdk.py +6 -6
- cribl_control_plane/tokens.py +15 -23
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
- cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
- cribl_control_plane/models/groupcreaterequest.py +0 -171
- cribl_control_plane/models/outpostnodeinfo.py +0 -16
- cribl_control_plane/models/outputdatabricks.py +0 -482
- cribl_control_plane/models/updatepacksop.py +0 -25
- cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputcriblhttp.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -98,31 +97,11 @@ class OutputCriblHTTPTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None
 
-    @field_serializer("min_version")
-    def serialize_min_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblHTTPMinimumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("max_version")
-    def serialize_max_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblHTTPMaximumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
 
 class OutputCriblHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
@@ -140,11 +119,8 @@ class OutputCriblHTTPExtraHTTPHeader(BaseModel):
 class OutputCriblHTTPFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"
 
 
@@ -205,11 +181,8 @@ class OutputCriblHTTPTimeoutRetrySettings(BaseModel):
 class OutputCriblHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -228,35 +201,28 @@ class OutputCriblHTTPURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
 
 
-class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputCriblHTTPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputCriblHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputCriblHTTPPqControlsTypedDict(TypedDict):
     pass
 
@@ -328,16 +294,6 @@ class OutputCriblHTTPTypedDict(TypedDict):
     r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
     load_balance_stats_period_sec: NotRequired[float]
     r"""How far back in time to keep traffic stats for load balancing purposes"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputCriblHTTPMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -348,6 +304,8 @@ class OutputCriblHTTPTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputCriblHTTPQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputCriblHTTPMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputCriblHTTPPqControlsTypedDict]
 
 
@@ -492,34 +450,6 @@ class OutputCriblHTTP(BaseModel):
     ] = 300
     r"""How far back in time to keep traffic stats for load balancing purposes"""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputCriblHTTPMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -551,60 +481,14 @@ class OutputCriblHTTP(BaseModel):
     ] = OutputCriblHTTPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputCriblHTTPMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputCriblHTTPPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("compression")
-    def serialize_compression(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblHTTPCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("failed_request_logging_mode")
-    def serialize_failed_request_logging_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblHTTPFailedRequestLoggingMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblHTTPBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblHTTPMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblHTTPPqCompressCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblHTTPQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
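Note: the removed serialize_* methods in this file (and the other Destination models below) all share one fallback shape: on dump, try to coerce a string back into its enum class and, if that fails, emit the raw string unchanged. The sketch below illustrates that pattern with plain pydantic and hypothetical names (Compression, Destination); it is not code from either wheel.

# Illustrative sketch only -- hypothetical Compression/Destination classes,
# showing the fallback shape of the serializers deleted in 0.3.0a1.
from enum import Enum
from typing import Union

from pydantic import BaseModel, field_serializer


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


class Destination(BaseModel):
    compression: Union[Compression, str] = Compression.NONE

    # Coerce a known string back to its enum member on dump;
    # pass an unknown string through untouched instead of failing.
    @field_serializer("compression")
    def serialize_compression(self, value):
        if isinstance(value, str):
            try:
                return Compression(value)
            except ValueError:
                return value
        return value


print(Destination(compression="gzip").model_dump())  # known value round-trips as the enum member
print(Destination(compression="zstd").model_dump())  # unknown value passes through as a plain string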
cribl_control_plane/models/outputcribllake.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -26,67 +25,46 @@ class OutputCriblLakeSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputCriblLakeObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""
 
-    # Private
     PRIVATE = "private"
-    # Public Read Only
     PUBLIC_READ = "public-read"
-    # Public Read/Write
     PUBLIC_READ_WRITE = "public-read-write"
-    # Authenticated Read Only
     AUTHENTICATED_READ = "authenticated-read"
-    # AWS EC2 AMI Read Only
     AWS_EXEC_READ = "aws-exec-read"
-    # Bucket Owner Read Only
     BUCKET_OWNER_READ = "bucket-owner-read"
-    # Bucket Owner Full Control
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
 class OutputCriblLakeStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""
 
-    # Standard
     STANDARD = "STANDARD"
-    # Reduced Redundancy Storage
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
-    # Standard, Infrequent Access
     STANDARD_IA = "STANDARD_IA"
-    # One Zone, Infrequent Access
     ONEZONE_IA = "ONEZONE_IA"
-    # Intelligent Tiering
     INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
-    # Glacier Flexible Retrieval
     GLACIER = "GLACIER"
-    # Glacier Instant Retrieval
     GLACIER_IR = "GLACIER_IR"
-    # Glacier Deep Archive
     DEEP_ARCHIVE = "DEEP_ARCHIVE"
 
 
 class OutputCriblLakeServerSideEncryptionForUploadedObjects(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
-    # Amazon S3 Managed Key
     AES256 = "AES256"
-    # AWS KMS Managed Key
     AWS_KMS = "aws:kms"
 
 
 class OutputCriblLakeBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
 
 
 class OutputCriblLakeDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
 
 
@@ -416,77 +394,3 @@ class OutputCriblLake(BaseModel):
 
     max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
-
-    @field_serializer("signature_version")
-    def serialize_signature_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblLakeSignatureVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("object_acl")
-    def serialize_object_acl(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblLakeObjectACL(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("storage_class")
-    def serialize_storage_class(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblLakeStorageClass(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("server_side_encryption")
-    def serialize_server_side_encryption(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblLakeServerSideEncryptionForUploadedObjects(
-                    value
-                )
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblLakeBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_disk_full_backpressure")
-    def serialize_on_disk_full_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblLakeDiskSpaceProtection(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("aws_authentication_method")
-    def serialize_aws_authentication_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.AwsAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("format_")
-    def serialize_format_(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblLakeFormat(value)
-            except ValueError:
-                return value
-        return value
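Note: on the validation side, the generated enums keep the open-enum pattern (utils.OpenEnumMeta plus PlainValidator(validate_open_enum(False))). The helper's exact behavior is not shown in this diff; the sketch below uses a hand-rolled stand-in validator, with hypothetical StorageClass/LakeDataset names, to illustrate the apparent intent of accepting unknown values rather than rejecting them.

# Hypothetical stand-in for the SDK's validate_open_enum helper -- not its real code.
from enum import Enum
from typing import Optional

from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class StorageClass(str, Enum):
    STANDARD = "STANDARD"
    GLACIER = "GLACIER"


def open_enum(enum_cls):
    def validate(value):
        try:
            return enum_cls(value)  # known value -> enum member
        except ValueError:
            return value            # unknown value -> left as-is
    return validate


class LakeDataset(BaseModel):  # hypothetical model, not OutputCriblLake
    storage_class: Annotated[
        Optional[StorageClass], PlainValidator(open_enum(StorageClass))
    ] = None


print(LakeDataset(storage_class="GLACIER").storage_class)          # becomes the enum member
print(LakeDataset(storage_class="EXPRESS_ONEZONE").storage_class)  # raw string is preserved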
cribl_control_plane/models/outputcribltcp.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,9 +18,7 @@ class OutputCriblTCPType(str, Enum):
 class OutputCriblTCPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
@@ -107,33 +104,12 @@ class OutputCriblTCPTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None
 
-    @field_serializer("min_version")
-    def serialize_min_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblTCPMinimumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("max_version")
-    def serialize_max_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblTCPMaximumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
 
 class OutputCriblTCPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -175,45 +151,29 @@ class OutputCriblTCPHost(BaseModel):
     weight: Optional[float] = 1
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
 
-    @field_serializer("tls")
-    def serialize_tls(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblTCPTLS(value)
-            except ValueError:
-                return value
-        return value
-
-
-class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
 
 class OutputCriblTCPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputCriblTCPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputCriblTCPPqControlsTypedDict(TypedDict):
     pass
 
@@ -268,16 +228,6 @@ class OutputCriblTCPTypedDict(TypedDict):
     r"""How far back in time to keep traffic stats for load balancing purposes"""
     max_concurrent_senders: NotRequired[float]
    r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputCriblTCPMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -288,6 +238,8 @@ class OutputCriblTCPTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputCriblTCPQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputCriblTCPMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputCriblTCPPqControlsTypedDict]
 
 
@@ -391,34 +343,6 @@ class OutputCriblTCP(BaseModel):
     ] = 0
     r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputCriblTCPMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -450,51 +374,14 @@ class OutputCriblTCP(BaseModel):
     ] = OutputCriblTCPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputCriblTCPMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputCriblTCPPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("compression")
-    def serialize_compression(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblTCPCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblTCPBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblTCPMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblTCPPqCompressCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputCriblTCPQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value