cribl-control-plane 0.2.1rc7 (py3-none-any.whl) → 0.3.0a1 (py3-none-any.whl)
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/__init__.py +5 -8
- cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
- cribl_control_plane/groups_sdk.py +28 -52
- cribl_control_plane/health.py +16 -22
- cribl_control_plane/models/__init__.py +54 -217
- cribl_control_plane/models/appmode.py +14 -0
- cribl_control_plane/models/authtoken.py +1 -5
- cribl_control_plane/models/cacheconnection.py +0 -20
- cribl_control_plane/models/configgroup.py +7 -55
- cribl_control_plane/models/configgroupcloud.py +1 -11
- cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
- cribl_control_plane/models/createroutesappendbyidop.py +2 -2
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +1 -11
- cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
- cribl_control_plane/models/datasetmetadata.py +1 -11
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
- cribl_control_plane/models/distributedsummary.py +0 -6
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +0 -11
- cribl_control_plane/models/hbcriblinfo.py +3 -24
- cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/input.py +78 -80
- cribl_control_plane/models/inputappscope.py +17 -80
- cribl_control_plane/models/inputazureblob.py +1 -33
- cribl_control_plane/models/inputcollection.py +1 -24
- cribl_control_plane/models/inputconfluentcloud.py +18 -195
- cribl_control_plane/models/inputcribl.py +1 -24
- cribl_control_plane/models/inputcriblhttp.py +17 -62
- cribl_control_plane/models/inputcribllakehttp.py +17 -62
- cribl_control_plane/models/inputcriblmetrics.py +1 -24
- cribl_control_plane/models/inputcribltcp.py +17 -62
- cribl_control_plane/models/inputcrowdstrike.py +1 -54
- cribl_control_plane/models/inputdatadogagent.py +17 -62
- cribl_control_plane/models/inputdatagen.py +1 -24
- cribl_control_plane/models/inputedgeprometheus.py +34 -147
- cribl_control_plane/models/inputelastic.py +27 -119
- cribl_control_plane/models/inputeventhub.py +1 -182
- cribl_control_plane/models/inputexec.py +1 -33
- cribl_control_plane/models/inputfile.py +3 -42
- cribl_control_plane/models/inputfirehose.py +17 -62
- cribl_control_plane/models/inputgooglepubsub.py +1 -36
- cribl_control_plane/models/inputgrafana.py +32 -157
- cribl_control_plane/models/inputhttp.py +17 -62
- cribl_control_plane/models/inputhttpraw.py +17 -62
- cribl_control_plane/models/inputjournalfiles.py +1 -24
- cribl_control_plane/models/inputkafka.py +17 -189
- cribl_control_plane/models/inputkinesis.py +1 -80
- cribl_control_plane/models/inputkubeevents.py +1 -24
- cribl_control_plane/models/inputkubelogs.py +1 -33
- cribl_control_plane/models/inputkubemetrics.py +1 -33
- cribl_control_plane/models/inputloki.py +17 -71
- cribl_control_plane/models/inputmetrics.py +17 -62
- cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
- cribl_control_plane/models/inputmsk.py +18 -81
- cribl_control_plane/models/inputnetflow.py +1 -24
- cribl_control_plane/models/inputoffice365mgmt.py +1 -67
- cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
- cribl_control_plane/models/inputoffice365service.py +1 -67
- cribl_control_plane/models/inputopentelemetry.py +16 -92
- cribl_control_plane/models/inputprometheus.py +34 -138
- cribl_control_plane/models/inputprometheusrw.py +17 -71
- cribl_control_plane/models/inputrawudp.py +1 -24
- cribl_control_plane/models/inputs3.py +1 -45
- cribl_control_plane/models/inputs3inventory.py +1 -54
- cribl_control_plane/models/inputsecuritylake.py +1 -54
- cribl_control_plane/models/inputsnmp.py +1 -40
- cribl_control_plane/models/inputsplunk.py +17 -85
- cribl_control_plane/models/inputsplunkhec.py +16 -70
- cribl_control_plane/models/inputsplunksearch.py +1 -63
- cribl_control_plane/models/inputsqs.py +1 -56
- cribl_control_plane/models/inputsyslog.py +32 -121
- cribl_control_plane/models/inputsystemmetrics.py +9 -142
- cribl_control_plane/models/inputsystemstate.py +1 -33
- cribl_control_plane/models/inputtcp.py +17 -81
- cribl_control_plane/models/inputtcpjson.py +17 -71
- cribl_control_plane/models/inputwef.py +1 -71
- cribl_control_plane/models/inputwindowsmetrics.py +9 -129
- cribl_control_plane/models/inputwineventlogs.py +1 -60
- cribl_control_plane/models/inputwiz.py +1 -45
- cribl_control_plane/models/inputwizwebhook.py +17 -62
- cribl_control_plane/models/inputzscalerhec.py +16 -70
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/jobstatus.py +3 -34
- cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
- cribl_control_plane/models/logininfo.py +3 -3
- cribl_control_plane/models/masterworkerentry.py +1 -11
- cribl_control_plane/models/nodeprovidedinfo.py +1 -11
- cribl_control_plane/models/nodeupgradestatus.py +0 -38
- cribl_control_plane/models/output.py +88 -93
- cribl_control_plane/models/outputazureblob.py +1 -110
- cribl_control_plane/models/outputazuredataexplorer.py +87 -452
- cribl_control_plane/models/outputazureeventhub.py +19 -281
- cribl_control_plane/models/outputazurelogs.py +19 -115
- cribl_control_plane/models/outputchronicle.py +19 -115
- cribl_control_plane/models/outputclickhouse.py +19 -155
- cribl_control_plane/models/outputcloudwatch.py +19 -106
- cribl_control_plane/models/outputconfluentcloud.py +38 -311
- cribl_control_plane/models/outputcriblhttp.py +19 -135
- cribl_control_plane/models/outputcribllake.py +1 -97
- cribl_control_plane/models/outputcribltcp.py +19 -132
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
- cribl_control_plane/models/outputdatadog.py +19 -159
- cribl_control_plane/models/outputdataset.py +19 -143
- cribl_control_plane/models/outputdiskspool.py +1 -11
- cribl_control_plane/models/outputdls3.py +1 -152
- cribl_control_plane/models/outputdynatracehttp.py +19 -160
- cribl_control_plane/models/outputdynatraceotlp.py +19 -160
- cribl_control_plane/models/outputelastic.py +19 -163
- cribl_control_plane/models/outputelasticcloud.py +19 -140
- cribl_control_plane/models/outputexabeam.py +1 -61
- cribl_control_plane/models/outputfilesystem.py +1 -87
- cribl_control_plane/models/outputgooglechronicle.py +20 -166
- cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
- cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
- cribl_control_plane/models/outputgooglepubsub.py +19 -106
- cribl_control_plane/models/outputgrafanacloud.py +37 -288
- cribl_control_plane/models/outputgraphite.py +19 -105
- cribl_control_plane/models/outputhoneycomb.py +19 -115
- cribl_control_plane/models/outputhumiohec.py +19 -126
- cribl_control_plane/models/outputinfluxdb.py +19 -130
- cribl_control_plane/models/outputkafka.py +34 -302
- cribl_control_plane/models/outputkinesis.py +19 -133
- cribl_control_plane/models/outputloki.py +17 -129
- cribl_control_plane/models/outputminio.py +1 -145
- cribl_control_plane/models/outputmsk.py +34 -193
- cribl_control_plane/models/outputnewrelic.py +19 -136
- cribl_control_plane/models/outputnewrelicevents.py +20 -128
- cribl_control_plane/models/outputopentelemetry.py +19 -178
- cribl_control_plane/models/outputprometheus.py +19 -115
- cribl_control_plane/models/outputring.py +1 -31
- cribl_control_plane/models/outputs3.py +1 -152
- cribl_control_plane/models/outputsecuritylake.py +1 -114
- cribl_control_plane/models/outputsentinel.py +19 -135
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
- cribl_control_plane/models/outputservicenow.py +19 -168
- cribl_control_plane/models/outputsignalfx.py +19 -115
- cribl_control_plane/models/outputsns.py +17 -113
- cribl_control_plane/models/outputsplunk.py +19 -153
- cribl_control_plane/models/outputsplunkhec.py +19 -208
- cribl_control_plane/models/outputsplunklb.py +19 -182
- cribl_control_plane/models/outputsqs.py +17 -124
- cribl_control_plane/models/outputstatsd.py +19 -105
- cribl_control_plane/models/outputstatsdext.py +19 -105
- cribl_control_plane/models/outputsumologic.py +19 -117
- cribl_control_plane/models/outputsyslog.py +96 -259
- cribl_control_plane/models/outputtcpjson.py +19 -141
- cribl_control_plane/models/outputwavefront.py +19 -115
- cribl_control_plane/models/outputwebhook.py +19 -161
- cribl_control_plane/models/outputxsiam.py +17 -113
- cribl_control_plane/models/packinfo.py +5 -8
- cribl_control_plane/models/packinstallinfo.py +5 -8
- cribl_control_plane/models/resourcepolicy.py +0 -11
- cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +9 -72
- cribl_control_plane/models/runnablejobexecutor.py +9 -32
- cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
- cribl_control_plane/packs.py +7 -202
- cribl_control_plane/routes_sdk.py +6 -6
- cribl_control_plane/tokens.py +15 -23
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
- cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
- cribl_control_plane/models/groupcreaterequest.py +0 -171
- cribl_control_plane/models/outpostnodeinfo.py +0 -16
- cribl_control_plane/models/outputdatabricks.py +0 -482
- cribl_control_plane/models/updatepacksop.py +0 -25
- cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
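Much of the per-model churn in this list appears to come from two generated-code changes, shown in full in the two representative diffs below (outputtcpjson.py and outputwavefront.py): the per-field `@field_serializer` helpers are deleted, and the persistent-queue tuning fields `pqStrictOrdering`, `pqRatePerSec`, `pqMaxBufferSize`, and `pqMaxBackpressureSec` are dropped, while `pqMode` is relocated after `pqOnBackpressure`. As a hedged illustration of what those deleted serializers did, a minimal sketch with stand-in names (not the SDK's own classes):

```python
# Self-contained sketch of the removed pattern, using stand-in names (Compression,
# LegacyStyleOutput) rather than the SDK's generated classes. In 0.2.1rc7 each
# open-enum field carried a @field_serializer like this, coercing known strings back
# into the enum on dump and passing unknown values through unchanged.
from enum import Enum
from typing import Optional

import pydantic
from pydantic import field_serializer


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


class LegacyStyleOutput(pydantic.BaseModel):
    compression: Optional[str] = "none"

    @field_serializer("compression")
    def serialize_compression(self, value):
        # Normalize recognized strings to the enum; leave unrecognized ones as-is.
        if isinstance(value, str):
            try:
                return Compression(value)
            except ValueError:
                return value
        return value


print(LegacyStyleOutput(compression="gzip").model_dump())  # {'compression': <Compression.GZIP: 'gzip'>}
print(LegacyStyleOutput(compression="zstd").model_dump())  # {'compression': 'zstd'}
```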
cribl_control_plane/models/outputtcpjson.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,9 +18,7 @@ class OutputTcpjsonType(str, Enum):
 class OutputTcpjsonCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


@@ -107,33 +104,12 @@ class OutputTcpjsonTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None

-    @field_serializer("min_version")
-    def serialize_min_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonMinimumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("max_version")
-    def serialize_max_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonMaximumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-

 class OutputTcpjsonBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


@@ -182,45 +158,29 @@ class OutputTcpjsonHost(BaseModel):
     weight: Optional[float] = 1
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""

-    @field_serializer("tls")
-    def serialize_tls(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonTLS(value)
-            except ValueError:
-                return value
-        return value
-
-
-class OutputTcpjsonMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-

 class OutputTcpjsonPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputTcpjsonQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


+class OutputTcpjsonMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputTcpjsonPqControlsTypedDict(TypedDict):
     pass

@@ -277,16 +237,6 @@ class OutputTcpjsonTypedDict(TypedDict):
     r"""How far back in time to keep traffic stats for load balancing purposes"""
     max_concurrent_senders: NotRequired[float]
     r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputTcpjsonMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -297,6 +247,8 @@ class OutputTcpjsonTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputTcpjsonQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputTcpjsonMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputTcpjsonPqControlsTypedDict]
     auth_token: NotRequired[str]
     r"""Optional authentication token to include as part of the connection header"""
@@ -411,34 +363,6 @@ class OutputTcpjson(BaseModel):
     ] = 0
     r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputTcpjsonMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputTcpjsonMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -470,6 +394,14 @@ class OutputTcpjson(BaseModel):
     ] = OutputTcpjsonQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputTcpjsonMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputTcpjsonMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputTcpjsonPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -479,57 +411,3 @@ class OutputTcpjson(BaseModel):

     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
-
-    @field_serializer("compression")
-    def serialize_compression(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("auth_type")
-    def serialize_auth_type(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonPqCompressCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputTcpjsonQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
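What remains on the 0.3.0a1 side is the validation half of the open-enum handling, the `PlainValidator(validate_open_enum(False))` annotations still visible above. A rough approximation of that behavior with plain pydantic, assuming `validate_open_enum` accepts unknown strings rather than rejecting them (the SDK's own implementation is not part of this diff):

```python
# Approximate stand-in for the open-enum validation the generated models rely on.
# _open_enum below is a guess at the behavior of validate_open_enum(False): known
# values become enum members, unknown strings pass through.
from enum import Enum
from typing import Union

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


def _open_enum(value: object) -> Union[Mode, str]:
    try:
        return Mode(value)
    except ValueError:
        return str(value)


class Example(pydantic.BaseModel):
    pq_mode: Annotated[Union[Mode, str], PlainValidator(_open_enum)] = Mode.ERROR


print(Example(pq_mode="backpressure").pq_mode)      # Mode.BACKPRESSURE
print(Example(pq_mode="some-future-mode").pq_mode)  # some-future-mode
```

The same reshuffle then repeats in the outputwavefront.py diff that follows.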
cribl_control_plane/models/outputwavefront.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -37,11 +36,8 @@ class OutputWavefrontExtraHTTPHeader(BaseModel):
 class OutputWavefrontFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"


@@ -102,43 +98,33 @@ class OutputWavefrontTimeoutRetrySettings(BaseModel):
 class OutputWavefrontBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


-class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputWavefrontCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputWavefrontQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


+class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputWavefrontPqControlsTypedDict(TypedDict):
     pass

@@ -202,16 +188,6 @@ class OutputWavefrontTypedDict(TypedDict):
     r"""WaveFront API authentication token (see [here](https://docs.wavefront.com/wavefront_api.html#generating-an-api-token))"""
     text_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputWavefrontMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -222,6 +198,8 @@ class OutputWavefrontTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputWavefrontQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputWavefrontMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputWavefrontPqControlsTypedDict]


@@ -347,34 +325,6 @@ class OutputWavefront(BaseModel):
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputWavefrontMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -406,60 +356,14 @@ class OutputWavefront(BaseModel):
     ] = OutputWavefrontQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputWavefrontMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputWavefrontPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("auth_type")
-    def serialize_auth_type(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputWavefrontAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("failed_request_logging_mode")
-    def serialize_failed_request_logging_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputWavefrontFailedRequestLoggingMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputWavefrontBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputWavefrontMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputWavefrontCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputWavefrontQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value