cribl-control-plane 0.2.1rc7 → 0.3.0a1 (py3-none-any wheel)
This diff compares the contents of publicly available package versions as released to their public registries. It is provided for informational purposes only.
Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic by the registry.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/__init__.py +5 -8
- cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
- cribl_control_plane/groups_sdk.py +28 -52
- cribl_control_plane/health.py +16 -22
- cribl_control_plane/models/__init__.py +54 -217
- cribl_control_plane/models/appmode.py +14 -0
- cribl_control_plane/models/authtoken.py +1 -5
- cribl_control_plane/models/cacheconnection.py +0 -20
- cribl_control_plane/models/configgroup.py +7 -55
- cribl_control_plane/models/configgroupcloud.py +1 -11
- cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
- cribl_control_plane/models/createroutesappendbyidop.py +2 -2
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +1 -11
- cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
- cribl_control_plane/models/datasetmetadata.py +1 -11
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
- cribl_control_plane/models/distributedsummary.py +0 -6
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +0 -11
- cribl_control_plane/models/hbcriblinfo.py +3 -24
- cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/input.py +78 -80
- cribl_control_plane/models/inputappscope.py +17 -80
- cribl_control_plane/models/inputazureblob.py +1 -33
- cribl_control_plane/models/inputcollection.py +1 -24
- cribl_control_plane/models/inputconfluentcloud.py +18 -195
- cribl_control_plane/models/inputcribl.py +1 -24
- cribl_control_plane/models/inputcriblhttp.py +17 -62
- cribl_control_plane/models/inputcribllakehttp.py +17 -62
- cribl_control_plane/models/inputcriblmetrics.py +1 -24
- cribl_control_plane/models/inputcribltcp.py +17 -62
- cribl_control_plane/models/inputcrowdstrike.py +1 -54
- cribl_control_plane/models/inputdatadogagent.py +17 -62
- cribl_control_plane/models/inputdatagen.py +1 -24
- cribl_control_plane/models/inputedgeprometheus.py +34 -147
- cribl_control_plane/models/inputelastic.py +27 -119
- cribl_control_plane/models/inputeventhub.py +1 -182
- cribl_control_plane/models/inputexec.py +1 -33
- cribl_control_plane/models/inputfile.py +3 -42
- cribl_control_plane/models/inputfirehose.py +17 -62
- cribl_control_plane/models/inputgooglepubsub.py +1 -36
- cribl_control_plane/models/inputgrafana.py +32 -157
- cribl_control_plane/models/inputhttp.py +17 -62
- cribl_control_plane/models/inputhttpraw.py +17 -62
- cribl_control_plane/models/inputjournalfiles.py +1 -24
- cribl_control_plane/models/inputkafka.py +17 -189
- cribl_control_plane/models/inputkinesis.py +1 -80
- cribl_control_plane/models/inputkubeevents.py +1 -24
- cribl_control_plane/models/inputkubelogs.py +1 -33
- cribl_control_plane/models/inputkubemetrics.py +1 -33
- cribl_control_plane/models/inputloki.py +17 -71
- cribl_control_plane/models/inputmetrics.py +17 -62
- cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
- cribl_control_plane/models/inputmsk.py +18 -81
- cribl_control_plane/models/inputnetflow.py +1 -24
- cribl_control_plane/models/inputoffice365mgmt.py +1 -67
- cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
- cribl_control_plane/models/inputoffice365service.py +1 -67
- cribl_control_plane/models/inputopentelemetry.py +16 -92
- cribl_control_plane/models/inputprometheus.py +34 -138
- cribl_control_plane/models/inputprometheusrw.py +17 -71
- cribl_control_plane/models/inputrawudp.py +1 -24
- cribl_control_plane/models/inputs3.py +1 -45
- cribl_control_plane/models/inputs3inventory.py +1 -54
- cribl_control_plane/models/inputsecuritylake.py +1 -54
- cribl_control_plane/models/inputsnmp.py +1 -40
- cribl_control_plane/models/inputsplunk.py +17 -85
- cribl_control_plane/models/inputsplunkhec.py +16 -70
- cribl_control_plane/models/inputsplunksearch.py +1 -63
- cribl_control_plane/models/inputsqs.py +1 -56
- cribl_control_plane/models/inputsyslog.py +32 -121
- cribl_control_plane/models/inputsystemmetrics.py +9 -142
- cribl_control_plane/models/inputsystemstate.py +1 -33
- cribl_control_plane/models/inputtcp.py +17 -81
- cribl_control_plane/models/inputtcpjson.py +17 -71
- cribl_control_plane/models/inputwef.py +1 -71
- cribl_control_plane/models/inputwindowsmetrics.py +9 -129
- cribl_control_plane/models/inputwineventlogs.py +1 -60
- cribl_control_plane/models/inputwiz.py +1 -45
- cribl_control_plane/models/inputwizwebhook.py +17 -62
- cribl_control_plane/models/inputzscalerhec.py +16 -70
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/jobstatus.py +3 -34
- cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
- cribl_control_plane/models/logininfo.py +3 -3
- cribl_control_plane/models/masterworkerentry.py +1 -11
- cribl_control_plane/models/nodeprovidedinfo.py +1 -11
- cribl_control_plane/models/nodeupgradestatus.py +0 -38
- cribl_control_plane/models/output.py +88 -93
- cribl_control_plane/models/outputazureblob.py +1 -110
- cribl_control_plane/models/outputazuredataexplorer.py +87 -452
- cribl_control_plane/models/outputazureeventhub.py +19 -281
- cribl_control_plane/models/outputazurelogs.py +19 -115
- cribl_control_plane/models/outputchronicle.py +19 -115
- cribl_control_plane/models/outputclickhouse.py +19 -155
- cribl_control_plane/models/outputcloudwatch.py +19 -106
- cribl_control_plane/models/outputconfluentcloud.py +38 -311
- cribl_control_plane/models/outputcriblhttp.py +19 -135
- cribl_control_plane/models/outputcribllake.py +1 -97
- cribl_control_plane/models/outputcribltcp.py +19 -132
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
- cribl_control_plane/models/outputdatadog.py +19 -159
- cribl_control_plane/models/outputdataset.py +19 -143
- cribl_control_plane/models/outputdiskspool.py +1 -11
- cribl_control_plane/models/outputdls3.py +1 -152
- cribl_control_plane/models/outputdynatracehttp.py +19 -160
- cribl_control_plane/models/outputdynatraceotlp.py +19 -160
- cribl_control_plane/models/outputelastic.py +19 -163
- cribl_control_plane/models/outputelasticcloud.py +19 -140
- cribl_control_plane/models/outputexabeam.py +1 -61
- cribl_control_plane/models/outputfilesystem.py +1 -87
- cribl_control_plane/models/outputgooglechronicle.py +20 -166
- cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
- cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
- cribl_control_plane/models/outputgooglepubsub.py +19 -106
- cribl_control_plane/models/outputgrafanacloud.py +37 -288
- cribl_control_plane/models/outputgraphite.py +19 -105
- cribl_control_plane/models/outputhoneycomb.py +19 -115
- cribl_control_plane/models/outputhumiohec.py +19 -126
- cribl_control_plane/models/outputinfluxdb.py +19 -130
- cribl_control_plane/models/outputkafka.py +34 -302
- cribl_control_plane/models/outputkinesis.py +19 -133
- cribl_control_plane/models/outputloki.py +17 -129
- cribl_control_plane/models/outputminio.py +1 -145
- cribl_control_plane/models/outputmsk.py +34 -193
- cribl_control_plane/models/outputnewrelic.py +19 -136
- cribl_control_plane/models/outputnewrelicevents.py +20 -128
- cribl_control_plane/models/outputopentelemetry.py +19 -178
- cribl_control_plane/models/outputprometheus.py +19 -115
- cribl_control_plane/models/outputring.py +1 -31
- cribl_control_plane/models/outputs3.py +1 -152
- cribl_control_plane/models/outputsecuritylake.py +1 -114
- cribl_control_plane/models/outputsentinel.py +19 -135
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
- cribl_control_plane/models/outputservicenow.py +19 -168
- cribl_control_plane/models/outputsignalfx.py +19 -115
- cribl_control_plane/models/outputsns.py +17 -113
- cribl_control_plane/models/outputsplunk.py +19 -153
- cribl_control_plane/models/outputsplunkhec.py +19 -208
- cribl_control_plane/models/outputsplunklb.py +19 -182
- cribl_control_plane/models/outputsqs.py +17 -124
- cribl_control_plane/models/outputstatsd.py +19 -105
- cribl_control_plane/models/outputstatsdext.py +19 -105
- cribl_control_plane/models/outputsumologic.py +19 -117
- cribl_control_plane/models/outputsyslog.py +96 -259
- cribl_control_plane/models/outputtcpjson.py +19 -141
- cribl_control_plane/models/outputwavefront.py +19 -115
- cribl_control_plane/models/outputwebhook.py +19 -161
- cribl_control_plane/models/outputxsiam.py +17 -113
- cribl_control_plane/models/packinfo.py +5 -8
- cribl_control_plane/models/packinstallinfo.py +5 -8
- cribl_control_plane/models/resourcepolicy.py +0 -11
- cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +9 -72
- cribl_control_plane/models/runnablejobexecutor.py +9 -32
- cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
- cribl_control_plane/packs.py +7 -202
- cribl_control_plane/routes_sdk.py +6 -6
- cribl_control_plane/tokens.py +15 -23
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
- cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
- cribl_control_plane/models/groupcreaterequest.py +0 -171
- cribl_control_plane/models/outpostnodeinfo.py +0 -16
- cribl_control_plane/models/outputdatabricks.py +0 -482
- cribl_control_plane/models/updatepacksop.py +0 -25
- cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0

cribl_control_plane/models/outputsns.py (+17 -113):

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,11 +18,8 @@ class OutputSnsType(str, Enum):
 class OutputSnsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"
 
 
@@ -37,43 +33,33 @@ class OutputSnsSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputSnsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
-class OutputSnsMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputSnsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputSnsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputSnsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputSnsPqControlsTypedDict(TypedDict):
     pass
 
@@ -127,16 +113,6 @@ class OutputSnsTypedDict(TypedDict):
     aws_api_key: NotRequired[str]
     aws_secret: NotRequired[str]
     r"""Select or create a stored secret that references your access key and secret key"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputSnsMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -147,6 +123,8 @@ class OutputSnsTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputSnsQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputSnsMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputSnsPqControlsTypedDict]
 
 
@@ -253,32 +231,6 @@ class OutputSns(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[Optional[OutputSnsMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSnsMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -309,60 +261,12 @@ class OutputSns(BaseModel):
     ] = OutputSnsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[Optional[OutputSnsMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSnsMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputSnsPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("aws_authentication_method")
-    def serialize_aws_authentication_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSnsAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("signature_version")
-    def serialize_signature_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSnsSignatureVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSnsBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSnsMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSnsCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSnsQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
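
Most of the removed lines above (and in the other output models in the diffstat) are per-field `field_serializer` hooks that coerced a raw string back into the corresponding enum on serialization, falling back to the raw value when it did not match. A minimal, self-contained sketch of that removed pattern, using a hypothetical `Mode` enum and `PqSettings` model rather than the generated `OutputSns` classes:

```python
from enum import Enum
from typing import Optional

from pydantic import BaseModel, field_serializer


class Mode(str, Enum):
    # Stand-in for an open enum such as OutputSnsMode
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class PqSettings(BaseModel):
    # Stand-in for the generated model; the real classes carry many more fields
    pq_mode: Optional[str] = Mode.ERROR

    # Shape of the hooks that 0.3.0a1 removes: map a raw string onto the enum
    # when possible, otherwise pass the unrecognized value through unchanged.
    @field_serializer("pq_mode")
    def serialize_pq_mode(self, value):
        if isinstance(value, str):
            try:
                return Mode(value)
            except ValueError:
                return value
        return value


print(PqSettings(pq_mode="backpressure").model_dump())  # known value -> enum member
print(PqSettings(pq_mode="not-a-mode").model_dump())    # unknown value passes through
```

With these hooks gone, open-enum handling presumably rests on the `PlainValidator(validate_open_enum(False))` annotations that remain on the fields, which is why the `from pydantic import field_serializer` import is dropped as well.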
cribl_control_plane/models/outputsplunk.py (+19 -153):

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,9 +18,7 @@ class OutputSplunkType(str, Enum):
 class OutputSplunkNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to serialize nested fields into index-time fields"""
 
-    # JSON
     JSON = "json"
-    # None
     NONE = "none"
 
 
@@ -107,24 +104,6 @@ class OutputSplunkTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None
 
-    @field_serializer("min_version")
-    def serialize_min_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkMinimumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("max_version")
-    def serialize_max_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkMaximumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
 
 class OutputSplunkMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The highest S2S protocol version to advertise during handshake"""
@@ -136,11 +115,8 @@ class OutputSplunkMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputSplunkBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -154,43 +130,33 @@ class OutputSplunkAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputSplunkCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""
 
-    # Disabled
     DISABLED = "disabled"
-    # Automatic
     AUTO = "auto"
-    # Always
     ALWAYS = "always"
 
 
-class OutputSplunkMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputSplunkPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputSplunkQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputSplunkMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputSplunkPqControlsTypedDict(TypedDict):
     pass
 
@@ -241,16 +207,6 @@ class OutputSplunkTypedDict(TypedDict):
     r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""
     compress: NotRequired[OutputSplunkCompressCompression]
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputSplunkMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
    pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -261,6 +217,8 @@ class OutputSplunkTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputSplunkQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputSplunkMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputSplunkPqControlsTypedDict]
     auth_token: NotRequired[str]
     r"""Shared secret token to use when establishing a connection to a Splunk indexer."""
@@ -373,34 +331,6 @@ class OutputSplunk(BaseModel):
     ] = OutputSplunkCompressCompression.DISABLED
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSplunkMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -432,6 +362,14 @@ class OutputSplunk(BaseModel):
     ] = OutputSplunkQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputSplunkMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSplunkMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputSplunkPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -441,75 +379,3 @@ class OutputSplunk(BaseModel):
 
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
-
-    @field_serializer("nested_fields")
-    def serialize_nested_fields(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkNestedFieldSerialization(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("max_s2_sversion")
-    def serialize_max_s2_sversion(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkMaxS2SVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("auth_type")
-    def serialize_auth_type(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("compress")
-    def serialize_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkCompressCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkPqCompressCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSplunkQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value