cribl-control-plane 0.2.1rc5-py3-none-any.whl → 0.2.1rc7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/groups_sdk.py +0 -3
- cribl_control_plane/models/__init__.py +120 -150
- cribl_control_plane/models/cacheconnection.py +20 -0
- cribl_control_plane/models/configgroup.py +20 -1
- cribl_control_plane/models/configgroupcloud.py +11 -1
- cribl_control_plane/models/createconfiggroupbyproductop.py +11 -0
- cribl_control_plane/models/cribllakedataset.py +11 -1
- cribl_control_plane/models/cribllakedatasetupdate.py +11 -1
- cribl_control_plane/models/datasetmetadata.py +11 -1
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
- cribl_control_plane/models/getsummaryop.py +11 -0
- cribl_control_plane/models/groupcreaterequest.py +20 -1
- cribl_control_plane/models/hbcriblinfo.py +11 -1
- cribl_control_plane/models/healthserverstatus.py +20 -1
- cribl_control_plane/models/input.py +15 -15
- cribl_control_plane/models/inputappscope.py +76 -17
- cribl_control_plane/models/inputazureblob.py +29 -1
- cribl_control_plane/models/inputcollection.py +20 -1
- cribl_control_plane/models/inputconfluentcloud.py +188 -1
- cribl_control_plane/models/inputcribl.py +20 -1
- cribl_control_plane/models/inputcriblhttp.py +58 -17
- cribl_control_plane/models/inputcribllakehttp.py +58 -17
- cribl_control_plane/models/inputcriblmetrics.py +20 -1
- cribl_control_plane/models/inputcribltcp.py +58 -17
- cribl_control_plane/models/inputcrowdstrike.py +47 -1
- cribl_control_plane/models/inputdatadogagent.py +58 -17
- cribl_control_plane/models/inputdatagen.py +20 -1
- cribl_control_plane/models/inputedgeprometheus.py +138 -37
- cribl_control_plane/models/inputelastic.py +108 -27
- cribl_control_plane/models/inputeventhub.py +176 -1
- cribl_control_plane/models/inputexec.py +29 -1
- cribl_control_plane/models/inputfile.py +36 -3
- cribl_control_plane/models/inputfirehose.py +58 -17
- cribl_control_plane/models/inputgooglepubsub.py +29 -1
- cribl_control_plane/models/inputgrafana.py +149 -32
- cribl_control_plane/models/inputhttp.py +58 -17
- cribl_control_plane/models/inputhttpraw.py +58 -17
- cribl_control_plane/models/inputjournalfiles.py +20 -1
- cribl_control_plane/models/inputkafka.py +182 -1
- cribl_control_plane/models/inputkinesis.py +65 -1
- cribl_control_plane/models/inputkubeevents.py +20 -1
- cribl_control_plane/models/inputkubelogs.py +29 -1
- cribl_control_plane/models/inputkubemetrics.py +29 -1
- cribl_control_plane/models/inputloki.py +67 -17
- cribl_control_plane/models/inputmetrics.py +58 -17
- cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
- cribl_control_plane/models/inputmsk.py +74 -1
- cribl_control_plane/models/inputnetflow.py +20 -1
- cribl_control_plane/models/inputoffice365mgmt.py +56 -1
- cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
- cribl_control_plane/models/inputoffice365service.py +56 -1
- cribl_control_plane/models/inputopentelemetry.py +84 -16
- cribl_control_plane/models/inputprometheus.py +131 -37
- cribl_control_plane/models/inputprometheusrw.py +67 -17
- cribl_control_plane/models/inputrawudp.py +20 -1
- cribl_control_plane/models/inputs3.py +38 -1
- cribl_control_plane/models/inputs3inventory.py +47 -1
- cribl_control_plane/models/inputsecuritylake.py +47 -1
- cribl_control_plane/models/inputsnmp.py +29 -1
- cribl_control_plane/models/inputsplunk.py +76 -17
- cribl_control_plane/models/inputsplunkhec.py +66 -16
- cribl_control_plane/models/inputsplunksearch.py +56 -1
- cribl_control_plane/models/inputsqs.py +47 -1
- cribl_control_plane/models/inputsyslog.py +113 -32
- cribl_control_plane/models/inputsystemmetrics.py +110 -9
- cribl_control_plane/models/inputsystemstate.py +29 -1
- cribl_control_plane/models/inputtcp.py +77 -17
- cribl_control_plane/models/inputtcpjson.py +67 -17
- cribl_control_plane/models/inputwef.py +65 -1
- cribl_control_plane/models/inputwindowsmetrics.py +101 -9
- cribl_control_plane/models/inputwineventlogs.py +52 -1
- cribl_control_plane/models/inputwiz.py +38 -1
- cribl_control_plane/models/inputwizwebhook.py +58 -17
- cribl_control_plane/models/inputzscalerhec.py +66 -16
- cribl_control_plane/models/jobstatus.py +34 -3
- cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
- cribl_control_plane/models/masterworkerentry.py +11 -1
- cribl_control_plane/models/nodeupgradestatus.py +38 -0
- cribl_control_plane/models/output.py +21 -21
- cribl_control_plane/models/outputazureblob.py +90 -1
- cribl_control_plane/models/outputazuredataexplorer.py +430 -93
- cribl_control_plane/models/outputazureeventhub.py +267 -22
- cribl_control_plane/models/outputazurelogs.py +105 -22
- cribl_control_plane/models/outputchronicle.py +105 -22
- cribl_control_plane/models/outputclickhouse.py +141 -22
- cribl_control_plane/models/outputcloudwatch.py +96 -22
- cribl_control_plane/models/outputconfluentcloud.py +290 -23
- cribl_control_plane/models/outputcriblhttp.py +123 -22
- cribl_control_plane/models/outputcribllake.py +76 -1
- cribl_control_plane/models/outputcribltcp.py +123 -22
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
- cribl_control_plane/models/outputdatabricks.py +72 -1
- cribl_control_plane/models/outputdatadog.py +132 -22
- cribl_control_plane/models/outputdataset.py +123 -22
- cribl_control_plane/models/outputdiskspool.py +11 -1
- cribl_control_plane/models/outputdls3.py +117 -1
- cribl_control_plane/models/outputdynatracehttp.py +141 -22
- cribl_control_plane/models/outputdynatraceotlp.py +141 -22
- cribl_control_plane/models/outputelastic.py +148 -22
- cribl_control_plane/models/outputelasticcloud.py +130 -22
- cribl_control_plane/models/outputexabeam.py +47 -1
- cribl_control_plane/models/outputfilesystem.py +72 -1
- cribl_control_plane/models/outputgooglechronicle.py +148 -23
- cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
- cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
- cribl_control_plane/models/outputgooglepubsub.py +96 -22
- cribl_control_plane/models/outputgrafanacloud.py +244 -43
- cribl_control_plane/models/outputgraphite.py +96 -22
- cribl_control_plane/models/outputhoneycomb.py +105 -22
- cribl_control_plane/models/outputhumiohec.py +114 -22
- cribl_control_plane/models/outputinfluxdb.py +114 -22
- cribl_control_plane/models/outputkafka.py +281 -20
- cribl_control_plane/models/outputkinesis.py +121 -22
- cribl_control_plane/models/outputloki.py +112 -20
- cribl_control_plane/models/outputminio.py +117 -1
- cribl_control_plane/models/outputmsk.py +173 -20
- cribl_control_plane/models/outputnewrelic.py +123 -22
- cribl_control_plane/models/outputnewrelicevents.py +115 -23
- cribl_control_plane/models/outputopentelemetry.py +159 -22
- cribl_control_plane/models/outputprometheus.py +105 -22
- cribl_control_plane/models/outputring.py +29 -1
- cribl_control_plane/models/outputs3.py +117 -1
- cribl_control_plane/models/outputsecuritylake.py +85 -1
- cribl_control_plane/models/outputsentinel.py +123 -22
- cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
- cribl_control_plane/models/outputservicenow.py +150 -22
- cribl_control_plane/models/outputsignalfx.py +105 -22
- cribl_control_plane/models/outputsns.py +103 -20
- cribl_control_plane/models/outputsplunk.py +141 -22
- cribl_control_plane/models/outputsplunkhec.py +198 -22
- cribl_control_plane/models/outputsplunklb.py +170 -22
- cribl_control_plane/models/outputsqs.py +112 -20
- cribl_control_plane/models/outputstatsd.py +96 -22
- cribl_control_plane/models/outputstatsdext.py +96 -22
- cribl_control_plane/models/outputsumologic.py +105 -22
- cribl_control_plane/models/outputsyslog.py +238 -99
- cribl_control_plane/models/outputtcpjson.py +132 -22
- cribl_control_plane/models/outputwavefront.py +105 -22
- cribl_control_plane/models/outputwebhook.py +141 -22
- cribl_control_plane/models/outputxsiam.py +103 -20
- cribl_control_plane/models/pipeline.py +4 -4
- cribl_control_plane/models/resourcepolicy.py +11 -0
- cribl_control_plane/models/runnablejobcollection.py +68 -9
- cribl_control_plane/models/runnablejobexecutor.py +32 -9
- cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
- cribl_control_plane/pipelines.py +8 -8
- {cribl_control_plane-0.2.1rc5.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/METADATA +2 -11
- {cribl_control_plane-0.2.1rc5.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/RECORD +156 -167
- cribl_control_plane/mappings.py +0 -1185
- cribl_control_plane/models/createadminproductsmappingsactivatebyproductop.py +0 -52
- cribl_control_plane/models/createadminproductsmappingsbyproductop.py +0 -53
- cribl_control_plane/models/deleteadminproductsmappingsbyproductandidop.py +0 -51
- cribl_control_plane/models/getadminproductsmappingsbyproductandidop.py +0 -51
- cribl_control_plane/models/getadminproductsmappingsbyproductop.py +0 -44
- cribl_control_plane/models/mappingruleset.py +0 -53
- cribl_control_plane/models/mappingrulesetevalmappingfunction.py +0 -71
- cribl_control_plane/models/mappingrulesetgenericmappingfunction.py +0 -29
- cribl_control_plane/models/rulesetid.py +0 -13
- cribl_control_plane/models/updateadminproductsmappingsbyproductandidop.py +0 -63
- {cribl_control_plane-0.2.1rc5.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/WHEEL +0 -0
--- a/cribl_control_plane/models/outputsns.py
+++ b/cribl_control_plane/models/outputsns.py
@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -44,6 +45,17 @@ class OutputSnsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
+class OutputSnsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputSnsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -62,17 +74,6 @@ class OutputSnsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
-class OutputSnsMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputSnsPqControlsTypedDict(TypedDict):
     pass
 
@@ -126,6 +127,16 @@ class OutputSnsTypedDict(TypedDict):
     aws_api_key: NotRequired[str]
     aws_secret: NotRequired[str]
     r"""Select or create a stored secret that references your access key and secret key"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputSnsMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -136,8 +147,6 @@ class OutputSnsTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputSnsQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputSnsMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputSnsPqControlsTypedDict]
 
 
@@ -244,6 +253,32 @@ class OutputSns(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[Optional[OutputSnsMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSnsMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -274,12 +309,60 @@ class OutputSns(BaseModel):
     ] = OutputSnsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[Optional[OutputSnsMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSnsMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputSnsPqControls], pydantic.Field(alias="pqControls")
     ] = None
+
+    @field_serializer("aws_authentication_method")
+    def serialize_aws_authentication_method(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSnsAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("signature_version")
+    def serialize_signature_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSnsSignatureVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSnsBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSnsMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSnsCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSnsQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
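The pattern repeated across both files (and, per the file list above, most of the other input and output models) is a new pydantic field_serializer per open-enum field: on serialization, a plain string is promoted back to the matching enum member when the value is known, and passed through unchanged when it is not. Below is a minimal, self-contained sketch of that round-trip behavior, assuming pydantic v2 and using illustrative names (Mode, Output) rather than the SDK's OpenEnumMeta-based generated classes.

# Illustrative sketch only: mirrors the serializer pattern added in 0.2.1rc7,
# with made-up names, not the SDK's generated classes. Requires pydantic >= 2.
from enum import Enum
from typing import Union

from pydantic import BaseModel, Field, field_serializer


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class Output(BaseModel):
    # Open-enum style field: accepts a known Mode member or any other string.
    pq_mode: Union[Mode, str] = Field(default=Mode.ERROR, alias="pqMode")

    @field_serializer("pq_mode")
    def serialize_pq_mode(self, value):
        # Known strings become enum members; unknown strings pass through unchanged.
        if isinstance(value, str):
            try:
                return Mode(value)
            except ValueError:
                return value
        return value


print(Output(pqMode="backpressure").model_dump(by_alias=True))    # pqMode -> Mode.BACKPRESSURE
print(Output(pqMode="some-future-mode").model_dump(by_alias=True))  # pqMode -> 'some-future-mode'

Unknown values surviving a dump untouched is presumably the point of the open-enum design: server-side values the generated client does not know about yet still round-trip cleanly.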
--- a/cribl_control_plane/models/outputsplunk.py
+++ b/cribl_control_plane/models/outputsplunk.py
@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -106,6 +107,24 @@ class OutputSplunkTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None
 
+    @field_serializer("min_version")
+    def serialize_min_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkMinimumTLSVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("max_version")
+    def serialize_max_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkMaximumTLSVersion(value)
+            except ValueError:
+                return value
+        return value
+
 
 class OutputSplunkMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The highest S2S protocol version to advertise during handshake"""
@@ -143,6 +162,17 @@ class OutputSplunkCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     ALWAYS = "always"
 
 
+class OutputSplunkMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputSplunkPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -161,17 +191,6 @@ class OutputSplunkQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
-class OutputSplunkMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputSplunkPqControlsTypedDict(TypedDict):
     pass
 
@@ -222,6 +241,16 @@ class OutputSplunkTypedDict(TypedDict):
     r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""
     compress: NotRequired[OutputSplunkCompressCompression]
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputSplunkMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -232,8 +261,6 @@ class OutputSplunkTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputSplunkQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputSplunkMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputSplunkPqControlsTypedDict]
     auth_token: NotRequired[str]
     r"""Shared secret token to use when establishing a connection to a Splunk indexer."""
@@ -346,6 +373,34 @@ class OutputSplunk(BaseModel):
     ] = OutputSplunkCompressCompression.DISABLED
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputSplunkMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSplunkMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -377,14 +432,6 @@ class OutputSplunk(BaseModel):
     ] = OutputSplunkQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSplunkMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputSplunkPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -394,3 +441,75 @@ class OutputSplunk(BaseModel):
 
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
+
+    @field_serializer("nested_fields")
+    def serialize_nested_fields(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkNestedFieldSerialization(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("max_s2_sversion")
+    def serialize_max_s2_sversion(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkMaxS2SVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("auth_type")
+    def serialize_auth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("compress")
+    def serialize_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkPqCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputSplunkQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
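Beyond the enum bookkeeping, the substantive addition to both Destinations is the same block of persistent-queue settings, with the defaults visible in the hunks above: strict ordering enabled, no rate limit, error-mode queueing, a 42-event in-memory buffer, and a 30-second backpressure window before the queue engages. The sketch below mirrors just those five fields in a standalone pydantic v2 model to show the snake_case attributes and their camelCase wire aliases; the class name and the printed output are illustrative, not part of the SDK.

# Illustrative mirror of the persistent-queue fields added in 0.2.1rc7.
# Defaults and aliases are copied from the diff above; the model itself is not the SDK's.
from typing import Optional

from pydantic import BaseModel, Field


class PqSettings(BaseModel):
    pq_strict_ordering: Optional[bool] = Field(default=True, alias="pqStrictOrdering")
    pq_rate_per_sec: Optional[float] = Field(default=0, alias="pqRatePerSec")
    pq_mode: Optional[str] = Field(default="error", alias="pqMode")
    pq_max_buffer_size: Optional[float] = Field(default=42, alias="pqMaxBufferSize")
    pq_max_backpressure_sec: Optional[float] = Field(default=30, alias="pqMaxBackpressureSec")


# Populate by alias, dump by alias: the camelCase keys are presumably what the
# control-plane API expects on the wire.
settings = PqSettings(pqMode="backpressure", pqMaxBackpressureSec=60)
print(settings.model_dump(by_alias=True))
# {'pqStrictOrdering': True, 'pqRatePerSec': 0, 'pqMode': 'backpressure',
#  'pqMaxBufferSize': 42, 'pqMaxBackpressureSec': 60.0}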