cribl-control-plane 0.0.44a2__py3-none-any.whl → 0.0.46__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +5 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +3 -3
- cribl_control_plane/models/appmode.py +1 -2
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +2 -7
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/hbcriblinfo.py +6 -6
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +20 -47
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +16 -46
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -27
- cribl_control_plane/models/inputmsk.py +18 -53
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +32 -77
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +14 -41
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +28 -74
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +30 -82
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/workertypes.py +1 -2
- {cribl_control_plane-0.0.44a2.dist-info → cribl_control_plane-0.0.46.dist-info}/METADATA +3 -2
- {cribl_control_plane-0.0.44a2.dist-info → cribl_control_plane-0.0.46.dist-info}/RECORD +158 -158
- {cribl_control_plane-0.0.44a2.dist-info → cribl_control_plane-0.0.46.dist-info}/WHEEL +1 -1
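Editor's note: the change repeated across the model files listed above, and visible in the three reconstructed diffs that follow, is the removal of the generated open-enum machinery. Enum classes drop the metaclass=utils.OpenEnumMeta argument and enum-typed fields drop the PlainValidator(validate_open_enum(False)) wrapper, leaving ordinary closed str/Enum types. A minimal sketch of what that means for validation, using hypothetical stand-in names (ExampleSerialization, ExampleModel) and plain pydantic v2 rather than the SDK's own base classes:

from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class ExampleSerialization(str, Enum):
    # Closed enum, mirroring the 0.0.46 form (no OpenEnumMeta metaclass).
    JSON = "json"
    NONE = "none"


class ExampleModel(pydantic.BaseModel):
    # New-style field: a plain Optional enum plus an alias, no PlainValidator wrapper.
    nested_fields: Annotated[
        Optional[ExampleSerialization],
        pydantic.Field(alias="nestedFields"),
    ] = ExampleSerialization.NONE


# Known member values still validate.
print(ExampleModel.model_validate({"nestedFields": "json"}).nested_fields)

# Values outside the enum are rejected by a closed enum; the removed open-enum
# wrapper was the mechanism that tolerated unrecognized values.
try:
    ExampleModel.model_validate({"nestedFields": "xml"})
except pydantic.ValidationError as exc:
    print("rejected:", exc.error_count(), "error")

If a stored configuration depends on enum values outside the generated lists, revalidating it against the 0.0.46 models should surface the difference quickly.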
cribl_control_plane/models/outputsplunk.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,21 +12,21 @@ class OutputSplunkType(str, Enum):
     SPLUNK = "splunk"


-class OutputSplunkNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkNestedFieldSerialization(str, Enum):
     r"""How to serialize nested fields into index-time fields"""

     JSON = "json"
     NONE = "none"


-class OutputSplunkMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputSplunkMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -89,30 +86,22 @@ class OutputSplunkTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputSplunkMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputSplunkMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputSplunkMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputSplunkMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputSplunkMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkMaxS2SVersion(str, Enum):
     r"""The highest S2S protocol version to advertise during handshake"""

     V3 = "v3"
     V4 = "v4"


-class OutputSplunkBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -120,14 +109,14 @@ class OutputSplunkBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputSplunkAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputSplunkCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkCompressCompression(str, Enum):
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     DISABLED = "disabled"
@@ -135,21 +124,21 @@ class OutputSplunkCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     ALWAYS = "always"


-class OutputSplunkPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSplunkQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSplunkMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -253,10 +242,7 @@ class OutputSplunk(BaseModel):
     r"""The port to connect to on the provided host"""

     nested_fields: Annotated[
-        Annotated[
-            Optional[OutputSplunkNestedFieldSerialization],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkNestedFieldSerialization],
         pydantic.Field(alias="nestedFields"),
     ] = OutputSplunkNestedFieldSerialization.NONE
     r"""How to serialize nested fields into index-time fields"""
@@ -292,29 +278,18 @@ class OutputSplunk(BaseModel):
     r"""Use to troubleshoot issues with sending data"""

     max_s2_sversion: Annotated[
-        Annotated[
-            Optional[OutputSplunkMaxS2SVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxS2Sversion"),
+        Optional[OutputSplunkMaxS2SVersion], pydantic.Field(alias="maxS2Sversion")
     ] = OutputSplunkMaxS2SVersion.V3
     r"""The highest S2S protocol version to advertise during handshake"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSplunkBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSplunkAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputSplunkAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputSplunkAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -325,10 +300,9 @@ class OutputSplunk(BaseModel):
     ] = 1
     r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""

-    compress: Annotated[
-        Optional[OutputSplunkCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputSplunkCompressCompression.DISABLED
+    compress: Optional[OutputSplunkCompressCompression] = (
+        OutputSplunkCompressCompression.DISABLED
+    )
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     pq_max_file_size: Annotated[
@@ -345,29 +319,19 @@ class OutputSplunk(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSplunkPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSplunkPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSplunkPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSplunkQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSplunkMode.ERROR
+    pq_mode: Annotated[Optional[OutputSplunkMode], pydantic.Field(alias="pqMode")] = (
+        OutputSplunkMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
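Editor's note: the hunks above also flatten how defaults are declared (compress and pq_mode, for example) without touching the wire-format aliases, so serialization should be unaffected. A small sketch with stand-in names (CompressExample, HostExample), assuming plain pydantic v2 rather than the SDK's base classes:

from enum import Enum
from typing import Optional

import pydantic


class CompressExample(str, Enum):
    DISABLED = "disabled"
    ALWAYS = "always"


class HostExample(pydantic.BaseModel):
    # A field without an alias keeps the plain assignment form used in 0.0.46.
    compress: Optional[CompressExample] = CompressExample.DISABLED


m = HostExample()
print(m.model_dump())             # {'compress': <CompressExample.DISABLED: 'disabled'>}
print(m.model_dump(mode="json"))  # {'compress': 'disabled'}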
cribl_control_plane/models/outputsplunkhec.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -26,7 +23,7 @@ class OutputSplunkHecExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputSplunkHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -34,7 +31,7 @@ class OutputSplunkHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"


-class OutputSplunkHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -95,7 +92,7 @@ class OutputSplunkHecTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputSplunkHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -118,21 +115,21 @@ class OutputSplunkHecURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputSplunkHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSplunkHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSplunkHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -307,10 +304,7 @@ class OutputSplunkHec(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkHecFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputSplunkHecFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -326,11 +320,7 @@ class OutputSplunkHec(BaseModel):
     r"""Output metrics in multiple-metric format, supported in Splunk 8.0 and above to allow multiple metrics in a single event."""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputSplunkHecAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputSplunkHecAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -351,10 +341,7 @@ class OutputSplunkHec(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkHecBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSplunkHecBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -404,28 +391,18 @@ class OutputSplunkHec(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSplunkHecCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSplunkHecCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkHecQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSplunkHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputSplunkHecMode], pydantic.Field(alias="pqMode")
     ] = OutputSplunkHecMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

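Editor's note: for consumers auditing the upgrade, one way to list which fields of a model are enum-backed (and therefore now reject unrecognized strings) is to inspect model_fields. LoggingModeExample and AuditExample below are illustrative stand-ins, not SDK classes; only pydantic v2 is assumed:

from enum import Enum
from typing import Optional, get_args

import pydantic


class LoggingModeExample(str, Enum):
    PAYLOAD = "payload"
    NONE = "none"


class AuditExample(pydantic.BaseModel):
    failed_request_logging_mode: Optional[LoggingModeExample] = LoggingModeExample.NONE
    token: Optional[str] = None


def enum_fields(model: type[pydantic.BaseModel]) -> list[str]:
    # Collect field names whose annotation is an Enum or an Optional/Union of one.
    names = []
    for name, info in model.model_fields.items():
        candidates = [info.annotation, *get_args(info.annotation)]
        if any(isinstance(c, type) and issubclass(c, Enum) for c in candidates):
            names.append(name)
    return names


print(enum_fields(AuditExample))  # ['failed_request_logging_mode']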
cribl_control_plane/models/outputsplunklb.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,21 +12,21 @@ class OutputSplunkLbType(str, Enum):
     SPLUNK_LB = "splunk_lb"


-class OutputSplunkLbNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbNestedFieldSerialization(str, Enum):
     r"""How to serialize nested fields into index-time fields"""

     JSON = "json"
     NONE = "none"


-class OutputSplunkLbMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputSplunkLbMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -89,30 +86,22 @@ class OutputSplunkLbTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputSplunkLbMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputSplunkLbMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputSplunkLbMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbMaxS2SVersion(str, Enum):
     r"""The highest S2S protocol version to advertise during handshake"""

     V3 = "v3"
     V4 = "v4"


-class OutputSplunkLbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -120,14 +109,14 @@ class OutputSplunkLbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputSplunkLbAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputSplunkLbCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbCompressCompression(str, Enum):
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     DISABLED = "disabled"
@@ -135,9 +124,7 @@ class OutputSplunkLbCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     ALWAYS = "always"


-class IndexerDiscoveryConfigsAuthTokenAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class IndexerDiscoveryConfigsAuthTokenAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -151,18 +138,13 @@ class OutputSplunkLbAuthTokenTypedDict(TypedDict):

 class OutputSplunkLbAuthToken(BaseModel):
     auth_type: Annotated[
-        Annotated[
-            Optional[IndexerDiscoveryConfigsAuthTokenAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[IndexerDiscoveryConfigsAuthTokenAuthenticationMethod],
         pydantic.Field(alias="authType"),
     ] = IndexerDiscoveryConfigsAuthTokenAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""


-class IndexerDiscoveryConfigsAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class IndexerDiscoveryConfigsAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -215,10 +197,7 @@ class IndexerDiscoveryConfigs(BaseModel):
     r"""Tokens required to authenticate to cluster manager for indexer discovery"""

     auth_type: Annotated[
-        Annotated[
-            Optional[IndexerDiscoveryConfigsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[IndexerDiscoveryConfigsAuthenticationMethod],
         pydantic.Field(alias="authType"),
     ] = IndexerDiscoveryConfigsAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
@@ -230,7 +209,7 @@ class IndexerDiscoveryConfigs(BaseModel):
     r"""Select or create a stored text secret"""


-class OutputSplunkLbTLS(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbTLS(str, Enum):
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     INHERIT = "inherit"
@@ -257,9 +236,7 @@ class OutputSplunkLbHost(BaseModel):
     port: Optional[float] = 9997
     r"""The port to connect to on the provided host"""

-    tls: Annotated[
-        Optional[OutputSplunkLbTLS], PlainValidator(validate_open_enum(False))
-    ] = OutputSplunkLbTLS.INHERIT
+    tls: Optional[OutputSplunkLbTLS] = OutputSplunkLbTLS.INHERIT
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     servername: Optional[str] = None
@@ -269,21 +246,21 @@ class OutputSplunkLbHost(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputSplunkLbPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSplunkLbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSplunkLbMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -411,10 +388,7 @@ class OutputSplunkLb(BaseModel):
     r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""

     nested_fields: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbNestedFieldSerialization],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbNestedFieldSerialization],
         pydantic.Field(alias="nestedFields"),
     ] = OutputSplunkLbNestedFieldSerialization.NONE
     r"""How to serialize nested fields into index-time fields"""
@@ -450,19 +424,12 @@ class OutputSplunkLb(BaseModel):
     r"""Use to troubleshoot issues with sending data"""

     max_s2_sversion: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbMaxS2SVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxS2Sversion"),
+        Optional[OutputSplunkLbMaxS2SVersion], pydantic.Field(alias="maxS2Sversion")
     ] = OutputSplunkLbMaxS2SVersion.V3
     r"""The highest S2S protocol version to advertise during handshake"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSplunkLbBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -478,11 +445,7 @@ class OutputSplunkLb(BaseModel):
     r"""How long (in milliseconds) each LB endpoint can report blocked before the Destination reports unhealthy, blocking the sender. (Grace period for fluctuations.) Use 0 to disable; max 1 minute."""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputSplunkLbAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputSplunkLbAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -493,10 +456,9 @@ class OutputSplunkLb(BaseModel):
     ] = 1
     r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""

-    compress: Annotated[
-        Optional[OutputSplunkLbCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputSplunkLbCompressCompression.DISABLED
+    compress: Optional[OutputSplunkLbCompressCompression] = (
+        OutputSplunkLbCompressCompression.DISABLED
+    )
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     indexer_discovery_configs: Annotated[
@@ -522,29 +484,20 @@ class OutputSplunkLb(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputSplunkLbPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSplunkLbQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSplunkLbMode.ERROR
+    pq_mode: Annotated[Optional[OutputSplunkLbMode], pydantic.Field(alias="pqMode")] = (
+        OutputSplunkLbMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[