cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4124 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputsplunk.py
@@ -1,35 +1,32 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputSplunkType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkType(str, Enum):
     SPLUNK = "splunk"


-class OutputSplunkNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkNestedFieldSerialization(str, Enum):
     r"""How to serialize nested fields into index-time fields"""

     JSON = "json"
     NONE = "none"


-class OutputSplunkMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputSplunkMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -89,30 +86,22 @@ class OutputSplunkTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputSplunkMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputSplunkMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputSplunkMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputSplunkMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputSplunkMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkMaxS2SVersion(str, Enum):
     r"""The highest S2S protocol version to advertise during handshake"""

     V3 = "v3"
     V4 = "v4"


-class OutputSplunkBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -120,14 +109,14 @@ class OutputSplunkBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputSplunkAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputSplunkCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkCompressCompression(str, Enum):
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     DISABLED = "disabled"
@@ -135,21 +124,21 @@ class OutputSplunkCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     ALWAYS = "always"


-class OutputSplunkPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSplunkQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSplunkMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -233,9 +222,7 @@ class OutputSplunk(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputSplunkType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputSplunkType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -255,10 +242,7 @@ class OutputSplunk(BaseModel):
     r"""The port to connect to on the provided host"""

     nested_fields: Annotated[
-        Annotated[
-            Optional[OutputSplunkNestedFieldSerialization],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkNestedFieldSerialization],
         pydantic.Field(alias="nestedFields"),
     ] = OutputSplunkNestedFieldSerialization.NONE
     r"""How to serialize nested fields into index-time fields"""
@@ -294,29 +278,18 @@ class OutputSplunk(BaseModel):
     r"""Use to troubleshoot issues with sending data"""

     max_s2_sversion: Annotated[
-        Annotated[
-            Optional[OutputSplunkMaxS2SVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxS2Sversion"),
+        Optional[OutputSplunkMaxS2SVersion], pydantic.Field(alias="maxS2Sversion")
     ] = OutputSplunkMaxS2SVersion.V3
     r"""The highest S2S protocol version to advertise during handshake"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSplunkBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSplunkAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputSplunkAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputSplunkAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -327,10 +300,9 @@ class OutputSplunk(BaseModel):
     ] = 1
     r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""

-    compress: Annotated[
-        Optional[OutputSplunkCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputSplunkCompressCompression.DISABLED
+    compress: Optional[OutputSplunkCompressCompression] = (
+        OutputSplunkCompressCompression.DISABLED
+    )
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     pq_max_file_size: Annotated[
@@ -347,29 +319,19 @@ class OutputSplunk(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSplunkPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSplunkPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSplunkPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSplunkQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSplunkMode.ERROR
+    pq_mode: Annotated[Optional[OutputSplunkMode], pydantic.Field(alias="pqMode")] = (
+        OutputSplunkMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
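The hunks above drop Speakeasy's open-enum machinery (the utils.OpenEnumMeta metaclass and the PlainValidator(validate_open_enum(False)) annotations) in favor of plain closed str/Enum classes; the same rewrite appears in the splunk_hec and splunk_lb modules below. A minimal sketch of the resulting validation behavior, assuming pydantic v2 and using a hypothetical stand-in model rather than the generated OutputSplunk (the removed 0.0.16 open-enum validators are not re-implemented here):

from enum import Enum
from typing import Optional

import pydantic


class OutputSplunkCompressCompression(str, Enum):
    # Same members as the 0.0.17 enum in outputsplunk.py
    DISABLED = "disabled"
    ALWAYS = "always"


class CompressSettings(pydantic.BaseModel):
    # Hypothetical stand-in; the real OutputSplunk model has many more fields.
    compress: Optional[OutputSplunkCompressCompression] = (
        OutputSplunkCompressCompression.DISABLED
    )


CompressSettings(compress="always")  # declared member: accepted

try:
    CompressSettings(compress="zstd")  # value outside the declared members: rejected
except pydantic.ValidationError as exc:
    print(exc)

The removed validate_open_enum validator appears to have existed to tolerate values outside the declared members; with 0.0.17's closed enums such values raise a pydantic ValidationError, which callers that round-trip configurations containing newer server-side values may need to handle.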
cribl_control_plane/models/outputsplunkhec.py
@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputSplunkHecType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecType(str, Enum):
     SPLUNK_HEC = "splunk_hec"


@@ -26,7 +23,7 @@ class OutputSplunkHecExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputSplunkHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -34,7 +31,7 @@ class OutputSplunkHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"


-class OutputSplunkHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -95,7 +92,7 @@ class OutputSplunkHecTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputSplunkHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -118,21 +115,21 @@ class OutputSplunkHecURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputSplunkHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSplunkHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSplunkHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkHecMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -237,7 +234,7 @@ class OutputSplunkHec(BaseModel):
     id: str
     r"""Unique ID for this output"""

-    type: Annotated[OutputSplunkHecType, PlainValidator(validate_open_enum(False))]
+    type: OutputSplunkHecType

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -307,10 +304,7 @@ class OutputSplunkHec(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkHecFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputSplunkHecFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -326,11 +320,7 @@ class OutputSplunkHec(BaseModel):
     r"""Output metrics in multiple-metric format, supported in Splunk 8.0 and above to allow multiple metrics in a single event."""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputSplunkHecAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputSplunkHecAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -351,10 +341,7 @@ class OutputSplunkHec(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkHecBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSplunkHecBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -404,28 +391,18 @@ class OutputSplunkHec(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSplunkHecCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSplunkHecCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkHecQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSplunkHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkHecMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputSplunkHecMode], pydantic.Field(alias="pqMode")
     ] = OutputSplunkHecMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

cribl_control_plane/models/outputsplunklb.py
@@ -1,35 +1,32 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputSplunkLbType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbType(str, Enum):
     SPLUNK_LB = "splunk_lb"


-class OutputSplunkLbNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbNestedFieldSerialization(str, Enum):
     r"""How to serialize nested fields into index-time fields"""

     JSON = "json"
     NONE = "none"


-class OutputSplunkLbMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputSplunkLbMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -89,30 +86,22 @@ class OutputSplunkLbTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputSplunkLbMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputSplunkLbMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputSplunkLbMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbMaxS2SVersion(str, Enum):
     r"""The highest S2S protocol version to advertise during handshake"""

     V3 = "v3"
     V4 = "v4"


-class OutputSplunkLbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -120,14 +109,14 @@ class OutputSplunkLbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputSplunkLbAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputSplunkLbCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbCompressCompression(str, Enum):
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     DISABLED = "disabled"
@@ -135,9 +124,7 @@ class OutputSplunkLbCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     ALWAYS = "always"


-class IndexerDiscoveryConfigsAuthTokenAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputSplunkLbAuthTokenAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -145,24 +132,19 @@ class IndexerDiscoveryConfigsAuthTokenAuthenticationMethod(


 class OutputSplunkLbAuthTokenTypedDict(TypedDict):
-    auth_type: NotRequired[IndexerDiscoveryConfigsAuthTokenAuthenticationMethod]
+    auth_type: NotRequired[OutputSplunkLbAuthTokenAuthenticationMethod]
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""


 class OutputSplunkLbAuthToken(BaseModel):
     auth_type: Annotated[
-        Annotated[
-            Optional[IndexerDiscoveryConfigsAuthTokenAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbAuthTokenAuthenticationMethod],
         pydantic.Field(alias="authType"),
-    ] = IndexerDiscoveryConfigsAuthTokenAuthenticationMethod.MANUAL
+    ] = OutputSplunkLbAuthTokenAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""


-class IndexerDiscoveryConfigsAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputSplunkLbIndexerDiscoveryConfigsAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -182,7 +164,7 @@ class IndexerDiscoveryConfigsTypedDict(TypedDict):
     r"""During indexer discovery, reject cluster manager certificates that are not authorized by the system's CA. Disable to allow untrusted (for example, self-signed) certificates."""
     auth_tokens: NotRequired[List[OutputSplunkLbAuthTokenTypedDict]]
     r"""Tokens required to authenticate to cluster manager for indexer discovery"""
-    auth_type: NotRequired[IndexerDiscoveryConfigsAuthenticationMethod]
+    auth_type: NotRequired[OutputSplunkLbIndexerDiscoveryConfigsAuthenticationMethod]
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
     auth_token: NotRequired[str]
     r"""Shared secret to be provided by any client (in authToken header field). If empty, unauthorized access is permitted."""
@@ -215,12 +197,9 @@ class IndexerDiscoveryConfigs(BaseModel):
     r"""Tokens required to authenticate to cluster manager for indexer discovery"""

     auth_type: Annotated[
-        Annotated[
-            Optional[IndexerDiscoveryConfigsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbIndexerDiscoveryConfigsAuthenticationMethod],
         pydantic.Field(alias="authType"),
-    ] = IndexerDiscoveryConfigsAuthenticationMethod.MANUAL
+    ] = OutputSplunkLbIndexerDiscoveryConfigsAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     auth_token: Annotated[Optional[str], pydantic.Field(alias="authToken")] = ""
@@ -230,7 +209,7 @@ class IndexerDiscoveryConfigs(BaseModel):
     r"""Select or create a stored text secret"""


-class OutputSplunkLbTLS(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbTLS(str, Enum):
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     INHERIT = "inherit"
@@ -257,9 +236,7 @@ class OutputSplunkLbHost(BaseModel):
     port: Optional[float] = 9997
     r"""The port to connect to on the provided host"""

-    tls: Annotated[
-        Optional[OutputSplunkLbTLS], PlainValidator(validate_open_enum(False))
-    ] = OutputSplunkLbTLS.INHERIT
+    tls: Optional[OutputSplunkLbTLS] = OutputSplunkLbTLS.INHERIT
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     servername: Optional[str] = None
@@ -269,21 +246,21 @@ class OutputSplunkLbHost(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputSplunkLbPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSplunkLbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSplunkLbMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSplunkLbMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -373,7 +350,7 @@ class OutputSplunkLbTypedDict(TypedDict):


 class OutputSplunkLb(BaseModel):
-    type: Annotated[OutputSplunkLbType, PlainValidator(validate_open_enum(False))]
+    type: OutputSplunkLbType

     hosts: List[OutputSplunkLbHost]
     r"""Set of Splunk indexers to load-balance data to."""
@@ -411,10 +388,7 @@ class OutputSplunkLb(BaseModel):
     r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""

     nested_fields: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbNestedFieldSerialization],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbNestedFieldSerialization],
         pydantic.Field(alias="nestedFields"),
     ] = OutputSplunkLbNestedFieldSerialization.NONE
     r"""How to serialize nested fields into index-time fields"""
@@ -450,19 +424,12 @@ class OutputSplunkLb(BaseModel):
     r"""Use to troubleshoot issues with sending data"""

     max_s2_sversion: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbMaxS2SVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxS2Sversion"),
+        Optional[OutputSplunkLbMaxS2SVersion], pydantic.Field(alias="maxS2Sversion")
     ] = OutputSplunkLbMaxS2SVersion.V3
     r"""The highest S2S protocol version to advertise during handshake"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSplunkLbBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -478,11 +445,7 @@ class OutputSplunkLb(BaseModel):
     r"""How long (in milliseconds) each LB endpoint can report blocked before the Destination reports unhealthy, blocking the sender. (Grace period for fluctuations.) Use 0 to disable; max 1 minute."""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputSplunkLbAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputSplunkLbAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -493,10 +456,9 @@ class OutputSplunkLb(BaseModel):
     ] = 1
     r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""

-    compress: Annotated[
-        Optional[OutputSplunkLbCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputSplunkLbCompressCompression.DISABLED
+    compress: Optional[OutputSplunkLbCompressCompression] = (
+        OutputSplunkLbCompressCompression.DISABLED
+    )
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     indexer_discovery_configs: Annotated[
@@ -522,29 +484,20 @@ class OutputSplunkLb(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputSplunkLbPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSplunkLbQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSplunkLbQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSplunkLbMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSplunkLbMode.ERROR
+    pq_mode: Annotated[Optional[OutputSplunkLbMode], pydantic.Field(alias="pqMode")] = (
+        OutputSplunkLbMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[