cribl_control_plane-0.0.13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/__init__.py +17 -0
- cribl_control_plane/_hooks/__init__.py +5 -0
- cribl_control_plane/_hooks/clientcredentials.py +211 -0
- cribl_control_plane/_hooks/registration.py +13 -0
- cribl_control_plane/_hooks/sdkhooks.py +81 -0
- cribl_control_plane/_hooks/types.py +112 -0
- cribl_control_plane/_version.py +15 -0
- cribl_control_plane/auth_sdk.py +184 -0
- cribl_control_plane/basesdk.py +358 -0
- cribl_control_plane/errors/__init__.py +60 -0
- cribl_control_plane/errors/apierror.py +38 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
- cribl_control_plane/errors/error.py +24 -0
- cribl_control_plane/errors/healthstatus_error.py +38 -0
- cribl_control_plane/errors/no_response_error.py +13 -0
- cribl_control_plane/errors/responsevalidationerror.py +25 -0
- cribl_control_plane/health.py +166 -0
- cribl_control_plane/httpclient.py +126 -0
- cribl_control_plane/models/__init__.py +7305 -0
- cribl_control_plane/models/addhectokenrequest.py +34 -0
- cribl_control_plane/models/authtoken.py +13 -0
- cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
- cribl_control_plane/models/createinputop.py +24 -0
- cribl_control_plane/models/createoutputop.py +24 -0
- cribl_control_plane/models/createoutputtestbyidop.py +46 -0
- cribl_control_plane/models/criblevent.py +14 -0
- cribl_control_plane/models/deleteinputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getinputbyidop.py +37 -0
- cribl_control_plane/models/getoutputbyidop.py +37 -0
- cribl_control_plane/models/getoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
- cribl_control_plane/models/healthstatus.py +36 -0
- cribl_control_plane/models/input.py +199 -0
- cribl_control_plane/models/inputappscope.py +448 -0
- cribl_control_plane/models/inputazureblob.py +308 -0
- cribl_control_plane/models/inputcollection.py +208 -0
- cribl_control_plane/models/inputconfluentcloud.py +585 -0
- cribl_control_plane/models/inputcribl.py +165 -0
- cribl_control_plane/models/inputcriblhttp.py +341 -0
- cribl_control_plane/models/inputcribllakehttp.py +342 -0
- cribl_control_plane/models/inputcriblmetrics.py +175 -0
- cribl_control_plane/models/inputcribltcp.py +299 -0
- cribl_control_plane/models/inputcrowdstrike.py +410 -0
- cribl_control_plane/models/inputdatadogagent.py +364 -0
- cribl_control_plane/models/inputdatagen.py +180 -0
- cribl_control_plane/models/inputedgeprometheus.py +551 -0
- cribl_control_plane/models/inputelastic.py +494 -0
- cribl_control_plane/models/inputeventhub.py +360 -0
- cribl_control_plane/models/inputexec.py +213 -0
- cribl_control_plane/models/inputfile.py +259 -0
- cribl_control_plane/models/inputfirehose.py +341 -0
- cribl_control_plane/models/inputgooglepubsub.py +247 -0
- cribl_control_plane/models/inputgrafana_union.py +1247 -0
- cribl_control_plane/models/inputhttp.py +403 -0
- cribl_control_plane/models/inputhttpraw.py +407 -0
- cribl_control_plane/models/inputjournalfiles.py +208 -0
- cribl_control_plane/models/inputkafka.py +581 -0
- cribl_control_plane/models/inputkinesis.py +363 -0
- cribl_control_plane/models/inputkubeevents.py +182 -0
- cribl_control_plane/models/inputkubelogs.py +256 -0
- cribl_control_plane/models/inputkubemetrics.py +233 -0
- cribl_control_plane/models/inputloki.py +468 -0
- cribl_control_plane/models/inputmetrics.py +290 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
- cribl_control_plane/models/inputmsk.py +654 -0
- cribl_control_plane/models/inputnetflow.py +224 -0
- cribl_control_plane/models/inputoffice365mgmt.py +384 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
- cribl_control_plane/models/inputoffice365service.py +377 -0
- cribl_control_plane/models/inputopentelemetry.py +516 -0
- cribl_control_plane/models/inputprometheus.py +464 -0
- cribl_control_plane/models/inputprometheusrw.py +470 -0
- cribl_control_plane/models/inputrawudp.py +207 -0
- cribl_control_plane/models/inputs3.py +416 -0
- cribl_control_plane/models/inputs3inventory.py +440 -0
- cribl_control_plane/models/inputsecuritylake.py +425 -0
- cribl_control_plane/models/inputsnmp.py +274 -0
- cribl_control_plane/models/inputsplunk.py +387 -0
- cribl_control_plane/models/inputsplunkhec.py +478 -0
- cribl_control_plane/models/inputsplunksearch.py +537 -0
- cribl_control_plane/models/inputsqs.py +320 -0
- cribl_control_plane/models/inputsyslog_union.py +759 -0
- cribl_control_plane/models/inputsystemmetrics.py +533 -0
- cribl_control_plane/models/inputsystemstate.py +417 -0
- cribl_control_plane/models/inputtcp.py +359 -0
- cribl_control_plane/models/inputtcpjson.py +334 -0
- cribl_control_plane/models/inputwef.py +498 -0
- cribl_control_plane/models/inputwindowsmetrics.py +457 -0
- cribl_control_plane/models/inputwineventlogs.py +222 -0
- cribl_control_plane/models/inputwiz.py +334 -0
- cribl_control_plane/models/inputzscalerhec.py +439 -0
- cribl_control_plane/models/listinputop.py +24 -0
- cribl_control_plane/models/listoutputop.py +24 -0
- cribl_control_plane/models/logininfo.py +16 -0
- cribl_control_plane/models/output.py +229 -0
- cribl_control_plane/models/outputazureblob.py +471 -0
- cribl_control_plane/models/outputazuredataexplorer.py +660 -0
- cribl_control_plane/models/outputazureeventhub.py +321 -0
- cribl_control_plane/models/outputazurelogs.py +386 -0
- cribl_control_plane/models/outputclickhouse.py +650 -0
- cribl_control_plane/models/outputcloudwatch.py +273 -0
- cribl_control_plane/models/outputconfluentcloud.py +591 -0
- cribl_control_plane/models/outputcriblhttp.py +494 -0
- cribl_control_plane/models/outputcribllake.py +396 -0
- cribl_control_plane/models/outputcribltcp.py +387 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
- cribl_control_plane/models/outputdatadog.py +472 -0
- cribl_control_plane/models/outputdataset.py +437 -0
- cribl_control_plane/models/outputdefault.py +55 -0
- cribl_control_plane/models/outputdevnull.py +50 -0
- cribl_control_plane/models/outputdiskspool.py +89 -0
- cribl_control_plane/models/outputdls3.py +560 -0
- cribl_control_plane/models/outputdynatracehttp.py +454 -0
- cribl_control_plane/models/outputdynatraceotlp.py +486 -0
- cribl_control_plane/models/outputelastic.py +494 -0
- cribl_control_plane/models/outputelasticcloud.py +407 -0
- cribl_control_plane/models/outputexabeam.py +297 -0
- cribl_control_plane/models/outputfilesystem.py +357 -0
- cribl_control_plane/models/outputgooglechronicle.py +486 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
- cribl_control_plane/models/outputgooglepubsub.py +274 -0
- cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
- cribl_control_plane/models/outputgraphite.py +225 -0
- cribl_control_plane/models/outputhoneycomb.py +369 -0
- cribl_control_plane/models/outputhumiohec.py +389 -0
- cribl_control_plane/models/outputinfluxdb.py +523 -0
- cribl_control_plane/models/outputkafka.py +581 -0
- cribl_control_plane/models/outputkinesis.py +312 -0
- cribl_control_plane/models/outputloki.py +425 -0
- cribl_control_plane/models/outputminio.py +512 -0
- cribl_control_plane/models/outputmsk.py +654 -0
- cribl_control_plane/models/outputnetflow.py +80 -0
- cribl_control_plane/models/outputnewrelic.py +424 -0
- cribl_control_plane/models/outputnewrelicevents.py +401 -0
- cribl_control_plane/models/outputopentelemetry.py +669 -0
- cribl_control_plane/models/outputprometheus.py +485 -0
- cribl_control_plane/models/outputring.py +121 -0
- cribl_control_plane/models/outputrouter.py +83 -0
- cribl_control_plane/models/outputs3.py +556 -0
- cribl_control_plane/models/outputsamplesresponse.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +505 -0
- cribl_control_plane/models/outputsentinel.py +488 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
- cribl_control_plane/models/outputservicenow.py +543 -0
- cribl_control_plane/models/outputsignalfx.py +369 -0
- cribl_control_plane/models/outputsnmp.py +80 -0
- cribl_control_plane/models/outputsns.py +274 -0
- cribl_control_plane/models/outputsplunk.py +383 -0
- cribl_control_plane/models/outputsplunkhec.py +434 -0
- cribl_control_plane/models/outputsplunklb.py +558 -0
- cribl_control_plane/models/outputsqs.py +328 -0
- cribl_control_plane/models/outputstatsd.py +224 -0
- cribl_control_plane/models/outputstatsdext.py +225 -0
- cribl_control_plane/models/outputsumologic.py +378 -0
- cribl_control_plane/models/outputsyslog.py +415 -0
- cribl_control_plane/models/outputtcpjson.py +413 -0
- cribl_control_plane/models/outputtestrequest.py +15 -0
- cribl_control_plane/models/outputtestresponse.py +29 -0
- cribl_control_plane/models/outputwavefront.py +369 -0
- cribl_control_plane/models/outputwebhook.py +689 -0
- cribl_control_plane/models/outputxsiam.py +415 -0
- cribl_control_plane/models/schemeclientoauth.py +24 -0
- cribl_control_plane/models/security.py +36 -0
- cribl_control_plane/models/updatehectokenrequest.py +31 -0
- cribl_control_plane/models/updateinputbyidop.py +44 -0
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
- cribl_control_plane/models/updateoutputbyidop.py +44 -0
- cribl_control_plane/outputs.py +1615 -0
- cribl_control_plane/py.typed +1 -0
- cribl_control_plane/sdk.py +164 -0
- cribl_control_plane/sdkconfiguration.py +36 -0
- cribl_control_plane/sources.py +1355 -0
- cribl_control_plane/types/__init__.py +21 -0
- cribl_control_plane/types/basemodel.py +39 -0
- cribl_control_plane/utils/__init__.py +187 -0
- cribl_control_plane/utils/annotations.py +55 -0
- cribl_control_plane/utils/datetimes.py +23 -0
- cribl_control_plane/utils/enums.py +74 -0
- cribl_control_plane/utils/eventstreaming.py +238 -0
- cribl_control_plane/utils/forms.py +223 -0
- cribl_control_plane/utils/headers.py +136 -0
- cribl_control_plane/utils/logger.py +27 -0
- cribl_control_plane/utils/metadata.py +118 -0
- cribl_control_plane/utils/queryparams.py +205 -0
- cribl_control_plane/utils/requestbodies.py +66 -0
- cribl_control_plane/utils/retries.py +217 -0
- cribl_control_plane/utils/security.py +207 -0
- cribl_control_plane/utils/serializers.py +249 -0
- cribl_control_plane/utils/unmarshal_json_response.py +24 -0
- cribl_control_plane/utils/url.py +155 -0
- cribl_control_plane/utils/values.py +137 -0
- cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
- cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
- cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/outputsplunklb.py

@@ -0,0 +1,558 @@

"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from cribl_control_plane import utils
from cribl_control_plane.types import BaseModel
from cribl_control_plane.utils import validate_open_enum
from enum import Enum
import pydantic
from pydantic.functional_validators import PlainValidator
from typing import List, Optional
from typing_extensions import Annotated, NotRequired, TypedDict


class OutputSplunkLbType(str, Enum, metaclass=utils.OpenEnumMeta):
    SPLUNK_LB = "splunk_lb"


class OutputSplunkLbNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to serialize nested fields into index-time fields"""

    JSON = "json"
    NONE = "none"


class OutputSplunkLbMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
    TL_SV1 = "TLSv1"
    TL_SV1_1 = "TLSv1.1"
    TL_SV1_2 = "TLSv1.2"
    TL_SV1_3 = "TLSv1.3"


class OutputSplunkLbMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
    TL_SV1 = "TLSv1"
    TL_SV1_1 = "TLSv1.1"
    TL_SV1_2 = "TLSv1.2"
    TL_SV1_3 = "TLSv1.3"


class OutputSplunkLbTLSSettingsClientSideTypedDict(TypedDict):
    disabled: NotRequired[bool]
    reject_unauthorized: NotRequired[bool]
    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another
    trusted CA (such as the system's). Defaults to Enabled. Overrides the toggle from Advanced Settings, when also present.
    """
    servername: NotRequired[str]
    r"""Server name for the SNI (Server Name Indication) TLS extension. It must be a host name, and not an IP address."""
    certificate_name: NotRequired[str]
    r"""The name of the predefined certificate"""
    ca_path: NotRequired[str]
    r"""Path on client in which to find CA certificates to verify the server's cert. PEM format. Can reference $ENV_VARS."""
    priv_key_path: NotRequired[str]
    r"""Path on client in which to find the private key to use. PEM format. Can reference $ENV_VARS."""
    cert_path: NotRequired[str]
    r"""Path on client in which to find certificates to use. PEM format. Can reference $ENV_VARS."""
    passphrase: NotRequired[str]
    r"""Passphrase to use to decrypt private key"""
    min_version: NotRequired[OutputSplunkLbMinimumTLSVersion]
    max_version: NotRequired[OutputSplunkLbMaximumTLSVersion]


class OutputSplunkLbTLSSettingsClientSide(BaseModel):
    disabled: Optional[bool] = True

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = True
    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another
    trusted CA (such as the system's). Defaults to Enabled. Overrides the toggle from Advanced Settings, when also present.
    """

    servername: Optional[str] = None
    r"""Server name for the SNI (Server Name Indication) TLS extension. It must be a host name, and not an IP address."""

    certificate_name: Annotated[
        Optional[str], pydantic.Field(alias="certificateName")
    ] = None
    r"""The name of the predefined certificate"""

    ca_path: Annotated[Optional[str], pydantic.Field(alias="caPath")] = None
    r"""Path on client in which to find CA certificates to verify the server's cert. PEM format. Can reference $ENV_VARS."""

    priv_key_path: Annotated[Optional[str], pydantic.Field(alias="privKeyPath")] = None
    r"""Path on client in which to find the private key to use. PEM format. Can reference $ENV_VARS."""

    cert_path: Annotated[Optional[str], pydantic.Field(alias="certPath")] = None
    r"""Path on client in which to find certificates to use. PEM format. Can reference $ENV_VARS."""

    passphrase: Optional[str] = None
    r"""Passphrase to use to decrypt private key"""

    min_version: Annotated[
        Annotated[
            Optional[OutputSplunkLbMinimumTLSVersion],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="minVersion"),
    ] = None

    max_version: Annotated[
        Annotated[
            Optional[OutputSplunkLbMaximumTLSVersion],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="maxVersion"),
    ] = None


class OutputSplunkLbMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""The highest S2S protocol version to advertise during handshake"""

    V3 = "v3"
    V4 = "v4"


class OutputSplunkLbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when all receivers are exerting backpressure"""

    BLOCK = "block"
    DROP = "drop"
    QUEUE = "queue"


class OutputSplunkLbAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

    MANUAL = "manual"
    SECRET = "secret"


class OutputSplunkLbCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

    DISABLED = "disabled"
    AUTO = "auto"
    ALWAYS = "always"


class IndexerDiscoveryConfigsAuthTokenAuthenticationMethod(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

    MANUAL = "manual"
    SECRET = "secret"


class OutputSplunkLbAuthTokenTypedDict(TypedDict):
    auth_type: NotRequired[IndexerDiscoveryConfigsAuthTokenAuthenticationMethod]
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""


class OutputSplunkLbAuthToken(BaseModel):
    auth_type: Annotated[
        Annotated[
            Optional[IndexerDiscoveryConfigsAuthTokenAuthenticationMethod],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="authType"),
    ] = IndexerDiscoveryConfigsAuthTokenAuthenticationMethod.MANUAL
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""


class IndexerDiscoveryConfigsAuthenticationMethod(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

    MANUAL = "manual"
    SECRET = "secret"


class IndexerDiscoveryConfigsTypedDict(TypedDict):
    r"""List of configurations to set up indexer discovery in Splunk Indexer clustering environment."""

    master_uri: str
    r"""Full URI of Splunk cluster manager (scheme://host:port). Example: https://managerAddress:8089"""
    site: NotRequired[str]
    r"""Clustering site of the indexers from where indexers need to be discovered. In case of single site cluster, it defaults to 'default' site."""
    refresh_interval_sec: NotRequired[float]
    r"""Time interval, in seconds, between two consecutive indexer list fetches from cluster manager"""
    reject_unauthorized: NotRequired[bool]
    r"""During indexer discovery, reject cluster manager certificates that are not authorized by the system's CA. Disable to allow untrusted (for example, self-signed) certificates."""
    auth_tokens: NotRequired[List[OutputSplunkLbAuthTokenTypedDict]]
    r"""Tokens required to authenticate to cluster manager for indexer discovery"""
    auth_type: NotRequired[IndexerDiscoveryConfigsAuthenticationMethod]
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
    auth_token: NotRequired[str]
    r"""Shared secret to be provided by any client (in authToken header field). If empty, unauthorized access is permitted."""
    text_secret: NotRequired[str]
    r"""Select or create a stored text secret"""


class IndexerDiscoveryConfigs(BaseModel):
    r"""List of configurations to set up indexer discovery in Splunk Indexer clustering environment."""

    master_uri: Annotated[str, pydantic.Field(alias="masterUri")]
    r"""Full URI of Splunk cluster manager (scheme://host:port). Example: https://managerAddress:8089"""

    site: Optional[str] = "default"
    r"""Clustering site of the indexers from where indexers need to be discovered. In case of single site cluster, it defaults to 'default' site."""

    refresh_interval_sec: Annotated[
        Optional[float], pydantic.Field(alias="refreshIntervalSec")
    ] = 300
    r"""Time interval, in seconds, between two consecutive indexer list fetches from cluster manager"""

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = False
    r"""During indexer discovery, reject cluster manager certificates that are not authorized by the system's CA. Disable to allow untrusted (for example, self-signed) certificates."""

    auth_tokens: Annotated[
        Optional[List[OutputSplunkLbAuthToken]], pydantic.Field(alias="authTokens")
    ] = None
    r"""Tokens required to authenticate to cluster manager for indexer discovery"""

    auth_type: Annotated[
        Annotated[
            Optional[IndexerDiscoveryConfigsAuthenticationMethod],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="authType"),
    ] = IndexerDiscoveryConfigsAuthenticationMethod.MANUAL
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

    auth_token: Annotated[Optional[str], pydantic.Field(alias="authToken")] = ""
    r"""Shared secret to be provided by any client (in authToken header field). If empty, unauthorized access is permitted."""

    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
    r"""Select or create a stored text secret"""


class OutputSplunkLbTLS(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Whether to inherit TLS configs from group setting or disable TLS"""

    INHERIT = "inherit"
    OFF = "off"


class OutputSplunkLbHostTypedDict(TypedDict):
    host: str
    r"""The hostname of the receiver"""
    port: NotRequired[float]
    r"""The port to connect to on the provided host"""
    tls: NotRequired[OutputSplunkLbTLS]
    r"""Whether to inherit TLS configs from group setting or disable TLS"""
    servername: NotRequired[str]
    r"""Servername to use if establishing a TLS connection. If not specified, defaults to connection host (if not an IP); otherwise, uses the global TLS settings."""
    weight: NotRequired[float]
    r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


class OutputSplunkLbHost(BaseModel):
    host: str
    r"""The hostname of the receiver"""

    port: Optional[float] = 9997
    r"""The port to connect to on the provided host"""

    tls: Annotated[
        Optional[OutputSplunkLbTLS], PlainValidator(validate_open_enum(False))
    ] = OutputSplunkLbTLS.INHERIT
    r"""Whether to inherit TLS configs from group setting or disable TLS"""

    servername: Optional[str] = None
    r"""Servername to use if establishing a TLS connection. If not specified, defaults to connection host (if not an IP); otherwise, uses the global TLS settings."""

    weight: Optional[float] = 1
    r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


class OutputSplunkLbPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Codec to use to compress the persisted data"""

    NONE = "none"
    GZIP = "gzip"


class OutputSplunkLbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    BLOCK = "block"
    DROP = "drop"


class OutputSplunkLbMode(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class OutputSplunkLbPqControlsTypedDict(TypedDict):
    pass


class OutputSplunkLbPqControls(BaseModel):
    pass


class OutputSplunkLbTypedDict(TypedDict):
    type: OutputSplunkLbType
    hosts: List[OutputSplunkLbHostTypedDict]
    r"""Set of Splunk indexers to load-balance data to."""
    id: NotRequired[str]
    r"""Unique ID for this output"""
    pipeline: NotRequired[str]
    r"""Pipeline to process data before sending out to this output"""
    system_fields: NotRequired[List[str]]
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
    environment: NotRequired[str]
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
    streamtags: NotRequired[List[str]]
    r"""Tags for filtering and grouping in @{product}"""
    dns_resolve_period_sec: NotRequired[float]
    r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
    load_balance_stats_period_sec: NotRequired[float]
    r"""How far back in time to keep traffic stats for load balancing purposes"""
    max_concurrent_senders: NotRequired[float]
    r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
    nested_fields: NotRequired[OutputSplunkLbNestedFieldSerialization]
    r"""How to serialize nested fields into index-time fields"""
    throttle_rate_per_sec: NotRequired[str]
    r"""Rate (in bytes per second) to throttle while writing to an output. Accepts values with multiple-byte units, such as KB, MB, and GB. (Example: 42 MB) Default value of 0 specifies no throttling."""
    connection_timeout: NotRequired[float]
    r"""Amount of time (milliseconds) to wait for the connection to establish before retrying"""
    write_timeout: NotRequired[float]
    r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""
    tls: NotRequired[OutputSplunkLbTLSSettingsClientSideTypedDict]
    enable_multi_metrics: NotRequired[bool]
    r"""Output metrics in multiple-metric format in a single event. Supported in Splunk 8.0 and above."""
    enable_ack: NotRequired[bool]
    r"""Check if indexer is shutting down and stop sending data. This helps minimize data loss during shutdown."""
    log_failed_requests: NotRequired[bool]
    r"""Use to troubleshoot issues with sending data"""
    max_s2_sversion: NotRequired[OutputSplunkLbMaxS2SVersion]
    r"""The highest S2S protocol version to advertise during handshake"""
    on_backpressure: NotRequired[OutputSplunkLbBackpressureBehavior]
    r"""How to handle events when all receivers are exerting backpressure"""
    indexer_discovery: NotRequired[bool]
    r"""Automatically discover indexers in indexer clustering environment."""
    sender_unhealthy_time_allowance: NotRequired[float]
    r"""How long (in milliseconds) each LB endpoint can report blocked before the Destination reports unhealthy, blocking the sender. (Grace period for fluctuations.) Use 0 to disable; max 1 minute."""
    auth_type: NotRequired[OutputSplunkLbAuthenticationMethod]
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
    description: NotRequired[str]
    max_failed_health_checks: NotRequired[float]
    r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""
    compress: NotRequired[OutputSplunkLbCompressCompression]
    r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""
    indexer_discovery_configs: NotRequired[IndexerDiscoveryConfigsTypedDict]
    r"""List of configurations to set up indexer discovery in Splunk Indexer clustering environment."""
    exclude_self: NotRequired[bool]
    r"""Exclude all IPs of the current host from the list of any resolved hostnames"""
    pq_max_file_size: NotRequired[str]
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
    pq_max_size: NotRequired[str]
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
    pq_path: NotRequired[str]
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
    pq_compress: NotRequired[OutputSplunkLbPqCompressCompression]
    r"""Codec to use to compress the persisted data"""
    pq_on_backpressure: NotRequired[OutputSplunkLbQueueFullBehavior]
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
    pq_mode: NotRequired[OutputSplunkLbMode]
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
    pq_controls: NotRequired[OutputSplunkLbPqControlsTypedDict]
    auth_token: NotRequired[str]
    r"""Shared secret token to use when establishing a connection to a Splunk indexer."""
    text_secret: NotRequired[str]
    r"""Select or create a stored text secret"""


class OutputSplunkLb(BaseModel):
    type: Annotated[OutputSplunkLbType, PlainValidator(validate_open_enum(False))]

    hosts: List[OutputSplunkLbHost]
    r"""Set of Splunk indexers to load-balance data to."""

    id: Optional[str] = None
    r"""Unique ID for this output"""

    pipeline: Optional[str] = None
    r"""Pipeline to process data before sending out to this output"""

    system_fields: Annotated[
        Optional[List[str]], pydantic.Field(alias="systemFields")
    ] = None
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""

    environment: Optional[str] = None
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""

    streamtags: Optional[List[str]] = None
    r"""Tags for filtering and grouping in @{product}"""

    dns_resolve_period_sec: Annotated[
        Optional[float], pydantic.Field(alias="dnsResolvePeriodSec")
    ] = 600
    r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""

    load_balance_stats_period_sec: Annotated[
        Optional[float], pydantic.Field(alias="loadBalanceStatsPeriodSec")
    ] = 300
    r"""How far back in time to keep traffic stats for load balancing purposes"""

    max_concurrent_senders: Annotated[
        Optional[float], pydantic.Field(alias="maxConcurrentSenders")
    ] = 0
    r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""

    nested_fields: Annotated[
        Annotated[
            Optional[OutputSplunkLbNestedFieldSerialization],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="nestedFields"),
    ] = OutputSplunkLbNestedFieldSerialization.NONE
    r"""How to serialize nested fields into index-time fields"""

    throttle_rate_per_sec: Annotated[
        Optional[str], pydantic.Field(alias="throttleRatePerSec")
    ] = "0"
    r"""Rate (in bytes per second) to throttle while writing to an output. Accepts values with multiple-byte units, such as KB, MB, and GB. (Example: 42 MB) Default value of 0 specifies no throttling."""

    connection_timeout: Annotated[
        Optional[float], pydantic.Field(alias="connectionTimeout")
    ] = 10000
    r"""Amount of time (milliseconds) to wait for the connection to establish before retrying"""

    write_timeout: Annotated[Optional[float], pydantic.Field(alias="writeTimeout")] = (
        60000
    )
    r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""

    tls: Optional[OutputSplunkLbTLSSettingsClientSide] = None

    enable_multi_metrics: Annotated[
        Optional[bool], pydantic.Field(alias="enableMultiMetrics")
    ] = False
    r"""Output metrics in multiple-metric format in a single event. Supported in Splunk 8.0 and above."""

    enable_ack: Annotated[Optional[bool], pydantic.Field(alias="enableACK")] = True
    r"""Check if indexer is shutting down and stop sending data. This helps minimize data loss during shutdown."""

    log_failed_requests: Annotated[
        Optional[bool], pydantic.Field(alias="logFailedRequests")
    ] = False
    r"""Use to troubleshoot issues with sending data"""

    max_s2_sversion: Annotated[
        Annotated[
            Optional[OutputSplunkLbMaxS2SVersion],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="maxS2Sversion"),
    ] = OutputSplunkLbMaxS2SVersion.V3
    r"""The highest S2S protocol version to advertise during handshake"""

    on_backpressure: Annotated[
        Annotated[
            Optional[OutputSplunkLbBackpressureBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="onBackpressure"),
    ] = OutputSplunkLbBackpressureBehavior.BLOCK
    r"""How to handle events when all receivers are exerting backpressure"""

    indexer_discovery: Annotated[
        Optional[bool], pydantic.Field(alias="indexerDiscovery")
    ] = False
    r"""Automatically discover indexers in indexer clustering environment."""

    sender_unhealthy_time_allowance: Annotated[
        Optional[float], pydantic.Field(alias="senderUnhealthyTimeAllowance")
    ] = 100
    r"""How long (in milliseconds) each LB endpoint can report blocked before the Destination reports unhealthy, blocking the sender. (Grace period for fluctuations.) Use 0 to disable; max 1 minute."""

    auth_type: Annotated[
        Annotated[
            Optional[OutputSplunkLbAuthenticationMethod],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="authType"),
    ] = OutputSplunkLbAuthenticationMethod.MANUAL
    r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

    description: Optional[str] = None

    max_failed_health_checks: Annotated[
        Optional[float], pydantic.Field(alias="maxFailedHealthChecks")
    ] = 1
    r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""

    compress: Annotated[
        Optional[OutputSplunkLbCompressCompression],
        PlainValidator(validate_open_enum(False)),
    ] = OutputSplunkLbCompressCompression.DISABLED
    r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

    indexer_discovery_configs: Annotated[
        Optional[IndexerDiscoveryConfigs],
        pydantic.Field(alias="indexerDiscoveryConfigs"),
    ] = None
    r"""List of configurations to set up indexer discovery in Splunk Indexer clustering environment."""

    exclude_self: Annotated[Optional[bool], pydantic.Field(alias="excludeSelf")] = False
    r"""Exclude all IPs of the current host from the list of any resolved hostnames"""

    pq_max_file_size: Annotated[
        Optional[str], pydantic.Field(alias="pqMaxFileSize")
    ] = "1 MB"
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""

    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""

    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
        "$CRIBL_HOME/state/queues"
    )
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

    pq_compress: Annotated[
        Annotated[
            Optional[OutputSplunkLbPqCompressCompression],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqCompress"),
    ] = OutputSplunkLbPqCompressCompression.NONE
    r"""Codec to use to compress the persisted data"""

    pq_on_backpressure: Annotated[
        Annotated[
            Optional[OutputSplunkLbQueueFullBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqOnBackpressure"),
    ] = OutputSplunkLbQueueFullBehavior.BLOCK
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    pq_mode: Annotated[
        Annotated[
            Optional[OutputSplunkLbMode], PlainValidator(validate_open_enum(False))
        ],
        pydantic.Field(alias="pqMode"),
    ] = OutputSplunkLbMode.ERROR
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    pq_controls: Annotated[
        Optional[OutputSplunkLbPqControls], pydantic.Field(alias="pqControls")
    ] = None

    auth_token: Annotated[Optional[str], pydantic.Field(alias="authToken")] = ""
    r"""Shared secret token to use when establishing a connection to a Splunk indexer."""

    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
    r"""Select or create a stored text secret"""
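
The file above is a generated Pydantic model for the Splunk Load Balanced destination, with camelCase wire names mapped to snake_case fields via pydantic.Field aliases. As a rough illustration only (not taken from the package's documentation), constructing and serializing it might look like the sketch below; it assumes these classes are re-exported from cribl_control_plane.models (the package ships a large models/__init__.py) and that the package's BaseModel behaves like a standard Pydantic v2 model, so model_dump(by_alias=True) emits the aliased field names.

    # Hypothetical usage sketch; class names come from the generated module shown
    # above, the import path from cribl_control_plane.models is assumed.
    from cribl_control_plane.models import (
        OutputSplunkLb,
        OutputSplunkLbHost,
        OutputSplunkLbType,
    )

    out = OutputSplunkLb(
        id="splunk-lb-out",
        type=OutputSplunkLbType.SPLUNK_LB,
        hosts=[
            # Only `host` is required; port defaults to 9997 and weight to 1.
            OutputSplunkLbHost(host="idx1.example.com", port=9997, weight=2),
            OutputSplunkLbHost(host="idx2.example.com"),
        ],
    )

    # Assuming Pydantic v2 semantics, by_alias=True produces the camelCase keys
    # (e.g. "maxS2Sversion", "pqMaxSize") defined by the aliases in the model.
    print(out.model_dump(by_alias=True, exclude_none=True))

Unspecified fields fall back to the defaults visible in the model (for example, on_backpressure = "block" and pq_mode = "error"), so the serialized payload only needs to carry the settings you override.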