cribl-control-plane 0.0.44__py3-none-any.whl → 0.0.44a2__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of cribl-control-plane has been flagged as potentially problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/models/__init__.py +3 -3
- cribl_control_plane/models/appmode.py +2 -1
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/hbcriblinfo.py +6 -1
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +47 -20
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +46 -16
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +27 -10
- cribl_control_plane/models/inputmsk.py +53 -18
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +74 -29
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +36 -13
- cribl_control_plane/models/outputclickhouse.py +56 -21
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +77 -32
- cribl_control_plane/models/outputcriblhttp.py +46 -18
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -15
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +54 -21
- cribl_control_plane/models/outputdynatraceotlp.py +56 -23
- cribl_control_plane/models/outputelastic.py +44 -19
- cribl_control_plane/models/outputelasticcloud.py +37 -13
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +99 -34
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +36 -13
- cribl_control_plane/models/outputhumiohec.py +44 -17
- cribl_control_plane/models/outputinfluxdb.py +43 -17
- cribl_control_plane/models/outputkafka.py +74 -28
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +82 -30
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +42 -15
- cribl_control_plane/models/outputopentelemetry.py +68 -27
- cribl_control_plane/models/outputprometheus.py +36 -13
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +51 -19
- cribl_control_plane/models/outputservicenow.py +61 -25
- cribl_control_plane/models/outputsignalfx.py +38 -15
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +36 -13
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +36 -13
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +36 -15
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/RECORD +158 -158
- {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputsns.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputSnsType(str, Enum):
     SNS = "sns"


-class OutputSnsAuthenticationMethod(str, Enum):
+class OutputSnsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -20,14 +23,14 @@ class OutputSnsAuthenticationMethod(str, Enum):
     SECRET = "secret"


-class OutputSnsSignatureVersion(str, Enum):
+class OutputSnsSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing SNS requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputSnsBackpressureBehavior(str, Enum):
+class OutputSnsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -35,21 +38,21 @@ class OutputSnsBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputSnsCompression(str, Enum):
+class OutputSnsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSnsQueueFullBehavior(str, Enum):
+class OutputSnsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSnsMode(str, Enum):
+class OutputSnsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -155,7 +158,10 @@ class OutputSns(BaseModel):
     r"""Maximum number of retries before the output returns an error. Note that not all errors are retryable. The retries use an exponential backoff policy."""

     aws_authentication_method: Annotated[
-
+        Annotated[
+            Optional[OutputSnsAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputSnsAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -171,7 +177,11 @@ class OutputSns(BaseModel):
     r"""SNS service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to SNS-compatible endpoint."""

     signature_version: Annotated[
-
+        Annotated[
+            Optional[OutputSnsSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
     ] = OutputSnsSignatureVersion.V4
     r"""Signature version to use for signing SNS requests"""

@@ -206,7 +216,11 @@ class OutputSns(BaseModel):
     r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputSnsBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
     ] = OutputSnsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

@@ -231,18 +245,26 @@ class OutputSns(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputSnsCompression], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputSnsCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputSnsQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSnsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        OutputSnsMode
-
+    pq_mode: Annotated[
+        Annotated[Optional[OutputSnsMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSnsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputsplunk.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,21 +15,21 @@ class OutputSplunkType(str, Enum):
     SPLUNK = "splunk"


-class OutputSplunkNestedFieldSerialization(str, Enum):
+class OutputSplunkNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to serialize nested fields into index-time fields"""

     JSON = "json"
     NONE = "none"


-class OutputSplunkMinimumTLSVersion(str, Enum):
+class OutputSplunkMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputSplunkMaximumTLSVersion(str, Enum):
+class OutputSplunkMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -86,22 +89,30 @@ class OutputSplunkTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputSplunkMaxS2SVersion(str, Enum):
+class OutputSplunkMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The highest S2S protocol version to advertise during handshake"""

     V3 = "v3"
     V4 = "v4"


-class OutputSplunkBackpressureBehavior(str, Enum):
+class OutputSplunkBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -109,14 +120,14 @@ class OutputSplunkBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputSplunkAuthenticationMethod(str, Enum):
+class OutputSplunkAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputSplunkCompressCompression(str, Enum):
+class OutputSplunkCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     DISABLED = "disabled"
@@ -124,21 +135,21 @@ class OutputSplunkCompressCompression(str, Enum):
     ALWAYS = "always"


-class OutputSplunkPqCompressCompression(str, Enum):
+class OutputSplunkPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSplunkQueueFullBehavior(str, Enum):
+class OutputSplunkQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSplunkMode(str, Enum):
+class OutputSplunkMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -242,7 +253,10 @@ class OutputSplunk(BaseModel):
     r"""The port to connect to on the provided host"""

     nested_fields: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkNestedFieldSerialization],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="nestedFields"),
     ] = OutputSplunkNestedFieldSerialization.NONE
     r"""How to serialize nested fields into index-time fields"""
@@ -278,18 +292,29 @@ class OutputSplunk(BaseModel):
     r"""Use to troubleshoot issues with sending data"""

     max_s2_sversion: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkMaxS2SVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxS2Sversion"),
     ] = OutputSplunkMaxS2SVersion.V3
     r"""The highest S2S protocol version to advertise during handshake"""

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSplunkBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputSplunkAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -300,9 +325,10 @@ class OutputSplunk(BaseModel):
     ] = 1
     r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""

-    compress:
-        OutputSplunkCompressCompression
-
+    compress: Annotated[
+        Optional[OutputSplunkCompressCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputSplunkCompressCompression.DISABLED
     r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

     pq_max_file_size: Annotated[
@@ -319,19 +345,29 @@ class OutputSplunk(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputSplunkPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSplunkQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-
-
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputSplunkMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSplunkMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputsplunkhec.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -23,7 +26,7 @@ class OutputSplunkHecExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputSplunkHecFailedRequestLoggingMode(str, Enum):
+class OutputSplunkHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -31,7 +34,7 @@ class OutputSplunkHecFailedRequestLoggingMode(str, Enum):
     NONE = "none"


-class OutputSplunkHecAuthenticationMethod(str, Enum):
+class OutputSplunkHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -92,7 +95,7 @@ class OutputSplunkHecTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputSplunkHecBackpressureBehavior(str, Enum):
+class OutputSplunkHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -115,21 +118,21 @@ class OutputSplunkHecURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputSplunkHecCompression(str, Enum):
+class OutputSplunkHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSplunkHecQueueFullBehavior(str, Enum):
+class OutputSplunkHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSplunkHecMode(str, Enum):
+class OutputSplunkHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -304,7 +307,10 @@ class OutputSplunkHec(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkHecFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputSplunkHecFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -320,7 +326,11 @@ class OutputSplunkHec(BaseModel):
     r"""Output metrics in multiple-metric format, supported in Splunk 8.0 and above to allow multiple metrics in a single event."""

     auth_type: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkHecAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputSplunkHecAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -337,11 +347,14 @@ class OutputSplunkHec(BaseModel):

     response_honor_retry_after_header: Annotated[
         Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
-    ] =
+    ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkHecBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSplunkHecBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -391,18 +404,28 @@ class OutputSplunkHec(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkHecCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputSplunkHecCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkHecQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSplunkHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-
+        Annotated[
+            Optional[OutputSplunkHecMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputSplunkHecMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
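The pattern repeated throughout these files turns the generated enums into open enums: each `str` enum gains `metaclass=utils.OpenEnumMeta`, and every enum-typed model field is wrapped in a nested `Annotated[..., PlainValidator(validate_open_enum(False))]`, so values the SDK does not yet recognize pass Pydantic validation instead of raising. A minimal, self-contained sketch of that idea follows; `Compression`, `Output`, and the `open_enum` helper are illustrative stand-ins, not the SDK's actual `utils.OpenEnumMeta`/`validate_open_enum` implementation:

```python
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Compression(str, Enum):
    """Illustrative closed enum standing in for e.g. OutputSplunkHecCompression."""

    NONE = "none"
    GZIP = "gzip"


def open_enum(enum_cls):
    """Hypothetical stand-in for cribl_control_plane.utils.validate_open_enum:
    coerce known values to the enum, pass unknown values through untouched."""

    def validate(value):
        try:
            return enum_cls(value)
        except ValueError:
            return value

    return validate


class Output(pydantic.BaseModel):
    # PlainValidator replaces the field's normal enum validation entirely,
    # so whatever validate() returns is stored on the model.
    pq_compress: Annotated[
        Optional[Compression], PlainValidator(open_enum(Compression))
    ] = Compression.NONE


print(Output(pq_compress="gzip").pq_compress)  # Compression.GZIP
print(Output(pq_compress="zstd").pq_compress)  # "zstd" is kept instead of raising
```

Under this sketch, a value such as "zstd" is retained as a plain string rather than rejected, which is the practical effect of the change when API responses introduce enum values newer than the generated client.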