cribl-control-plane 0.0.49__py3-none-any.whl → 0.1.0a1__py3-none-any.whl
This diff compares the contents of these two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -6
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/health.py +6 -2
- cribl_control_plane/models/__init__.py +18 -3
- cribl_control_plane/models/appmode.py +2 -1
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/getversionshowop.py +6 -5
- cribl_control_plane/models/gitshowresult.py +19 -0
- cribl_control_plane/models/hbcriblinfo.py +6 -1
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +47 -20
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +46 -16
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +53 -18
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +77 -32
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +74 -28
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +82 -30
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0a1.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0a1.dist-info}/RECORD +166 -163
- {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0a1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputsyslog.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,14 +15,14 @@ class OutputSyslogType(str, Enum):
     SYSLOG = "syslog"


-class OutputSyslogProtocol(str, Enum):
+class OutputSyslogProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The network protocol to use for sending out syslog messages"""

     TCP = "tcp"
     UDP = "udp"


-class Facility(int, Enum):
+class Facility(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for message facility. Will be overwritten by value of __facility if set. Defaults to user."""

     ZERO = 0
@@ -46,7 +49,7 @@ class Facility(int, Enum):
     TWENTY_ONE = 21


-class OutputSyslogSeverity(int, Enum):
+class OutputSyslogSeverity(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for message severity. Will be overwritten by value of __severity if set. Defaults to notice."""

     ZERO = 0
@@ -59,28 +62,28 @@ class OutputSyslogSeverity(int, Enum):
     SEVEN = 7


-class OutputSyslogMessageFormat(str, Enum):
+class OutputSyslogMessageFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The syslog message format depending on the receiver's support"""

     RFC3164 = "rfc3164"
     RFC5424 = "rfc5424"


-class TimestampFormat(str, Enum):
+class TimestampFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Timestamp format to use when serializing event's time field"""

     SYSLOG = "syslog"
     ISO8601 = "iso8601"


-class OutputSyslogMinimumTLSVersion(str, Enum):
+class OutputSyslogMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputSyslogMaximumTLSVersion(str, Enum):
+class OutputSyslogMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -140,15 +143,23 @@ class OutputSyslogTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-
+        Annotated[
+            Optional[OutputSyslogMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-
+        Annotated[
+            Optional[OutputSyslogMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputSyslogBackpressureBehavior(str, Enum):
+class OutputSyslogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -156,21 +167,21 @@ class OutputSyslogBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputSyslogCompression(str, Enum):
+class OutputSyslogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSyslogQueueFullBehavior(str, Enum):
+class OutputSyslogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSyslogMode(str, Enum):
+class OutputSyslogMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -316,25 +327,36 @@ class OutputSyslog(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    protocol:
+    protocol: Annotated[
+        Optional[OutputSyslogProtocol], PlainValidator(validate_open_enum(False))
+    ] = OutputSyslogProtocol.TCP
     r"""The network protocol to use for sending out syslog messages"""

-    facility:
+    facility: Annotated[
+        Optional[Facility], PlainValidator(validate_open_enum(True))
+    ] = Facility.ONE
     r"""Default value for message facility. Will be overwritten by value of __facility if set. Defaults to user."""

-    severity:
+    severity: Annotated[
+        Optional[OutputSyslogSeverity], PlainValidator(validate_open_enum(True))
+    ] = OutputSyslogSeverity.FIVE
     r"""Default value for message severity. Will be overwritten by value of __severity if set. Defaults to notice."""

     app_name: Annotated[Optional[str], pydantic.Field(alias="appName")] = "Cribl"
     r"""Default name for device or application that originated the message. Defaults to Cribl, but will be overwritten by value of __appname if set."""

     message_format: Annotated[
-
+        Annotated[
+            Optional[OutputSyslogMessageFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="messageFormat"),
     ] = OutputSyslogMessageFormat.RFC3164
     r"""The syslog message format depending on the receiver's support"""

     timestamp_format: Annotated[
-        Optional[TimestampFormat],
+        Annotated[Optional[TimestampFormat], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="timestampFormat"),
     ] = TimestampFormat.SYSLOG
     r"""Timestamp format to use when serializing event's time field"""

@@ -373,7 +395,10 @@ class OutputSyslog(BaseModel):
     tls: Optional[OutputSyslogTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputSyslogBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSyslogBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -408,19 +433,28 @@ class OutputSyslog(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputSyslogCompression], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputSyslogCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputSyslogQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSyslogQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-
-
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputSyslogMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSyslogMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputtcpjson.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,21 +15,21 @@ class OutputTcpjsonType(str, Enum):
     TCPJSON = "tcpjson"


-class OutputTcpjsonCompression(str, Enum):
+class OutputTcpjsonCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending"""

     NONE = "none"
     GZIP = "gzip"


-class OutputTcpjsonMinimumTLSVersion(str, Enum):
+class OutputTcpjsonMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputTcpjsonMaximumTLSVersion(str, Enum):
+class OutputTcpjsonMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -86,15 +89,23 @@ class OutputTcpjsonTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-
+        Annotated[
+            Optional[OutputTcpjsonMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-
+        Annotated[
+            Optional[OutputTcpjsonMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputTcpjsonBackpressureBehavior(str, Enum):
+class OutputTcpjsonBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -102,14 +113,14 @@ class OutputTcpjsonBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputTcpjsonAuthenticationMethod(str, Enum):
+class OutputTcpjsonAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputTcpjsonTLS(str, Enum):
+class OutputTcpjsonTLS(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     INHERIT = "inherit"
@@ -136,7 +147,9 @@ class OutputTcpjsonHost(BaseModel):
     port: float
     r"""The port to connect to on the provided host"""

-    tls:
+    tls: Annotated[
+        Optional[OutputTcpjsonTLS], PlainValidator(validate_open_enum(False))
+    ] = OutputTcpjsonTLS.INHERIT
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     servername: Optional[str] = None
@@ -146,21 +159,21 @@ class OutputTcpjsonHost(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputTcpjsonPqCompressCompression(str, Enum):
+class OutputTcpjsonPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputTcpjsonQueueFullBehavior(str, Enum):
+class OutputTcpjsonQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputTcpjsonMode(str, Enum):
+class OutputTcpjsonMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -268,7 +281,9 @@ class OutputTcpjson(BaseModel):
     )
     r"""Use load-balanced destinations"""

-    compression:
+    compression: Annotated[
+        Optional[OutputTcpjsonCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputTcpjsonCompression.GZIP
     r"""Codec to use to compress the data before sending"""

     log_failed_requests: Annotated[
@@ -302,13 +317,20 @@ class OutputTcpjson(BaseModel):
     r"""Upon connection, send a header-like record containing the auth token and other metadata.This record will not contain an actual event – only subsequent records will."""

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputTcpjsonBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputTcpjsonBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-
+        Annotated[
+            Optional[OutputTcpjsonAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputTcpjsonAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -355,19 +377,29 @@ class OutputTcpjson(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputTcpjsonPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputTcpjsonPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputTcpjsonQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputTcpjsonQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-
-
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputTcpjsonMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputTcpjsonMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputwavefront.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputWavefrontType(str, Enum):
     WAVEFRONT = "wavefront"


-class OutputWavefrontAuthenticationMethod(str, Enum):
+class OutputWavefrontAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -30,7 +33,7 @@ class OutputWavefrontExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputWavefrontFailedRequestLoggingMode(str, Enum):
+class OutputWavefrontFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -92,7 +95,7 @@ class OutputWavefrontTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputWavefrontBackpressureBehavior(str, Enum):
+class OutputWavefrontBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -100,21 +103,21 @@ class OutputWavefrontBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputWavefrontCompression(str, Enum):
+class OutputWavefrontCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputWavefrontQueueFullBehavior(str, Enum):
+class OutputWavefrontQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputWavefrontMode(str, Enum):
+class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -221,7 +224,11 @@ class OutputWavefront(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     auth_type: Annotated[
-
+        Annotated[
+            Optional[OutputWavefrontAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputWavefrontAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -272,7 +279,10 @@ class OutputWavefront(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-
+        Annotated[
+            Optional[OutputWavefrontFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputWavefrontFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -299,7 +309,10 @@ class OutputWavefront(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputWavefrontBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputWavefrontBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -326,18 +339,28 @@ class OutputWavefront(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-
+        Annotated[
+            Optional[OutputWavefrontCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputWavefrontCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-
+        Annotated[
+            Optional[OutputWavefrontQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputWavefrontQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-
+        Annotated[
+            Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputWavefrontMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

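Across these models, the generated enums now carry metaclass=utils.OpenEnumMeta and the corresponding fields are wrapped with PlainValidator(validate_open_enum(...)), so values outside the declared members can pass validation instead of being rejected. The snippet below is a minimal, self-contained sketch of that open-enum pattern; it uses stand-in names (Protocol, SyslogSettings, open_enum_validator) rather than the SDK's real cribl_control_plane.utils helpers, and the "quic" value is purely hypothetical.

    # Minimal sketch of the open-enum pattern introduced in this release.
    # Stand-in names only; not the SDK's actual cribl_control_plane.utils implementation.
    from enum import Enum
    from typing import Optional, Union

    import pydantic
    from pydantic.functional_validators import PlainValidator
    from typing_extensions import Annotated


    class Protocol(str, Enum):  # stand-in for e.g. OutputSyslogProtocol
        TCP = "tcp"
        UDP = "udp"


    def open_enum_validator(enum_cls):
        """Coerce known values to enum members; pass unknown values through."""

        def validate(value):
            try:
                return enum_cls(value)  # known member -> enum instance
            except ValueError:
                return value  # unknown value -> kept as-is, not rejected
        return validate


    class SyslogSettings(pydantic.BaseModel):  # stand-in for OutputSyslog
        protocol: Annotated[
            Optional[Union[Protocol, str]],
            PlainValidator(open_enum_validator(Protocol)),
        ] = Protocol.TCP


    print(SyslogSettings(protocol="udp").protocol)   # Protocol.UDP
    print(SyslogSettings(protocol="quic").protocol)  # "quic" survives validation

With a plain closed Enum field, as in 0.0.49, pydantic would raise a validation error for any value outside the declared members; the nested Annotated plus PlainValidator arrangement shown in the diffs keeps the field alias (for example pqMode) while relaxing that constraint.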