cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4365 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/createversioncommitop.py +24 -0
- cribl_control_plane/models/createversionpushop.py +23 -0
- cribl_control_plane/models/createversionrevertop.py +47 -0
- cribl_control_plane/models/createversionsyncop.py +23 -0
- cribl_control_plane/models/createversionundoop.py +37 -0
- cribl_control_plane/models/getversionbranchop.py +23 -0
- cribl_control_plane/models/getversioncountop.py +47 -0
- cribl_control_plane/models/getversioncurrentbranchop.py +23 -0
- cribl_control_plane/models/getversiondiffop.py +63 -0
- cribl_control_plane/models/getversionfilesop.py +48 -0
- cribl_control_plane/models/getversioninfoop.py +24 -0
- cribl_control_plane/models/getversionshowop.py +63 -0
- cribl_control_plane/models/getversionstatusop.py +38 -0
- cribl_control_plane/models/gitcommitparams.py +23 -0
- cribl_control_plane/models/gitcommitsummary.py +68 -0
- cribl_control_plane/models/gitfile.py +20 -0
- cribl_control_plane/models/gitfilesresponse.py +22 -0
- cribl_control_plane/models/gitinfo.py +23 -0
- cribl_control_plane/models/gitrevertparams.py +20 -0
- cribl_control_plane/models/gitrevertresult.py +48 -0
- cribl_control_plane/models/gitstatusresult.py +73 -0
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- cribl_control_plane/sdk.py +4 -0
- cribl_control_plane/versioning.py +2309 -0
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.18.dist-info}/METADATA +18 -2
- cribl_control_plane-0.0.18.dist-info/RECORD +237 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.18.dist-info}/WHEEL +0 -0
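The model diffs shown below all make the same mechanical change: generated enum classes drop the `utils.OpenEnumMeta` metaclass, enum-typed fields drop their `PlainValidator(validate_open_enum(...))` wrappers, and the now-unused imports are removed. Assuming the removed open-enum machinery previously let values outside the declared members pass validation (as its name suggests), the practical effect in 0.0.18 is that unrecognized enum values are rejected. A minimal sketch of that behavior, reusing the `OutputSyslogProtocol` enum from the diff but with a hypothetical `SyslogSettings` model standing in for the generated `OutputSyslog` class (which has more required fields):

from enum import Enum
from typing import Optional

import pydantic


class OutputSyslogProtocol(str, Enum):
    # 0.0.18 shape: a plain closed enum (0.0.16 also set metaclass=utils.OpenEnumMeta)
    TCP = "tcp"
    UDP = "udp"


class SyslogSettings(pydantic.BaseModel):
    # Hypothetical stand-in for the generated OutputSyslog model
    protocol: Optional[OutputSyslogProtocol] = OutputSyslogProtocol.TCP


print(SyslogSettings(protocol="udp").protocol)  # OutputSyslogProtocol.UDP

try:
    SyslogSettings(protocol="sctp")  # not a declared member
except pydantic.ValidationError as exc:
    # A closed enum rejects the value; the open-enum wrapper would presumably have accepted it.
    print(exc)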
--- a/cribl_control_plane/models/outputsyslog.py
+++ b/cribl_control_plane/models/outputsyslog.py
@@ -1,28 +1,25 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputSyslogType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogType(str, Enum):
     SYSLOG = "syslog"


-class OutputSyslogProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogProtocol(str, Enum):
     r"""The network protocol to use for sending out syslog messages"""

     TCP = "tcp"
     UDP = "udp"


-class Facility(int, Enum, metaclass=utils.OpenEnumMeta):
+class Facility(int, Enum):
     r"""Default value for message facility. Will be overwritten by value of __facility if set. Defaults to user."""

     ZERO = 0
@@ -49,7 +46,7 @@ class Facility(int, Enum, metaclass=utils.OpenEnumMeta):
     TWENTY_ONE = 21


-class OutputSyslogSeverity(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogSeverity(int, Enum):
     r"""Default value for message severity. Will be overwritten by value of __severity if set. Defaults to notice."""

     ZERO = 0
@@ -62,28 +59,28 @@ class OutputSyslogSeverity(int, Enum, metaclass=utils.OpenEnumMeta):
     SEVEN = 7


-class OutputSyslogMessageFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogMessageFormat(str, Enum):
     r"""The syslog message format depending on the receiver's support"""

     RFC3164 = "rfc3164"
     RFC5424 = "rfc5424"


-class TimestampFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class TimestampFormat(str, Enum):
     r"""Timestamp format to use when serializing event's time field"""

     SYSLOG = "syslog"
     ISO8601 = "iso8601"


-class OutputSyslogMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputSyslogMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -143,23 +140,15 @@ class OutputSyslogTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputSyslogMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputSyslogMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputSyslogMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputSyslogMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputSyslogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -167,21 +156,21 @@ class OutputSyslogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputSyslogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSyslogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSyslogMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSyslogMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -264,7 +253,7 @@ class OutputSyslog(BaseModel):
     id: str
     r"""Unique ID for this output"""

-    type: Annotated[OutputSyslogType, PlainValidator(validate_open_enum(False))]
+    type: OutputSyslogType

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -280,36 +269,25 @@ class OutputSyslog(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    protocol: Annotated[
-        Optional[OutputSyslogProtocol], PlainValidator(validate_open_enum(False))
-    ] = OutputSyslogProtocol.TCP
+    protocol: Optional[OutputSyslogProtocol] = OutputSyslogProtocol.TCP
     r"""The network protocol to use for sending out syslog messages"""

-    facility: Annotated[
-        Optional[Facility], PlainValidator(validate_open_enum(True))
-    ] = Facility.ONE
+    facility: Optional[Facility] = Facility.ONE
     r"""Default value for message facility. Will be overwritten by value of __facility if set. Defaults to user."""

-    severity: Annotated[
-        Optional[OutputSyslogSeverity], PlainValidator(validate_open_enum(True))
-    ] = OutputSyslogSeverity.FIVE
+    severity: Optional[OutputSyslogSeverity] = OutputSyslogSeverity.FIVE
     r"""Default value for message severity. Will be overwritten by value of __severity if set. Defaults to notice."""

     app_name: Annotated[Optional[str], pydantic.Field(alias="appName")] = "Cribl"
     r"""Default name for device or application that originated the message. Defaults to Cribl, but will be overwritten by value of __appname if set."""

     message_format: Annotated[
-        Annotated[
-            Optional[OutputSyslogMessageFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="messageFormat"),
+        Optional[OutputSyslogMessageFormat], pydantic.Field(alias="messageFormat")
     ] = OutputSyslogMessageFormat.RFC3164
     r"""The syslog message format depending on the receiver's support"""

     timestamp_format: Annotated[
-        Annotated[Optional[TimestampFormat], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="timestampFormat"),
+        Optional[TimestampFormat], pydantic.Field(alias="timestampFormat")
     ] = TimestampFormat.SYSLOG
     r"""Timestamp format to use when serializing event's time field"""

@@ -348,10 +326,7 @@ class OutputSyslog(BaseModel):
     tls: Optional[OutputSyslogTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSyslogBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSyslogBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSyslogBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -386,28 +361,19 @@ class OutputSyslog(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSyslogCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSyslogCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSyslogCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSyslogQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSyslogQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSyslogQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSyslogMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSyslogMode.ERROR
+    pq_mode: Annotated[Optional[OutputSyslogMode], pydantic.Field(alias="pqMode")] = (
+        OutputSyslogMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
--- a/cribl_control_plane/models/outputtcpjson.py
+++ b/cribl_control_plane/models/outputtcpjson.py
@@ -1,35 +1,32 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputTcpjsonType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonType(str, Enum):
     TCPJSON = "tcpjson"


-class OutputTcpjsonCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonCompression(str, Enum):
     r"""Codec to use to compress the data before sending"""

     NONE = "none"
     GZIP = "gzip"


-class OutputTcpjsonMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputTcpjsonMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -89,23 +86,15 @@ class OutputTcpjsonTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputTcpjsonMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputTcpjsonMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputTcpjsonMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputTcpjsonMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputTcpjsonBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -113,14 +102,14 @@ class OutputTcpjsonBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputTcpjsonAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputTcpjsonTLS(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonTLS(str, Enum):
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     INHERIT = "inherit"
@@ -147,9 +136,7 @@ class OutputTcpjsonHost(BaseModel):
     port: float
     r"""The port to connect to on the provided host"""

-    tls: Annotated[
-        Optional[OutputTcpjsonTLS], PlainValidator(validate_open_enum(False))
-    ] = OutputTcpjsonTLS.INHERIT
+    tls: Optional[OutputTcpjsonTLS] = OutputTcpjsonTLS.INHERIT
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     servername: Optional[str] = None
@@ -159,21 +146,21 @@ class OutputTcpjsonHost(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputTcpjsonPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputTcpjsonQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputTcpjsonMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputTcpjsonMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -260,7 +247,7 @@ class OutputTcpjson(BaseModel):
     id: str
     r"""Unique ID for this output"""

-    type: Annotated[OutputTcpjsonType, PlainValidator(validate_open_enum(False))]
+    type: OutputTcpjsonType

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -281,9 +268,7 @@ class OutputTcpjson(BaseModel):
     )
     r"""Use load-balanced destinations"""

-    compression: Annotated[
-        Optional[OutputTcpjsonCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputTcpjsonCompression.GZIP
+    compression: Optional[OutputTcpjsonCompression] = OutputTcpjsonCompression.GZIP
     r"""Codec to use to compress the data before sending"""

     log_failed_requests: Annotated[
@@ -317,20 +302,13 @@ class OutputTcpjson(BaseModel):
     r"""Upon connection, send a header-like record containing the auth token and other metadata.This record will not contain an actual event – only subsequent records will."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputTcpjsonBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputTcpjsonBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputTcpjsonBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputTcpjsonAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputTcpjsonAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputTcpjsonAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -377,29 +355,19 @@ class OutputTcpjson(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputTcpjsonPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputTcpjsonPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputTcpjsonPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputTcpjsonQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputTcpjsonQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputTcpjsonQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputTcpjsonMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputTcpjsonMode.ERROR
+    pq_mode: Annotated[Optional[OutputTcpjsonMode], pydantic.Field(alias="pqMode")] = (
+        OutputTcpjsonMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
--- a/cribl_control_plane/models/outputwavefront.py
+++ b/cribl_control_plane/models/outputwavefront.py
@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputWavefrontType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputWavefrontType(str, Enum):
     WAVEFRONT = "wavefront"


-class OutputWavefrontAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputWavefrontAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -33,7 +30,7 @@ class OutputWavefrontExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputWavefrontFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputWavefrontFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -95,7 +92,7 @@ class OutputWavefrontTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputWavefrontBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputWavefrontBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -103,21 +100,21 @@ class OutputWavefrontBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputWavefrontCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputWavefrontCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputWavefrontQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputWavefrontQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputWavefrontMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -204,7 +201,7 @@ class OutputWavefrontTypedDict(TypedDict):


 class OutputWavefront(BaseModel):
-    type: Annotated[OutputWavefrontType, PlainValidator(validate_open_enum(False))]
+    type: OutputWavefrontType

     id: Optional[str] = None
     r"""Unique ID for this output"""
@@ -224,11 +221,7 @@ class OutputWavefront(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputWavefrontAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputWavefrontAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputWavefrontAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -279,10 +272,7 @@ class OutputWavefront(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputWavefrontFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputWavefrontFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputWavefrontFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -309,10 +299,7 @@ class OutputWavefront(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputWavefrontBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputWavefrontBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputWavefrontBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -339,28 +326,18 @@ class OutputWavefront(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputWavefrontCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputWavefrontCompression], pydantic.Field(alias="pqCompress")
     ] = OutputWavefrontCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputWavefrontQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputWavefrontQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputWavefrontQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputWavefrontMode], pydantic.Field(alias="pqMode")
     ] = OutputWavefrontMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
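For fields that keep a wire-format alias, only the inner open-enum validator disappears; the Annotated[..., pydantic.Field(alias=...)] shape and the camelCase alias remain, as in the pqMode lines above. A minimal standalone sketch of how such a field behaves under pydantic v2, reusing the OutputTcpjsonMode name from the diff (only its ERROR member is visible there) with a hypothetical PqSettings model in place of the generated OutputTcpjson class:

from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class OutputTcpjsonMode(str, Enum):
    # Only the ERROR member is visible in the diff context; others omitted here.
    ERROR = "error"


class PqSettings(pydantic.BaseModel):
    # Hypothetical stand-in; populate_by_name also allows init via the Python field name.
    model_config = pydantic.ConfigDict(populate_by_name=True)

    # 0.0.18 field shape from the diff: alias only, no PlainValidator wrapper.
    pq_mode: Annotated[Optional[OutputTcpjsonMode], pydantic.Field(alias="pqMode")] = (
        OutputTcpjsonMode.ERROR
    )


settings = PqSettings.model_validate({"pqMode": "error"})  # wire name via the alias
print(settings.pq_mode)                                    # OutputTcpjsonMode.ERROR
print(settings.model_dump(by_alias=True, mode="json"))     # {'pqMode': 'error'}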