cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4365 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/createversioncommitop.py +24 -0
- cribl_control_plane/models/createversionpushop.py +23 -0
- cribl_control_plane/models/createversionrevertop.py +47 -0
- cribl_control_plane/models/createversionsyncop.py +23 -0
- cribl_control_plane/models/createversionundoop.py +37 -0
- cribl_control_plane/models/getversionbranchop.py +23 -0
- cribl_control_plane/models/getversioncountop.py +47 -0
- cribl_control_plane/models/getversioncurrentbranchop.py +23 -0
- cribl_control_plane/models/getversiondiffop.py +63 -0
- cribl_control_plane/models/getversionfilesop.py +48 -0
- cribl_control_plane/models/getversioninfoop.py +24 -0
- cribl_control_plane/models/getversionshowop.py +63 -0
- cribl_control_plane/models/getversionstatusop.py +38 -0
- cribl_control_plane/models/gitcommitparams.py +23 -0
- cribl_control_plane/models/gitcommitsummary.py +68 -0
- cribl_control_plane/models/gitfile.py +20 -0
- cribl_control_plane/models/gitfilesresponse.py +22 -0
- cribl_control_plane/models/gitinfo.py +23 -0
- cribl_control_plane/models/gitrevertparams.py +20 -0
- cribl_control_plane/models/gitrevertresult.py +48 -0
- cribl_control_plane/models/gitstatusresult.py +73 -0
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- cribl_control_plane/sdk.py +4 -0
- cribl_control_plane/versioning.py +2309 -0
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.18.dist-info}/METADATA +18 -2
- cribl_control_plane-0.0.18.dist-info/RECORD +237 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.18.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputkafka.py

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputKafkaType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaType(str, Enum):
     KAFKA = "kafka"


-class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments."""

     ONE = 1
@@ -23,7 +20,7 @@ class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     MINUS_1 = -1


-class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to Kafka."""

     JSON = "json"
@@ -31,7 +28,7 @@ class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PROTOBUF = "protobuf"


-class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaCompression(str, Enum):
     r"""Codec to use to compress the data before sending to Kafka"""

     NONE = "none"
@@ -59,18 +56,14 @@ class OutputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""


-class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -130,18 +123,12 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None

@@ -202,7 +189,7 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


-class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaSASLMechanism(str, Enum):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -221,19 +208,17 @@ class OutputKafkaAuthentication(BaseModel):

     disabled: Optional[bool] = True

-    mechanism: Annotated[
-        Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
-    ] = OutputKafkaSASLMechanism.PLAIN
+    mechanism: Optional[OutputKafkaSASLMechanism] = OutputKafkaSASLMechanism.PLAIN


-class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -293,23 +278,15 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -317,21 +294,21 @@ class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -427,9 +404,7 @@ class OutputKafka(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputKafkaType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputKafkaType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -445,23 +420,15 @@ class OutputKafka(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    ack: Annotated[
-        Optional[OutputKafkaAcknowledgments], PlainValidator(validate_open_enum(True))
-    ] = OutputKafkaAcknowledgments.ONE
+    ack: Optional[OutputKafkaAcknowledgments] = OutputKafkaAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""

     format_: Annotated[
-        Annotated[
-            Optional[OutputKafkaRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputKafkaRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputKafkaRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""

-    compression: Annotated[
-        Optional[OutputKafkaCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputKafkaCompression.GZIP
+    compression: Optional[OutputKafkaCompression] = OutputKafkaCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""

     max_record_size_kb: Annotated[
@@ -524,10 +491,7 @@ class OutputKafka(BaseModel):
     tls: Optional[OutputKafkaTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKafkaBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKafkaBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -553,27 +517,18 @@ class OutputKafka(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputKafkaPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputKafkaPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputKafkaPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKafkaQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputKafkaQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKafkaMode.ERROR
+    pq_mode: Annotated[Optional[OutputKafkaMode], pydantic.Field(alias="pqMode")] = (
+        OutputKafkaMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
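Nearly all of the churn in the input/output model files above is one generator change: 0.0.16 declared every enum with metaclass=utils.OpenEnumMeta and wrapped each enum field in PlainValidator(validate_open_enum(...)), which let unrecognized wire values pass validation; 0.0.18 switches to plain str/int enums, so pydantic rejects anything outside the declared members. A minimal standalone sketch of the difference follows; the Compression, ClosedModel, OpenModel, and lenient names are illustrative, and the fall-back-to-raw-string semantics of the removed validate_open_enum helper are an assumption, not code taken from the SDK:

from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel, ValidationError
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


class ClosedModel(BaseModel):
    # Closed-enum form, as 0.0.18 now generates: unknown values fail validation.
    compression: Optional[Compression] = Compression.GZIP


def lenient(value: object) -> Union[Compression, str]:
    # Assumed approximation of the removed validate_open_enum helper:
    # fall back to the raw string instead of raising on an unknown member.
    try:
        return Compression(str(value))
    except ValueError:
        return str(value)


class OpenModel(BaseModel):
    # Open-enum form, mirroring the 0.0.16 pattern: PlainValidator replaces
    # pydantic's enum coercion entirely.
    compression: Annotated[
        Optional[Union[Compression, str]], PlainValidator(lenient)
    ] = Compression.GZIP


print(OpenModel(compression="zstd").compression)  # "zstd" passes through

try:
    ClosedModel(compression="zstd")
except ValidationError as exc:
    print(exc.error_count(), "validation error")  # 1 validation error

Practically, payloads carrying enum values newer than this SDK's generated members would deserialize under 0.0.16 but now raise a ValidationError under 0.0.18.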
cribl_control_plane/models/outputkinesis.py

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputKinesisType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisType(str, Enum):
     KINESIS = "kinesis"


-class OutputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -23,21 +20,21 @@ class OutputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"


-class OutputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisSignatureVersion(str, Enum):
     r"""Signature version to use for signing Kinesis stream requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisCompression(str, Enum):
     r"""Compression type to use for records"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -45,21 +42,21 @@ class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -153,9 +150,7 @@ class OutputKinesis(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputKinesisType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputKinesisType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -172,10 +167,7 @@ class OutputKinesis(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputKinesisAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKinesisAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputKinesisAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -188,10 +180,7 @@ class OutputKinesis(BaseModel):
     r"""Kinesis stream service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Kinesis stream-compatible endpoint."""

     signature_version: Annotated[
-        Annotated[
-            Optional[OutputKinesisSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKinesisSignatureVersion],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputKinesisSignatureVersion.V4
     r"""Signature version to use for signing Kinesis stream requests"""
@@ -239,9 +228,7 @@ class OutputKinesis(BaseModel):
     ] = 1
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

-    compression: Annotated[
-        Optional[OutputKinesisCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputKinesisCompression.GZIP
+    compression: Optional[OutputKinesisCompression] = OutputKinesisCompression.GZIP
     r"""Compression type to use for records"""

     use_list_shards: Annotated[
@@ -253,10 +240,7 @@ class OutputKinesis(BaseModel):
     r"""Batch events into a single record as NDJSON"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKinesisBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKinesisBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKinesisBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -282,29 +266,19 @@ class OutputKinesis(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputKinesisPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputKinesisPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputKinesisPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKinesisQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKinesisQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputKinesisQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKinesisMode.ERROR
+    pq_mode: Annotated[Optional[OutputKinesisMode], pydantic.Field(alias="pqMode")] = (
+        OutputKinesisMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
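The simplified fields keep Speakeasy's alias pattern: a single Annotated[..., pydantic.Field(alias=...)] maps the snake_case attribute to its camelCase wire name, as in pq_mode above. A small round-trip sketch using a plain pydantic.BaseModel stand-in (the SDK's own BaseModel in cribl_control_plane.types may configure population and serialization differently, and the BACKPRESSURE member value is assumed from the field docstring; these hunks only show ERROR):

from enum import Enum
from typing import Optional

import pydantic
from pydantic import BaseModel
from typing_extensions import Annotated


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"  # assumed value; the hunks only show ERROR


class Sketch(BaseModel):
    # Same shape as the generated pq_mode field: snake_case attribute,
    # camelCase alias, enum default.
    pq_mode: Annotated[Optional[Mode], pydantic.Field(alias="pqMode")] = Mode.ERROR


# Input is validated under the alias; the attribute keeps the Python name.
m = Sketch.model_validate({"pqMode": "backpressure"})
print(m.pq_mode)                                 # Mode.BACKPRESSURE
print(m.model_dump(by_alias=True, mode="json"))  # {'pqMode': 'backpressure'}
print(Sketch().pq_mode)                          # Mode.ERROR (the default)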
cribl_control_plane/models/outputloki.py

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputLokiType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiType(str, Enum):
     LOKI = "loki"


-class OutputLokiMessageFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiMessageFormat(str, Enum):
     r"""Format to use when sending logs to Loki (Protobuf or JSON)"""

     PROTOBUF = "protobuf"
@@ -33,7 +30,7 @@ class OutputLokiLabel(BaseModel):
     name: Optional[str] = ""


-class OutputLokiAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiAuthenticationType(str, Enum):
     NONE = "none"
     TOKEN = "token"
     TEXT_SECRET = "textSecret"
@@ -52,7 +49,7 @@ class OutputLokiExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputLokiFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -114,7 +111,7 @@ class OutputLokiTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -122,21 +119,21 @@ class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputLokiCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputLokiQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -234,7 +231,7 @@ class OutputLokiTypedDict(TypedDict):


 class OutputLoki(BaseModel):
-    type: Annotated[OutputLokiType, PlainValidator(validate_open_enum(False))]
+    type: OutputLokiType

     url: str
     r"""The endpoint to send logs to"""
@@ -260,10 +257,7 @@ class OutputLoki(BaseModel):
     r"""Name of the event field that contains the message to send. If not specified, Stream sends a JSON representation of the whole event."""

     message_format: Annotated[
-        Annotated[
-            Optional[OutputLokiMessageFormat], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="messageFormat"),
+        Optional[OutputLokiMessageFormat], pydantic.Field(alias="messageFormat")
     ] = OutputLokiMessageFormat.PROTOBUF
     r"""Format to use when sending logs to Loki (Protobuf or JSON)"""

@@ -271,11 +265,7 @@ class OutputLoki(BaseModel):
     r"""List of labels to send with logs. Labels define Loki streams, so use static labels to avoid proliferating label value combinations and streams. Can be merged and/or overridden by the event's __labels field. Example: '__labels: {host: \"cribl.io\", level: \"error\"}'"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputLokiAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputLokiAuthenticationType], pydantic.Field(alias="authType")
     ] = OutputLokiAuthenticationType.NONE

     concurrency: Optional[float] = 1
@@ -319,10 +309,7 @@ class OutputLoki(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputLokiFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputLokiFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputLokiFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -349,11 +336,7 @@ class OutputLoki(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputLokiBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="onBackpressure"),
+        Optional[OutputLokiBackpressureBehavior], pydantic.Field(alias="onBackpressure")
     ] = OutputLokiBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

@@ -398,26 +381,18 @@ class OutputLoki(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputLokiCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputLokiCompression], pydantic.Field(alias="pqCompress")
     ] = OutputLokiCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputLokiQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputLokiQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputLokiQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[Optional[OutputLokiMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputLokiMode.ERROR
+    pq_mode: Annotated[Optional[OutputLokiMode], pydantic.Field(alias="pqMode")] = (
+        OutputLokiMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
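One behavioral note from the OutputLoki hunks: type changed from an optional open enum to a required closed one (type: OutputLokiType), and url remains required. Assuming the generated names are re-exported from cribl_control_plane.models (as the models/__init__.py entry in the file list suggests) and that the fields not shown in these hunks stay optional, a minimal construction looks like:

from cribl_control_plane.models import OutputLoki, OutputLokiType

# `type` must now be a declared member and `url` is required; the endpoint
# below is a placeholder. Everything else falls back to the defaults shown
# in the diff (message_format=PROTOBUF, auth_type=NONE, pq_mode=ERROR, ...).
out = OutputLoki(
    type=OutputLokiType.LOKI,
    url="https://loki.example.com/loki/api/v1/push",
)
print(out.pq_mode)  # OutputLokiMode.ERROR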