cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of cribl-control-plane has been flagged by the registry's advisory checks.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4365 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/createversioncommitop.py +24 -0
- cribl_control_plane/models/createversionpushop.py +23 -0
- cribl_control_plane/models/createversionrevertop.py +47 -0
- cribl_control_plane/models/createversionsyncop.py +23 -0
- cribl_control_plane/models/createversionundoop.py +37 -0
- cribl_control_plane/models/getversionbranchop.py +23 -0
- cribl_control_plane/models/getversioncountop.py +47 -0
- cribl_control_plane/models/getversioncurrentbranchop.py +23 -0
- cribl_control_plane/models/getversiondiffop.py +63 -0
- cribl_control_plane/models/getversionfilesop.py +48 -0
- cribl_control_plane/models/getversioninfoop.py +24 -0
- cribl_control_plane/models/getversionshowop.py +63 -0
- cribl_control_plane/models/getversionstatusop.py +38 -0
- cribl_control_plane/models/gitcommitparams.py +23 -0
- cribl_control_plane/models/gitcommitsummary.py +68 -0
- cribl_control_plane/models/gitfile.py +20 -0
- cribl_control_plane/models/gitfilesresponse.py +22 -0
- cribl_control_plane/models/gitinfo.py +23 -0
- cribl_control_plane/models/gitrevertparams.py +20 -0
- cribl_control_plane/models/gitrevertresult.py +48 -0
- cribl_control_plane/models/gitstatusresult.py +73 -0
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- cribl_control_plane/sdk.py +4 -0
- cribl_control_plane/versioning.py +2309 -0
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.18.dist-info}/METADATA +18 -2
- cribl_control_plane-0.0.18.dist-info/RECORD +237 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.18.dist-info}/WHEEL +0 -0
--- a/cribl_control_plane/models/outputservicenow.py
+++ b/cribl_control_plane/models/outputservicenow.py
@@ -1,34 +1,31 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class OutputServiceNowType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowType(str, Enum):
     SERVICE_NOW = "service_now"
 
 
-class OutputServiceNowOTLPVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowOTLPVersion(str, Enum):
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""
 
     ONE_DOT_3_DOT_1 = "1.3.1"
 
 
-class OutputServiceNowProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowProtocol(str, Enum):
     r"""Select a transport option for OpenTelemetry"""
 
     GRPC = "grpc"
     HTTP = "http"
 
 
-class OutputServiceNowCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     NONE = "none"
@@ -36,7 +33,7 @@ class OutputServiceNowCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     GZIP = "gzip"
 
 
-class OutputServiceNowHTTPCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowHTTPCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     NONE = "none"
@@ -54,7 +51,7 @@ class OutputServiceNowMetadatum(BaseModel):
     key: Optional[str] = ""
 
 
-class OutputServiceNowFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -62,7 +59,7 @@ class OutputServiceNowFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
 
 
-class OutputServiceNowBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -135,14 +132,14 @@ class OutputServiceNowTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputServiceNowMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputServiceNowMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -197,37 +194,29 @@ class OutputServiceNowTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputServiceNowMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputServiceNowMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputServiceNowMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputServiceNowMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None
 
 
-class OutputServiceNowPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputServiceNowQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputServiceNowMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -336,9 +325,7 @@ class OutputServiceNow(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""
 
-    type: Annotated[
-        Optional[OutputServiceNowType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputServiceNowType] = None
 
     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -362,11 +349,7 @@ class OutputServiceNow(BaseModel):
     )
 
     otlp_version: Annotated[
-        Annotated[
-            Optional[OutputServiceNowOTLPVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="otlpVersion"),
+        Optional[OutputServiceNowOTLPVersion], pydantic.Field(alias="otlpVersion")
     ] = OutputServiceNowOTLPVersion.ONE_DOT_3_DOT_1
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""
 
@@ -375,22 +358,16 @@ class OutputServiceNow(BaseModel):
     ] = 2048
     r"""Maximum size, in KB, of the request body"""
 
-    protocol: Annotated[
-        Optional[OutputServiceNowProtocol], PlainValidator(validate_open_enum(False))
-    ] = OutputServiceNowProtocol.GRPC
+    protocol: Optional[OutputServiceNowProtocol] = OutputServiceNowProtocol.GRPC
     r"""Select a transport option for OpenTelemetry"""
 
-    compress: Annotated[
-        Optional[OutputServiceNowCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputServiceNowCompressCompression.GZIP
+    compress: Optional[OutputServiceNowCompressCompression] = (
+        OutputServiceNowCompressCompression.GZIP
+    )
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     http_compress: Annotated[
-        Annotated[
-            Optional[OutputServiceNowHTTPCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowHTTPCompressCompression],
         pydantic.Field(alias="httpCompress"),
     ] = OutputServiceNowHTTPCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
@@ -425,10 +402,7 @@ class OutputServiceNow(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputServiceNowFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputServiceNowFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -447,10 +421,7 @@ class OutputServiceNow(BaseModel):
     r"""Disable to close the connection immediately after sending the outgoing request"""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputServiceNowBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowBackpressureBehavior],
        pydantic.Field(alias="onBackpressure"),
     ] = OutputServiceNowBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -513,28 +484,19 @@ class OutputServiceNow(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputServiceNowPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowPqCompressCompression],
        pydantic.Field(alias="pqCompress"),
     ] = OutputServiceNowPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputServiceNowQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowQueueFullBehavior],
        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputServiceNowQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputServiceNowMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputServiceNowMode], pydantic.Field(alias="pqMode")
     ] = OutputServiceNowMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
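The change repeated throughout outputservicenow.py (and the other model files below) is the removal of `utils.OpenEnumMeta` and `PlainValidator(validate_open_enum(False))`, leaving plain closed `str` enums. The following sketch is illustrative only, assuming pydantic v2 semantics: `ServiceNowPqSketch` is a hypothetical wrapper, not an SDK class, and only the `pq_mode` field shape and the `ERROR` member are taken from the diff above.

```python
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class OutputServiceNowMode(str, Enum):
    ERROR = "error"  # remaining members elided in the diff above


class ServiceNowPqSketch(pydantic.BaseModel):  # hypothetical wrapper, not an SDK class
    pq_mode: Annotated[
        Optional[OutputServiceNowMode], pydantic.Field(alias="pqMode")
    ] = OutputServiceNowMode.ERROR


# Known values still validate (plain strings coerce to members via the alias).
ok = ServiceNowPqSketch.model_validate({"pqMode": "error"})
print(ok.pq_mode)  # OutputServiceNowMode.ERROR

# Unknown values are now rejected by the closed Enum.
try:
    ServiceNowPqSketch.model_validate({"pqMode": "not-a-mode"})
except pydantic.ValidationError as err:
    print(f"rejected: {err.error_count()} validation error")
```

The removed open-enum validator presumably accepted values outside the declared members; with a plain `Enum`, such values now fail validation instead.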
--- a/cribl_control_plane/models/outputsignalfx.py
+++ b/cribl_control_plane/models/outputsignalfx.py
@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class OutputSignalfxType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxType(str, Enum):
     SIGNALFX = "signalfx"
 
 
-class OutputSignalfxAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
     MANUAL = "manual"
@@ -33,7 +30,7 @@ class OutputSignalfxExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputSignalfxFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -95,7 +92,7 @@ class OutputSignalfxTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputSignalfxBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -103,21 +100,21 @@ class OutputSignalfxBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputSignalfxCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputSignalfxQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSignalfxMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -204,7 +201,7 @@ class OutputSignalfxTypedDict(TypedDict):
 
 
 class OutputSignalfx(BaseModel):
-    type: Annotated[OutputSignalfxType, PlainValidator(validate_open_enum(False))]
+    type: OutputSignalfxType
 
     id: Optional[str] = None
     r"""Unique ID for this output"""
@@ -224,11 +221,7 @@ class OutputSignalfx(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSignalfxAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputSignalfxAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputSignalfxAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
@@ -279,10 +272,7 @@ class OutputSignalfx(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputSignalfxFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSignalfxFailedRequestLoggingMode],
        pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputSignalfxFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -309,10 +299,7 @@ class OutputSignalfx(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSignalfxBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSignalfxBackpressureBehavior],
        pydantic.Field(alias="onBackpressure"),
     ] = OutputSignalfxBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -339,29 +326,19 @@ class OutputSignalfx(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSignalfxCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSignalfxCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSignalfxCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSignalfxQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSignalfxQueueFullBehavior],
        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSignalfxQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSignalfxMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSignalfxMode.ERROR
+    pq_mode: Annotated[Optional[OutputSignalfxMode], pydantic.Field(alias="pqMode")] = (
+        OutputSignalfxMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
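For contrast, the next sketch approximates what the removed 0.0.16 pattern allowed. `lenient_enum` is a hypothetical stand-in for the SDK's internal `validate_open_enum` helper (its exact implementation is not shown in this diff); the relevant point is that `PlainValidator` replaces pydantic's default enum check, so an unrecognized string could pass through unchanged.

```python
from enum import Enum
from typing import Optional, Union

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class OutputSignalfxCompression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


def lenient_enum(value):
    """Hypothetical open-enum validator: return a member if possible, else the raw value."""
    try:
        return OutputSignalfxCompression(value)
    except ValueError:
        return value


class SignalfxPqSketch(pydantic.BaseModel):  # illustrative wrapper, not an SDK class
    pq_compress: Annotated[
        Optional[Union[OutputSignalfxCompression, str]],
        PlainValidator(lenient_enum),
        pydantic.Field(alias="pqCompress"),
    ] = OutputSignalfxCompression.NONE


print(SignalfxPqSketch.model_validate({"pqCompress": "gzip"}).pq_compress)  # enum member
print(SignalfxPqSketch.model_validate({"pqCompress": "zstd"}).pq_compress)  # raw "zstd" passes through
```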
--- a/cribl_control_plane/models/outputsnmp.py
+++ b/cribl_control_plane/models/outputsnmp.py
@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class OutputSnmpType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnmpType(str, Enum):
     SNMP = "snmp"
 
 
@@ -50,7 +47,7 @@ class OutputSnmpTypedDict(TypedDict):
 
 
 class OutputSnmp(BaseModel):
-    type: Annotated[OutputSnmpType, PlainValidator(validate_open_enum(False))]
+    type: OutputSnmpType
 
     hosts: List[OutputSnmpHost]
     r"""One or more SNMP destinations to forward traps to"""
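In outputsnmp.py the required discriminator simply becomes `type: OutputSnmpType`. A small illustrative check, assuming pydantic v2 (the wrapper class below is hypothetical, not the SDK's `OutputSnmp`), showing that known string values still coerce to the closed enum's members:

```python
from enum import Enum

import pydantic


class OutputSnmpType(str, Enum):
    SNMP = "snmp"


class SnmpTypeSketch(pydantic.BaseModel):  # illustrative wrapper, not the SDK's OutputSnmp
    type: OutputSnmpType


print(SnmpTypeSketch(type=OutputSnmpType.SNMP).type)         # pass the member directly
print(SnmpTypeSketch.model_validate({"type": "snmp"}).type)  # the known string still coerces
```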
--- a/cribl_control_plane/models/outputsns.py
+++ b/cribl_control_plane/models/outputsns.py
@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class OutputSnsType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsType(str, Enum):
     SNS = "sns"
 
 
-class OutputSnsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -23,14 +20,14 @@ class OutputSnsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"
 
 
-class OutputSnsSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsSignatureVersion(str, Enum):
     r"""Signature version to use for signing SNS requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputSnsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -38,21 +35,21 @@ class OutputSnsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputSnsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputSnsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSnsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -138,9 +135,7 @@ class OutputSns(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""
 
-    type: Annotated[
-        Optional[OutputSnsType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputSnsType] = None
 
     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -160,10 +155,7 @@ class OutputSns(BaseModel):
     r"""Maximum number of retries before the output returns an error. Note that not all errors are retryable. The retries use an exponential backoff policy."""
 
     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputSnsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSnsAuthenticationMethod],
        pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputSnsAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -179,11 +171,7 @@ class OutputSns(BaseModel):
     r"""SNS service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to SNS-compatible endpoint."""
 
     signature_version: Annotated[
-        Annotated[
-            Optional[OutputSnsSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="signatureVersion"),
+        Optional[OutputSnsSignatureVersion], pydantic.Field(alias="signatureVersion")
     ] = OutputSnsSignatureVersion.V4
     r"""Signature version to use for signing SNS requests"""
 
@@ -218,11 +206,7 @@ class OutputSns(BaseModel):
     r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSnsBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="onBackpressure"),
+        Optional[OutputSnsBackpressureBehavior], pydantic.Field(alias="onBackpressure")
     ] = OutputSnsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
@@ -247,26 +231,18 @@ class OutputSns(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSnsCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSnsCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSnsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSnsQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputSnsQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputSnsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[Optional[OutputSnsMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSnsMode.ERROR
+    pq_mode: Annotated[Optional[OutputSnsMode], pydantic.Field(alias="pqMode")] = (
+        OutputSnsMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[