cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4124 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputdynatraceotlp.py

@@ -1,33 +1,30 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputDynatraceOtlpType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpType(str, Enum):
     DYNATRACE_OTLP = "dynatrace_otlp"


-class OutputDynatraceOtlpProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpProtocol(str, Enum):
     r"""Select a transport option for Dynatrace"""

     HTTP = "http"


-class OutputDynatraceOTLPOTLPVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOTLPOTLPVersion(str, Enum):
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

     ONE_DOT_3_DOT_1 = "1.3.1"


-class OutputDynatraceOtlpCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     NONE = "none"
@@ -35,9 +32,7 @@ class OutputDynatraceOtlpCompressCompression(str, Enum, metaclass=utils.OpenEnum
     GZIP = "gzip"


-class OutputDynatraceOtlpHTTPCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputDynatraceOtlpHTTPCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     NONE = "none"
@@ -55,9 +50,7 @@ class OutputDynatraceOtlpMetadatum(BaseModel):
     key: Optional[str] = ""


-class OutputDynatraceOtlpFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputDynatraceOtlpFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -65,14 +58,14 @@ class OutputDynatraceOtlpFailedRequestLoggingMode(
     NONE = "none"


-class EndpointType(str, Enum, metaclass=utils.OpenEnumMeta):
+class EndpointType(str, Enum):
     r"""Select the type of Dynatrace endpoint configured"""

     SAAS = "saas"
     AG = "ag"


-class OutputDynatraceOtlpBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -145,21 +138,21 @@ class OutputDynatraceOtlpTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputDynatraceOtlpPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDynatraceOtlpQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDynatraceOtlpMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceOtlpMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -273,9 +266,7 @@ class OutputDynatraceOtlp(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputDynatraceOtlpType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputDynatraceOtlpType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -291,9 +282,7 @@ class OutputDynatraceOtlp(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    protocol: Annotated[
-        Optional[OutputDynatraceOtlpProtocol], PlainValidator(validate_open_enum(False))
-    ] = OutputDynatraceOtlpProtocol.HTTP
+    protocol: Optional[OutputDynatraceOtlpProtocol] = OutputDynatraceOtlpProtocol.HTTP
     r"""Select a transport option for Dynatrace"""

     endpoint: Optional[str] = (
@@ -302,25 +291,17 @@ class OutputDynatraceOtlp(BaseModel):
     r"""The endpoint where Dynatrace events will be sent. Enter any valid URL or an IP address (IPv4 or IPv6; enclose IPv6 addresses in square brackets)"""

     otlp_version: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOTLPOTLPVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="otlpVersion"),
+        Optional[OutputDynatraceOTLPOTLPVersion], pydantic.Field(alias="otlpVersion")
     ] = OutputDynatraceOTLPOTLPVersion.ONE_DOT_3_DOT_1
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

-    compress: Annotated[
-        Optional[OutputDynatraceOtlpCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputDynatraceOtlpCompressCompression.GZIP
+    compress: Optional[OutputDynatraceOtlpCompressCompression] = (
+        OutputDynatraceOtlpCompressCompression.GZIP
+    )
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     http_compress: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpHTTPCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpHTTPCompressCompression],
         pydantic.Field(alias="httpCompress"),
     ] = OutputDynatraceOtlpHTTPCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
@@ -360,10 +341,7 @@ class OutputDynatraceOtlp(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDynatraceOtlpFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -382,8 +360,7 @@ class OutputDynatraceOtlp(BaseModel):
     r"""Disable to close the connection immediately after sending the outgoing request"""

     endpoint_type: Annotated[
-        Annotated[Optional[EndpointType], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="endpointType"),
+        Optional[EndpointType], pydantic.Field(alias="endpointType")
     ] = EndpointType.SAAS
     r"""Select the type of Dynatrace endpoint configured"""

@@ -392,10 +369,7 @@ class OutputDynatraceOtlp(BaseModel):
     )

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDynatraceOtlpBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -456,28 +430,19 @@ class OutputDynatraceOtlp(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputDynatraceOtlpPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceOtlpQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDynatraceOtlpQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputDynatraceOtlpMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputDynatraceOtlpMode], pydantic.Field(alias="pqMode")
     ] = OutputDynatraceOtlpMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

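The practical effect of dropping `utils.OpenEnumMeta` and the `PlainValidator(validate_open_enum(False))` wrappers is that these fields now validate as ordinary closed enums. A minimal sketch of the post-change behavior, using stand-in names rather than the generated classes:

# Sketch only: Protocol and Sketch are stand-ins, not classes from this SDK.
from enum import Enum
from typing import Optional

import pydantic


class Protocol(str, Enum):  # shaped like OutputDynatraceOtlpProtocol above
    HTTP = "http"


class Sketch(pydantic.BaseModel):
    protocol: Optional[Protocol] = Protocol.HTTP


print(Sketch(protocol="http").protocol)  # Protocol.HTTP

try:
    Sketch(protocol="grpc")  # not a member of the closed enum
except pydantic.ValidationError as err:
    # pydantic rejects the unknown value instead of passing it through
    print(err.errors()[0]["type"])  # "enum"

The removed open-enum validator was presumably there to let unrecognized values through; with a plain `Enum`, such values now surface as validation errors.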
cribl_control_plane/models/outputelastic.py

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputElasticType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticType(str, Enum):
     ELASTIC = "elastic"


@@ -26,7 +23,7 @@ class OutputElasticExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputElasticFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -99,7 +96,7 @@ class OutputElasticExtraParam(BaseModel):
     value: str


-class OutputElasticAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticAuthenticationMethod(str, Enum):
     r"""Enter credentials directly, or select a stored secret"""

     MANUAL = "manual"
@@ -118,16 +115,12 @@ class OutputElasticAuth(BaseModel):
     disabled: Optional[bool] = True

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputElasticAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputElasticAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputElasticAuthenticationMethod.MANUAL
     r"""Enter credentials directly, or select a stored secret"""


-class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class ElasticVersion(str, Enum):
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""

     AUTO = "auto"
@@ -135,14 +128,14 @@ class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     SEVEN = "7"


-class WriteAction(str, Enum, metaclass=utils.OpenEnumMeta):
+class WriteAction(str, Enum):
     r"""Action to use when writing events. Must be set to `Create` when writing to a data stream."""

     INDEX = "index"
     CREATE = "create"


-class OutputElasticBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -165,21 +158,21 @@ class OutputElasticURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputElasticCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputElasticQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -285,7 +278,7 @@ class OutputElasticTypedDict(TypedDict):


 class OutputElastic(BaseModel):
-    type: Annotated[OutputElasticType, PlainValidator(validate_open_enum(False))]
+    type: OutputElasticType

     index: str
     r"""Index or data stream to send events to. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be overwritten by an event's __index field."""
@@ -354,10 +347,7 @@ class OutputElastic(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputElasticFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -390,8 +380,7 @@ class OutputElastic(BaseModel):
     auth: Optional[OutputElasticAuth] = None

     elastic_version: Annotated[
-        Annotated[Optional[ElasticVersion], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="elasticVersion"),
+        Optional[ElasticVersion], pydantic.Field(alias="elasticVersion")
     ] = ElasticVersion.AUTO
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""

@@ -406,8 +395,7 @@ class OutputElastic(BaseModel):
     r"""Include the `document_id` field when sending events to an Elastic TSDS (time series data stream)"""

     write_action: Annotated[
-        Annotated[Optional[WriteAction], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="writeAction"),
+        Optional[WriteAction], pydantic.Field(alias="writeAction")
     ] = WriteAction.CREATE
     r"""Action to use when writing events. Must be set to `Create` when writing to a data stream."""

@@ -417,10 +405,7 @@ class OutputElastic(BaseModel):
     r"""Retry failed events when a bulk request to Elastic is successful, but the response body returns an error for one or more events in the batch"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputElasticBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputElasticBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -464,29 +449,19 @@ class OutputElastic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputElasticCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputElasticCompression], pydantic.Field(alias="pqCompress")
     ] = OutputElasticCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputElasticQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputElasticQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputElasticMode.ERROR
+    pq_mode: Annotated[Optional[OutputElasticMode], pydantic.Field(alias="pqMode")] = (
+        OutputElasticMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
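The `pydantic.Field(alias=...)` annotations survive the change, so the camelCase wire names are unaffected; only the inner open-enum `Annotated` layer is gone. A small sketch (stand-in model, not the generated `OutputElastic`) of how such an aliased enum field still round-trips:

# Sketch only: WriteAction mirrors the enum above; Sketch is not an SDK class.
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class WriteAction(str, Enum):
    INDEX = "index"
    CREATE = "create"


class Sketch(pydantic.BaseModel):
    write_action: Annotated[
        Optional[WriteAction], pydantic.Field(alias="writeAction")
    ] = WriteAction.CREATE


m = Sketch.model_validate({"writeAction": "index"})  # accepts the alias name
print(m.write_action)                                # WriteAction.INDEX
print(m.model_dump(by_alias=True, mode="json"))      # {'writeAction': 'index'}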
cribl_control_plane/models/outputelasticcloud.py

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputElasticCloudType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudType(str, Enum):
     ELASTIC_CLOUD = "elastic_cloud"


@@ -26,9 +23,7 @@ class OutputElasticCloudExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputElasticCloudFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputElasticCloudFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -47,7 +42,7 @@ class OutputElasticCloudExtraParam(BaseModel):
     value: str


-class OutputElasticCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudAuthenticationMethod(str, Enum):
     r"""Enter credentials directly, or select a stored secret"""

     MANUAL = "manual"
@@ -66,10 +61,7 @@ class OutputElasticCloudAuth(BaseModel):
     disabled: Optional[bool] = False

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticCloudAuthenticationMethod],
         pydantic.Field(alias="authType"),
     ] = OutputElasticCloudAuthenticationMethod.MANUAL
     r"""Enter credentials directly, or select a stored secret"""
@@ -129,7 +121,7 @@ class OutputElasticCloudTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -137,21 +129,21 @@ class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnum
     QUEUE = "queue"


-class OutputElasticCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputElasticCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputElasticCloudMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -248,9 +240,7 @@ class OutputElasticCloud(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputElasticCloudType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputElasticCloudType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -305,10 +295,7 @@ class OutputElasticCloud(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticCloudFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputElasticCloudFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -353,10 +340,7 @@ class OutputElasticCloud(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticCloudBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputElasticCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -377,28 +361,18 @@ class OutputElasticCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputElasticCloudCompression], pydantic.Field(alias="pqCompress")
     ] = OutputElasticCloudCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputElasticCloudQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputElasticCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputElasticCloudMode], pydantic.Field(alias="pqMode")
     ] = OutputElasticCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

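Because the same enum change repeats across all of the input and output models listed above, callers that previously passed arbitrary strings for these fields may now need to guard their inputs. A hedged sketch of one way to do that, again with stand-in names rather than the generated `OutputElasticCloud`:

# Sketch only: Compression and Sketch are stand-ins, not SDK classes.
from enum import Enum
from typing import Optional

import pydantic


class Compression(str, Enum):  # shaped like OutputElasticCloudCompression above
    NONE = "none"
    GZIP = "gzip"


class Sketch(pydantic.BaseModel):
    pq_compress: Optional[Compression] = Compression.NONE


def build(codec: str) -> Sketch:
    # Check membership up front instead of relying on the removed
    # open-enum validator to pass unknown codecs through.
    if codec not in {c.value for c in Compression}:
        raise ValueError(f"unsupported codec: {codec!r}")
    return Sketch(pq_compress=Compression(codec))


print(build("gzip").pq_compress)  # Compression.GZIP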