cribl-control-plane 0.0.16-py3-none-any.whl → 0.0.17-py3-none-any.whl
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4124 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
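
The change repeated across the model files below is the removal of Speakeasy's open-enum machinery: enum classes lose the metaclass=utils.OpenEnumMeta argument, model fields lose their PlainValidator(validate_open_enum(False)) wrapper, and the now-unused imports (utils, validate_open_enum, PlainValidator) are dropped. What remains are plain str- and int-valued Enum subclasses that pydantic validates directly. A minimal sketch of the practical difference, using a made-up model rather than anything from the SDK and assuming pydantic v2:

# Hypothetical model, not from the SDK: shows how a plain (closed) Enum
# field behaves under pydantic v2 once the open-enum validator is gone.
from enum import Enum
from typing import Optional

from pydantic import BaseModel, ValidationError


class Compression(str, Enum):  # the 0.0.17 style: a closed enum
    NONE = "none"
    GZIP = "gzip"


class Sketch(BaseModel):
    compress: Optional[Compression] = Compression.GZIP


print(Sketch(compress="none").compress)  # Compression.NONE: members still coerce

try:
    Sketch(compress="zstd")  # not a declared member
except ValidationError as err:
    # The open-enum pattern was built to let values like this through;
    # with a closed enum it is a hard validation error.
    print(err.errors()[0]["type"])  # "enum"

In other words, enum-typed fields that previously tolerated unrecognized values can now be expected to fail validation.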
--- a/cribl_control_plane/models/outputazuredataexplorer.py
+++ b/cribl_control_plane/models/outputazuredataexplorer.py
@@ -1,26 +1,23 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputAzureDataExplorerType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerType(str, Enum):
     AZURE_DATA_EXPLORER = "azure_data_explorer"


-class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class IngestionMode(str, Enum):
     BATCHING = "batching"
     STREAMING = "streaming"


-class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
+class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
     r"""Endpoint used to acquire authentication tokens from Azure"""

     HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -28,9 +25,7 @@ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
     HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"


-class OutputAzureDataExplorerAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerAuthenticationMethod(str, Enum):
     r"""The type of OAuth 2.0 client credentials grant flow to use"""

     CLIENT_SECRET = "clientSecret"
@@ -50,9 +45,7 @@ class OutputAzureDataExplorerCertificate(BaseModel):
     r"""The certificate you registered as credentials for your app in the Azure portal"""


-class OutputAzureDataExplorerBackpressureBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -60,7 +53,7 @@ class OutputAzureDataExplorerBackpressureBehavior(
     QUEUE = "queue"


-class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerDataFormat(str, Enum):
     r"""Format of the output data"""

     JSON = "json"
@@ -68,16 +61,14 @@ class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET = "parquet"


-class OutputAzureDataExplorerDiskSpaceProtection(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
     DROP = "drop"


-class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
+class PrefixOptional(str, Enum):
     DROP_BY = "dropBy"
     INGEST_BY = "ingestBy"

@@ -90,9 +81,7 @@ class ExtentTagTypedDict(TypedDict):
 class ExtentTag(BaseModel):
     value: str

-    prefix: Annotated[
-        Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
-    ] = None
+    prefix: Optional[PrefixOptional] = None


 class IngestIfNotExistTypedDict(TypedDict):
@@ -103,7 +92,7 @@ class IngestIfNotExist(BaseModel):
     value: str


-class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+class ReportLevel(str, Enum):
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""

     FAILURES_ONLY = "failuresOnly"
@@ -111,7 +100,7 @@ class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     FAILURES_AND_SUCCESSES = "failuresAndSuccesses"


-class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class ReportMethod(str, Enum):
     r"""Target of the ingestion status reporting. Defaults to Queue."""

     QUEUE = "queue"
@@ -184,32 +173,28 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputAzureDataExplorerCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerCompressCompression(str, Enum):
     r"""Data compression format to apply to HTTP content before it is delivered"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureDataExplorerPqCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -375,9 +360,7 @@ class OutputAzureDataExplorer(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputAzureDataExplorerType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputAzureDataExplorerType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -399,24 +382,17 @@ class OutputAzureDataExplorer(BaseModel):
     r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""

     ingest_mode: Annotated[
-        Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="ingestMode"),
+        Optional[IngestionMode], pydantic.Field(alias="ingestMode")
     ] = IngestionMode.BATCHING

     oauth_endpoint: Annotated[
-        Annotated[
-            Optional[MicrosoftEntraIDAuthenticationEndpoint],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[MicrosoftEntraIDAuthenticationEndpoint],
         pydantic.Field(alias="oauthEndpoint"),
     ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
     r"""Endpoint used to acquire authentication tokens from Azure"""

     oauth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerAuthenticationMethod],
         pydantic.Field(alias="oauthType"),
     ] = OutputAzureDataExplorerAuthenticationMethod.CLIENT_SECRET
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -435,10 +411,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -449,11 +422,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""

     format_: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputAzureDataExplorerDataFormat], pydantic.Field(alias="format")
     ] = OutputAzureDataExplorerDataFormat.JSON
     r"""Format of the output data"""

@@ -493,10 +462,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Maximum number of parts to upload in parallel per file"""

     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureDataExplorerDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -540,14 +506,12 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""

     report_level: Annotated[
-        Annotated[Optional[ReportLevel], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="reportLevel"),
+        Optional[ReportLevel], pydantic.Field(alias="reportLevel")
     ] = ReportLevel.FAILURES_ONLY
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""

     report_method: Annotated[
-        Annotated[Optional[ReportMethod], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="reportMethod"),
+        Optional[ReportMethod], pydantic.Field(alias="reportMethod")
     ] = ReportMethod.QUEUE
     r"""Target of the ingestion status reporting. Defaults to Queue."""

@@ -572,10 +536,9 @@ class OutputAzureDataExplorer(BaseModel):
     ] = False
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

-    compress: Annotated[
-        Optional[OutputAzureDataExplorerCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureDataExplorerCompressCompression.GZIP
+    compress: Optional[OutputAzureDataExplorerCompressCompression] = (
+        OutputAzureDataExplorerCompressCompression.GZIP
+    )
     r"""Data compression format to apply to HTTP content before it is delivered"""

     mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
@@ -629,29 +592,19 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputAzureDataExplorerPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputAzureDataExplorerMode], pydantic.Field(alias="pqMode")
     ] = OutputAzureDataExplorerMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
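
A hedged example of what this means for callers, using an enum defined verbatim in the diff above; the 0.0.16 behavior noted in the comment is what the removed open-enum helpers were designed to provide:

from cribl_control_plane.models.outputazuredataexplorer import IngestionMode

print(IngestionMode("batching"))  # a declared member: accepted in both versions

try:
    IngestionMode("bulk")  # hypothetical value, not a declared member
except ValueError as err:
    # In 0.0.17, IngestionMode is a plain closed Enum, so unknown values
    # raise; the removed OpenEnumMeta machinery existed to tolerate them.
    print(err)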
--- a/cribl_control_plane/models/outputazureeventhub.py
+++ b/cribl_control_plane/models/outputazureeventhub.py
@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputAzureEventhubType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubType(str, Enum):
     AZURE_EVENTHUB = "azure_eventhub"


-class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments"""

     ONE = 1
@@ -23,14 +20,14 @@ class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     MINUS_1 = -1


-class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

     JSON = "json"
     RAW = "raw"


-class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubSASLMechanism(str, Enum):
     PLAIN = "plain"
     OAUTHBEARER = "oauthbearer"

@@ -47,10 +44,9 @@ class OutputAzureEventhubAuthentication(BaseModel):

     disabled: Optional[bool] = False

-    mechanism: Annotated[
-        Optional[OutputAzureEventhubSASLMechanism],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureEventhubSASLMechanism.PLAIN
+    mechanism: Optional[OutputAzureEventhubSASLMechanism] = (
+        OutputAzureEventhubSASLMechanism.PLAIN
+    )


 class OutputAzureEventhubTLSSettingsClientSideTypedDict(TypedDict):
@@ -68,7 +64,7 @@ class OutputAzureEventhubTLSSettingsClientSide(BaseModel):
     r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""


-class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -76,21 +72,21 @@ class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -179,9 +175,7 @@ class OutputAzureEventhub(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputAzureEventhubType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputAzureEventhubType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -197,18 +191,13 @@ class OutputAzureEventhub(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    ack: Annotated[
-        Optional[OutputAzureEventhubAcknowledgments],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureEventhubAcknowledgments.ONE
+    ack: Optional[OutputAzureEventhubAcknowledgments] = (
+        OutputAzureEventhubAcknowledgments.ONE
+    )
     r"""Control the number of required acknowledgments"""

     format_: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputAzureEventhubRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputAzureEventhubRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

@@ -267,10 +256,7 @@ class OutputAzureEventhub(BaseModel):
     tls: Optional[OutputAzureEventhubTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureEventhubBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureEventhubBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -291,28 +277,18 @@ class OutputAzureEventhub(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputAzureEventhubCompression], pydantic.Field(alias="pqCompress")
     ] = OutputAzureEventhubCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureEventhubQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureEventhubQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputAzureEventhubMode], pydantic.Field(alias="pqMode")
     ] = OutputAzureEventhubMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
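
OutputAzureEventhubAcknowledgments is the one int-valued enum in these hunks, and the same closed-enum rule now applies to numeric values. A small sketch (the value 5 is an arbitrary non-member chosen for illustration):

from cribl_control_plane.models.outputazureeventhub import (
    OutputAzureEventhubAcknowledgments,
)

print(OutputAzureEventhubAcknowledgments(1))   # ONE
print(OutputAzureEventhubAcknowledgments(-1))  # MINUS_1

try:
    OutputAzureEventhubAcknowledgments(5)  # arbitrary non-member
except ValueError as err:
    print(err)  # closed int-valued Enums reject unknown values as well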
--- a/cribl_control_plane/models/outputazurelogs.py
+++ b/cribl_control_plane/models/outputazurelogs.py
@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputAzureLogsType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsType(str, Enum):
     AZURE_LOGS = "azure_logs"


@@ -26,7 +23,7 @@ class OutputAzureLogsExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputAzureLogsFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -88,7 +85,7 @@ class OutputAzureLogsTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputAzureLogsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -96,28 +93,28 @@ class OutputAzureLogsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputAzureLogsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsAuthenticationMethod(str, Enum):
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputAzureLogsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureLogsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -209,7 +206,7 @@ class OutputAzureLogsTypedDict(TypedDict):


 class OutputAzureLogs(BaseModel):
-    type: Annotated[OutputAzureLogsType, PlainValidator(validate_open_enum(False))]
+    type: OutputAzureLogsType

     id: Optional[str] = None
     r"""Unique ID for this output"""
@@ -277,10 +274,7 @@ class OutputAzureLogs(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureLogsFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputAzureLogsFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -312,20 +306,13 @@ class OutputAzureLogs(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureLogsBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureLogsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputAzureLogsAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputAzureLogsAuthenticationMethod.MANUAL
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""

@@ -345,28 +332,18 @@ class OutputAzureLogs(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputAzureLogsCompression], pydantic.Field(alias="pqCompress")
     ] = OutputAzureLogsCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureLogsQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureLogsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputAzureLogsMode], pydantic.Field(alias="pqMode")
     ] = OutputAzureLogsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
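
Unlike the previous two files, type on OutputAzureLogs is required, so in 0.0.17 it becomes a bare closed-enum annotation with no Annotated wrapper at all. A hedged sketch at the enum level (constructing the full model is omitted, since fields outside these hunks are not visible here):

from cribl_control_plane.models.outputazurelogs import OutputAzureLogsType

print(OutputAzureLogsType("azure_logs"))  # the member shown in the diff

try:
    OutputAzureLogsType("azure_monitor")  # hypothetical value
except ValueError as err:
    # The removed PlainValidator(validate_open_enum(False)) existed to
    # admit strings outside the enum; in 0.0.17 anything other than a
    # declared member fails validation for this required field.
    print(err)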