cribl-control-plane: cribl_control_plane-0.0.47-py3-none-any.whl → cribl_control_plane-0.0.48a1-py3-none-any.whl
This diff compares the published contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic.
- cribl_control_plane/_version.py +3 -5
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/models/__init__.py +12 -12
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/hbcriblinfo.py +19 -3
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +47 -20
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +46 -16
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +53 -18
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeprovidedinfo.py +3 -0
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +77 -32
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +74 -28
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +82 -30
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.47.dist-info → cribl_control_plane-0.0.48a1.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.0.47.dist-info → cribl_control_plane-0.0.48a1.dist-info}/RECORD +160 -162
- {cribl_control_plane-0.0.47.dist-info → cribl_control_plane-0.0.48a1.dist-info}/WHEEL +1 -1
- cribl_control_plane/models/appmode.py +0 -13
- cribl_control_plane/models/routecloneconf.py +0 -13
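
The model diffs below all apply one mechanical change: every generated enum gains `metaclass=utils.OpenEnumMeta`, and every enum-typed model field is wrapped with `PlainValidator(validate_open_enum(...))`, converting closed enums into "open" enums that tolerate values this SDK version does not declare. Three of the changed model files are reproduced in full below; their hunk headers identify them as outputazuredataexplorer.py, outputazureeventhub.py, and outputazurelogs.py. As a minimal sketch of what an OpenEnumMeta-style metaclass plausibly does (an illustration under that assumption, not the SDK's actual implementation):

```python
# Illustrative sketch only, not cribl_control_plane's real utils.OpenEnumMeta.
# Looking up a value that is not a declared member returns the raw value
# instead of raising ValueError, so newly added server-side values still load.
from enum import Enum, EnumMeta
from typing import Any


class OpenEnumMeta(EnumMeta):
    def __call__(cls, value: Any, *args: Any, **kwargs: Any) -> Any:
        try:
            # Known member: ordinary Enum value lookup.
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            # Unknown member: pass the raw value through unchanged.
            return value


class IngestionMode(str, Enum, metaclass=OpenEnumMeta):
    BATCHING = "batching"
    STREAMING = "streaming"


print(repr(IngestionMode("batching")))  # <IngestionMode.BATCHING: 'batching'>
print(repr(IngestionMode("bulk")))      # 'bulk' -- accepted, not a ValueError
```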
```diff
--- a/cribl_control_plane/models/outputazuredataexplorer.py
+++ b/cribl_control_plane/models/outputazuredataexplorer.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,12 +15,12 @@ class OutputAzureDataExplorerType(str, Enum):
     AZURE_DATA_EXPLORER = "azure_data_explorer"
 
 
-class IngestionMode(str, Enum):
+class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
     BATCHING = "batching"
     STREAMING = "streaming"
 
 
-class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
+class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -25,7 +28,9 @@ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
     HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"
 
 
-class OutputAzureDataExplorerAuthenticationMethod(str, Enum):
+class OutputAzureDataExplorerAuthenticationMethod(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
 
     CLIENT_SECRET = "clientSecret"
@@ -45,7 +50,9 @@ class OutputAzureDataExplorerCertificate(BaseModel):
     r"""The certificate you registered as credentials for your app in the Azure portal"""
 
 
-class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
+class OutputAzureDataExplorerBackpressureBehavior(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -53,7 +60,7 @@ class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputAzureDataExplorerDataFormat(str, Enum):
+class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""
 
     JSON = "json"
@@ -61,14 +68,16 @@ class OutputAzureDataExplorerDataFormat(str, Enum):
     PARQUET = "parquet"
 
 
-class OutputAzureDataExplorerDiskSpaceProtection(str, Enum):
+class OutputAzureDataExplorerDiskSpaceProtection(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class PrefixOptional(str, Enum):
+class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP_BY = "dropBy"
     INGEST_BY = "ingestBy"
 
@@ -81,7 +90,9 @@ class ExtentTagTypedDict(TypedDict):
 class ExtentTag(BaseModel):
     value: str
 
-    prefix: Optional[PrefixOptional] = None
+    prefix: Annotated[
+        Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
+    ] = None
 
 
 class IngestIfNotExistTypedDict(TypedDict):
@@ -92,7 +103,7 @@ class IngestIfNotExist(BaseModel):
     value: str
 
 
-class ReportLevel(str, Enum):
+class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
 
     FAILURES_ONLY = "failuresOnly"
@@ -100,7 +111,7 @@ class ReportLevel(str, Enum):
     FAILURES_AND_SUCCESSES = "failuresAndSuccesses"
 
 
-class ReportMethod(str, Enum):
+class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Target of the ingestion status reporting. Defaults to Queue."""
 
     QUEUE = "queue"
@@ -173,28 +184,32 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputAzureDataExplorerCompressCompression(str, Enum):
+class OutputAzureDataExplorerCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureDataExplorerPqCompressCompression(str, Enum):
+class OutputAzureDataExplorerPqCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureDataExplorerQueueFullBehavior(str, Enum):
+class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureDataExplorerMode(str, Enum):
+class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -384,17 +399,24 @@ class OutputAzureDataExplorer(BaseModel):
     r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
 
     ingest_mode: Annotated[
-        Optional[IngestionMode], pydantic.Field(alias="ingestMode")
+        Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="ingestMode"),
     ] = IngestionMode.BATCHING
 
     oauth_endpoint: Annotated[
-        Optional[MicrosoftEntraIDAuthenticationEndpoint],
+        Annotated[
+            Optional[MicrosoftEntraIDAuthenticationEndpoint],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="oauthEndpoint"),
     ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     oauth_type: Annotated[
-        Optional[OutputAzureDataExplorerAuthenticationMethod],
+        Annotated[
+            Optional[OutputAzureDataExplorerAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="oauthType"),
     ] = OutputAzureDataExplorerAuthenticationMethod.CLIENT_SECRET
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -413,7 +435,10 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
 
     on_backpressure: Annotated[
-        Optional[OutputAzureDataExplorerBackpressureBehavior],
+        Annotated[
+            Optional[OutputAzureDataExplorerBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -424,7 +449,11 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
 
     format_: Annotated[
-        Optional[OutputAzureDataExplorerDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputAzureDataExplorerDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputAzureDataExplorerDataFormat.JSON
     r"""Format of the output data"""
 
@@ -464,7 +493,10 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Maximum number of parts to upload in parallel per file"""
 
     on_disk_full_backpressure: Annotated[
-        Optional[OutputAzureDataExplorerDiskSpaceProtection],
+        Annotated[
+            Optional[OutputAzureDataExplorerDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureDataExplorerDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -508,12 +540,14 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""
 
     report_level: Annotated[
-        Optional[ReportLevel], pydantic.Field(alias="reportLevel")
+        Annotated[Optional[ReportLevel], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="reportLevel"),
     ] = ReportLevel.FAILURES_ONLY
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
 
     report_method: Annotated[
-        Optional[ReportMethod], pydantic.Field(alias="reportMethod")
+        Annotated[Optional[ReportMethod], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="reportMethod"),
     ] = ReportMethod.QUEUE
     r"""Target of the ingestion status reporting. Defaults to Queue."""
 
@@ -538,9 +572,10 @@ class OutputAzureDataExplorer(BaseModel):
     ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
-    compress: Optional[
-        OutputAzureDataExplorerCompressCompression
-    ] = OutputAzureDataExplorerCompressCompression.GZIP
+    compress: Annotated[
+        Optional[OutputAzureDataExplorerCompressCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputAzureDataExplorerCompressCompression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
@@ -594,19 +629,29 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputAzureDataExplorerPqCompressCompression],
+        Annotated[
+            Optional[OutputAzureDataExplorerPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputAzureDataExplorerPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputAzureDataExplorerQueueFullBehavior],
+        Annotated[
+            Optional[OutputAzureDataExplorerQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputAzureDataExplorerMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputAzureDataExplorerMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputAzureDataExplorerMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
```
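
The companion field-level change nests a second `Annotated` layer carrying `PlainValidator(validate_open_enum(...))` inside the existing `pydantic.Field` annotation, so deserialization runs through the open-enum validator instead of pydantic's strict enum check. Below is a self-contained sketch of the resulting behavior; `IngestionMode` and `validate_open_enum` here are simplified stand-ins (the real helper presumably resolves the target enum generically, while this one hardcodes it for brevity):

```python
from enum import Enum
from typing import Any, Callable, Optional

import pydantic
from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class IngestionMode(str, Enum):
    BATCHING = "batching"
    STREAMING = "streaming"


def validate_open_enum(is_int: bool) -> Callable[[Any], Any]:
    # Stand-in for cribl_control_plane.utils.validate_open_enum. The flag
    # mirrors the generated code, where only int-backed enums pass True.
    def validate(value: Any) -> Any:
        raw = int(value) if is_int else value
        try:
            return IngestionMode(raw)  # hardcoded enum; the SDK's helper is generic
        except ValueError:
            return raw  # unknown value passes through instead of failing validation
    return validate


class Sketch(BaseModel):
    # Same annotation shape as the generated ingest_mode field above.
    ingest_mode: Annotated[
        Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="ingestMode"),
    ] = IngestionMode.BATCHING


print(repr(Sketch.model_validate({"ingestMode": "streaming"}).ingest_mode))
# <IngestionMode.STREAMING: 'streaming'>
print(repr(Sketch.model_validate({"ingestMode": "bulk"}).ingest_mode))
# 'bulk' -- kept as a plain string rather than raising a ValidationError
```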
```diff
--- a/cribl_control_plane/models/outputazureeventhub.py
+++ b/cribl_control_plane/models/outputazureeventhub.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputAzureEventhubType(str, Enum):
     AZURE_EVENTHUB = "azure_eventhub"
 
 
-class OutputAzureEventhubAcknowledgments(int, Enum):
+class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments"""
 
     ONE = 1
@@ -20,14 +23,14 @@ class OutputAzureEventhubAcknowledgments(int, Enum):
     MINUS_1 = -1
 
 
-class OutputAzureEventhubRecordDataFormat(str, Enum):
+class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
 
     JSON = "json"
     RAW = "raw"
 
 
-class OutputAzureEventhubSASLMechanism(str, Enum):
+class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     OAUTHBEARER = "oauthbearer"
 
@@ -44,9 +47,10 @@ class OutputAzureEventhubAuthentication(BaseModel):
 
     disabled: Optional[bool] = False
 
-    mechanism: Optional[
-        OutputAzureEventhubSASLMechanism
-    ] = OutputAzureEventhubSASLMechanism.PLAIN
+    mechanism: Annotated[
+        Optional[OutputAzureEventhubSASLMechanism],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputAzureEventhubSASLMechanism.PLAIN
 
 
 class OutputAzureEventhubTLSSettingsClientSideTypedDict(TypedDict):
@@ -64,7 +68,7 @@ class OutputAzureEventhubTLSSettingsClientSide(BaseModel):
     r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""
 
 
-class OutputAzureEventhubBackpressureBehavior(str, Enum):
+class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -72,21 +76,21 @@ class OutputAzureEventhubBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputAzureEventhubCompression(str, Enum):
+class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureEventhubQueueFullBehavior(str, Enum):
+class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureEventhubMode(str, Enum):
+class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -191,13 +195,18 @@ class OutputAzureEventhub(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    ack: Optional[
-        OutputAzureEventhubAcknowledgments
-    ] = OutputAzureEventhubAcknowledgments.ONE
+    ack: Annotated[
+        Optional[OutputAzureEventhubAcknowledgments],
+        PlainValidator(validate_open_enum(True)),
+    ] = OutputAzureEventhubAcknowledgments.ONE
     r"""Control the number of required acknowledgments"""
 
     format_: Annotated[
-        Optional[OutputAzureEventhubRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputAzureEventhubRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputAzureEventhubRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
 
@@ -256,7 +265,10 @@ class OutputAzureEventhub(BaseModel):
     tls: Optional[OutputAzureEventhubTLSSettingsClientSide] = None
 
     on_backpressure: Annotated[
-        Optional[OutputAzureEventhubBackpressureBehavior],
+        Annotated[
+            Optional[OutputAzureEventhubBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureEventhubBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -277,18 +289,28 @@ class OutputAzureEventhub(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputAzureEventhubCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputAzureEventhubCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputAzureEventhubCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputAzureEventhubQueueFullBehavior],
+        Annotated[
+            Optional[OutputAzureEventhubQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureEventhubQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputAzureEventhubMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputAzureEventhubMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
```
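
One detail specific to the Event Hubs file: `OutputAzureEventhubAcknowledgments` is the only `int`-backed enum here, and its `ack` field is correspondingly the only one that calls `validate_open_enum(True)` rather than `validate_open_enum(False)`. A plausible reading of the flag (an assumption; the SDK's utils source would confirm) is that it requests integer coercion before the member lookup, roughly `raw = int(value) if is_int else value`, so that `1` and `"1"` both resolve to `OutputAzureEventhubAcknowledgments.ONE` while an unrecognized acknowledgment count still passes through as a plain `int`.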
```diff
--- a/cribl_control_plane/models/outputazurelogs.py
+++ b/cribl_control_plane/models/outputazurelogs.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,7 +26,7 @@ class OutputAzureLogsExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputAzureLogsFailedRequestLoggingMode(str, Enum):
+class OutputAzureLogsFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -85,7 +88,7 @@ class OutputAzureLogsTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputAzureLogsBackpressureBehavior(str, Enum):
+class OutputAzureLogsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -93,28 +96,28 @@ class OutputAzureLogsBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputAzureLogsAuthenticationMethod(str, Enum):
+class OutputAzureLogsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputAzureLogsCompression(str, Enum):
+class OutputAzureLogsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureLogsQueueFullBehavior(str, Enum):
+class OutputAzureLogsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureLogsMode(str, Enum):
+class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -274,7 +277,10 @@ class OutputAzureLogs(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputAzureLogsFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputAzureLogsFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputAzureLogsFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -306,13 +312,20 @@ class OutputAzureLogs(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputAzureLogsBackpressureBehavior],
+        Annotated[
+            Optional[OutputAzureLogsBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureLogsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputAzureLogsAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputAzureLogsAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputAzureLogsAuthenticationMethod.MANUAL
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""
 
@@ -332,18 +345,28 @@ class OutputAzureLogs(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputAzureLogsCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputAzureLogsCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputAzureLogsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputAzureLogsQueueFullBehavior],
+        Annotated[
+            Optional[OutputAzureLogsQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureLogsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputAzureLogsMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputAzureLogsMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
    ] = OutputAzureLogsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
```
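
For consuming code, the practical effect across all of these models is that an enum-typed field may now hold a plain `str` (or `int`) whenever the control plane returns a value this SDK version does not define, instead of failing validation. Because the generated enums mix in `str`/`int`, comparisons against known members keep working either way; a hedged usage sketch (`describe_pq_mode` is an illustrative helper, not part of the SDK, and the import assumes the re-export pattern of models/__init__.py):

```python
from cribl_control_plane.models import OutputAzureLogsMode


def describe_pq_mode(mode: object) -> str:
    """Handle both known members and raw values passed through by open enums."""
    if mode == OutputAzureLogsMode.ERROR:  # str-mixin members compare equal to "error"
        return "PQ writes to disk when the Destination is unavailable"
    if isinstance(mode, OutputAzureLogsMode):
        return f"known mode: {mode.value}"
    return f"unrecognized mode (newer server?): {mode!r}"  # raw pass-through value
```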