cribl-control-plane 0.0.49__py3-none-any.whl → 0.1.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane has been flagged as potentially problematic.
- cribl_control_plane/_version.py +4 -6
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/health.py +6 -2
- cribl_control_plane/models/__init__.py +18 -3
- cribl_control_plane/models/appmode.py +2 -1
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/getversionshowop.py +6 -5
- cribl_control_plane/models/gitshowresult.py +19 -0
- cribl_control_plane/models/hbcriblinfo.py +6 -1
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +47 -20
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +46 -16
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +53 -18
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +77 -32
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +74 -28
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +82 -30
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0a1.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0a1.dist-info}/RECORD +166 -163
- {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0a1.dist-info}/WHEEL +0 -0
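The bulk of this release is one mechanical change repeated across the model files listed above: generated enum classes gain `metaclass=utils.OpenEnumMeta`, and enum-typed model fields are wrapped in `Annotated[..., PlainValidator(validate_open_enum(...))]` so that values outside the generated enum no longer fail validation. In the hunks below, `validate_open_enum(True)` appears on int-valued enums and `validate_open_enum(False)` on string-valued ones, which suggests the boolean selects integer versus string handling; treat that reading as an inference from the diff rather than documented behavior.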
cribl_control_plane/models/nodeskippedupgradestatus.py

@@ -1,10 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from enum import Enum


-class NodeSkippedUpgradeStatus(int, Enum):
+class NodeSkippedUpgradeStatus(int, Enum, metaclass=utils.OpenEnumMeta):
     ZERO = 0
     ONE = 1
     TWO = 2
cribl_control_plane/models/nodeupgradestate.py

@@ -1,10 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from enum import Enum


-class NodeUpgradeState(int, Enum):
+class NodeUpgradeState(int, Enum, metaclass=utils.OpenEnumMeta):
     ZERO = 0
     ONE = 1
     TWO = 2
cribl_control_plane/models/nodeupgradestatus.py

@@ -6,8 +6,10 @@ from .nodefailedupgradestatus import NodeFailedUpgradeStatus
 from .nodeskippedupgradestatus import NodeSkippedUpgradeStatus
 from .nodeupgradestate import NodeUpgradeState
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
 from typing import Optional
-from typing_extensions import NotRequired, TypedDict
+from typing_extensions import Annotated, NotRequired, TypedDict


 class NodeUpgradeStatusTypedDict(TypedDict):
@@ -19,12 +21,18 @@ class NodeUpgradeStatusTypedDict(TypedDict):


 class NodeUpgradeStatus(BaseModel):
-    state: NodeUpgradeState
+    state: Annotated[NodeUpgradeState, PlainValidator(validate_open_enum(True))]

     timestamp: float

-    active: Optional[NodeActiveUpgradeStatus] = None
+    active: Annotated[
+        Optional[NodeActiveUpgradeStatus], PlainValidator(validate_open_enum(True))
+    ] = None

-    failed: Optional[NodeFailedUpgradeStatus] = None
+    failed: Annotated[
+        Optional[NodeFailedUpgradeStatus], PlainValidator(validate_open_enum(True))
+    ] = None

-    skipped: Optional[NodeSkippedUpgradeStatus] = None
+    skipped: Annotated[
+        Optional[NodeSkippedUpgradeStatus], PlainValidator(validate_open_enum(True))
+    ] = None
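`OpenEnumMeta` and `validate_open_enum` are internal to the generated SDK, and their implementation isn't shown in this diff. The following is a minimal sketch of the open-enum idea they appear to implement — lookups of unknown values fall back to the raw value instead of raising — with all names hypothetical:

# Minimal sketch of the open-enum pattern (hypothetical names; not the SDK's code).
# An EnumMeta subclass whose call falls back to the raw value for unknown members,
# so API responses carrying new enum values no longer raise ValueError.
from enum import Enum, EnumMeta


class OpenEnumMetaSketch(EnumMeta):
    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # unknown member: pass the raw value through


class UpgradeState(int, Enum, metaclass=OpenEnumMetaSketch):
    ZERO = 0
    ONE = 1
    TWO = 2


print(UpgradeState(1))  # UpgradeState.ONE -- known member resolves normally
print(UpgradeState(7))  # 7 -- unknown value survives instead of raising

On the model side, `PlainValidator(validate_open_enum(True))` presumably applies the analogous lenient conversion during pydantic validation, so a field like `state` can carry an integer the SDK doesn't yet know about.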
cribl_control_plane/models/outputazureblob.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputAzureBlobType(str, Enum):
     AZURE_BLOB = "azure_blob"


-class OutputAzureBlobDataFormat(str, Enum):
+class OutputAzureBlobDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""

     JSON = "json"
@@ -20,28 +23,28 @@ class OutputAzureBlobDataFormat(str, Enum):
     PARQUET = "parquet"


-class OutputAzureBlobBackpressureBehavior(str, Enum):
+class OutputAzureBlobBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureBlobDiskSpaceProtection(str, Enum):
+class OutputAzureBlobDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureBlobAuthenticationMethod(str, Enum):
+class OutputAzureBlobAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     MANUAL = "manual"
     SECRET = "secret"
     CLIENT_SECRET = "clientSecret"
     CLIENT_CERT = "clientCert"


-class BlobAccessTier(str, Enum):
+class BlobAccessTier(str, Enum, metaclass=utils.OpenEnumMeta):
     INFERRED = "Inferred"
     HOT = "Hot"
     COOL = "Cool"
@@ -49,14 +52,14 @@ class BlobAccessTier(str, Enum):
     ARCHIVE = "Archive"


-class OutputAzureBlobCompression(str, Enum):
+class OutputAzureBlobCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data compression format to apply to HTTP content before it is delivered"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureBlobCompressionLevel(str, Enum):
+class OutputAzureBlobCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""

     BEST_SPEED = "best_speed"
@@ -64,7 +67,7 @@ class OutputAzureBlobCompressionLevel(str, Enum):
     BEST_COMPRESSION = "best_compression"


-class OutputAzureBlobParquetVersion(str, Enum):
+class OutputAzureBlobParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""

     PARQUET_1_0 = "PARQUET_1_0"
@@ -72,7 +75,7 @@ class OutputAzureBlobParquetVersion(str, Enum):
     PARQUET_2_6 = "PARQUET_2_6"


-class OutputAzureBlobDataPageVersion(str, Enum):
+class OutputAzureBlobDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -261,7 +264,11 @@ class OutputAzureBlob(BaseModel):
     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""

     format_: Annotated[
-        Optional[OutputAzureBlobDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputAzureBlobDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputAzureBlobDataFormat.JSON
     r"""Format of the output data"""

@@ -304,7 +311,10 @@ class OutputAzureBlob(BaseModel):
     r"""Buffer size used to write to a file"""

     on_backpressure: Annotated[
-        Optional[OutputAzureBlobBackpressureBehavior],
+        Annotated[
+            Optional[OutputAzureBlobBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureBlobBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -315,26 +325,39 @@ class OutputAzureBlob(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Optional[OutputAzureBlobDiskSpaceProtection],
+        Annotated[
+            Optional[OutputAzureBlobDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureBlobDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     auth_type: Annotated[
-        Optional[OutputAzureBlobAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputAzureBlobAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputAzureBlobAuthenticationMethod.MANUAL

     storage_class: Annotated[
-        Optional[BlobAccessTier], pydantic.Field(alias="storageClass")
+        Annotated[Optional[BlobAccessTier], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="storageClass"),
     ] = BlobAccessTier.INFERRED

     description: Optional[str] = None

-    compress: Optional[OutputAzureBlobCompression] = OutputAzureBlobCompression.GZIP
+    compress: Annotated[
+        Optional[OutputAzureBlobCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputAzureBlobCompression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""

     compression_level: Annotated[
-        Optional[OutputAzureBlobCompressionLevel],
+        Annotated[
+            Optional[OutputAzureBlobCompressionLevel],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="compressionLevel"),
     ] = OutputAzureBlobCompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""
@@ -345,12 +368,19 @@ class OutputAzureBlob(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""

     parquet_version: Annotated[
-        Optional[OutputAzureBlobParquetVersion], pydantic.Field(alias="parquetVersion")
+        Annotated[
+            Optional[OutputAzureBlobParquetVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="parquetVersion"),
     ] = OutputAzureBlobParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""

     parquet_data_page_version: Annotated[
-        Optional[OutputAzureBlobDataPageVersion],
+        Annotated[
+            Optional[OutputAzureBlobDataPageVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputAzureBlobDataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
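The recurring field-level change above nests two `Annotated` layers: the inner one attaches the lenient validator, the outer one keeps the wire-name alias. Below is a self-contained pydantic v2 sketch of that shape, with a stand-in validator since `validate_open_enum` itself isn't shown in this diff; all names are illustrative:

# Sketch of the nested Annotated pattern used by the generated models.
# `lenient` stands in for validate_open_enum(False).
from enum import Enum
from typing import Optional

import pydantic
from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class DataFormat(str, Enum):
    JSON = "json"
    RAW = "raw"


def lenient(value):
    """Accept known members, but let unknown strings through unchanged."""
    try:
        return DataFormat(value)
    except ValueError:
        return value


class BlobConfig(BaseModel):
    format_: Annotated[
        Annotated[Optional[DataFormat], PlainValidator(lenient)],  # inner: validator
        pydantic.Field(alias="format"),                            # outer: wire alias
    ] = DataFormat.JSON


print(BlobConfig.model_validate({"format": "json"}).format_)      # DataFormat.JSON
print(BlobConfig.model_validate({"format": "parquet2"}).format_)  # 'parquet2'

Because `PlainValidator` replaces the default enum validation entirely, the parsed field can hold either an enum member or a plain string; callers comparing against enum members should account for both.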
cribl_control_plane/models/outputazuredataexplorer.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,12 +15,12 @@ class OutputAzureDataExplorerType(str, Enum):
     AZURE_DATA_EXPLORER = "azure_data_explorer"


-class IngestionMode(str, Enum):
+class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
     BATCHING = "batching"
     STREAMING = "streaming"


-class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
+class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Endpoint used to acquire authentication tokens from Azure"""

     HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -25,7 +28,9 @@ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
     HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"


-class OutputAzureDataExplorerAuthenticationMethod(str, Enum):
+class OutputAzureDataExplorerAuthenticationMethod(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""The type of OAuth 2.0 client credentials grant flow to use"""

     CLIENT_SECRET = "clientSecret"
@@ -45,7 +50,9 @@ class OutputAzureDataExplorerCertificate(BaseModel):
     r"""The certificate you registered as credentials for your app in the Azure portal"""


-class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
+class OutputAzureDataExplorerBackpressureBehavior(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -53,7 +60,7 @@ class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputAzureDataExplorerDataFormat(str, Enum):
+class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""

     JSON = "json"
@@ -61,14 +68,16 @@ class OutputAzureDataExplorerDataFormat(str, Enum):
     PARQUET = "parquet"


-class OutputAzureDataExplorerDiskSpaceProtection(str, Enum):
+class OutputAzureDataExplorerDiskSpaceProtection(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
     DROP = "drop"


-class PrefixOptional(str, Enum):
+class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP_BY = "dropBy"
     INGEST_BY = "ingestBy"

@@ -81,7 +90,9 @@ class ExtentTagTypedDict(TypedDict):
 class ExtentTag(BaseModel):
     value: str

-    prefix: Optional[PrefixOptional] = None
+    prefix: Annotated[
+        Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
+    ] = None


 class IngestIfNotExistTypedDict(TypedDict):
@@ -92,7 +103,7 @@ class IngestIfNotExist(BaseModel):
     value: str


-class ReportLevel(str, Enum):
+class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""

     FAILURES_ONLY = "failuresOnly"
@@ -100,7 +111,7 @@ class ReportLevel(str, Enum):
     FAILURES_AND_SUCCESSES = "failuresAndSuccesses"


-class ReportMethod(str, Enum):
+class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Target of the ingestion status reporting. Defaults to Queue."""

     QUEUE = "queue"
@@ -173,28 +184,32 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputAzureDataExplorerCompressCompression(str, Enum):
+class OutputAzureDataExplorerCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data compression format to apply to HTTP content before it is delivered"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureDataExplorerPqCompressCompression(str, Enum):
+class OutputAzureDataExplorerPqCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureDataExplorerQueueFullBehavior(str, Enum):
+class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureDataExplorerMode(str, Enum):
+class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -384,17 +399,24 @@ class OutputAzureDataExplorer(BaseModel):
     r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""

     ingest_mode: Annotated[
-        Optional[IngestionMode], pydantic.Field(alias="ingestMode")
+        Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="ingestMode"),
     ] = IngestionMode.BATCHING

     oauth_endpoint: Annotated[
-        Optional[MicrosoftEntraIDAuthenticationEndpoint],
+        Annotated[
+            Optional[MicrosoftEntraIDAuthenticationEndpoint],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="oauthEndpoint"),
     ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
     r"""Endpoint used to acquire authentication tokens from Azure"""

     oauth_type: Annotated[
-        Optional[OutputAzureDataExplorerAuthenticationMethod],
+        Annotated[
+            Optional[OutputAzureDataExplorerAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="oauthType"),
     ] = OutputAzureDataExplorerAuthenticationMethod.CLIENT_SECRET
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -413,7 +435,10 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""

     on_backpressure: Annotated[
-        Optional[OutputAzureDataExplorerBackpressureBehavior],
+        Annotated[
+            Optional[OutputAzureDataExplorerBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -424,7 +449,11 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""

     format_: Annotated[
-        Optional[OutputAzureDataExplorerDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputAzureDataExplorerDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputAzureDataExplorerDataFormat.JSON
     r"""Format of the output data"""

@@ -464,7 +493,10 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Maximum number of parts to upload in parallel per file"""

     on_disk_full_backpressure: Annotated[
-        Optional[OutputAzureDataExplorerDiskSpaceProtection],
+        Annotated[
+            Optional[OutputAzureDataExplorerDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureDataExplorerDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -508,12 +540,14 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""

     report_level: Annotated[
-        Optional[ReportLevel], pydantic.Field(alias="reportLevel")
+        Annotated[Optional[ReportLevel], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="reportLevel"),
     ] = ReportLevel.FAILURES_ONLY
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""

     report_method: Annotated[
-        Optional[ReportMethod], pydantic.Field(alias="reportMethod")
+        Annotated[Optional[ReportMethod], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="reportMethod"),
     ] = ReportMethod.QUEUE
     r"""Target of the ingestion status reporting. Defaults to Queue."""

@@ -538,9 +572,10 @@ class OutputAzureDataExplorer(BaseModel):
     ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

-    compress: Optional[
-        OutputAzureDataExplorerCompressCompression
-    ] = OutputAzureDataExplorerCompressCompression.GZIP
+    compress: Annotated[
+        Optional[OutputAzureDataExplorerCompressCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputAzureDataExplorerCompressCompression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""

     mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
@@ -594,19 +629,29 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputAzureDataExplorerPqCompressCompression],
+        Annotated[
+            Optional[OutputAzureDataExplorerPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputAzureDataExplorerPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputAzureDataExplorerQueueFullBehavior],
+        Annotated[
+            Optional[OutputAzureDataExplorerQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Optional[OutputAzureDataExplorerMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputAzureDataExplorerMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputAzureDataExplorerMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
cribl_control_plane/models/outputazureeventhub.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputAzureEventhubType(str, Enum):
     AZURE_EVENTHUB = "azure_eventhub"


-class OutputAzureEventhubAcknowledgments(int, Enum):
+class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments"""

     ONE = 1
@@ -20,14 +23,14 @@ class OutputAzureEventhubAcknowledgments(int, Enum):
     MINUS_1 = -1


-class OutputAzureEventhubRecordDataFormat(str, Enum):
+class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

     JSON = "json"
     RAW = "raw"


-class OutputAzureEventhubSASLMechanism(str, Enum):
+class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     OAUTHBEARER = "oauthbearer"

@@ -44,9 +47,10 @@ class OutputAzureEventhubAuthentication(BaseModel):

     disabled: Optional[bool] = False

-    mechanism: Optional[
-        OutputAzureEventhubSASLMechanism
-    ] = OutputAzureEventhubSASLMechanism.PLAIN
+    mechanism: Annotated[
+        Optional[OutputAzureEventhubSASLMechanism],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputAzureEventhubSASLMechanism.PLAIN


 class OutputAzureEventhubTLSSettingsClientSideTypedDict(TypedDict):
@@ -64,7 +68,7 @@ class OutputAzureEventhubTLSSettingsClientSide(BaseModel):
     r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""


-class OutputAzureEventhubBackpressureBehavior(str, Enum):
+class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -72,21 +76,21 @@ class OutputAzureEventhubBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputAzureEventhubCompression(str, Enum):
+class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureEventhubQueueFullBehavior(str, Enum):
+class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureEventhubMode(str, Enum):
+class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -191,13 +195,18 @@ class OutputAzureEventhub(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    ack: Optional[
-        OutputAzureEventhubAcknowledgments
-    ] = OutputAzureEventhubAcknowledgments.ONE
+    ack: Annotated[
+        Optional[OutputAzureEventhubAcknowledgments],
+        PlainValidator(validate_open_enum(True)),
+    ] = OutputAzureEventhubAcknowledgments.ONE
     r"""Control the number of required acknowledgments"""

     format_: Annotated[
-        Optional[OutputAzureEventhubRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputAzureEventhubRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputAzureEventhubRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

@@ -256,7 +265,10 @@ class OutputAzureEventhub(BaseModel):
     tls: Optional[OutputAzureEventhubTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-        Optional[OutputAzureEventhubBackpressureBehavior],
+        Annotated[
+            Optional[OutputAzureEventhubBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureEventhubBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -277,18 +289,28 @@ class OutputAzureEventhub(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputAzureEventhubCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputAzureEventhubCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputAzureEventhubCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputAzureEventhubQueueFullBehavior],
+        Annotated[
+            Optional[OutputAzureEventhubQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureEventhubQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Optional[OutputAzureEventhubMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputAzureEventhubMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""