cribl-control-plane 0.0.50rc1__py3-none-any.whl → 0.0.51__py3-none-any.whl
This diff compares publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
This version of cribl-control-plane has been flagged as potentially problematic.
- cribl_control_plane/_version.py +6 -4
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/health.py +2 -6
- cribl_control_plane/httpclient.py +0 -1
- cribl_control_plane/models/__init__.py +4 -21
- cribl_control_plane/models/appmode.py +1 -2
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +2 -7
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/getversionshowop.py +5 -6
- cribl_control_plane/models/gitinfo.py +3 -14
- cribl_control_plane/models/hbcriblinfo.py +1 -11
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +20 -47
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +16 -46
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
- cribl_control_plane/models/inputmsk.py +18 -53
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeprovidedinfo.py +1 -4
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +32 -77
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +28 -74
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +30 -82
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/packinfo.py +0 -3
- cribl_control_plane/models/packinstallinfo.py +0 -3
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
- cribl_control_plane/models/workertypes.py +1 -2
- {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/METADATA +14 -5
- cribl_control_plane-0.0.51.dist-info/RECORD +325 -0
- cribl_control_plane/models/error.py +0 -16
- cribl_control_plane/models/gethealthinfoop.py +0 -17
- cribl_control_plane/models/gitshowresult.py +0 -19
- cribl_control_plane-0.0.50rc1.dist-info/RECORD +0 -328
- {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/WHEEL +0 -0

cribl_control_plane/models/nodeactiveupgradestatus.py

@@ -1,11 +1,10 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from enum import Enum
 
 
-class NodeActiveUpgradeStatus(int, Enum, metaclass=utils.OpenEnumMeta):
+class NodeActiveUpgradeStatus(int, Enum):
     ZERO = 0
     ONE = 1
     TWO = 2
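
The change above repeats across most of the model files listed: 0.0.51 drops the `utils.OpenEnumMeta` metaclass (and, in the model classes below, the paired `validate_open_enum` validators), turning Speakeasy's open enums into plain standard-library enums. A minimal sketch of the visible consequence, stdlib only; that the removed metaclass previously passed undeclared values through is an assumption inferred from its name and from the validator removals that accompany it:

```python
from enum import Enum


class NodeActiveUpgradeStatus(int, Enum):
    ZERO = 0
    ONE = 1
    TWO = 2


# As a plain closed enum (the 0.0.51 shape), values outside the
# declared members are rejected at conversion time:
try:
    NodeActiveUpgradeStatus(7)
except ValueError as exc:
    print(exc)  # 7 is not a valid NodeActiveUpgradeStatus
```

If the old metaclass did tolerate unknown values, code consuming responses from newer control-plane versions would now fail at parse time instead of carrying the raw value along.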

cribl_control_plane/models/nodefailedupgradestatus.py

@@ -1,10 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from enum import Enum
 
 
-class NodeFailedUpgradeStatus(int, Enum, metaclass=utils.OpenEnumMeta):
+class NodeFailedUpgradeStatus(int, Enum):
     ZERO = 0
     ONE = 1

cribl_control_plane/models/nodeprovidedinfo.py

@@ -5,7 +5,7 @@ from .hbcriblinfo import HBCriblInfo, HBCriblInfoTypedDict
 from .heartbeatmetadata import HeartbeatMetadata, HeartbeatMetadataTypedDict
 from cribl_control_plane.types import BaseModel
 import pydantic
-from typing import Dict, List, Optional, Union
+from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
@@ -125,7 +125,6 @@ class NodeProvidedInfoTypedDict(TypedDict):
     architecture: str
     cpus: float
     cribl: HBCriblInfoTypedDict
-    env: Dict[str, str]
     free_disk_space: float
     hostname: str
     node: str
@@ -150,8 +149,6 @@ class NodeProvidedInfo(BaseModel):
 
     cribl: HBCriblInfo
 
-    env: Dict[str, str]
-
     free_disk_space: Annotated[float, pydantic.Field(alias="freeDiskSpace")]
 
     hostname: str

cribl_control_plane/models/nodeskippedupgradestatus.py

@@ -1,11 +1,10 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from enum import Enum
 
 
-class NodeSkippedUpgradeStatus(int, Enum, metaclass=utils.OpenEnumMeta):
+class NodeSkippedUpgradeStatus(int, Enum):
     ZERO = 0
     ONE = 1
     TWO = 2

cribl_control_plane/models/nodeupgradestate.py

@@ -1,11 +1,10 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from enum import Enum
 
 
-class NodeUpgradeState(int, Enum, metaclass=utils.OpenEnumMeta):
+class NodeUpgradeState(int, Enum):
     ZERO = 0
     ONE = 1
     TWO = 2

cribl_control_plane/models/nodeupgradestatus.py

@@ -6,10 +6,8 @@ from .nodefailedupgradestatus import NodeFailedUpgradeStatus
 from .nodeskippedupgradestatus import NodeSkippedUpgradeStatus
 from .nodeupgradestate import NodeUpgradeState
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
-from pydantic.functional_validators import PlainValidator
 from typing import Optional
-from typing_extensions import Annotated, NotRequired, TypedDict
+from typing_extensions import NotRequired, TypedDict
 
 
 class NodeUpgradeStatusTypedDict(TypedDict):
@@ -21,18 +19,12 @@ class NodeUpgradeStatusTypedDict(TypedDict):
 
 
 class NodeUpgradeStatus(BaseModel):
-    state: Annotated[NodeUpgradeState, PlainValidator(validate_open_enum(True))]
+    state: NodeUpgradeState
 
     timestamp: float
 
-    active: Annotated[
-        Optional[NodeActiveUpgradeStatus], PlainValidator(validate_open_enum(True))
-    ] = None
+    active: Optional[NodeActiveUpgradeStatus] = None
 
-    failed: Annotated[
-        Optional[NodeFailedUpgradeStatus], PlainValidator(validate_open_enum(True))
-    ] = None
+    failed: Optional[NodeFailedUpgradeStatus] = None
 
-    skipped: Annotated[
-        Optional[NodeSkippedUpgradeStatus], PlainValidator(validate_open_enum(True))
-    ] = None
+    skipped: Optional[NodeSkippedUpgradeStatus] = None
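
In the status model itself, the `PlainValidator(validate_open_enum(True))` wrappers disappear along with their imports, so these fields fall back to Pydantic's default, strict enum validation. A hedged sketch of that behavior, using vanilla pydantic v2 in place of `cribl_control_plane.types.BaseModel` (assumed to be a pydantic model) and one enum standing in for the three status enums:

```python
from enum import Enum
from typing import Optional

from pydantic import BaseModel, ValidationError


class NodeUpgradeState(int, Enum):
    ZERO = 0
    ONE = 1
    TWO = 2


class NodeUpgradeStatus(BaseModel):
    state: NodeUpgradeState
    timestamp: float
    # Stand-in for the active/failed/skipped status enums in the real model:
    active: Optional[NodeUpgradeState] = None


print(NodeUpgradeStatus(state=1, timestamp=0.0).state)  # NodeUpgradeState.ONE

try:
    NodeUpgradeStatus(state=9, timestamp=0.0)  # out of range; previously open
except ValidationError:
    print("unknown state now rejected")
```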

cribl_control_plane/models/outputazureblob.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputAzureBlobType(str, Enum):
     AZURE_BLOB = "azure_blob"
 
 
-class OutputAzureBlobDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobDataFormat(str, Enum):
     r"""Format of the output data"""
 
     JSON = "json"
@@ -23,28 +20,28 @@ class OutputAzureBlobDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET = "parquet"
 
 
-class OutputAzureBlobBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureBlobDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureBlobAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobAuthenticationMethod(str, Enum):
     MANUAL = "manual"
     SECRET = "secret"
     CLIENT_SECRET = "clientSecret"
     CLIENT_CERT = "clientCert"
 
 
-class BlobAccessTier(str, Enum, metaclass=utils.OpenEnumMeta):
+class BlobAccessTier(str, Enum):
     INFERRED = "Inferred"
     HOT = "Hot"
     COOL = "Cool"
@@ -52,14 +49,14 @@ class BlobAccessTier(str, Enum, metaclass=utils.OpenEnumMeta):
     ARCHIVE = "Archive"
 
 
-class OutputAzureBlobCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobCompression(str, Enum):
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureBlobCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobCompressionLevel(str, Enum):
     r"""Compression level to apply before moving files to final destination"""
 
     BEST_SPEED = "best_speed"
@@ -67,7 +64,7 @@ class OutputAzureBlobCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     BEST_COMPRESSION = "best_compression"
 
 
-class OutputAzureBlobParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobParquetVersion(str, Enum):
     r"""Determines which data types are supported and how they are represented"""
 
     PARQUET_1_0 = "PARQUET_1_0"
@@ -75,7 +72,7 @@ class OutputAzureBlobParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET_2_6 = "PARQUET_2_6"
 
 
-class OutputAzureBlobDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobDataPageVersion(str, Enum):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
 
     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -264,11 +261,7 @@ class OutputAzureBlob(BaseModel):
     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputAzureBlobDataFormat], pydantic.Field(alias="format")
     ] = OutputAzureBlobDataFormat.JSON
     r"""Format of the output data"""
 
@@ -311,10 +304,7 @@ class OutputAzureBlob(BaseModel):
     r"""Buffer size used to write to a file"""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureBlobBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureBlobBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -325,39 +315,26 @@ class OutputAzureBlob(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
 
     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureBlobDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureBlobDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputAzureBlobAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputAzureBlobAuthenticationMethod.MANUAL
 
     storage_class: Annotated[
-        Annotated[Optional[BlobAccessTier], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="storageClass"),
+        Optional[BlobAccessTier], pydantic.Field(alias="storageClass")
     ] = BlobAccessTier.INFERRED
 
     description: Optional[str] = None
 
-    compress: Annotated[
-        Optional[OutputAzureBlobCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputAzureBlobCompression.GZIP
+    compress: Optional[OutputAzureBlobCompression] = OutputAzureBlobCompression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     compression_level: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobCompressionLevel],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureBlobCompressionLevel],
         pydantic.Field(alias="compressionLevel"),
     ] = OutputAzureBlobCompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""
@@ -368,19 +345,12 @@ class OutputAzureBlob(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
 
     parquet_version: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobParquetVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="parquetVersion"),
+        Optional[OutputAzureBlobParquetVersion], pydantic.Field(alias="parquetVersion")
    ] = OutputAzureBlobParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""
 
     parquet_data_page_version: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobDataPageVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureBlobDataPageVersion],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputAzureBlobDataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
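
The Destination models keep their camelCase alias mappings but lose the inner `Annotated[..., PlainValidator(validate_open_enum(False))]` layer, leaving a single `Annotated[Optional[...], pydantic.Field(alias=...)]` per field. A self-contained sketch of the simplified pattern, again substituting vanilla pydantic v2 for the package's own BaseModel and using only the enum members visible in this diff:

```python
from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field, ValidationError
from typing_extensions import Annotated


class OutputAzureBlobDataFormat(str, Enum):
    JSON = "json"
    PARQUET = "parquet"


class OutputAzureBlob(BaseModel):
    # Alias mapping survives; the open-enum validator layer is gone.
    format_: Annotated[
        Optional[OutputAzureBlobDataFormat], Field(alias="format")
    ] = OutputAzureBlobDataFormat.JSON


print(OutputAzureBlob.model_validate({"format": "parquet"}).format_)

try:
    OutputAzureBlob.model_validate({"format": "avro"})  # not a declared member
except ValidationError:
    print("unknown format now rejected")
```

The same simplification repeats in outputazuredataexplorer.py below and, judging by the per-file line counts above, in the rest of the input/output models.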

cribl_control_plane/models/outputazuredataexplorer.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,12 +12,12 @@ class OutputAzureDataExplorerType(str, Enum):
     AZURE_DATA_EXPLORER = "azure_data_explorer"
 
 
-class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class IngestionMode(str, Enum):
     BATCHING = "batching"
     STREAMING = "streaming"
 
 
-class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
+class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -28,9 +25,7 @@ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
     HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"
 
 
-class OutputAzureDataExplorerAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerAuthenticationMethod(str, Enum):
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
 
     CLIENT_SECRET = "clientSecret"
@@ -50,9 +45,7 @@ class OutputAzureDataExplorerCertificate(BaseModel):
     r"""The certificate you registered as credentials for your app in the Azure portal"""
 
 
-class OutputAzureDataExplorerBackpressureBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -60,7 +53,7 @@ class OutputAzureDataExplorerBackpressureBehavior(
     QUEUE = "queue"
 
 
-class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerDataFormat(str, Enum):
     r"""Format of the output data"""
 
     JSON = "json"
@@ -68,16 +61,14 @@ class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET = "parquet"
 
 
-class OutputAzureDataExplorerDiskSpaceProtection(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
+class PrefixOptional(str, Enum):
     DROP_BY = "dropBy"
     INGEST_BY = "ingestBy"
 
@@ -90,9 +81,7 @@ class ExtentTagTypedDict(TypedDict):
 class ExtentTag(BaseModel):
     value: str
 
-    prefix: Annotated[
-        Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
-    ] = None
+    prefix: Optional[PrefixOptional] = None
 
 
 class IngestIfNotExistTypedDict(TypedDict):
@@ -103,7 +92,7 @@ class IngestIfNotExist(BaseModel):
     value: str
 
 
-class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+class ReportLevel(str, Enum):
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
 
     FAILURES_ONLY = "failuresOnly"
@@ -111,7 +100,7 @@ class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     FAILURES_AND_SUCCESSES = "failuresAndSuccesses"
 
 
-class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class ReportMethod(str, Enum):
     r"""Target of the ingestion status reporting. Defaults to Queue."""
 
     QUEUE = "queue"
@@ -184,32 +173,28 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputAzureDataExplorerCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerCompressCompression(str, Enum):
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureDataExplorerPqCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -399,24 +384,17 @@ class OutputAzureDataExplorer(BaseModel):
     r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
 
     ingest_mode: Annotated[
-        Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="ingestMode"),
+        Optional[IngestionMode], pydantic.Field(alias="ingestMode")
     ] = IngestionMode.BATCHING
 
     oauth_endpoint: Annotated[
-        Annotated[
-            Optional[MicrosoftEntraIDAuthenticationEndpoint],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[MicrosoftEntraIDAuthenticationEndpoint],
         pydantic.Field(alias="oauthEndpoint"),
     ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     oauth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerAuthenticationMethod],
         pydantic.Field(alias="oauthType"),
     ] = OutputAzureDataExplorerAuthenticationMethod.CLIENT_SECRET
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -435,10 +413,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -449,11 +424,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputAzureDataExplorerDataFormat], pydantic.Field(alias="format")
     ] = OutputAzureDataExplorerDataFormat.JSON
     r"""Format of the output data"""
 
@@ -493,10 +464,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Maximum number of parts to upload in parallel per file"""
 
     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureDataExplorerDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -540,14 +508,12 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""
 
     report_level: Annotated[
-        Annotated[Optional[ReportLevel], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="reportLevel"),
+        Optional[ReportLevel], pydantic.Field(alias="reportLevel")
     ] = ReportLevel.FAILURES_ONLY
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
 
     report_method: Annotated[
-        Annotated[Optional[ReportMethod], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="reportMethod"),
+        Optional[ReportMethod], pydantic.Field(alias="reportMethod")
     ] = ReportMethod.QUEUE
     r"""Target of the ingestion status reporting. Defaults to Queue."""
 
@@ -572,10 +538,9 @@ class OutputAzureDataExplorer(BaseModel):
     ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
-    compress: Annotated[
-        Optional[OutputAzureDataExplorerCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureDataExplorerCompressCompression.GZIP
+    compress: Optional[OutputAzureDataExplorerCompressCompression] = (
+        OutputAzureDataExplorerCompressCompression.GZIP
+    )
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
@@ -629,29 +594,19 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputAzureDataExplorerPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputAzureDataExplorerMode], pydantic.Field(alias="pqMode")
     ] = OutputAzureDataExplorerMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""