cribl-control-plane 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/{outputs.py → destinations.py} +69 -71
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +5347 -115
- cribl_control_plane/models/createinputop.py +18216 -2
- cribl_control_plane/models/createoutputop.py +18417 -4
- cribl_control_plane/models/createoutputtestbyidop.py +2 -2
- cribl_control_plane/models/deleteoutputbyidop.py +2 -2
- cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
- cribl_control_plane/models/getoutputbyidop.py +2 -2
- cribl_control_plane/models/getoutputpqbyidop.py +2 -2
- cribl_control_plane/models/getoutputsamplesbyidop.py +2 -2
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +22 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +12 -27
- cribl_control_plane/models/inputcribllakehttp.py +14 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +12 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +12 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +33 -64
- cribl_control_plane/models/inputelastic.py +18 -44
- cribl_control_plane/models/inputeventhub.py +10 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +12 -27
- cribl_control_plane/models/inputgooglepubsub.py +10 -23
- cribl_control_plane/models/inputgrafana_union.py +39 -81
- cribl_control_plane/models/inputhttp.py +12 -27
- cribl_control_plane/models/inputhttpraw.py +12 -27
- cribl_control_plane/models/inputjournalfiles.py +8 -16
- cribl_control_plane/models/inputkafka.py +18 -45
- cribl_control_plane/models/inputkinesis.py +18 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +14 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +14 -33
- cribl_control_plane/models/inputmsk.py +20 -52
- cribl_control_plane/models/inputnetflow.py +8 -15
- cribl_control_plane/models/inputoffice365mgmt.py +18 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +20 -41
- cribl_control_plane/models/inputoffice365service.py +20 -41
- cribl_control_plane/models/inputopentelemetry.py +20 -42
- cribl_control_plane/models/inputprometheus.py +22 -54
- cribl_control_plane/models/inputprometheusrw.py +14 -34
- cribl_control_plane/models/inputrawudp.py +8 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +10 -20
- cribl_control_plane/models/inputsplunk.py +16 -37
- cribl_control_plane/models/inputsplunkhec.py +14 -33
- cribl_control_plane/models/inputsplunksearch.py +18 -37
- cribl_control_plane/models/inputsqs.py +14 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +14 -33
- cribl_control_plane/models/inputtcpjson.py +14 -33
- cribl_control_plane/models/inputwef.py +22 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +12 -25
- cribl_control_plane/models/inputzscalerhec.py +14 -33
- cribl_control_plane/models/listoutputop.py +2 -2
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- cribl_control_plane/models/updateoutputbyidop.py +4 -4
- cribl_control_plane/sdk.py +3 -5
- cribl_control_plane/sources.py +8 -10
- {cribl_control_plane-0.0.15.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +13 -13
- cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
- cribl_control_plane-0.0.15.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.15.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
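Most of the changes summarized above are one mechanical refactor applied across every Source and Destination model: 0.0.17 drops the generated "open enum" machinery, replacing `str, Enum, metaclass=utils.OpenEnumMeta` classes with plain `str, Enum` subclasses and removing the matching `PlainValidator(validate_open_enum(False))` wrappers from model fields. A minimal sketch of the practical difference, using a class name from the diffs below; how the removed validator treated unknown values is inferred from its name, not confirmed by this diff:

from enum import Enum


class InputZscalerHecType(str, Enum):
    # 0.0.17 form: a closed enum
    ZSCALER_HEC = "zscaler_hec"


# A closed enum rejects any value outside its declared members:
try:
    InputZscalerHecType("some_future_type")
except ValueError as exc:
    print(exc)  # 'some_future_type' is not a valid InputZscalerHecType

# In 0.0.15 these fields were wrapped in PlainValidator(validate_open_enum(False)),
# which presumably let values unknown to this SDK version pass through
# validation instead of raising.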
cribl_control_plane/models/inputzscalerhec.py
@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import Any, List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class InputZscalerHecType(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputZscalerHecType(str, Enum):
     ZSCALER_HEC = "zscaler_hec"
 
 
@@ -26,14 +23,14 @@ class InputZscalerHecConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputZscalerHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputZscalerHecMode(str, Enum):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputZscalerHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputZscalerHecCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -58,9 +55,7 @@ class InputZscalerHecPqTypedDict(TypedDict):
 
 
 class InputZscalerHecPq(BaseModel):
-    mode: Annotated[
-        Optional[InputZscalerHecMode], PlainValidator(validate_open_enum(False))
-    ] = InputZscalerHecMode.ALWAYS
+    mode: Optional[InputZscalerHecMode] = InputZscalerHecMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -84,13 +79,11 @@ class InputZscalerHecPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Annotated[
-        Optional[InputZscalerHecCompression], PlainValidator(validate_open_enum(False))
-    ] = InputZscalerHecCompression.NONE
+    compress: Optional[InputZscalerHecCompression] = InputZscalerHecCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
 
-class InputZscalerHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputZscalerHecAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
     MANUAL = "manual"
@@ -127,11 +120,7 @@ class InputZscalerHecAuthToken(BaseModel):
     token: Any
 
     auth_type: Annotated[
-        Annotated[
-            Optional[InputZscalerHecAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[InputZscalerHecAuthenticationMethod], pydantic.Field(alias="authType")
     ] = InputZscalerHecAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
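The field-level half of the same refactor is visible in this hunk: the nested `Annotated[..., PlainValidator(validate_open_enum(False))]` layer disappears, leaving a single `Annotated` that carries only the `pydantic.Field` alias. A self-contained sketch of the surviving pattern (illustrative names, not copied from the SDK, and using pydantic's own BaseModel rather than the SDK's):

import pydantic
from enum import Enum
from typing import Optional
from typing_extensions import Annotated


class AuthMethod(str, Enum):
    # hypothetical stand-in for the generated enums
    MANUAL = "manual"
    SECRET = "secret"


class Example(pydantic.BaseModel):
    auth_type: Annotated[
        Optional[AuthMethod], pydantic.Field(alias="authType")
    ] = AuthMethod.MANUAL


# The alias maps the camelCase wire key onto the snake_case attribute:
assert Example.model_validate({"authType": "secret"}).auth_type is AuthMethod.SECRET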
@@ -150,14 +139,14 @@ class InputZscalerHecAuthToken(BaseModel):
     r"""Fields to add to events referencing this token"""
 
 
-class InputZscalerHecMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputZscalerHecMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class InputZscalerHecMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputZscalerHecMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -216,19 +205,11 @@ class InputZscalerHecTLSSettingsServerSide(BaseModel):
     ] = None
 
     min_version: Annotated[
-        Annotated[
-            Optional[InputZscalerHecMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[InputZscalerHecMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[InputZscalerHecMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[InputZscalerHecMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None
 
 
@@ -246,11 +227,11 @@ class InputZscalerHecMetadatum(BaseModel):
 
 
 class InputZscalerHecTypedDict(TypedDict):
-    type: InputZscalerHecType
     port: float
     r"""Port to listen on"""
     id: NotRequired[str]
     r"""Unique ID for this input"""
+    type: NotRequired[InputZscalerHecType]
     disabled: NotRequired[bool]
     pipeline: NotRequired[str]
     r"""Pipeline to process data from this Source before sending it through the Routes"""
@@ -309,14 +290,14 @@ class InputZscalerHecTypedDict(TypedDict):
 
 
 class InputZscalerHec(BaseModel):
-    type: Annotated[InputZscalerHecType, PlainValidator(validate_open_enum(False))]
-
     port: float
     r"""Port to listen on"""
 
     id: Optional[str] = None
     r"""Unique ID for this input"""
 
+    type: Optional[InputZscalerHecType] = None
+
     disabled: Optional[bool] = False
 
     pipeline: Optional[str] = None
cribl_control_plane/models/listoutputop.py
@@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict
 
 
 class ListOutputResponseTypedDict(TypedDict):
-    r"""a list of
+    r"""a list of Destination objects"""
 
     count: NotRequired[int]
     r"""number of items present in the items array"""
@@ -16,7 +16,7 @@ class ListOutputResponseTypedDict(TypedDict):
 
 
 class ListOutputResponse(BaseModel):
-    r"""a list of
+    r"""a list of Destination objects"""
 
     count: Optional[int] = None
     r"""number of items present in the items array"""
cribl_control_plane/models/output.py
@@ -40,10 +40,7 @@ from .outputgooglecloudstorage import (
     OutputGoogleCloudStorageTypedDict,
 )
 from .outputgooglepubsub import OutputGooglePubsub, OutputGooglePubsubTypedDict
-from .outputgrafanacloud_union import (
-    OutputGrafanaCloudUnion,
-    OutputGrafanaCloudUnionTypedDict,
-)
+from .outputgrafanacloud import OutputGrafanaCloud, OutputGrafanaCloudTypedDict
 from .outputgraphite import OutputGraphite, OutputGraphiteTypedDict
 from .outputhoneycomb import OutputHoneycomb, OutputHoneycombTypedDict
 from .outputhumiohec import OutputHumioHec, OutputHumioHecTypedDict
@@ -153,7 +150,7 @@ OutputTypedDict = TypeAliasType(
         OutputAzureDataExplorerTypedDict,
         OutputWebhookTypedDict,
         OutputGoogleCloudLoggingTypedDict,
-        OutputGrafanaCloudUnionTypedDict,
+        OutputGrafanaCloudTypedDict,
     ],
 )
 
@@ -224,6 +221,6 @@ Output = TypeAliasType(
         OutputAzureDataExplorer,
         OutputWebhook,
         OutputGoogleCloudLogging,
-        OutputGrafanaCloudUnion,
+        OutputGrafanaCloud,
     ],
 )
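Together with the `outputgrafanacloud_union.py → outputgrafanacloud.py` rename in the file list, these two hunks drop the `Union` suffix from the Grafana Cloud types, so imports by the old names need a one-line update (sketch; the re-export path through `cribl_control_plane.models` is assumed from the hunks above and the `__init__.py` changes):

# 0.0.15
# from cribl_control_plane.models import (
#     OutputGrafanaCloudUnion,
#     OutputGrafanaCloudUnionTypedDict,
# )

# 0.0.17
from cribl_control_plane.models import OutputGrafanaCloud, OutputGrafanaCloudTypedDict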
cribl_control_plane/models/outputazureblob.py
@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class OutputAzureBlobType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobType(str, Enum):
     AZURE_BLOB = "azure_blob"
 
 
-class OutputAzureBlobDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobDataFormat(str, Enum):
     r"""Format of the output data"""
 
     JSON = "json"
@@ -23,28 +20,28 @@ class OutputAzureBlobDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET = "parquet"
 
 
-class OutputAzureBlobBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureBlobDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureBlobAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobAuthenticationMethod(str, Enum):
     MANUAL = "manual"
     SECRET = "secret"
     CLIENT_SECRET = "clientSecret"
     CLIENT_CERT = "clientCert"
 
 
-class BlobAccessTier(str, Enum, metaclass=utils.OpenEnumMeta):
+class BlobAccessTier(str, Enum):
     INFERRED = "Inferred"
     HOT = "Hot"
     COOL = "Cool"
@@ -52,14 +49,14 @@ class BlobAccessTier(str, Enum, metaclass=utils.OpenEnumMeta):
     ARCHIVE = "Archive"
 
 
-class OutputAzureBlobCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobCompression(str, Enum):
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureBlobCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobCompressionLevel(str, Enum):
     r"""Compression level to apply before moving files to final destination"""
 
     BEST_SPEED = "best_speed"
@@ -67,7 +64,7 @@ class OutputAzureBlobCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     BEST_COMPRESSION = "best_compression"
 
 
-class OutputAzureBlobParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobParquetVersion(str, Enum):
     r"""Determines which data types are supported and how they are represented"""
 
     PARQUET_1_0 = "PARQUET_1_0"
@@ -75,7 +72,7 @@ class OutputAzureBlobParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET_2_6 = "PARQUET_2_6"
 
 
-class OutputAzureBlobDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureBlobDataPageVersion(str, Enum):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
 
     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -214,9 +211,7 @@ class OutputAzureBlob(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""
 
-    type: Annotated[
-        Optional[OutputAzureBlobType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputAzureBlobType] = None
 
     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -266,11 +261,7 @@ class OutputAzureBlob(BaseModel):
     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputAzureBlobDataFormat], pydantic.Field(alias="format")
     ] = OutputAzureBlobDataFormat.JSON
     r"""Format of the output data"""
@@ -313,10 +304,7 @@ class OutputAzureBlob(BaseModel):
     r"""Buffer size used to write to a file"""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureBlobBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureBlobBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -327,39 +315,26 @@ class OutputAzureBlob(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
 
     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureBlobDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureBlobDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputAzureBlobAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputAzureBlobAuthenticationMethod.MANUAL
 
     storage_class: Annotated[
-        Annotated[Optional[BlobAccessTier], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="storageClass"),
+        Optional[BlobAccessTier], pydantic.Field(alias="storageClass")
     ] = BlobAccessTier.INFERRED
 
     description: Optional[str] = None
 
-    compress: Annotated[
-        Optional[OutputAzureBlobCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputAzureBlobCompression.GZIP
+    compress: Optional[OutputAzureBlobCompression] = OutputAzureBlobCompression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     compression_level: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobCompressionLevel],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureBlobCompressionLevel],
         pydantic.Field(alias="compressionLevel"),
     ] = OutputAzureBlobCompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""
@@ -370,19 +345,12 @@ class OutputAzureBlob(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
 
     parquet_version: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobParquetVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="parquetVersion"),
+        Optional[OutputAzureBlobParquetVersion], pydantic.Field(alias="parquetVersion")
     ] = OutputAzureBlobParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""
 
     parquet_data_page_version: Annotated[
-        Annotated[
-            Optional[OutputAzureBlobDataPageVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureBlobDataPageVersion],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputAzureBlobDataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
cribl_control_plane/models/outputazuredataexplorer.py
@@ -1,26 +1,23 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class OutputAzureDataExplorerType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerType(str, Enum):
    AZURE_DATA_EXPLORER = "azure_data_explorer"
 
 
-class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class IngestionMode(str, Enum):
     BATCHING = "batching"
     STREAMING = "streaming"
 
 
-class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
+class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -28,9 +25,7 @@ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
     HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"
 
 
-class OutputAzureDataExplorerAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerAuthenticationMethod(str, Enum):
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
 
     CLIENT_SECRET = "clientSecret"
@@ -50,9 +45,7 @@ class OutputAzureDataExplorerCertificate(BaseModel):
     r"""The certificate you registered as credentials for your app in the Azure portal"""
 
 
-class OutputAzureDataExplorerBackpressureBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -60,7 +53,7 @@ class OutputAzureDataExplorerBackpressureBehavior(
     QUEUE = "queue"
 
 
-class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerDataFormat(str, Enum):
     r"""Format of the output data"""
 
     JSON = "json"
@@ -68,16 +61,14 @@ class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET = "parquet"
 
 
-class OutputAzureDataExplorerDiskSpaceProtection(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
+class PrefixOptional(str, Enum):
     DROP_BY = "dropBy"
     INGEST_BY = "ingestBy"
 
@@ -90,9 +81,7 @@ class ExtentTagTypedDict(TypedDict):
 class ExtentTag(BaseModel):
     value: str
 
-    prefix: Annotated[
-        Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
-    ] = None
+    prefix: Optional[PrefixOptional] = None
 
 
 class IngestIfNotExistTypedDict(TypedDict):
@@ -103,7 +92,7 @@ class IngestIfNotExist(BaseModel):
     value: str
 
 
-class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+class ReportLevel(str, Enum):
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
 
     FAILURES_ONLY = "failuresOnly"
@@ -111,7 +100,7 @@ class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     FAILURES_AND_SUCCESSES = "failuresAndSuccesses"
 
 
-class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class ReportMethod(str, Enum):
     r"""Target of the ingestion status reporting. Defaults to Queue."""
 
     QUEUE = "queue"
@@ -184,32 +173,28 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputAzureDataExplorerCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerCompressCompression(str, Enum):
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureDataExplorerPqCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputAzureDataExplorerPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -375,9 +360,7 @@ class OutputAzureDataExplorer(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""
 
-    type: Annotated[
-        Optional[OutputAzureDataExplorerType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputAzureDataExplorerType] = None
 
     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -399,24 +382,17 @@ class OutputAzureDataExplorer(BaseModel):
     r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
 
     ingest_mode: Annotated[
-        Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="ingestMode"),
+        Optional[IngestionMode], pydantic.Field(alias="ingestMode")
     ] = IngestionMode.BATCHING
 
     oauth_endpoint: Annotated[
-        Annotated[
-            Optional[MicrosoftEntraIDAuthenticationEndpoint],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[MicrosoftEntraIDAuthenticationEndpoint],
         pydantic.Field(alias="oauthEndpoint"),
     ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     oauth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerAuthenticationMethod],
         pydantic.Field(alias="oauthType"),
     ] = OutputAzureDataExplorerAuthenticationMethod.CLIENT_SECRET
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -435,10 +411,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -449,11 +422,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputAzureDataExplorerDataFormat], pydantic.Field(alias="format")
     ] = OutputAzureDataExplorerDataFormat.JSON
     r"""Format of the output data"""
@@ -493,10 +462,7 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Maximum number of parts to upload in parallel per file"""
 
     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureDataExplorerDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -540,14 +506,12 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""
 
     report_level: Annotated[
-        Annotated[Optional[ReportLevel], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="reportLevel"),
+        Optional[ReportLevel], pydantic.Field(alias="reportLevel")
     ] = ReportLevel.FAILURES_ONLY
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
 
     report_method: Annotated[
-        Annotated[Optional[ReportMethod], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="reportMethod"),
+        Optional[ReportMethod], pydantic.Field(alias="reportMethod")
     ] = ReportMethod.QUEUE
     r"""Target of the ingestion status reporting. Defaults to Queue."""
 
@@ -572,10 +536,9 @@ class OutputAzureDataExplorer(BaseModel):
     ] = False
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
-    compress: Annotated[
-        Optional[OutputAzureDataExplorerCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureDataExplorerCompressCompression.GZIP
+    compress: Optional[OutputAzureDataExplorerCompressCompression] = (
+        OutputAzureDataExplorerCompressCompression.GZIP
+    )
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
@@ -629,29 +592,19 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputAzureDataExplorerPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureDataExplorerQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputAzureDataExplorerMode], pydantic.Field(alias="pqMode")
     ] = OutputAzureDataExplorerMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 