cribl-control-plane 0.0.48a1__py3-none-any.whl → 0.0.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +6 -4
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/httpclient.py +0 -1
- cribl_control_plane/models/__init__.py +12 -12
- cribl_control_plane/models/appmode.py +13 -0
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +2 -7
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/hbcriblinfo.py +3 -19
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +20 -47
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +16 -46
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
- cribl_control_plane/models/inputmsk.py +18 -53
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeprovidedinfo.py +0 -3
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +32 -77
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +14 -41
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +28 -74
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +30 -82
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/routecloneconf.py +13 -0
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/workertypes.py +1 -2
- cribl_control_plane/sdk.py +2 -2
- cribl_control_plane/utils/annotations.py +32 -8
- {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/METADATA +2 -1
- {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/RECORD +165 -163
- {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputkinesis.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputKinesisType(str, Enum):
     KINESIS = "kinesis"


-class OutputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -23,21 +20,21 @@ class OutputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"


-class OutputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisSignatureVersion(str, Enum):
     r"""Signature version to use for signing Kinesis stream requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisCompression(str, Enum):
     r"""Compression type to use for records"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -45,21 +42,21 @@ class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKinesisMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -170,10 +167,7 @@ class OutputKinesis(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputKinesisAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKinesisAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputKinesisAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -186,10 +180,7 @@ class OutputKinesis(BaseModel):
     r"""Kinesis stream service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Kinesis stream-compatible endpoint."""

     signature_version: Annotated[
-        Annotated[
-            Optional[OutputKinesisSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKinesisSignatureVersion],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputKinesisSignatureVersion.V4
     r"""Signature version to use for signing Kinesis stream requests"""
@@ -237,9 +228,7 @@ class OutputKinesis(BaseModel):
     ] = 1
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

-    compression: Annotated[
-        Optional[OutputKinesisCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputKinesisCompression.GZIP
+    compression: Optional[OutputKinesisCompression] = OutputKinesisCompression.GZIP
     r"""Compression type to use for records"""

     use_list_shards: Annotated[
@@ -251,10 +240,7 @@ class OutputKinesis(BaseModel):
     r"""Batch events into a single record as NDJSON"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKinesisBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKinesisBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKinesisBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -280,29 +266,19 @@ class OutputKinesis(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputKinesisPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputKinesisPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputKinesisPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKinesisQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKinesisQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputKinesisQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKinesisMode.ERROR
+    pq_mode: Annotated[Optional[OutputKinesisMode], pydantic.Field(alias="pqMode")] = (
+        OutputKinesisMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
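Across the expanded diffs the pattern is the same: each generated enum drops the `utils.OpenEnumMeta` metaclass and each enum-typed field loses its `PlainValidator(validate_open_enum(False))` wrapper, so the enums become ordinary closed `str` enums. A minimal sketch of what that likely means for callers, using `OutputKinesisCompression` from the hunks above (the lenient 0.0.48a1 behavior is an assumption inferred from the removed open-enum validator; the strict behavior shown is just standard `Enum` semantics):

```python
# Illustrative sketch only, not an official SDK example.
from cribl_control_plane.models.outputkinesis import OutputKinesisCompression

# Declared members keep resolving exactly as before.
assert OutputKinesisCompression("gzip") is OutputKinesisCompression.GZIP

# Undeclared values now raise ValueError, because the class is a plain `str, Enum`.
# Under 0.0.48a1's open-enum machinery such values were presumably tolerated.
try:
    OutputKinesisCompression("zstd")
except ValueError as exc:
    print(f"rejected: {exc}")
```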
cribl_control_plane/models/outputloki.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputLokiType(str, Enum):
     LOKI = "loki"


-class OutputLokiMessageFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiMessageFormat(str, Enum):
     r"""Format to use when sending logs to Loki (Protobuf or JSON)"""

     PROTOBUF = "protobuf"
@@ -33,7 +30,7 @@ class OutputLokiLabel(BaseModel):
     name: Optional[str] = ""


-class OutputLokiAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiAuthenticationType(str, Enum):
     NONE = "none"
     TOKEN = "token"
     TEXT_SECRET = "textSecret"
@@ -52,7 +49,7 @@ class OutputLokiExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputLokiFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -114,7 +111,7 @@ class OutputLokiTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -122,21 +119,21 @@ class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputLokiCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputLokiQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputLokiMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -262,10 +259,7 @@ class OutputLoki(BaseModel):
     r"""Name of the event field that contains the message to send. If not specified, Stream sends a JSON representation of the whole event."""

     message_format: Annotated[
-        Annotated[
-            Optional[OutputLokiMessageFormat], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="messageFormat"),
+        Optional[OutputLokiMessageFormat], pydantic.Field(alias="messageFormat")
     ] = OutputLokiMessageFormat.PROTOBUF
     r"""Format to use when sending logs to Loki (Protobuf or JSON)"""

@@ -273,11 +267,7 @@ class OutputLoki(BaseModel):
     r"""List of labels to send with logs. Labels define Loki streams, so use static labels to avoid proliferating label value combinations and streams. Can be merged and/or overridden by the event's __labels field. Example: '__labels: {host: \"cribl.io\", level: \"error\"}'"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputLokiAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputLokiAuthenticationType], pydantic.Field(alias="authType")
     ] = OutputLokiAuthenticationType.NONE

     concurrency: Optional[float] = 1
@@ -321,10 +311,7 @@ class OutputLoki(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputLokiFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputLokiFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputLokiFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -356,11 +343,7 @@ class OutputLoki(BaseModel):
     r"""Add per-event HTTP headers from the __headers field to outgoing requests. Events with different headers are batched and sent separately."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputLokiBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="onBackpressure"),
+        Optional[OutputLokiBackpressureBehavior], pydantic.Field(alias="onBackpressure")
     ] = OutputLokiBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

@@ -405,26 +388,18 @@ class OutputLoki(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputLokiCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputLokiCompression], pydantic.Field(alias="pqCompress")
     ] = OutputLokiCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputLokiQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputLokiQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputLokiQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[Optional[OutputLokiMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputLokiMode.ERROR
+    pq_mode: Annotated[Optional[OutputLokiMode], pydantic.Field(alias="pqMode")] = (
+        OutputLokiMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
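The field-level change is equally mechanical: the nested `Annotated[..., PlainValidator(validate_open_enum(False))]` layer disappears and only the enum type plus the `pydantic.Field` alias remain. A self-contained sketch of the resulting annotation shape (a standalone pydantic v2 model that mirrors the pattern; it is not the SDK's own `BaseModel`, and the second enum member is illustrative):

```python
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class PqMode(str, Enum):
    # OutputLokiMode in the diff declares ERROR = "error"; ALWAYS is made up here.
    ERROR = "error"
    ALWAYS = "always"


class Example(pydantic.BaseModel):
    # 0.0.50 shape: type plus Field alias, no PlainValidator entry.
    pq_mode: Annotated[Optional[PqMode], pydantic.Field(alias="pqMode")] = PqMode.ERROR


# Aliased input still populates the field as before...
assert Example.model_validate({"pqMode": "always"}).pq_mode is PqMode.ALWAYS

# ...but values outside the enum now fail validation instead of passing through.
try:
    Example.model_validate({"pqMode": "sometimes"})
except pydantic.ValidationError as exc:
    print(exc.error_count(), "validation error")
```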
cribl_control_plane/models/outputminio.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputMinioType(str, Enum):
     MINIO = "minio"


-class OutputMinioAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -23,14 +20,14 @@ class OutputMinioAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"


-class OutputMinioSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioSignatureVersion(str, Enum):
     r"""Signature version to use for signing MinIO requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputMinioObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioObjectACL(str, Enum):
     r"""Object ACL to assign to uploaded objects"""

     PRIVATE = "private"
@@ -42,20 +39,20 @@ class OutputMinioObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"


-class OutputMinioStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioStorageClass(str, Enum):
     r"""Storage class to select for uploaded objects"""

     STANDARD = "STANDARD"
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"


-class ServerSideEncryption(str, Enum, metaclass=utils.OpenEnumMeta):
+class ServerSideEncryption(str, Enum):
     r"""Server-side encryption for uploaded objects"""

     AES256 = "AES256"


-class OutputMinioDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioDataFormat(str, Enum):
     r"""Format of the output data"""

     JSON = "json"
@@ -63,28 +60,28 @@ class OutputMinioDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET = "parquet"


-class OutputMinioBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputMinioDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
     DROP = "drop"


-class OutputMinioCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioCompression(str, Enum):
     r"""Data compression format to apply to HTTP content before it is delivered"""

     NONE = "none"
     GZIP = "gzip"


-class OutputMinioCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioCompressionLevel(str, Enum):
     r"""Compression level to apply before moving files to final destination"""

     BEST_SPEED = "best_speed"
@@ -92,7 +89,7 @@ class OutputMinioCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     BEST_COMPRESSION = "best_compression"


-class OutputMinioParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioParquetVersion(str, Enum):
     r"""Determines which data types are supported and how they are represented"""

     PARQUET_1_0 = "PARQUET_1_0"
@@ -100,7 +97,7 @@ class OutputMinioParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET_2_6 = "PARQUET_2_6"


-class OutputMinioDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioDataPageVersion(str, Enum):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -254,10 +251,7 @@ class OutputMinio(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputMinioAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputMinioAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputMinioAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -284,35 +278,22 @@ class OutputMinio(BaseModel):
     r"""Root directory to prepend to path before uploading. Enter a constant, or a JavaScript expression enclosed in quotes or backticks."""

     signature_version: Annotated[
-        Annotated[
-            Optional[OutputMinioSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="signatureVersion"),
+        Optional[OutputMinioSignatureVersion], pydantic.Field(alias="signatureVersion")
     ] = OutputMinioSignatureVersion.V4
     r"""Signature version to use for signing MinIO requests"""

     object_acl: Annotated[
-        Annotated[
-            Optional[OutputMinioObjectACL], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="objectACL"),
+        Optional[OutputMinioObjectACL], pydantic.Field(alias="objectACL")
     ] = OutputMinioObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""

     storage_class: Annotated[
-        Annotated[
-            Optional[OutputMinioStorageClass], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="storageClass"),
+        Optional[OutputMinioStorageClass], pydantic.Field(alias="storageClass")
     ] = None
     r"""Storage class to select for uploaded objects"""

     server_side_encryption: Annotated[
-        Annotated[
-            Optional[ServerSideEncryption], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="serverSideEncryption"),
+        Optional[ServerSideEncryption], pydantic.Field(alias="serverSideEncryption")
     ] = None
     r"""Server-side encryption for uploaded objects"""

@@ -342,10 +323,7 @@ class OutputMinio(BaseModel):
     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""

     format_: Annotated[
-        Annotated[
-            Optional[OutputMinioDataFormat], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputMinioDataFormat], pydantic.Field(alias="format")
     ] = OutputMinioDataFormat.JSON
     r"""Format of the output data"""

@@ -378,10 +356,7 @@ class OutputMinio(BaseModel):
     r"""Buffer size used to write to a file"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputMinioBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputMinioBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputMinioBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -392,10 +367,7 @@ class OutputMinio(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputMinioDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputMinioDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputMinioDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -423,17 +395,11 @@ class OutputMinio(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""

-    compress: Annotated[
-        Optional[OutputMinioCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputMinioCompression.GZIP
+    compress: Optional[OutputMinioCompression] = OutputMinioCompression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""

     compression_level: Annotated[
-        Annotated[
-            Optional[OutputMinioCompressionLevel],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="compressionLevel"),
+        Optional[OutputMinioCompressionLevel], pydantic.Field(alias="compressionLevel")
     ] = OutputMinioCompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""

@@ -443,19 +409,12 @@ class OutputMinio(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""

     parquet_version: Annotated[
-        Annotated[
-            Optional[OutputMinioParquetVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="parquetVersion"),
+        Optional[OutputMinioParquetVersion], pydantic.Field(alias="parquetVersion")
     ] = OutputMinioParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""

     parquet_data_page_version: Annotated[
-        Annotated[
-            Optional[OutputMinioDataPageVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputMinioDataPageVersion],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputMinioDataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""