cribl-control-plane 0.0.50__py3-none-any.whl → 0.0.50rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -5
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/groups_sdk.py +4 -4
- cribl_control_plane/health.py +6 -2
- cribl_control_plane/models/__init__.py +56 -31
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +24 -4
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/getversionshowop.py +6 -5
- cribl_control_plane/models/gitinfo.py +14 -3
- cribl_control_plane/models/gitshowresult.py +19 -0
- cribl_control_plane/models/hbcriblinfo.py +24 -3
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/input.py +65 -63
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +41 -32
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +41 -28
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +48 -30
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeprovidedinfo.py +3 -0
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/output.py +84 -79
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +71 -44
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatabricks.py +282 -0
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +50 -18
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +69 -40
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +77 -42
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.50rc2.dist-info/RECORD +327 -0
- cribl_control_plane/models/appmode.py +0 -13
- cribl_control_plane/models/routecloneconf.py +0 -13
- cribl_control_plane-0.0.50.dist-info/RECORD +0 -325
- {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/WHEEL +0 -0
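Most of the per-model churn in the file list above is one mechanical change: the Speakeasy-generated enums become "open" enums. Each enum class gains metaclass=utils.OpenEnumMeta, and each enum-typed field wraps its type in an inner Annotated[..., PlainValidator(validate_open_enum(False))], as the diffs below show for the Elastic, Elastic Cloud, Exabeam, and Filesystem outputs. The following minimal sketch mirrors the new pattern; ExampleMode and ExampleModel are illustrative names only, and the runtime behavior of the open-enum helpers is inferred from their names rather than verified here.

"""Sketch of the open-enum pattern this release applies across the models.

Assumes cribl-control-plane 0.0.50rc2 is installed; ExampleMode and
ExampleModel are illustrative names, not part of the SDK.
"""

from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated

from cribl_control_plane import utils
from cribl_control_plane.types import BaseModel
from cribl_control_plane.utils import validate_open_enum


class ExampleMode(str, Enum, metaclass=utils.OpenEnumMeta):  # open enum
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class ExampleModel(BaseModel):
    # Same field shape the generated models now use: an inner Annotated for
    # the open-enum validator, an outer Annotated for the pydantic alias.
    pq_mode: Annotated[
        Annotated[Optional[ExampleMode], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="pqMode"),
    ] = ExampleMode.ERROR


# Known members still validate to enum members:
print(ExampleModel.model_validate({"pqMode": "always"}).pq_mode)

# A value outside the declared members is presumably accepted and passed
# through by the open-enum validator rather than raising (unverified here):
print(ExampleModel.model_validate({"pqMode": "future-mode"}).pq_mode)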
--- a/cribl_control_plane/models/outputelastic.py
+++ b/cribl_control_plane/models/outputelastic.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -23,7 +26,7 @@ class OutputElasticExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputElasticFailedRequestLoggingMode(str, Enum):
+class OutputElasticFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -96,7 +99,7 @@ class OutputElasticExtraParam(BaseModel):
     value: str


-class OutputElasticAuthenticationMethod(str, Enum):
+class OutputElasticAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter credentials directly, or select a stored secret"""

     MANUAL = "manual"
@@ -115,12 +118,16 @@ class OutputElasticAuth(BaseModel):
     disabled: Optional[bool] = True

     auth_type: Annotated[
-        Optional[OutputElasticAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputElasticAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputElasticAuthenticationMethod.MANUAL
     r"""Enter credentials directly, or select a stored secret"""


-class ElasticVersion(str, Enum):
+class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""

     AUTO = "auto"
@@ -128,14 +135,14 @@ class ElasticVersion(str, Enum):
     SEVEN = "7"


-class WriteAction(str, Enum):
+class WriteAction(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Action to use when writing events. Must be set to `Create` when writing to a data stream."""

     INDEX = "index"
     CREATE = "create"


-class OutputElasticBackpressureBehavior(str, Enum):
+class OutputElasticBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -158,21 +165,21 @@ class OutputElasticURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputElasticCompression(str, Enum):
+class OutputElasticCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputElasticQueueFullBehavior(str, Enum):
+class OutputElasticQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputElasticMode(str, Enum):
+class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -347,7 +354,10 @@ class OutputElastic(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Optional[OutputElasticFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputElasticFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputElasticFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -380,7 +390,8 @@ class OutputElastic(BaseModel):
     auth: Optional[OutputElasticAuth] = None

     elastic_version: Annotated[
-        Optional[ElasticVersion], pydantic.Field(alias="elasticVersion")
+        Annotated[Optional[ElasticVersion], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="elasticVersion"),
     ] = ElasticVersion.AUTO
     r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""

@@ -395,7 +406,8 @@ class OutputElastic(BaseModel):
     r"""Include the `document_id` field when sending events to an Elastic TSDS (time series data stream)"""

     write_action: Annotated[
-        Optional[WriteAction], pydantic.Field(alias="writeAction")
+        Annotated[Optional[WriteAction], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="writeAction"),
     ] = WriteAction.CREATE
     r"""Action to use when writing events. Must be set to `Create` when writing to a data stream."""

@@ -405,7 +417,10 @@ class OutputElastic(BaseModel):
     r"""Retry failed events when a bulk request to Elastic is successful, but the response body returns an error for one or more events in the batch"""

     on_backpressure: Annotated[
-        Optional[OutputElasticBackpressureBehavior],
+        Annotated[
+            Optional[OutputElasticBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputElasticBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -449,19 +464,29 @@ class OutputElastic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputElasticCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputElasticCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputElasticCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputElasticQueueFullBehavior],
+        Annotated[
+            Optional[OutputElasticQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputElasticQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Optional[OutputElasticMode], pydantic.Field(alias="pqMode")
-    ] = OutputElasticMode.ERROR
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputElasticMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
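For SDK consumers, the practical effect of hunks like the ones above is on deserialization: fields typed with these enums can now presumably carry server-supplied values that are not declared members. A hedged sketch of defensive handling in calling code follows; describe_pq_compress is a hypothetical helper, not part of the SDK, and only the import path and member names are taken from the diff itself.

from cribl_control_plane.models.outputelastic import OutputElasticCompression


def describe_pq_compress(value: str) -> str:
    # The enum classes are still `str` subclasses, so equality checks against
    # the declared members keep working exactly as before this release.
    if value == OutputElasticCompression.NONE:
        return "persistent queue data is not compressed"
    if value == OutputElasticCompression.GZIP:
        return "persistent queue data is gzip-compressed"
    # With the open-enum change, a value outside the declared members is
    # presumably passed through as a plain string instead of failing
    # validation, so keep a fallback branch for it.
    return f"unrecognized compression codec: {value!r}"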
--- a/cribl_control_plane/models/outputelasticcloud.py
+++ b/cribl_control_plane/models/outputelasticcloud.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -23,7 +26,9 @@ class OutputElasticCloudExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputElasticCloudFailedRequestLoggingMode(str, Enum):
+class OutputElasticCloudFailedRequestLoggingMode(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -42,7 +47,7 @@ class OutputElasticCloudExtraParam(BaseModel):
     value: str


-class OutputElasticCloudAuthenticationMethod(str, Enum):
+class OutputElasticCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter credentials directly, or select a stored secret"""

     MANUAL = "manual"
@@ -61,7 +66,10 @@ class OutputElasticCloudAuth(BaseModel):
     disabled: Optional[bool] = False

     auth_type: Annotated[
-        Optional[OutputElasticCloudAuthenticationMethod],
+        Annotated[
+            Optional[OutputElasticCloudAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="authType"),
     ] = OutputElasticCloudAuthenticationMethod.MANUAL
     r"""Enter credentials directly, or select a stored secret"""
@@ -121,7 +129,7 @@ class OutputElasticCloudTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputElasticCloudBackpressureBehavior(str, Enum):
+class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -129,21 +137,21 @@ class OutputElasticCloudBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputElasticCloudCompression(str, Enum):
+class OutputElasticCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputElasticCloudQueueFullBehavior(str, Enum):
+class OutputElasticCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputElasticCloudMode(str, Enum):
+class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -295,7 +303,10 @@ class OutputElasticCloud(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Optional[OutputElasticCloudFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputElasticCloudFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputElasticCloudFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -340,7 +351,10 @@ class OutputElasticCloud(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Optional[OutputElasticCloudBackpressureBehavior],
+        Annotated[
+            Optional[OutputElasticCloudBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputElasticCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -361,18 +375,28 @@ class OutputElasticCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputElasticCloudCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputElasticCloudCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputElasticCloudCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputElasticCloudQueueFullBehavior],
+        Annotated[
+            Optional[OutputElasticCloudQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputElasticCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Optional[OutputElasticCloudMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputElasticCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

--- a/cribl_control_plane/models/outputexabeam.py
+++ b/cribl_control_plane/models/outputexabeam.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,14 +15,14 @@ class OutputExabeamType(str, Enum):
     EXABEAM = "exabeam"


-class OutputExabeamSignatureVersion(str, Enum):
+class OutputExabeamSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing Google Cloud Storage requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputExabeamObjectACL(str, Enum):
+class OutputExabeamObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""

     PRIVATE = "private"
@@ -30,7 +33,7 @@ class OutputExabeamObjectACL(str, Enum):
     PUBLIC_READ = "public-read"


-class OutputExabeamStorageClass(str, Enum):
+class OutputExabeamStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""

     STANDARD = "STANDARD"
@@ -39,14 +42,14 @@ class OutputExabeamStorageClass(str, Enum):
     ARCHIVE = "ARCHIVE"


-class OutputExabeamBackpressureBehavior(str, Enum):
+class OutputExabeamBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputExabeamDiskSpaceProtection(str, Enum):
+class OutputExabeamDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
@@ -165,18 +168,28 @@ class OutputExabeam(BaseModel):
     r"""Google Cloud Storage service endpoint"""

     signature_version: Annotated[
-        Optional[OutputExabeamSignatureVersion],
+        Annotated[
+            Optional[OutputExabeamSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputExabeamSignatureVersion.V4
     r"""Signature version to use for signing Google Cloud Storage requests"""

     object_acl: Annotated[
-        Optional[OutputExabeamObjectACL], pydantic.Field(alias="objectACL")
+        Annotated[
+            Optional[OutputExabeamObjectACL], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="objectACL"),
     ] = OutputExabeamObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""

     storage_class: Annotated[
-        Optional[OutputExabeamStorageClass], pydantic.Field(alias="storageClass")
+        Annotated[
+            Optional[OutputExabeamStorageClass],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="storageClass"),
     ] = None
     r"""Storage class to select for uploaded objects"""

@@ -216,7 +229,10 @@ class OutputExabeam(BaseModel):
     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""

     on_backpressure: Annotated[
-        Optional[OutputExabeamBackpressureBehavior],
+        Annotated[
+            Optional[OutputExabeamBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputExabeamBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -227,7 +243,10 @@ class OutputExabeam(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Optional[OutputExabeamDiskSpaceProtection],
+        Annotated[
+            Optional[OutputExabeamDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputExabeamDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
--- a/cribl_control_plane/models/outputfilesystem.py
+++ b/cribl_control_plane/models/outputfilesystem.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputFilesystemType(str, Enum):
     FILESYSTEM = "filesystem"


-class OutputFilesystemDataFormat(str, Enum):
+class OutputFilesystemDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""

     JSON = "json"
@@ -20,28 +23,28 @@ class OutputFilesystemDataFormat(str, Enum):
     PARQUET = "parquet"


-class OutputFilesystemBackpressureBehavior(str, Enum):
+class OutputFilesystemBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputFilesystemDiskSpaceProtection(str, Enum):
+class OutputFilesystemDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
     DROP = "drop"


-class OutputFilesystemCompression(str, Enum):
+class OutputFilesystemCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data compression format to apply to HTTP content before it is delivered"""

     NONE = "none"
     GZIP = "gzip"


-class OutputFilesystemCompressionLevel(str, Enum):
+class OutputFilesystemCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""

     BEST_SPEED = "best_speed"
@@ -49,7 +52,7 @@ class OutputFilesystemCompressionLevel(str, Enum):
     BEST_COMPRESSION = "best_compression"


-class OutputFilesystemParquetVersion(str, Enum):
+class OutputFilesystemParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""

     PARQUET_1_0 = "PARQUET_1_0"
@@ -57,7 +60,7 @@ class OutputFilesystemParquetVersion(str, Enum):
     PARQUET_2_6 = "PARQUET_2_6"


-class OutputFilesystemDataPageVersion(str, Enum):
+class OutputFilesystemDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -196,7 +199,11 @@ class OutputFilesystem(BaseModel):
     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""

     format_: Annotated[
-        Optional[OutputFilesystemDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputFilesystemDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputFilesystemDataFormat.JSON
     r"""Format of the output data"""

@@ -239,7 +246,10 @@ class OutputFilesystem(BaseModel):
     r"""Buffer size used to write to a file"""

     on_backpressure: Annotated[
-        Optional[OutputFilesystemBackpressureBehavior],
+        Annotated[
+            Optional[OutputFilesystemBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputFilesystemBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -250,18 +260,26 @@ class OutputFilesystem(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Optional[OutputFilesystemDiskSpaceProtection],
+        Annotated[
+            Optional[OutputFilesystemDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputFilesystemDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     description: Optional[str] = None

-    compress: Optional[OutputFilesystemCompression] = OutputFilesystemCompression.GZIP
+    compress: Annotated[
+        Optional[OutputFilesystemCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputFilesystemCompression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""

     compression_level: Annotated[
-        Optional[OutputFilesystemCompressionLevel],
+        Annotated[
+            Optional[OutputFilesystemCompressionLevel],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="compressionLevel"),
     ] = OutputFilesystemCompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""
@@ -272,12 +290,19 @@ class OutputFilesystem(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""

     parquet_version: Annotated[
-        Optional[OutputFilesystemParquetVersion], pydantic.Field(alias="parquetVersion")
+        Annotated[
+            Optional[OutputFilesystemParquetVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="parquetVersion"),
     ] = OutputFilesystemParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""

     parquet_data_page_version: Annotated[
-        Optional[OutputFilesystemDataPageVersion],
+        Annotated[
+            Optional[OutputFilesystemDataPageVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputFilesystemDataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""