cribl-control-plane 0.2.1rc7__py3-none-any.whl → 0.3.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/__init__.py +5 -8
- cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
- cribl_control_plane/groups_sdk.py +28 -52
- cribl_control_plane/health.py +16 -22
- cribl_control_plane/models/__init__.py +54 -217
- cribl_control_plane/models/appmode.py +14 -0
- cribl_control_plane/models/authtoken.py +1 -5
- cribl_control_plane/models/cacheconnection.py +0 -20
- cribl_control_plane/models/configgroup.py +7 -55
- cribl_control_plane/models/configgroupcloud.py +1 -11
- cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
- cribl_control_plane/models/createroutesappendbyidop.py +2 -2
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +1 -11
- cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
- cribl_control_plane/models/datasetmetadata.py +1 -11
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
- cribl_control_plane/models/distributedsummary.py +0 -6
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +0 -11
- cribl_control_plane/models/hbcriblinfo.py +3 -24
- cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/input.py +78 -80
- cribl_control_plane/models/inputappscope.py +17 -80
- cribl_control_plane/models/inputazureblob.py +1 -33
- cribl_control_plane/models/inputcollection.py +1 -24
- cribl_control_plane/models/inputconfluentcloud.py +18 -195
- cribl_control_plane/models/inputcribl.py +1 -24
- cribl_control_plane/models/inputcriblhttp.py +17 -62
- cribl_control_plane/models/inputcribllakehttp.py +17 -62
- cribl_control_plane/models/inputcriblmetrics.py +1 -24
- cribl_control_plane/models/inputcribltcp.py +17 -62
- cribl_control_plane/models/inputcrowdstrike.py +1 -54
- cribl_control_plane/models/inputdatadogagent.py +17 -62
- cribl_control_plane/models/inputdatagen.py +1 -24
- cribl_control_plane/models/inputedgeprometheus.py +34 -147
- cribl_control_plane/models/inputelastic.py +27 -119
- cribl_control_plane/models/inputeventhub.py +1 -182
- cribl_control_plane/models/inputexec.py +1 -33
- cribl_control_plane/models/inputfile.py +3 -42
- cribl_control_plane/models/inputfirehose.py +17 -62
- cribl_control_plane/models/inputgooglepubsub.py +1 -36
- cribl_control_plane/models/inputgrafana.py +32 -157
- cribl_control_plane/models/inputhttp.py +17 -62
- cribl_control_plane/models/inputhttpraw.py +17 -62
- cribl_control_plane/models/inputjournalfiles.py +1 -24
- cribl_control_plane/models/inputkafka.py +17 -189
- cribl_control_plane/models/inputkinesis.py +1 -80
- cribl_control_plane/models/inputkubeevents.py +1 -24
- cribl_control_plane/models/inputkubelogs.py +1 -33
- cribl_control_plane/models/inputkubemetrics.py +1 -33
- cribl_control_plane/models/inputloki.py +17 -71
- cribl_control_plane/models/inputmetrics.py +17 -62
- cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
- cribl_control_plane/models/inputmsk.py +18 -81
- cribl_control_plane/models/inputnetflow.py +1 -24
- cribl_control_plane/models/inputoffice365mgmt.py +1 -67
- cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
- cribl_control_plane/models/inputoffice365service.py +1 -67
- cribl_control_plane/models/inputopentelemetry.py +16 -92
- cribl_control_plane/models/inputprometheus.py +34 -138
- cribl_control_plane/models/inputprometheusrw.py +17 -71
- cribl_control_plane/models/inputrawudp.py +1 -24
- cribl_control_plane/models/inputs3.py +1 -45
- cribl_control_plane/models/inputs3inventory.py +1 -54
- cribl_control_plane/models/inputsecuritylake.py +1 -54
- cribl_control_plane/models/inputsnmp.py +1 -40
- cribl_control_plane/models/inputsplunk.py +17 -85
- cribl_control_plane/models/inputsplunkhec.py +16 -70
- cribl_control_plane/models/inputsplunksearch.py +1 -63
- cribl_control_plane/models/inputsqs.py +1 -56
- cribl_control_plane/models/inputsyslog.py +32 -121
- cribl_control_plane/models/inputsystemmetrics.py +9 -142
- cribl_control_plane/models/inputsystemstate.py +1 -33
- cribl_control_plane/models/inputtcp.py +17 -81
- cribl_control_plane/models/inputtcpjson.py +17 -71
- cribl_control_plane/models/inputwef.py +1 -71
- cribl_control_plane/models/inputwindowsmetrics.py +9 -129
- cribl_control_plane/models/inputwineventlogs.py +1 -60
- cribl_control_plane/models/inputwiz.py +1 -45
- cribl_control_plane/models/inputwizwebhook.py +17 -62
- cribl_control_plane/models/inputzscalerhec.py +16 -70
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/jobstatus.py +3 -34
- cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
- cribl_control_plane/models/logininfo.py +3 -3
- cribl_control_plane/models/masterworkerentry.py +1 -11
- cribl_control_plane/models/nodeprovidedinfo.py +1 -11
- cribl_control_plane/models/nodeupgradestatus.py +0 -38
- cribl_control_plane/models/output.py +88 -93
- cribl_control_plane/models/outputazureblob.py +1 -110
- cribl_control_plane/models/outputazuredataexplorer.py +87 -452
- cribl_control_plane/models/outputazureeventhub.py +19 -281
- cribl_control_plane/models/outputazurelogs.py +19 -115
- cribl_control_plane/models/outputchronicle.py +19 -115
- cribl_control_plane/models/outputclickhouse.py +19 -155
- cribl_control_plane/models/outputcloudwatch.py +19 -106
- cribl_control_plane/models/outputconfluentcloud.py +38 -311
- cribl_control_plane/models/outputcriblhttp.py +19 -135
- cribl_control_plane/models/outputcribllake.py +1 -97
- cribl_control_plane/models/outputcribltcp.py +19 -132
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
- cribl_control_plane/models/outputdatadog.py +19 -159
- cribl_control_plane/models/outputdataset.py +19 -143
- cribl_control_plane/models/outputdiskspool.py +1 -11
- cribl_control_plane/models/outputdls3.py +1 -152
- cribl_control_plane/models/outputdynatracehttp.py +19 -160
- cribl_control_plane/models/outputdynatraceotlp.py +19 -160
- cribl_control_plane/models/outputelastic.py +19 -163
- cribl_control_plane/models/outputelasticcloud.py +19 -140
- cribl_control_plane/models/outputexabeam.py +1 -61
- cribl_control_plane/models/outputfilesystem.py +1 -87
- cribl_control_plane/models/outputgooglechronicle.py +20 -166
- cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
- cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
- cribl_control_plane/models/outputgooglepubsub.py +19 -106
- cribl_control_plane/models/outputgrafanacloud.py +37 -288
- cribl_control_plane/models/outputgraphite.py +19 -105
- cribl_control_plane/models/outputhoneycomb.py +19 -115
- cribl_control_plane/models/outputhumiohec.py +19 -126
- cribl_control_plane/models/outputinfluxdb.py +19 -130
- cribl_control_plane/models/outputkafka.py +34 -302
- cribl_control_plane/models/outputkinesis.py +19 -133
- cribl_control_plane/models/outputloki.py +17 -129
- cribl_control_plane/models/outputminio.py +1 -145
- cribl_control_plane/models/outputmsk.py +34 -193
- cribl_control_plane/models/outputnewrelic.py +19 -136
- cribl_control_plane/models/outputnewrelicevents.py +20 -128
- cribl_control_plane/models/outputopentelemetry.py +19 -178
- cribl_control_plane/models/outputprometheus.py +19 -115
- cribl_control_plane/models/outputring.py +1 -31
- cribl_control_plane/models/outputs3.py +1 -152
- cribl_control_plane/models/outputsecuritylake.py +1 -114
- cribl_control_plane/models/outputsentinel.py +19 -135
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
- cribl_control_plane/models/outputservicenow.py +19 -168
- cribl_control_plane/models/outputsignalfx.py +19 -115
- cribl_control_plane/models/outputsns.py +17 -113
- cribl_control_plane/models/outputsplunk.py +19 -153
- cribl_control_plane/models/outputsplunkhec.py +19 -208
- cribl_control_plane/models/outputsplunklb.py +19 -182
- cribl_control_plane/models/outputsqs.py +17 -124
- cribl_control_plane/models/outputstatsd.py +19 -105
- cribl_control_plane/models/outputstatsdext.py +19 -105
- cribl_control_plane/models/outputsumologic.py +19 -117
- cribl_control_plane/models/outputsyslog.py +96 -259
- cribl_control_plane/models/outputtcpjson.py +19 -141
- cribl_control_plane/models/outputwavefront.py +19 -115
- cribl_control_plane/models/outputwebhook.py +19 -161
- cribl_control_plane/models/outputxsiam.py +17 -113
- cribl_control_plane/models/packinfo.py +5 -8
- cribl_control_plane/models/packinstallinfo.py +5 -8
- cribl_control_plane/models/resourcepolicy.py +0 -11
- cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +9 -72
- cribl_control_plane/models/runnablejobexecutor.py +9 -32
- cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
- cribl_control_plane/packs.py +7 -202
- cribl_control_plane/routes_sdk.py +6 -6
- cribl_control_plane/tokens.py +15 -23
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
- cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
- cribl_control_plane/models/groupcreaterequest.py +0 -171
- cribl_control_plane/models/outpostnodeinfo.py +0 -16
- cribl_control_plane/models/outputdatabricks.py +0 -482
- cribl_control_plane/models/updatepacksop.py +0 -25
- cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
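The most visible breaking change in the list above is the rename of models/healthserverstatus.py to models/healthstatus.py (with a matching rename in errors/). A minimal compatibility sketch, assuming only the module paths shown in this file list; the class names inside those modules are not visible in this diff:

# Hypothetical shim for the module rename; only the module paths are taken
# from the file list above. Pick symbols from whichever module imports.
try:
    from cribl_control_plane.models import healthstatus as health_model  # 0.3.0a1
except ImportError:
    from cribl_control_plane.models import healthserverstatus as health_model  # 0.2.1rc7

The deleted modules (groupcreaterequest.py, outpostnodeinfo.py, outputdatabricks.py, updatepacksop.py) have no renamed counterpart in this release; code importing them must be updated or pinned to 0.2.1rc7.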
cribl_control_plane/models/outputdataset.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,19 +18,12 @@ class OutputDatasetType(str, Enum):
 class OutputDatasetSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""

-    # 0 - finest
     FINEST = "finest"
-    # 1 - finer
     FINER = "finer"
-    # 2 - fine
     FINE = "fine"
-    # 3 - info
     INFO = "info"
-    # 4 - warning
     WARNING = "warning"
-    # 5 - error
     ERROR = "error"
-    # 6 - fatal
     FATAL = "fatal"


@@ -92,11 +84,8 @@ class OutputDatasetTimeoutRetrySettings(BaseModel):
 class DataSetSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""DataSet site to which events should be sent"""

-    # US
     US = "us"
-    # Europe
     EU = "eu"
-    # Custom
     CUSTOM = "custom"


@@ -114,22 +103,16 @@ class OutputDatasetExtraHTTPHeader(BaseModel):
 class OutputDatasetFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"


 class OutputDatasetBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


@@ -140,35 +123,28 @@ class OutputDatasetAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)
     SECRET = "secret"


-class OutputDatasetMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputDatasetCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputDatasetQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


+class OutputDatasetMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputDatasetPqControlsTypedDict(TypedDict):
     pass

@@ -241,16 +217,6 @@ class OutputDatasetTypedDict(TypedDict):
     r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
     description: NotRequired[str]
     custom_url: NotRequired[str]
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputDatasetMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -261,6 +227,8 @@ class OutputDatasetTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputDatasetQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputDatasetMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputDatasetPqControlsTypedDict]
     api_key: NotRequired[str]
     r"""A 'Log Write Access' API key for the DataSet account"""
@@ -419,34 +387,6 @@ class OutputDataset(BaseModel):

     custom_url: Annotated[Optional[str], pydantic.Field(alias="customUrl")] = None

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputDatasetMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputDatasetMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -478,6 +418,14 @@ class OutputDataset(BaseModel):
     ] = OutputDatasetQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputDatasetMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputDatasetMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputDatasetPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -487,75 +435,3 @@ class OutputDataset(BaseModel):

     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
-
-    @field_serializer("default_severity")
-    def serialize_default_severity(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDatasetSeverity(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("site")
-    def serialize_site(self, value):
-        if isinstance(value, str):
-            try:
-                return models.DataSetSite(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("failed_request_logging_mode")
-    def serialize_failed_request_logging_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDatasetFailedRequestLoggingMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDatasetBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("auth_type")
-    def serialize_auth_type(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDatasetAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDatasetMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDatasetCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDatasetQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
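Net effect of the outputdataset.py hunks above: the pq_strict_ordering, pq_rate_per_sec, pq_max_buffer_size, and pq_max_backpressure_sec fields are removed, pq_mode survives (now declared after pq_on_backpressure, with the same pqMode alias and ERROR default), and the per-field @field_serializer methods are dropped. A minimal sketch using only names visible in this diff:

from cribl_control_plane.models.outputdataset import (
    OutputDatasetMode,
    OutputDatasetTypedDict,
)

# Still valid in 0.3.0a1: these fields survive the hunks above.
cfg: OutputDatasetTypedDict = {
    "pq_mode": OutputDatasetMode.ERROR,  # default shown in the diff
    "pq_max_file_size": "1 MB",          # default shown in the diff
}

# No longer valid: these keys were removed from OutputDatasetTypedDict.
# cfg["pq_strict_ordering"] = True
# cfg["pq_rate_per_sec"] = 0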
cribl_control_plane/models/outputdiskspool.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -88,12 +87,3 @@ class OutputDiskSpool(BaseModel):
     r"""JavaScript expression defining how files are partitioned and organized within the time-buckets. If blank, the event's __partition property is used and otherwise, events go directly into the time-bucket directory."""

     description: Optional[str] = None
-
-    @field_serializer("compress")
-    def serialize_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDiskSpoolCompression(value)
-            except ValueError:
-                return value
-        return value
cribl_control_plane/models/outputdls3.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,11 +18,8 @@ class OutputDlS3Type(str, Enum):
 class OutputDlS3AuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"


@@ -37,78 +33,54 @@ class OutputDlS3SignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputDlS3ObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""

-    # Private
     PRIVATE = "private"
-    # Public Read Only
     PUBLIC_READ = "public-read"
-    # Public Read/Write
     PUBLIC_READ_WRITE = "public-read-write"
-    # Authenticated Read Only
     AUTHENTICATED_READ = "authenticated-read"
-    # AWS EC2 AMI Read Only
     AWS_EXEC_READ = "aws-exec-read"
-    # Bucket Owner Read Only
     BUCKET_OWNER_READ = "bucket-owner-read"
-    # Bucket Owner Full Control
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"


 class OutputDlS3StorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""

-    # Standard
     STANDARD = "STANDARD"
-    # Reduced Redundancy Storage
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
-    # Standard, Infrequent Access
     STANDARD_IA = "STANDARD_IA"
-    # One Zone, Infrequent Access
     ONEZONE_IA = "ONEZONE_IA"
-    # Intelligent Tiering
     INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
-    # Glacier Flexible Retrieval
     GLACIER = "GLACIER"
-    # Glacier Instant Retrieval
     GLACIER_IR = "GLACIER_IR"
-    # Glacier Deep Archive
     DEEP_ARCHIVE = "DEEP_ARCHIVE"


 class OutputDlS3ServerSideEncryptionForUploadedObjects(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
-    # Amazon S3 Managed Key
     AES256 = "AES256"
-    # AWS KMS Managed Key
     AWS_KMS = "aws:kms"


 class OutputDlS3DataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""

-    # JSON
     JSON = "json"
-    # Raw
     RAW = "raw"
-    # Parquet
     PARQUET = "parquet"


 class OutputDlS3BackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"


 class OutputDlS3DiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"


@@ -122,31 +94,23 @@ class OutputDlS3Compression(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputDlS3CompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""

-    # Best Speed
     BEST_SPEED = "best_speed"
-    # Normal
     NORMAL = "normal"
-    # Best Compression
     BEST_COMPRESSION = "best_compression"


 class OutputDlS3ParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""

-    # 1.0
     PARQUET_1_0 = "PARQUET_1_0"
-    # 2.4
     PARQUET_2_4 = "PARQUET_2_4"
-    # 2.6
     PARQUET_2_6 = "PARQUET_2_6"


 class OutputDlS3DataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

-    # V1
     DATA_PAGE_V1 = "DATA_PAGE_V1"
-    # V2
     DATA_PAGE_V2 = "DATA_PAGE_V2"

@@ -257,8 +221,6 @@ class OutputDlS3TypedDict(TypedDict):
     r"""Compression level to apply before moving files to final destination"""
     automatic_schema: NotRequired[bool]
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
-    parquet_schema: NotRequired[str]
-    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
     parquet_version: NotRequired[OutputDlS3ParquetVersion]
     r"""Determines which data types are supported and how they are represented"""
     parquet_data_page_version: NotRequired[OutputDlS3DataPageVersion]
@@ -528,11 +490,6 @@ class OutputDlS3(BaseModel):
     ] = False
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""

-    parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
-        None
-    )
-    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
-
     parquet_version: Annotated[
         Annotated[
             Optional[OutputDlS3ParquetVersion],
@@ -599,111 +556,3 @@ class OutputDlS3(BaseModel):

     max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
-
-    @field_serializer("aws_authentication_method")
-    def serialize_aws_authentication_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3AuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("signature_version")
-    def serialize_signature_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3SignatureVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("object_acl")
-    def serialize_object_acl(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3ObjectACL(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("storage_class")
-    def serialize_storage_class(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3StorageClass(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("server_side_encryption")
-    def serialize_server_side_encryption(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3ServerSideEncryptionForUploadedObjects(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("format_")
-    def serialize_format_(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3DataFormat(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3BackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_disk_full_backpressure")
-    def serialize_on_disk_full_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3DiskSpaceProtection(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("compress")
-    def serialize_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3Compression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("compression_level")
-    def serialize_compression_level(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3CompressionLevel(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("parquet_version")
-    def serialize_parquet_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3ParquetVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("parquet_data_page_version")
-    def serialize_parquet_data_page_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputDlS3DataPageVersion(value)
-            except ValueError:
-                return value
-        return value