cribl-control-plane 0.0.48a1__py3-none-any.whl → 0.0.49__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +6 -4
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +12 -12
- cribl_control_plane/models/appmode.py +13 -0
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +2 -7
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/hbcriblinfo.py +3 -19
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +20 -47
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +16 -46
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
- cribl_control_plane/models/inputmsk.py +18 -53
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeprovidedinfo.py +0 -3
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +32 -77
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +14 -41
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +28 -74
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +30 -82
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/routecloneconf.py +13 -0
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/workertypes.py +1 -2
- cribl_control_plane/sdk.py +2 -2
- cribl_control_plane/utils/annotations.py +32 -8
- {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.49.dist-info}/METADATA +2 -1
- {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.49.dist-info}/RECORD +164 -162
- {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.49.dist-info}/WHEEL +0 -0
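The pattern repeated across these model files is the removal of the `utils.OpenEnumMeta` metaclass and the `PlainValidator(validate_open_enum(False))` annotations: the enums become plain closed `str` enums, and the field annotations collapse to `Annotated[Optional[...], pydantic.Field(alias=...)]`. Below is a minimal sketch of what that pattern means at validation time, assuming pydantic v2; `PqMode` and `Output` are hypothetical stand-ins, not generated SDK classes.

```python
# Minimal sketch (not part of the SDK) of the closed-enum field pattern used in 0.0.49.
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class PqMode(str, Enum):  # hypothetical stand-in for e.g. OutputSqsMode
    ERROR = "error"


class Output(pydantic.BaseModel):  # hypothetical stand-in for e.g. OutputSqs
    # Same shape as the generated fields: snake_case attribute, camelCase alias.
    pq_mode: Annotated[Optional[PqMode], pydantic.Field(alias="pqMode")] = (
        PqMode.ERROR
    )


print(Output.model_validate({"pqMode": "error"}).pq_mode)  # PqMode.ERROR

try:
    Output.model_validate({"pqMode": "not-a-mode"})
except pydantic.ValidationError as exc:
    # Without the open-enum validator, values outside the Enum now fail validation.
    print(exc.errors()[0]["type"])  # "enum"
```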
cribl_control_plane/models/outputsqs.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,14 +12,14 @@ class OutputSqsType(str, Enum):
     SQS = "sqs"
 
 
-class OutputSqsQueueType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsQueueType(str, Enum):
     r"""The queue type used (or created). Defaults to Standard."""
 
     STANDARD = "standard"
     FIFO = "fifo"
 
 
-class OutputSqsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -30,14 +27,14 @@ class OutputSqsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"
 
 
-class OutputSqsSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsSignatureVersion(str, Enum):
     r"""Signature version to use for signing SQS requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputSqsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -45,21 +42,21 @@ class OutputSqsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputSqsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputSqsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSqsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -171,10 +168,7 @@ class OutputSqs(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     queue_type: Annotated[
-        Annotated[
-            Optional[OutputSqsQueueType], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="queueType"),
+        Optional[OutputSqsQueueType], pydantic.Field(alias="queueType")
     ] = OutputSqsQueueType.STANDARD
     r"""The queue type used (or created). Defaults to Standard."""
 
@@ -192,10 +186,7 @@ class OutputSqs(BaseModel):
     r"""Create queue if it does not exist."""
 
     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputSqsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSqsAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputSqsAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -211,11 +202,7 @@ class OutputSqs(BaseModel):
     r"""SQS service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to SQS-compatible endpoint."""
 
     signature_version: Annotated[
-        Annotated[
-            Optional[OutputSqsSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="signatureVersion"),
+        Optional[OutputSqsSignatureVersion], pydantic.Field(alias="signatureVersion")
     ] = OutputSqsSignatureVersion.V4
     r"""Signature version to use for signing SQS requests"""
 
@@ -270,11 +257,7 @@ class OutputSqs(BaseModel):
     r"""The maximum number of in-progress API requests before backpressure is applied."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSqsBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="onBackpressure"),
+        Optional[OutputSqsBackpressureBehavior], pydantic.Field(alias="onBackpressure")
     ] = OutputSqsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
@@ -299,26 +282,18 @@ class OutputSqs(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSqsCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSqsCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSqsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSqsQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputSqsQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputSqsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[Optional[OutputSqsMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSqsMode.ERROR
+    pq_mode: Annotated[Optional[OutputSqsMode], pydantic.Field(alias="pqMode")] = (
+        OutputSqsMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
cribl_control_plane/models/outputstatsd.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,14 +12,14 @@ class OutputStatsdType(str, Enum):
     STATSD = "statsd"
 
 
-class OutputStatsdDestinationProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdDestinationProtocol(str, Enum):
     r"""Protocol to use when communicating with the destination."""
 
     UDP = "udp"
     TCP = "tcp"
 
 
-class OutputStatsdBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -30,21 +27,21 @@ class OutputStatsdBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputStatsdCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputStatsdQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputStatsdMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -131,10 +128,9 @@ class OutputStatsd(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    protocol: Annotated[
-        Optional[OutputStatsdDestinationProtocol],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputStatsdDestinationProtocol.UDP
+    protocol: Optional[OutputStatsdDestinationProtocol] = (
+        OutputStatsdDestinationProtocol.UDP
+    )
     r"""Protocol to use when communicating with the destination."""
 
     port: Optional[float] = 8125
@@ -171,10 +167,7 @@ class OutputStatsd(BaseModel):
     r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputStatsdBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputStatsdBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputStatsdBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -193,28 +186,19 @@ class OutputStatsd(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputStatsdCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputStatsdCompression], pydantic.Field(alias="pqCompress")
     ] = OutputStatsdCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputStatsdQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputStatsdQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputStatsdQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputStatsdMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputStatsdMode.ERROR
+    pq_mode: Annotated[Optional[OutputStatsdMode], pydantic.Field(alias="pqMode")] = (
+        OutputStatsdMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
cribl_control_plane/models/outputstatsdext.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,14 +12,14 @@ class OutputStatsdExtType(str, Enum):
     STATSD_EXT = "statsd_ext"
 
 
-class OutputStatsdExtDestinationProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtDestinationProtocol(str, Enum):
     r"""Protocol to use when communicating with the destination."""
 
     UDP = "udp"
     TCP = "tcp"
 
 
-class OutputStatsdExtBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -30,21 +27,21 @@ class OutputStatsdExtBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputStatsdExtCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputStatsdExtQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputStatsdExtMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -131,10 +128,9 @@ class OutputStatsdExt(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    protocol: Annotated[
-        Optional[OutputStatsdExtDestinationProtocol],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputStatsdExtDestinationProtocol.UDP
+    protocol: Optional[OutputStatsdExtDestinationProtocol] = (
+        OutputStatsdExtDestinationProtocol.UDP
+    )
     r"""Protocol to use when communicating with the destination."""
 
     port: Optional[float] = 8125
@@ -171,10 +167,7 @@ class OutputStatsdExt(BaseModel):
     r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputStatsdExtBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputStatsdExtBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -193,28 +186,18 @@ class OutputStatsdExt(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputStatsdExtCompression], pydantic.Field(alias="pqCompress")
     ] = OutputStatsdExtCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputStatsdExtQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputStatsdExtQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputStatsdExtMode], pydantic.Field(alias="pqMode")
     ] = OutputStatsdExtMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
cribl_control_plane/models/outputsumologic.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputSumoLogicType(str, Enum):
     SUMO_LOGIC = "sumo_logic"
 
 
-class OutputSumoLogicDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicDataFormat(str, Enum):
     r"""Preserve the raw event format instead of JSONifying it"""
 
     JSON = "json"
@@ -33,7 +30,7 @@ class OutputSumoLogicExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputSumoLogicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -95,7 +92,7 @@ class OutputSumoLogicTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputSumoLogicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -103,21 +100,21 @@ class OutputSumoLogicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputSumoLogicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputSumoLogicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSumoLogicMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -237,11 +234,7 @@ class OutputSumoLogic(BaseModel):
     r"""Override the source category configured on the Sumo Logic HTTP collector. This can also be overridden at the event level with the __sourceCategory field."""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputSumoLogicDataFormat], pydantic.Field(alias="format")
     ] = OutputSumoLogicDataFormat.JSON
     r"""Preserve the raw event format instead of JSONifying it"""
 
@@ -289,10 +282,7 @@ class OutputSumoLogic(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSumoLogicFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputSumoLogicFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -319,10 +309,7 @@ class OutputSumoLogic(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSumoLogicBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSumoLogicBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -348,28 +335,18 @@ class OutputSumoLogic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSumoLogicCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSumoLogicCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSumoLogicQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSumoLogicQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputSumoLogicMode], pydantic.Field(alias="pqMode")
     ] = OutputSumoLogicMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
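Even with the open-enum machinery gone, every field in the diffs above keeps its `pydantic.Field(alias=...)`, which suggests the serialized (camelCase) form of these configs is unchanged. A small follow-on sketch under the same assumptions as before (pydantic v2; `Compression` and `Output` are hypothetical stand-ins, not generated classes):

```python
# Minimal sketch: the snake_case attribute round-trips through the camelCase alias.
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class Compression(str, Enum):  # hypothetical stand-in for e.g. OutputSqsCompression
    NONE = "none"
    GZIP = "gzip"


class Output(pydantic.BaseModel):  # hypothetical stand-in for a generated model
    pq_compress: Annotated[
        Optional[Compression], pydantic.Field(alias="pqCompress")
    ] = Compression.NONE


out = Output.model_validate({"pqCompress": "gzip"})
print(out.pq_compress)                             # Compression.GZIP
print(out.model_dump(mode="json", by_alias=True))  # {'pqCompress': 'gzip'}
```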