cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane has been flagged as potentially problematic; see the package registry listing for details.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4124 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
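The model-level changes below all follow one pattern: 0.0.17 drops the open-enum machinery, namely the metaclass=utils.OpenEnumMeta argument on the generated enums and the PlainValidator(validate_open_enum(False)) wrapper (with its imports) on the model fields, leaving plain enum.Enum subclasses that Pydantic validates directly. A minimal sketch of the resulting 0.0.17 field shape follows, using names from the outputsqs.py diff below; the model class here is a hypothetical stand-in, not the generated OutputSqs, which derives from cribl_control_plane.types.BaseModel.

# Sketch of the 0.0.17 field shape (hypothetical stand-in model).
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class OutputSqsQueueType(str, Enum):  # 0.0.16 also set metaclass=utils.OpenEnumMeta
    STANDARD = "standard"
    FIFO = "fifo"


class QueueSettingsSketch(pydantic.BaseModel):
    # 0.0.16 wrapped Optional[...] in a second Annotated[...] carrying
    # PlainValidator(validate_open_enum(False)); 0.0.17 keeps only the alias.
    queue_type: Annotated[
        Optional[OutputSqsQueueType], pydantic.Field(alias="queueType")
    ] = OutputSqsQueueType.STANDARD


print(QueueSettingsSketch(queueType="fifo").queue_type)  # OutputSqsQueueType.FIFO
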
cribl_control_plane/models/outputsqs.py

@@ -1,28 +1,25 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputSqsType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsType(str, Enum):
     SQS = "sqs"


-class OutputSqsQueueType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsQueueType(str, Enum):
     r"""The queue type used (or created). Defaults to Standard."""

     STANDARD = "standard"
     FIFO = "fifo"


-class OutputSqsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -30,14 +27,14 @@ class OutputSqsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"


-class OutputSqsSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsSignatureVersion(str, Enum):
     r"""Signature version to use for signing SQS requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputSqsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -45,21 +42,21 @@ class OutputSqsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputSqsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSqsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSqsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSqsMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -154,9 +151,7 @@ class OutputSqs(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputSqsType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputSqsType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -173,10 +168,7 @@ class OutputSqs(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     queue_type: Annotated[
-        Annotated[
-            Optional[OutputSqsQueueType], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="queueType"),
+        Optional[OutputSqsQueueType], pydantic.Field(alias="queueType")
     ] = OutputSqsQueueType.STANDARD
     r"""The queue type used (or created). Defaults to Standard."""

@@ -194,10 +186,7 @@ class OutputSqs(BaseModel):
     r"""Create queue if it does not exist."""

     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputSqsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSqsAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputSqsAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -213,11 +202,7 @@ class OutputSqs(BaseModel):
     r"""SQS service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to SQS-compatible endpoint."""

     signature_version: Annotated[
-        Annotated[
-            Optional[OutputSqsSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="signatureVersion"),
+        Optional[OutputSqsSignatureVersion], pydantic.Field(alias="signatureVersion")
     ] = OutputSqsSignatureVersion.V4
     r"""Signature version to use for signing SQS requests"""

@@ -272,11 +257,7 @@ class OutputSqs(BaseModel):
     r"""The maximum number of in-progress API requests before backpressure is applied."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSqsBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="onBackpressure"),
+        Optional[OutputSqsBackpressureBehavior], pydantic.Field(alias="onBackpressure")
     ] = OutputSqsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

@@ -301,26 +282,18 @@ class OutputSqs(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSqsCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSqsCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSqsCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSqsQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputSqsQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputSqsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[Optional[OutputSqsMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSqsMode.ERROR
+    pq_mode: Annotated[Optional[OutputSqsMode], pydantic.Field(alias="pqMode")] = (
+        OutputSqsMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputstatsd.py

@@ -1,28 +1,25 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputStatsdType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdType(str, Enum):
     STATSD = "statsd"


-class OutputStatsdDestinationProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdDestinationProtocol(str, Enum):
     r"""Protocol to use when communicating with the destination."""

     UDP = "udp"
     TCP = "tcp"


-class OutputStatsdBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -30,21 +27,21 @@ class OutputStatsdBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputStatsdCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputStatsdQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputStatsdMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -115,9 +112,7 @@ class OutputStatsd(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputStatsdType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputStatsdType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -133,10 +128,9 @@ class OutputStatsd(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    protocol: Annotated[
-        Optional[OutputStatsdDestinationProtocol],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputStatsdDestinationProtocol.UDP
+    protocol: Optional[OutputStatsdDestinationProtocol] = (
+        OutputStatsdDestinationProtocol.UDP
+    )
     r"""Protocol to use when communicating with the destination."""

     port: Optional[float] = 8125
@@ -173,10 +167,7 @@ class OutputStatsd(BaseModel):
     r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputStatsdBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputStatsdBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputStatsdBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -195,28 +186,19 @@ class OutputStatsd(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputStatsdCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputStatsdCompression], pydantic.Field(alias="pqCompress")
     ] = OutputStatsdCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputStatsdQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputStatsdQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputStatsdQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputStatsdMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputStatsdMode.ERROR
+    pq_mode: Annotated[Optional[OutputStatsdMode], pydantic.Field(alias="pqMode")] = (
+        OutputStatsdMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputstatsdext.py

@@ -1,28 +1,25 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputStatsdExtType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtType(str, Enum):
     STATSD_EXT = "statsd_ext"


-class OutputStatsdExtDestinationProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtDestinationProtocol(str, Enum):
     r"""Protocol to use when communicating with the destination."""

     UDP = "udp"
     TCP = "tcp"


-class OutputStatsdExtBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -30,21 +27,21 @@ class OutputStatsdExtBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputStatsdExtCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputStatsdExtQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputStatsdExtMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputStatsdExtMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -115,9 +112,7 @@ class OutputStatsdExt(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputStatsdExtType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputStatsdExtType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -133,10 +128,9 @@ class OutputStatsdExt(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    protocol: Annotated[
-        Optional[OutputStatsdExtDestinationProtocol],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputStatsdExtDestinationProtocol.UDP
+    protocol: Optional[OutputStatsdExtDestinationProtocol] = (
+        OutputStatsdExtDestinationProtocol.UDP
+    )
     r"""Protocol to use when communicating with the destination."""

     port: Optional[float] = 8125
@@ -173,10 +167,7 @@ class OutputStatsdExt(BaseModel):
     r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputStatsdExtBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputStatsdExtBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -195,28 +186,18 @@ class OutputStatsdExt(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputStatsdExtCompression], pydantic.Field(alias="pqCompress")
     ] = OutputStatsdExtCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputStatsdExtQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputStatsdExtQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputStatsdExtMode], pydantic.Field(alias="pqMode")
     ] = OutputStatsdExtMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

cribl_control_plane/models/outputsumologic.py

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputSumoLogicType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicType(str, Enum):
     SUMO_LOGIC = "sumo_logic"


-class OutputSumoLogicDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicDataFormat(str, Enum):
     r"""Preserve the raw event format instead of JSONifying it"""

     JSON = "json"
@@ -33,7 +30,7 @@ class OutputSumoLogicExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputSumoLogicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -95,7 +92,7 @@ class OutputSumoLogicTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputSumoLogicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -103,21 +100,21 @@ class OutputSumoLogicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputSumoLogicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputSumoLogicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputSumoLogicMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSumoLogicMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -206,7 +203,7 @@ class OutputSumoLogicTypedDict(TypedDict):


 class OutputSumoLogic(BaseModel):
-    type: Annotated[OutputSumoLogicType, PlainValidator(validate_open_enum(False))]
+    type: OutputSumoLogicType

     url: str
     r"""Sumo Logic HTTP collector URL to which events should be sent"""
@@ -237,11 +234,7 @@ class OutputSumoLogic(BaseModel):
     r"""Override the source category configured on the Sumo Logic HTTP collector. This can also be overridden at the event level with the __sourceCategory field."""

     format_: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputSumoLogicDataFormat], pydantic.Field(alias="format")
     ] = OutputSumoLogicDataFormat.JSON
     r"""Preserve the raw event format instead of JSONifying it"""

@@ -289,10 +282,7 @@ class OutputSumoLogic(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSumoLogicFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputSumoLogicFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -319,10 +309,7 @@ class OutputSumoLogic(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSumoLogicBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSumoLogicBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -348,28 +335,18 @@ class OutputSumoLogic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSumoLogicCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSumoLogicCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSumoLogicQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSumoLogicQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSumoLogicMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputSumoLogicMode], pydantic.Field(alias="pqMode")
     ] = OutputSumoLogicMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

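One practical consequence worth noting, sketched under the assumption that the removed validate_open_enum helper previously let unrecognized values through (which is what its name suggests): with the plain Enum classes in 0.0.17, a value outside the declared members fails Pydantic validation. The model below is a hypothetical stand-in, not the generated code.

# Hedged sketch: plain Enum fields reject values outside the declared members.
from enum import Enum

import pydantic


class OutputSqsCompression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


class PqSettingsSketch(pydantic.BaseModel):  # hypothetical, not the generated model
    pq_compress: OutputSqsCompression = OutputSqsCompression.NONE


print(PqSettingsSketch(pq_compress="gzip").pq_compress)  # OutputSqsCompression.GZIP

try:
    PqSettingsSketch(pq_compress="zstd")  # not a declared member
except pydantic.ValidationError as err:
    print(err.error_count(), "validation error")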