cribl-control-plane 0.0.50rc1__py3-none-any.whl → 0.0.51__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +6 -4
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/health.py +2 -6
- cribl_control_plane/httpclient.py +0 -1
- cribl_control_plane/models/__init__.py +4 -21
- cribl_control_plane/models/appmode.py +1 -2
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +2 -7
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/getversionshowop.py +5 -6
- cribl_control_plane/models/gitinfo.py +3 -14
- cribl_control_plane/models/hbcriblinfo.py +1 -11
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +20 -47
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +16 -46
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
- cribl_control_plane/models/inputmsk.py +18 -53
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeprovidedinfo.py +1 -4
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +32 -77
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +28 -74
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +30 -82
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/packinfo.py +0 -3
- cribl_control_plane/models/packinstallinfo.py +0 -3
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
- cribl_control_plane/models/workertypes.py +1 -2
- {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/METADATA +14 -5
- cribl_control_plane-0.0.51.dist-info/RECORD +325 -0
- cribl_control_plane/models/error.py +0 -16
- cribl_control_plane/models/gethealthinfoop.py +0 -17
- cribl_control_plane/models/gitshowresult.py +0 -19
- cribl_control_plane-0.0.50rc1.dist-info/RECORD +0 -328
- {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/WHEEL +0 -0
--- a/cribl_control_plane/models/outputcribltcp.py
+++ b/cribl_control_plane/models/outputcribltcp.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,21 +12,21 @@ class OutputCriblTCPType(str, Enum):
     CRIBL_TCP = "cribl_tcp"


-class OutputCriblTCPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblTCPCompression(str, Enum):
     r"""Codec to use to compress the data before sending"""

     NONE = "none"
     GZIP = "gzip"


-class OutputCriblTCPMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblTCPMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputCriblTCPMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblTCPMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -89,23 +86,15 @@ class OutputCriblTCPTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputCriblTCPMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputCriblTCPMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputCriblTCPMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputCriblTCPMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputCriblTCPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblTCPBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -113,7 +102,7 @@ class OutputCriblTCPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta
     QUEUE = "queue"


-class OutputCriblTCPTLS(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblTCPTLS(str, Enum):
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     INHERIT = "inherit"
@@ -140,9 +129,7 @@ class OutputCriblTCPHost(BaseModel):
     port: Optional[float] = 10300
     r"""The port to connect to on the provided host"""

-    tls: Annotated[
-        Optional[OutputCriblTCPTLS], PlainValidator(validate_open_enum(False))
-    ] = OutputCriblTCPTLS.INHERIT
+    tls: Optional[OutputCriblTCPTLS] = OutputCriblTCPTLS.INHERIT
     r"""Whether to inherit TLS configs from group setting or disable TLS"""

     servername: Optional[str] = None
@@ -152,21 +139,21 @@ class OutputCriblTCPHost(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputCriblTCPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblTCPPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputCriblTCPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblTCPQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCriblTCPMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -268,9 +255,7 @@ class OutputCriblTCP(BaseModel):
     )
     r"""Use load-balanced destinations"""

-    compression: Annotated[
-        Optional[OutputCriblTCPCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputCriblTCPCompression.GZIP
+    compression: Optional[OutputCriblTCPCompression] = OutputCriblTCPCompression.GZIP
     r"""Codec to use to compress the data before sending"""

     log_failed_requests: Annotated[
@@ -306,10 +291,7 @@ class OutputCriblTCP(BaseModel):
     r"""Fields to exclude from the event. By default, all internal fields except `__output` are sent. Example: `cribl_pipe`, `c*`. Wildcards supported."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCriblTCPBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblTCPBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCriblTCPBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -357,29 +339,20 @@ class OutputCriblTCP(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputCriblTCPPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblTCPPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputCriblTCPPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCriblTCPQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCriblTCPQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCriblTCPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputCriblTCPMode.ERROR
+    pq_mode: Annotated[Optional[OutputCriblTCPMode], pydantic.Field(alias="pqMode")] = (
+        OutputCriblTCPMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
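The same mechanical change repeats across every model file listed above: generated enums lose the metaclass=utils.OpenEnumMeta argument, and enum-typed fields lose the inner Annotated[..., PlainValidator(validate_open_enum(False))] wrapper, leaving plain closed Enum annotations that pydantic validates directly (which is also why the utils, validate_open_enum, and PlainValidator imports disappear). A minimal sketch of the before/after field shape, using a hypothetical Codec enum and ExampleModel rather than the generated classes; the 0.0.50rc1 form is kept as a comment because validate_open_enum is no longer available in 0.0.51:

# Sketch only: Codec and ExampleModel are illustrative stand-ins, not SDK classes.
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class Codec(str, Enum):
    NONE = "none"
    GZIP = "gzip"


class ExampleModel(pydantic.BaseModel):
    # 0.0.50rc1 shape (open enum), reproduced as a comment:
    #     compression: Annotated[
    #         Annotated[Optional[Codec], PlainValidator(validate_open_enum(False))],
    #         pydantic.Field(alias="compression"),
    #     ] = Codec.GZIP
    #
    # 0.0.51 shape (closed enum): pydantic validates the value against Codec itself.
    compression: Annotated[Optional[Codec], pydantic.Field(alias="compression")] = (
        Codec.GZIP
    )


print(ExampleModel(compression="gzip").compression is Codec.GZIP)  # True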
--- a/cribl_control_plane/models/outputcrowdstrikenextgensiem.py
+++ b/cribl_control_plane/models/outputcrowdstrikenextgensiem.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -26,9 +23,7 @@ class OutputCrowdstrikeNextGenSiemExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputCrowdstrikeNextGenSiemFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputCrowdstrikeNextGenSiemFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -36,18 +31,14 @@ class OutputCrowdstrikeNextGenSiemFailedRequestLoggingMode(
     NONE = "none"


-class OutputCrowdstrikeNextGenSiemRequestFormat(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputCrowdstrikeNextGenSiemRequestFormat(str, Enum):
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""

     JSON = "JSON"
     RAW = "raw"


-class OutputCrowdstrikeNextGenSiemAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputCrowdstrikeNextGenSiemAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -108,9 +99,7 @@ class OutputCrowdstrikeNextGenSiemTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputCrowdstrikeNextGenSiemBackpressureBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputCrowdstrikeNextGenSiemBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -118,23 +107,21 @@ class OutputCrowdstrikeNextGenSiemBackpressureBehavior(
     QUEUE = "queue"


-class OutputCrowdstrikeNextGenSiemCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCrowdstrikeNextGenSiemCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputCrowdstrikeNextGenSiemQueueFullBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputCrowdstrikeNextGenSiemQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputCrowdstrikeNextGenSiemMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCrowdstrikeNextGenSiemMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -298,10 +285,7 @@ class OutputCrowdstrikeNextGenSiem(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputCrowdstrikeNextGenSiemFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCrowdstrikeNextGenSiemFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputCrowdstrikeNextGenSiemFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -312,19 +296,13 @@ class OutputCrowdstrikeNextGenSiem(BaseModel):
     r"""List of headers that are safe to log in plain text"""

     format_: Annotated[
-        Annotated[
-            Optional[OutputCrowdstrikeNextGenSiemRequestFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCrowdstrikeNextGenSiemRequestFormat],
         pydantic.Field(alias="format"),
     ] = OutputCrowdstrikeNextGenSiemRequestFormat.JSON
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputCrowdstrikeNextGenSiemAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCrowdstrikeNextGenSiemAuthenticationMethod],
         pydantic.Field(alias="authType"),
     ] = OutputCrowdstrikeNextGenSiemAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
@@ -346,10 +324,7 @@ class OutputCrowdstrikeNextGenSiem(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCrowdstrikeNextGenSiemBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCrowdstrikeNextGenSiemBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCrowdstrikeNextGenSiemBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -375,29 +350,19 @@ class OutputCrowdstrikeNextGenSiem(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputCrowdstrikeNextGenSiemCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCrowdstrikeNextGenSiemCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputCrowdstrikeNextGenSiemCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCrowdstrikeNextGenSiemQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCrowdstrikeNextGenSiemQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCrowdstrikeNextGenSiemQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputCrowdstrikeNextGenSiemMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputCrowdstrikeNextGenSiemMode], pydantic.Field(alias="pqMode")
     ] = OutputCrowdstrikeNextGenSiemMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

--- a/cribl_control_plane/models/outputdatadog.py
+++ b/cribl_control_plane/models/outputdatadog.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,14 +12,14 @@ class OutputDatadogType(str, Enum):
     DATADOG = "datadog"


-class SendLogsAs(str, Enum, metaclass=utils.OpenEnumMeta):
+class SendLogsAs(str, Enum):
     r"""The content type to use when sending logs"""

     TEXT = "text"
     JSON = "json"


-class OutputDatadogSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatadogSeverity(str, Enum):
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""

     EMERGENCY = "emergency"
@@ -35,7 +32,7 @@ class OutputDatadogSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     DEBUG = "debug"


-class DatadogSite(str, Enum, metaclass=utils.OpenEnumMeta):
+class DatadogSite(str, Enum):
     r"""Datadog site to which events should be sent"""

     US = "us"
@@ -58,7 +55,7 @@ class OutputDatadogExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputDatadogFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatadogFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -120,7 +117,7 @@ class OutputDatadogTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputDatadogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatadogBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -128,28 +125,28 @@ class OutputDatadogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
     QUEUE = "queue"


-class OutputDatadogAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatadogAuthenticationMethod(str, Enum):
     r"""Enter API key directly, or select a stored secret"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputDatadogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatadogCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDatadogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatadogQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDatadogMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatadogMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -279,8 +276,7 @@ class OutputDatadog(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     content_type: Annotated[
-        Annotated[Optional[SendLogsAs], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="contentType"),
+        Optional[SendLogsAs], pydantic.Field(alias="contentType")
     ] = SendLogsAs.JSON
     r"""The content type to use when sending logs"""

@@ -307,14 +303,10 @@ class OutputDatadog(BaseModel):
     ] = False
     r"""Allow API key to be set from the event's '__agent_api_key' field"""

-    severity: Annotated[
-        Optional[OutputDatadogSeverity], PlainValidator(validate_open_enum(False))
-    ] = None
+    severity: Optional[OutputDatadogSeverity] = None
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""

-    site: Annotated[
-        Optional[DatadogSite], PlainValidator(validate_open_enum(False))
-    ] = DatadogSite.US
+    site: Optional[DatadogSite] = DatadogSite.US
     r"""Datadog site to which events should be sent"""

     send_counters_as_count: Annotated[
@@ -366,10 +358,7 @@ class OutputDatadog(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputDatadogFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDatadogFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDatadogFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -396,20 +385,13 @@ class OutputDatadog(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDatadogBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDatadogBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDatadogBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputDatadogAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputDatadogAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputDatadogAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""

@@ -436,29 +418,19 @@ class OutputDatadog(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputDatadogCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputDatadogCompression], pydantic.Field(alias="pqCompress")
     ] = OutputDatadogCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDatadogQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDatadogQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDatadogQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputDatadogMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputDatadogMode.ERROR
+    pq_mode: Annotated[Optional[OutputDatadogMode], pydantic.Field(alias="pqMode")] = (
+        OutputDatadogMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
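A practical consequence of the change shown in these diffs: with the PlainValidator(validate_open_enum(False)) layer removed, enum-valued fields such as site above now rely on pydantic's standard enum validation, which rejects any value outside the declared members with a ValidationError (the removed validator presumably relaxed this, given the "open enum" naming). A small sketch of that behavior with a hypothetical stand-in model; the real OutputDatadog class has many more fields, and only the first DatadogSite member visible in the diff is reproduced here:

# Hypothetical stand-in used to illustrate closed-enum validation in 0.0.51.
from enum import Enum
from typing import Optional

import pydantic


class DatadogSite(str, Enum):
    US = "us"  # the generated enum continues with additional sites not shown in the diff


class MiniDatadogOutput(pydantic.BaseModel):
    site: Optional[DatadogSite] = DatadogSite.US


print(MiniDatadogOutput(site="us").site is DatadogSite.US)  # True

try:
    MiniDatadogOutput(site="not-a-member")  # closed enum: rejected by pydantic
except pydantic.ValidationError as err:
    print(err.errors()[0]["type"])  # "enum"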