cribl-control-plane 0.0.46__py3-none-any.whl → 0.0.48a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -6
- cribl_control_plane/errors/apierror.py +2 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +11 -7
- cribl_control_plane/errors/error.py +4 -2
- cribl_control_plane/errors/healthstatus_error.py +12 -4
- cribl_control_plane/errors/no_response_error.py +5 -1
- cribl_control_plane/errors/responsevalidationerror.py +2 -0
- cribl_control_plane/models/__init__.py +12 -12
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/hbcriblinfo.py +19 -3
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +47 -20
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +46 -16
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +53 -18
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeprovidedinfo.py +3 -0
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +77 -32
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +74 -28
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +82 -30
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/RECORD +165 -167
- {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/WHEEL +1 -1
- cribl_control_plane/models/appmode.py +0 -13
- cribl_control_plane/models/routecloneconf.py +0 -13
cribl_control_plane/models/outputhumiohec.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,7 +26,7 @@ class OutputHumioHecExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputHumioHecFailedRequestLoggingMode(str, Enum):
+class OutputHumioHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -31,14 +34,14 @@ class OutputHumioHecFailedRequestLoggingMode(str, Enum):
     NONE = "none"
 
 
-class OutputHumioHecRequestFormat(str, Enum):
+class OutputHumioHecRequestFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""
 
     JSON = "JSON"
     RAW = "raw"
 
 
-class OutputHumioHecAuthenticationMethod(str, Enum):
+class OutputHumioHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
     MANUAL = "manual"
@@ -99,7 +102,7 @@ class OutputHumioHecTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputHumioHecBackpressureBehavior(str, Enum):
+class OutputHumioHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -107,21 +110,21 @@ class OutputHumioHecBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputHumioHecCompression(str, Enum):
+class OutputHumioHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputHumioHecQueueFullBehavior(str, Enum):
+class OutputHumioHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputHumioHecMode(str, Enum):
+class OutputHumioHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -276,7 +279,10 @@ class OutputHumioHec(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputHumioHecFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputHumioHecFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputHumioHecFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -287,12 +293,20 @@ class OutputHumioHec(BaseModel):
     r"""List of headers that are safe to log in plain text"""
 
     format_: Annotated[
-        Optional[OutputHumioHecRequestFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputHumioHecRequestFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputHumioHecRequestFormat.JSON
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""
 
     auth_type: Annotated[
-        Optional[OutputHumioHecAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputHumioHecAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputHumioHecAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
@@ -313,7 +327,10 @@ class OutputHumioHec(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputHumioHecBackpressureBehavior],
+        Annotated[
+            Optional[OutputHumioHecBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputHumioHecBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -340,19 +357,29 @@ class OutputHumioHec(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputHumioHecCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputHumioHecCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputHumioHecCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputHumioHecQueueFullBehavior],
+        Annotated[
+            Optional[OutputHumioHecQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputHumioHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputHumioHecMode], pydantic.Field(alias="pqMode")
-    ] = OutputHumioHecMode.ERROR
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputHumioHecMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputHumioHecMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
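Nearly every change in this release follows the pattern shown above: generated enum classes gain `metaclass=utils.OpenEnumMeta`, and enum-typed fields are wrapped in `PlainValidator(validate_open_enum(False))` so that enum values introduced server-side after this SDK was generated pass validation instead of raising. The sketch below illustrates the idea; the names `OpenEnumMeta` and `validate_open_enum` match the imports from `cribl_control_plane.utils`, but the bodies here are simplified assumptions, not the SDK's actual implementation.

```python
# Simplified stand-ins for cribl_control_plane.utils.OpenEnumMeta and
# validate_open_enum; the real implementations may differ in detail.
from enum import Enum, EnumMeta
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class OpenEnumMeta(EnumMeta):
    """Enum metaclass that returns unknown values instead of raising."""

    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # unlisted value: pass it through


def validate_open_enum(is_int: bool):
    """Build a validator that accepts members and unlisted values alike."""

    def validate(value):
        if value is None:
            return None
        return int(value) if is_int else value

    return validate


class FailedRequestLoggingMode(str, Enum, metaclass=OpenEnumMeta):
    PAYLOAD = "payload"
    NONE = "none"


class ExampleOutput(pydantic.BaseModel):
    mode: Annotated[
        Optional[FailedRequestLoggingMode],
        PlainValidator(validate_open_enum(False)),
    ] = FailedRequestLoggingMode.NONE


print(ExampleOutput(mode="headers").mode)  # unknown value accepted, not rejected
print(ExampleOutput(mode="payload").mode == FailedRequestLoggingMode.PAYLOAD)  # True
```

With a closed enum, `mode="headers"` would raise a ValidationError; with this wiring it survives as a plain string, while known values still compare equal to their members because the enum subclasses `str`.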
cribl_control_plane/models/outputinfluxdb.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputInfluxdbType(str, Enum):
     INFLUXDB = "influxdb"
 
 
-class TimestampPrecision(str, Enum):
+class TimestampPrecision(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""
 
     NS = "ns"
@@ -34,7 +37,7 @@ class OutputInfluxdbExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputInfluxdbFailedRequestLoggingMode(str, Enum):
+class OutputInfluxdbFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -96,7 +99,7 @@ class OutputInfluxdbTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputInfluxdbBackpressureBehavior(str, Enum):
+class OutputInfluxdbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -104,7 +107,7 @@ class OutputInfluxdbBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputInfluxdbAuthenticationType(str, Enum):
+class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""InfluxDB authentication type"""
 
     NONE = "none"
@@ -115,21 +118,21 @@ class OutputInfluxdbAuthenticationType(str, Enum):
     OAUTH = "oauth"
 
 
-class OutputInfluxdbCompression(str, Enum):
+class OutputInfluxdbCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputInfluxdbQueueFullBehavior(str, Enum):
+class OutputInfluxdbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputInfluxdbMode(str, Enum):
+class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -306,7 +309,10 @@ class OutputInfluxdb(BaseModel):
     r"""The v2 API can be enabled with InfluxDB versions 1.8 and later."""
 
     timestamp_precision: Annotated[
-        Optional[TimestampPrecision], pydantic.Field(alias="timestampPrecision")
+        Annotated[
+            Optional[TimestampPrecision], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="timestampPrecision"),
     ] = TimestampPrecision.MS
     r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""
 
@@ -364,7 +370,10 @@ class OutputInfluxdb(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputInfluxdbFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputInfluxdbFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputInfluxdbFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -391,13 +400,20 @@ class OutputInfluxdb(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputInfluxdbBackpressureBehavior],
+        Annotated[
+            Optional[OutputInfluxdbBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputInfluxdbBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputInfluxdbAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputInfluxdbAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputInfluxdbAuthenticationType.NONE
     r"""InfluxDB authentication type"""
 
@@ -426,19 +442,29 @@ class OutputInfluxdb(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputInfluxdbCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputInfluxdbCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputInfluxdbCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputInfluxdbQueueFullBehavior],
+        Annotated[
+            Optional[OutputInfluxdbQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputInfluxdbQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputInfluxdbMode], pydantic.Field(alias="pqMode")
-    ] = OutputInfluxdbMode.ERROR
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputInfluxdbMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputInfluxdbMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
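One structural detail worth noting in these hunks: the open-enum validator is attached on an inner `Annotated` around the enum type, while the outer `Annotated` keeps the `pydantic.Field(alias=...)` wire-name mapping. A small sketch of that layering follows, using a pass-through stand-in rather than the SDK's `validate_open_enum`:

```python
# Stand-in demonstration of the nested-Annotated layout used above; the
# passthrough validator is an assumption standing in for validate_open_enum.
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


def passthrough(value):
    return value


class TimestampPrecisionSketch(str, Enum):
    NS = "ns"
    MS = "ms"


class OutputInfluxdbSketch(pydantic.BaseModel):
    timestamp_precision: Annotated[
        # inner Annotated: the enum type plus its open-enum validator
        Annotated[Optional[TimestampPrecisionSketch], PlainValidator(passthrough)],
        # outer Annotated: the JSON wire name stays "timestampPrecision"
        pydantic.Field(alias="timestampPrecision"),
    ] = TimestampPrecisionSketch.MS


m = OutputInfluxdbSketch.model_validate({"timestampPrecision": "us"})
print(m.timestamp_precision)  # "us" survives even though it is not a member
```

Keeping the alias at the field level means (de)serialization still uses the camelCase wire names; only the enum validation is relaxed.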
cribl_control_plane/models/outputkafka.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputKafkaType(str, Enum):
     KAFKA = "kafka"
 
 
-class OutputKafkaAcknowledgments(int, Enum):
+class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""
 
     ONE = 1
@@ -20,7 +23,7 @@ class OutputKafkaAcknowledgments(int, Enum):
     MINUS_1 = -1
 
 
-class OutputKafkaRecordDataFormat(str, Enum):
+class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""
 
     JSON = "json"
@@ -28,7 +31,7 @@ class OutputKafkaRecordDataFormat(str, Enum):
     PROTOBUF = "protobuf"
 
 
-class OutputKafkaCompression(str, Enum):
+class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""
 
     NONE = "none"
@@ -37,7 +40,7 @@ class OutputKafkaCompression(str, Enum):
     LZ4 = "lz4"
 
 
-class OutputKafkaSchemaType(str, Enum):
+class OutputKafkaSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The schema format used to encode and decode event data"""
 
     AVRO = "avro"
@@ -63,14 +66,18 @@ class OutputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -130,12 +137,18 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -170,7 +183,10 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
     schema_type: Annotated[
-        Optional[OutputKafkaSchemaType], pydantic.Field(alias="schemaType")
+        Annotated[
+            Optional[OutputKafkaSchemaType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="schemaType"),
     ] = OutputKafkaSchemaType.AVRO
     r"""The schema format used to encode and decode event data"""
 
@@ -203,7 +219,7 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputKafkaSASLMechanism(str, Enum):
+class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -224,7 +240,9 @@ class OutputKafkaAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    mechanism: Optional[OutputKafkaSASLMechanism] = OutputKafkaSASLMechanism.PLAIN
+    mechanism: Annotated[
+        Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
+    ] = OutputKafkaSASLMechanism.PLAIN
 
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -232,14 +250,14 @@ class OutputKafkaAuthentication(BaseModel):
     r"""Enable OAuth authentication"""
 
 
-class OutputKafkaMinimumTLSVersion(str, Enum):
+class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
    TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputKafkaMaximumTLSVersion(str, Enum):
+class OutputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -299,15 +317,23 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputKafkaMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputKafkaMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class OutputKafkaBackpressureBehavior(str, Enum):
+class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -315,21 +341,21 @@ class OutputKafkaBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputKafkaPqCompressCompression(str, Enum):
+class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputKafkaQueueFullBehavior(str, Enum):
+class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputKafkaMode(str, Enum):
+class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -441,15 +467,23 @@ class OutputKafka(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    ack: Optional[OutputKafkaAcknowledgments] = OutputKafkaAcknowledgments.ONE
+    ack: Annotated[
+        Optional[OutputKafkaAcknowledgments], PlainValidator(validate_open_enum(True))
+    ] = OutputKafkaAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""
 
     format_: Annotated[
-        Optional[OutputKafkaRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputKafkaRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputKafkaRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""
 
-    compression: Optional[OutputKafkaCompression] = OutputKafkaCompression.GZIP
+    compression: Annotated[
+        Optional[OutputKafkaCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputKafkaCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""
 
     max_record_size_kb: Annotated[
@@ -512,7 +546,10 @@ class OutputKafka(BaseModel):
     tls: Optional[OutputKafkaTLSSettingsClientSide] = None
 
     on_backpressure: Annotated[
-        Optional[OutputKafkaBackpressureBehavior],
+        Annotated[
+            Optional[OutputKafkaBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKafkaBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -538,18 +575,27 @@ class OutputKafka(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputKafkaPqCompressCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputKafkaPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputKafkaPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputKafkaQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
+        Annotated[
+            Optional[OutputKafkaQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputKafkaMode], pydantic.Field(alias="pqMode")
-    ] = OutputKafkaMode.ERROR
+    pq_mode: Annotated[
+        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKafkaMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
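The Kafka model is the one place above where the flag flips to `validate_open_enum(True)`: `OutputKafkaAcknowledgments` is an int-valued enum (e.g. `ONE = 1`, `MINUS_1 = -1`), so the validator presumably coerces incoming values to int rather than leaving them as strings. A sketch of that assumed variant:

```python
# Assumed behavior of the int-enum variant; the real validate_open_enum(True)
# in cribl_control_plane.utils may differ in detail.
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


def validate_open_int_enum(value):
    # Coerce to int so "1" and 1 both work; unknown counts are kept, not rejected.
    return None if value is None else int(value)


class AcknowledgmentsSketch(int, Enum):
    ONE = 1
    MINUS_1 = -1


class OutputKafkaSketch(pydantic.BaseModel):
    ack: Annotated[
        Optional[AcknowledgmentsSketch], PlainValidator(validate_open_int_enum)
    ] = AcknowledgmentsSketch.ONE


print(OutputKafkaSketch(ack="-1").ack)  # coerced to -1
print(OutputKafkaSketch(ack=0).ack)     # 0 is not a declared member, still accepted
```

The apparent net effect across all the models listed above is forward compatibility: configurations written by a newer control plane deserialize cleanly in this SDK even when they carry enum values that did not exist at generation time.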