cribl-control-plane 0.0.50__py3-none-any.whl → 0.0.50rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -5
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/groups_sdk.py +4 -4
- cribl_control_plane/health.py +6 -2
- cribl_control_plane/models/__init__.py +56 -31
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +24 -4
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/getversionshowop.py +6 -5
- cribl_control_plane/models/gitinfo.py +14 -3
- cribl_control_plane/models/gitshowresult.py +19 -0
- cribl_control_plane/models/hbcriblinfo.py +24 -3
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/input.py +65 -63
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +41 -32
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +41 -28
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +48 -30
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeprovidedinfo.py +3 -0
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/output.py +84 -79
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +71 -44
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatabricks.py +282 -0
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +50 -18
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +69 -40
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +77 -42
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.50rc2.dist-info/RECORD +327 -0
- cribl_control_plane/models/appmode.py +0 -13
- cribl_control_plane/models/routecloneconf.py +0 -13
- cribl_control_plane-0.0.50.dist-info/RECORD +0 -325
- {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/WHEEL +0 -0
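The bulk of this release candidate is a single mechanical change repeated across the input and output models: Speakeasy-generated enums become "open" enums. Each enum class gains `metaclass=utils.OpenEnumMeta`, and each enum-typed field is annotated with `PlainValidator(validate_open_enum(...))`, so a config value that this SDK build does not enumerate no longer fails pydantic validation. The three diffs below (outputkafka.py, outputkinesis.py, outputloki.py) show the pattern in full. As a minimal, self-contained sketch of the idea, with `OpenEnumMeta` and `validate_open_enum` written as simplified stand-ins rather than the SDK's actual implementations:

```python
from enum import Enum, EnumMeta
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class OpenEnumMeta(EnumMeta):
    """Stand-in: lookup of an unknown value returns the raw value instead of raising."""

    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # unknown member: keep the raw value


def validate_open_enum(is_int: bool):
    """Stand-in factory: replace pydantic's strict enum check with a pass-through."""

    def validate(value):
        return int(value) if is_int else value

    return validate


class Compression(str, Enum, metaclass=OpenEnumMeta):
    NONE = "none"
    GZIP = "gzip"


class OpenModel(pydantic.BaseModel):
    # PlainValidator replaces the built-in enum validation entirely, so values
    # unknown to this SDK build are accepted rather than rejected.
    compression: Annotated[
        Optional[Compression], PlainValidator(validate_open_enum(False))
    ] = Compression.GZIP


print(Compression("zstd"))            # zstd: direct lookup no longer raises
print(OpenModel(compression="zstd"))  # compression='zstd': validation passes
```

Without the metaclass and validator, the same inputs raise `ValueError` and `pydantic.ValidationError` respectively, which is presumably the motivation: configs written by a control plane newer than this SDK build should still deserialize.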
cribl_control_plane/models/outputkafka.py

```diff
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputKafkaType(str, Enum):
     KAFKA = "kafka"
 
 
-class OutputKafkaAcknowledgments(int, Enum):
+class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""
 
     ONE = 1
@@ -20,7 +23,7 @@ class OutputKafkaAcknowledgments(int, Enum):
     MINUS_1 = -1
 
 
-class OutputKafkaRecordDataFormat(str, Enum):
+class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""
 
     JSON = "json"
@@ -28,7 +31,7 @@ class OutputKafkaRecordDataFormat(str, Enum):
     PROTOBUF = "protobuf"
 
 
-class OutputKafkaCompression(str, Enum):
+class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""
 
     NONE = "none"
@@ -37,13 +40,6 @@ class OutputKafkaCompression(str, Enum):
     LZ4 = "lz4"
 
 
-class OutputKafkaSchemaType(str, Enum):
-    r"""The schema format used to encode and decode event data"""
-
-    AVRO = "avro"
-    JSON = "json"
-
-
 class OutputKafkaAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -63,14 +59,18 @@ class OutputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -130,12 +130,18 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -144,8 +150,6 @@ class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
-    schema_type: NotRequired[OutputKafkaSchemaType]
-    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -169,11 +173,6 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
-    schema_type: Annotated[
-        Optional[OutputKafkaSchemaType], pydantic.Field(alias="schemaType")
-    ] = OutputKafkaSchemaType.AVRO
-    r"""The schema format used to encode and decode event data"""
-
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -203,7 +202,7 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputKafkaSASLMechanism(str, Enum):
+class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -224,7 +223,9 @@ class OutputKafkaAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    mechanism: Optional[OutputKafkaSASLMechanism] = OutputKafkaSASLMechanism.PLAIN
+    mechanism: Annotated[
+        Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
+    ] = OutputKafkaSASLMechanism.PLAIN
 
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -232,14 +233,14 @@ class OutputKafkaAuthentication(BaseModel):
     r"""Enable OAuth authentication"""
 
 
-class OutputKafkaMinimumTLSVersion(str, Enum):
+class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
    TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputKafkaMaximumTLSVersion(str, Enum):
+class OutputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -299,15 +300,23 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputKafkaMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputKafkaMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class OutputKafkaBackpressureBehavior(str, Enum):
+class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -315,21 +324,21 @@ class OutputKafkaBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputKafkaPqCompressCompression(str, Enum):
+class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputKafkaQueueFullBehavior(str, Enum):
+class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputKafkaMode(str, Enum):
+class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -441,15 +450,23 @@ class OutputKafka(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    ack: Optional[OutputKafkaAcknowledgments] = OutputKafkaAcknowledgments.ONE
+    ack: Annotated[
+        Optional[OutputKafkaAcknowledgments], PlainValidator(validate_open_enum(True))
+    ] = OutputKafkaAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""
 
     format_: Annotated[
-        Optional[OutputKafkaRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputKafkaRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputKafkaRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""
 
-    compression: Optional[OutputKafkaCompression] = OutputKafkaCompression.GZIP
+    compression: Annotated[
+        Optional[OutputKafkaCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputKafkaCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""
 
     max_record_size_kb: Annotated[
@@ -512,7 +529,10 @@ class OutputKafka(BaseModel):
     tls: Optional[OutputKafkaTLSSettingsClientSide] = None
 
     on_backpressure: Annotated[
-        Optional[OutputKafkaBackpressureBehavior],
+        Annotated[
+            Optional[OutputKafkaBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKafkaBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -538,18 +558,27 @@ class OutputKafka(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputKafkaPqCompressCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputKafkaPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputKafkaPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputKafkaQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
+        Annotated[
+            Optional[OutputKafkaQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputKafkaMode], pydantic.Field(alias="pqMode")
-    ] = OutputKafkaMode.ERROR
+    pq_mode: Annotated[
+        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKafkaMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
```
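One detail specific to outputkafka.py above: `ack` is the only field whose validator is built with `validate_open_enum(True)` rather than `False`, and `OutputKafkaAcknowledgments` is the only `int`-valued enum in these files, so the boolean evidently tells the helper whether to treat the incoming value as an integer. A stand-in sketch of that reading (simplified; the real helper lives in `cribl_control_plane.utils`):

```python
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


def validate_open_enum(is_int: bool):
    # Simplified stand-in: coerce to int for int-valued enums, else pass through.
    def validate(value):
        return int(value) if is_int else value

    return validate


class Acknowledgments(int, Enum):
    ONE = 1
    MINUS_1 = -1


class KafkaLike(pydantic.BaseModel):
    ack: Annotated[
        Optional[Acknowledgments], PlainValidator(validate_open_enum(True))
    ] = Acknowledgments.ONE


print(KafkaLike(ack=-1).ack)  # -1: a known acknowledgment setting
print(KafkaLike(ack=5).ack)   # 5: not a member, but still accepted
```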
cribl_control_plane/models/outputkinesis.py

```diff
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputKinesisType(str, Enum):
     KINESIS = "kinesis"
 
 
-class OutputKinesisAuthenticationMethod(str, Enum):
+class OutputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -20,21 +23,21 @@ class OutputKinesisAuthenticationMethod(str, Enum):
     SECRET = "secret"
 
 
-class OutputKinesisSignatureVersion(str, Enum):
+class OutputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing Kinesis stream requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputKinesisCompression(str, Enum):
+class OutputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression type to use for records"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputKinesisBackpressureBehavior(str, Enum):
+class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -42,21 +45,21 @@ class OutputKinesisBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputKinesisPqCompressCompression(str, Enum):
+class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputKinesisQueueFullBehavior(str, Enum):
+class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputKinesisMode(str, Enum):
+class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -167,7 +170,10 @@ class OutputKinesis(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     aws_authentication_method: Annotated[
-        Optional[OutputKinesisAuthenticationMethod],
+        Annotated[
+            Optional[OutputKinesisAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputKinesisAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -180,7 +186,10 @@ class OutputKinesis(BaseModel):
     r"""Kinesis stream service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Kinesis stream-compatible endpoint."""
 
     signature_version: Annotated[
-        Optional[OutputKinesisSignatureVersion],
+        Annotated[
+            Optional[OutputKinesisSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputKinesisSignatureVersion.V4
     r"""Signature version to use for signing Kinesis stream requests"""
@@ -228,7 +237,9 @@ class OutputKinesis(BaseModel):
     ] = 1
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""
 
-    compression: Optional[OutputKinesisCompression] = OutputKinesisCompression.GZIP
+    compression: Annotated[
+        Optional[OutputKinesisCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputKinesisCompression.GZIP
     r"""Compression type to use for records"""
 
     use_list_shards: Annotated[
@@ -240,7 +251,10 @@ class OutputKinesis(BaseModel):
     r"""Batch events into a single record as NDJSON"""
 
     on_backpressure: Annotated[
-        Optional[OutputKinesisBackpressureBehavior],
+        Annotated[
+            Optional[OutputKinesisBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKinesisBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -266,19 +280,29 @@ class OutputKinesis(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputKinesisPqCompressCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputKinesisPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputKinesisPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputKinesisQueueFullBehavior],
+        Annotated[
+            Optional[OutputKinesisQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputKinesisQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputKinesisMode], pydantic.Field(alias="pqMode")
-    ] = OutputKinesisMode.ERROR
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKinesisMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
```
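The aliased fields in outputkinesis.py (`aws_authentication_method`, `signature_version`, `pq_mode`, and so on) show the other recurring shape: a second, inner `Annotated` keeps the open-enum validator attached to the enum type, while the outer `Annotated` carries the `pydantic.Field` alias. Since PEP 593 `Annotated` nesting flattens, this is equivalent to one `Annotated` listing both pieces of metadata; the nesting just keeps the generator's validator grouped with the type. A runnable sketch with illustrative names and the same stand-in `validate_open_enum` as above:

```python
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


def validate_open_enum(is_int: bool):
    # Same simplified stand-in as above: accept whatever arrives.
    def validate(value):
        return int(value) if is_int else value

    return validate


class SignatureVersion(str, Enum):
    V2 = "v2"
    V4 = "v4"


class KinesisLike(pydantic.BaseModel):
    # Inner Annotated: open-enum validation. Outer Annotated: camelCase alias.
    signature_version: Annotated[
        Annotated[
            Optional[SignatureVersion], PlainValidator(validate_open_enum(False))
        ],
        pydantic.Field(alias="signatureVersion"),
    ] = SignatureVersion.V4


# The alias still drives deserialization, and an unlisted version now loads:
demo = KinesisLike.model_validate({"signatureVersion": "v5"})
print(demo.signature_version)  # v5
```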
cribl_control_plane/models/outputloki.py

```diff
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputLokiType(str, Enum):
     LOKI = "loki"
 
 
-class OutputLokiMessageFormat(str, Enum):
+class OutputLokiMessageFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use when sending logs to Loki (Protobuf or JSON)"""
 
     PROTOBUF = "protobuf"
@@ -30,7 +33,7 @@ class OutputLokiLabel(BaseModel):
     name: Optional[str] = ""
 
 
-class OutputLokiAuthenticationType(str, Enum):
+class OutputLokiAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
     TOKEN = "token"
     TEXT_SECRET = "textSecret"
@@ -49,7 +52,7 @@ class OutputLokiExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputLokiFailedRequestLoggingMode(str, Enum):
+class OutputLokiFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -111,7 +114,7 @@ class OutputLokiTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputLokiBackpressureBehavior(str, Enum):
+class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -119,21 +122,21 @@ class OutputLokiBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputLokiCompression(str, Enum):
+class OutputLokiCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputLokiQueueFullBehavior(str, Enum):
+class OutputLokiQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputLokiMode(str, Enum):
+class OutputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -259,7 +262,10 @@ class OutputLoki(BaseModel):
     r"""Name of the event field that contains the message to send. If not specified, Stream sends a JSON representation of the whole event."""
 
     message_format: Annotated[
-        Optional[OutputLokiMessageFormat], pydantic.Field(alias="messageFormat")
+        Annotated[
+            Optional[OutputLokiMessageFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="messageFormat"),
     ] = OutputLokiMessageFormat.PROTOBUF
     r"""Format to use when sending logs to Loki (Protobuf or JSON)"""
 
@@ -267,7 +273,11 @@ class OutputLoki(BaseModel):
     r"""List of labels to send with logs. Labels define Loki streams, so use static labels to avoid proliferating label value combinations and streams. Can be merged and/or overridden by the event's __labels field. Example: '__labels: {host: \"cribl.io\", level: \"error\"}'"""
 
     auth_type: Annotated[
-        Optional[OutputLokiAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputLokiAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputLokiAuthenticationType.NONE
 
     concurrency: Optional[float] = 1
@@ -311,7 +321,10 @@ class OutputLoki(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputLokiFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputLokiFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputLokiFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -343,7 +356,11 @@ class OutputLoki(BaseModel):
     r"""Add per-event HTTP headers from the __headers field to outgoing requests. Events with different headers are batched and sent separately."""
 
     on_backpressure: Annotated[
-        Optional[OutputLokiBackpressureBehavior], pydantic.Field(alias="onBackpressure")
+        Annotated[
+            Optional[OutputLokiBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
     ] = OutputLokiBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
@@ -388,18 +405,26 @@ class OutputLoki(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputLokiCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputLokiCompression], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputLokiCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputLokiQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
+        Annotated[
+            Optional[OutputLokiQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
    ] = OutputLokiQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputLokiMode], pydantic.Field(alias="pqMode")
-    ] = OutputLokiMode.ERROR
+    pq_mode: Annotated[
+        Annotated[Optional[OutputLokiMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputLokiMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
```
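For contrast, the 0.0.50 shape of these fields enforced enum membership, so a payload carrying a value outside the generated members failed to parse; that is the behavior this release candidate removes. A closing before/after sketch using plain pydantic (illustrative enum, not the SDK's):

```python
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class MessageFormat(str, Enum):
    PROTOBUF = "protobuf"
    JSON = "json"


class Closed(pydantic.BaseModel):
    # 0.0.50 shape: pydantic enforces enum membership.
    message_format: Annotated[
        Optional[MessageFormat], pydantic.Field(alias="messageFormat")
    ] = MessageFormat.PROTOBUF


class Open(pydantic.BaseModel):
    # 0.0.50rc2 shape, with a pass-through lambda standing in for
    # cribl_control_plane.utils.validate_open_enum.
    message_format: Annotated[
        Annotated[Optional[MessageFormat], PlainValidator(lambda v: v)],
        pydantic.Field(alias="messageFormat"),
    ] = MessageFormat.PROTOBUF


payload = {"messageFormat": "msgpack"}  # hypothetical future value
print(Open.model_validate(payload).message_format)  # msgpack
try:
    Closed.model_validate(payload)
except pydantic.ValidationError:
    print("the 0.0.50-style model rejects the same payload")
```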