cribl-control-plane 0.0.46 (py3-none-any.whl) → 0.0.48a1 (py3-none-any.whl)
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -6
- cribl_control_plane/errors/apierror.py +2 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +11 -7
- cribl_control_plane/errors/error.py +4 -2
- cribl_control_plane/errors/healthstatus_error.py +12 -4
- cribl_control_plane/errors/no_response_error.py +5 -1
- cribl_control_plane/errors/responsevalidationerror.py +2 -0
- cribl_control_plane/models/__init__.py +12 -12
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/hbcriblinfo.py +19 -3
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +47 -20
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +46 -16
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +53 -18
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeprovidedinfo.py +3 -0
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +77 -32
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +74 -28
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +82 -30
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/RECORD +165 -167
- {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/WHEEL +1 -1
- cribl_control_plane/models/appmode.py +0 -13
- cribl_control_plane/models/routecloneconf.py +0 -13
cribl_control_plane/models/inputkafka.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,14 +26,14 @@ class InputKafkaConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKafkaMode(str, Enum):
+class InputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputKafkaCompression(str, Enum):
+class InputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -64,7 +67,9 @@ class InputKafkaPqTypedDict(TypedDict):
 
 
 class InputKafkaPq(BaseModel):
-    mode: Optional[InputKafkaMode] = InputKafkaMode.ALWAYS
+    mode: Annotated[
+        Optional[InputKafkaMode], PlainValidator(validate_open_enum(False))
+    ] = InputKafkaMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -88,7 +93,9 @@ class InputKafkaPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Optional[InputKafkaCompression] = InputKafkaCompression.NONE
+    compress: Annotated[
+        Optional[InputKafkaCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKafkaCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_controls: Annotated[
@@ -96,7 +103,7 @@ class InputKafkaPq(BaseModel):
     ] = None
 
 
-class InputKafkaSchemaType(str, Enum):
+class InputKafkaSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The schema format used to encode and decode event data"""
 
     AVRO = "avro"
@@ -122,14 +129,18 @@ class InputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class InputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class InputKafkaKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class InputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class InputKafkaKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -189,12 +200,18 @@ class InputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[InputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[InputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[InputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[InputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -225,7 +242,10 @@ class InputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
     schema_type: Annotated[
-        Optional[InputKafkaSchemaType], pydantic.Field(alias="schemaType")
+        Annotated[
+            Optional[InputKafkaSchemaType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="schemaType"),
     ] = InputKafkaSchemaType.AVRO
     r"""The schema format used to encode and decode event data"""
 
@@ -248,7 +268,7 @@ class InputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     tls: Optional[InputKafkaKafkaSchemaRegistryTLSSettingsClientSide] = None
 
 
-class InputKafkaSASLMechanism(str, Enum):
+class InputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -269,7 +289,9 @@ class InputKafkaAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    mechanism: Optional[InputKafkaSASLMechanism] = InputKafkaSASLMechanism.PLAIN
+    mechanism: Annotated[
+        Optional[InputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
+    ] = InputKafkaSASLMechanism.PLAIN
 
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -277,14 +299,14 @@ class InputKafkaAuthentication(BaseModel):
     r"""Enable OAuth authentication"""
 
 
-class InputKafkaMinimumTLSVersion(str, Enum):
+class InputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class InputKafkaMaximumTLSVersion(str, Enum):
+class InputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -344,11 +366,19 @@ class InputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[InputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[InputKafkaMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[InputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[InputKafkaMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
    ] = None
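Every regenerated model in this release follows the pattern visible above: each closed str Enum gains the utils.OpenEnumMeta metaclass, and each enum-typed field is wrapped in PlainValidator(validate_open_enum(False)), turning the SDK's enums into open enums. In 0.0.46, a payload value outside the enum failed pydantic validation; with open enums the value should pass validation and be carried through as a plain string. A minimal sketch of the consumer-visible difference, assuming InputKafkaPq and InputKafkaMode are re-exported from cribl_control_plane.models and that the open-enum validator falls back to the raw value for unknown members ("bulk" below is a deliberately unknown mode, not a real option):

from cribl_control_plane.models import InputKafkaMode, InputKafkaPq

# 0.0.46 (closed enums): "bulk" is not a member of InputKafkaMode, so
# validation raised a pydantic ValidationError.
# 0.0.48a1 (open enums, assumed behavior): the raw string is kept.
pq = InputKafkaPq.model_validate({"mode": "bulk"})
print(pq.mode)  # "bulk", preserved instead of raising

# Known members still resolve to enum members.
pq = InputKafkaPq.model_validate({"mode": "smart"})
print(pq.mode)  # InputKafkaMode.SMART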
cribl_control_plane/models/inputkinesis.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,14 +26,14 @@ class InputKinesisConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKinesisMode(str, Enum):
+class InputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputKinesisCompression(str, Enum):
+class InputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -64,7 +67,9 @@ class InputKinesisPqTypedDict(TypedDict):
 
 
 class InputKinesisPq(BaseModel):
-    mode: Optional[InputKinesisMode] = InputKinesisMode.ALWAYS
+    mode: Annotated[
+        Optional[InputKinesisMode], PlainValidator(validate_open_enum(False))
+    ] = InputKinesisMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -88,7 +93,9 @@ class InputKinesisPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Optional[InputKinesisCompression] = InputKinesisCompression.NONE
+    compress: Annotated[
+        Optional[InputKinesisCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKinesisCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_controls: Annotated[
@@ -96,14 +103,14 @@ class InputKinesisPq(BaseModel):
     ] = None
 
 
-class ShardIteratorStart(str, Enum):
+class ShardIteratorStart(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Location at which to start reading a shard for the first time"""
 
     TRIM_HORIZON = "TRIM_HORIZON"
     LATEST = "LATEST"
 
 
-class InputKinesisRecordDataFormat(str, Enum):
+class InputKinesisRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of data inside the Kinesis Stream records. Gzip compression is automatically detected."""
 
     CRIBL = "cribl"
@@ -112,14 +119,14 @@ class InputKinesisRecordDataFormat(str, Enum):
     LINE = "line"
 
 
-class ShardLoadBalancing(str, Enum):
+class ShardLoadBalancing(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The load-balancing algorithm to use for spreading out shards across Workers and Worker Processes"""
 
     CONSISTENT_HASHING = "ConsistentHashing"
     ROUND_ROBIN = "RoundRobin"
 
 
-class InputKinesisAuthenticationMethod(str, Enum):
+class InputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -127,7 +134,7 @@ class InputKinesisAuthenticationMethod(str, Enum):
     SECRET = "secret"
 
 
-class InputKinesisSignatureVersion(str, Enum):
+class InputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing Kinesis stream requests"""
 
     V2 = "v2"
@@ -259,12 +266,19 @@ class InputKinesis(BaseModel):
     r"""A JavaScript expression to be called with each shardId for the stream. If the expression evaluates to a truthy value, the shard will be processed."""
 
     shard_iterator_type: Annotated[
-        Optional[ShardIteratorStart], pydantic.Field(alias="shardIteratorType")
+        Annotated[
+            Optional[ShardIteratorStart], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="shardIteratorType"),
     ] = ShardIteratorStart.TRIM_HORIZON
     r"""Location at which to start reading a shard for the first time"""
 
     payload_format: Annotated[
-        Optional[InputKinesisRecordDataFormat], pydantic.Field(alias="payloadFormat")
+        Annotated[
+            Optional[InputKinesisRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="payloadFormat"),
     ] = InputKinesisRecordDataFormat.CRIBL
     r"""Format of data inside the Kinesis Stream records. Gzip compression is automatically detected."""
 
@@ -279,12 +293,18 @@ class InputKinesis(BaseModel):
     r"""Maximum number of records, across all shards, to pull down at once per Worker Process"""
 
     load_balancing_algorithm: Annotated[
-        Optional[ShardLoadBalancing], pydantic.Field(alias="loadBalancingAlgorithm")
+        Annotated[
+            Optional[ShardLoadBalancing], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="loadBalancingAlgorithm"),
     ] = ShardLoadBalancing.CONSISTENT_HASHING
     r"""The load-balancing algorithm to use for spreading out shards across Workers and Worker Processes"""
 
     aws_authentication_method: Annotated[
-        Optional[InputKinesisAuthenticationMethod],
+        Annotated[
+            Optional[InputKinesisAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = InputKinesisAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -297,7 +317,11 @@ class InputKinesis(BaseModel):
     r"""Kinesis stream service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Kinesis stream-compatible endpoint."""
 
     signature_version: Annotated[
-        Optional[InputKinesisSignatureVersion], pydantic.Field(alias="signatureVersion")
+        Annotated[
+            Optional[InputKinesisSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
     ] = InputKinesisSignatureVersion.V4
     r"""Signature version to use for signing Kinesis stream requests"""
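For fields that also carry a wire-format alias (shardIteratorType, payloadFormat, loadBalancingAlgorithm, and so on), the regenerated code nests one Annotated inside another: the inner Annotated binds the open-enum validator to the enum type, and the outer one attaches pydantic.Field(alias=...). Because Annotated metadata flattens, the two layers behave as a single annotation. A self-contained sketch of the same pattern using only pydantic; the Format enum and lenient_format validator below are illustrative stand-ins for the SDK's types and for validate_open_enum(False), not part of the package:

from enum import Enum
from typing import Optional

import pydantic
from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Format(str, Enum):
    CRIBL = "cribl"
    LINE = "line"


def lenient_format(value):
    # Stand-in for an open-enum validator: prefer the enum member,
    # fall back to the raw value when it is not a known member.
    try:
        return Format(value)
    except ValueError:
        return value


class Record(BaseModel):
    payload_format: Annotated[
        Annotated[Optional[Format], PlainValidator(lenient_format)],
        pydantic.Field(alias="payloadFormat"),
    ] = Format.CRIBL


print(Record.model_validate({"payloadFormat": "ndjson"}).payload_format)  # "ndjson"
print(Record.model_validate({"payloadFormat": "line"}).payload_format)    # Format.LINE
print(Record().payload_format)                                            # Format.CRIBL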
cribl_control_plane/models/inputkubeevents.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,14 +26,14 @@ class InputKubeEventsConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKubeEventsMode(str, Enum):
+class InputKubeEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputKubeEventsCompression(str, Enum):
+class InputKubeEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -64,7 +67,9 @@ class InputKubeEventsPqTypedDict(TypedDict):
 
 
 class InputKubeEventsPq(BaseModel):
-    mode: Optional[InputKubeEventsMode] = InputKubeEventsMode.ALWAYS
+    mode: Annotated[
+        Optional[InputKubeEventsMode], PlainValidator(validate_open_enum(False))
+    ] = InputKubeEventsMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -88,7 +93,9 @@ class InputKubeEventsPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Optional[InputKubeEventsCompression] = InputKubeEventsCompression.NONE
+    compress: Annotated[
+        Optional[InputKubeEventsCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKubeEventsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_controls: Annotated[
cribl_control_plane/models/inputkubelogs.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,14 +26,14 @@ class InputKubeLogsConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKubeLogsMode(str, Enum):
+class InputKubeLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputKubeLogsPqCompression(str, Enum):
+class InputKubeLogsPqCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -64,7 +67,9 @@ class InputKubeLogsPqTypedDict(TypedDict):
 
 
 class InputKubeLogsPq(BaseModel):
-    mode: Optional[InputKubeLogsMode] = InputKubeLogsMode.ALWAYS
+    mode: Annotated[
+        Optional[InputKubeLogsMode], PlainValidator(validate_open_enum(False))
+    ] = InputKubeLogsMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -88,7 +93,9 @@ class InputKubeLogsPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Optional[InputKubeLogsPqCompression] = InputKubeLogsPqCompression.NONE
+    compress: Annotated[
+        Optional[InputKubeLogsPqCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKubeLogsPqCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_controls: Annotated[
@@ -124,7 +131,7 @@ class InputKubeLogsMetadatum(BaseModel):
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
 
 
-class InputKubeLogsPersistenceCompression(str, Enum):
+class InputKubeLogsPersistenceCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data compression format. Default is gzip."""
 
     NONE = "none"
@@ -157,9 +164,10 @@ class InputKubeLogsDiskSpooling(BaseModel):
     max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
     r"""Maximum amount of time to retain data before older buckets are deleted. Examples: 2h, 4d. Default is 24h."""
 
-    compress: Optional[InputKubeLogsPersistenceCompression] = (
-        InputKubeLogsPersistenceCompression.GZIP
-    )
+    compress: Annotated[
+        Optional[InputKubeLogsPersistenceCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = InputKubeLogsPersistenceCompression.GZIP
     r"""Data compression format. Default is gzip."""
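Only the validation behavior loosens; field defaults survive the regeneration unchanged, so disk-spool compression still defaults to gzip when omitted. A hedged sketch, assuming InputKubeLogsDiskSpooling is re-exported from cribl_control_plane.models and that its remaining fields are all optional, as they appear to be in the diff ("zstd" is a deliberately unknown codec):

from cribl_control_plane.models import InputKubeLogsDiskSpooling

# Omitted field: the pre-existing default still applies.
spool = InputKubeLogsDiskSpooling.model_validate({})
print(spool.compress)  # InputKubeLogsPersistenceCompression.GZIP

# Out-of-enum codec: now accepted and kept as a raw string (assumed behavior).
spool = InputKubeLogsDiskSpooling.model_validate({"compress": "zstd"})
print(spool.compress)  # "zstd"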
cribl_control_plane/models/inputkubemetrics.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,14 +26,14 @@ class InputKubeMetricsConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKubeMetricsMode(str, Enum):
+class InputKubeMetricsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
    ALWAYS = "always"
 
 
-class InputKubeMetricsCompression(str, Enum):
+class InputKubeMetricsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -64,7 +67,9 @@ class InputKubeMetricsPqTypedDict(TypedDict):
 
 
 class InputKubeMetricsPq(BaseModel):
-    mode: Optional[InputKubeMetricsMode] = InputKubeMetricsMode.ALWAYS
+    mode: Annotated[
+        Optional[InputKubeMetricsMode], PlainValidator(validate_open_enum(False))
+    ] = InputKubeMetricsMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -88,7 +93,9 @@ class InputKubeMetricsPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Optional[InputKubeMetricsCompression] = InputKubeMetricsCompression.NONE
+    compress: Annotated[
+        Optional[InputKubeMetricsCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKubeMetricsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_controls: Annotated[
@@ -124,7 +131,7 @@ class InputKubeMetricsMetadatum(BaseModel):
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
 
 
-class InputKubeMetricsDataCompressionFormat(str, Enum):
+class InputKubeMetricsDataCompressionFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
     GZIP = "gzip"
 
@@ -156,9 +163,10 @@ class InputKubeMetricsPersistence(BaseModel):
     max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
     r"""Maximum amount of time to retain data (examples: 2h, 4d). When limit is reached, older data will be deleted."""
 
-    compress: Optional[InputKubeMetricsDataCompressionFormat] = (
-        InputKubeMetricsDataCompressionFormat.GZIP
-    )
+    compress: Annotated[
+        Optional[InputKubeMetricsDataCompressionFormat],
+        PlainValidator(validate_open_enum(False)),
+    ] = InputKubeMetricsDataCompressionFormat.GZIP
 
     dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = (
         "$CRIBL_HOME/state/kube_metrics"
cribl_control_plane/models/inputloki.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import Any, List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,14 +26,14 @@ class InputLokiConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputLokiMode(str, Enum):
+class InputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputLokiCompression(str, Enum):
+class InputLokiCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -64,7 +67,9 @@ class InputLokiPqTypedDict(TypedDict):
 
 
 class InputLokiPq(BaseModel):
-    mode: Optional[InputLokiMode] = InputLokiMode.ALWAYS
+    mode: Annotated[
+        Optional[InputLokiMode], PlainValidator(validate_open_enum(False))
+    ] = InputLokiMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -88,7 +93,9 @@ class InputLokiPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Optional[InputLokiCompression] = InputLokiCompression.NONE
+    compress: Annotated[
+        Optional[InputLokiCompression], PlainValidator(validate_open_enum(False))
+    ] = InputLokiCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_controls: Annotated[
@@ -96,14 +103,14 @@ class InputLokiPq(BaseModel):
     ] = None
 
 
-class InputLokiMinimumTLSVersion(str, Enum):
+class InputLokiMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class InputLokiMaximumTLSVersion(str, Enum):
+class InputLokiMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -162,15 +169,23 @@ class InputLokiTLSSettingsServerSide(BaseModel):
     ] = None
 
     min_version: Annotated[
-        Optional[InputLokiMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[InputLokiMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[InputLokiMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[InputLokiMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class InputLokiAuthenticationType(str, Enum):
+class InputLokiAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Loki logs authentication type"""
 
     NONE = "none"
@@ -401,7 +416,11 @@ class InputLoki(BaseModel):
     r"""Absolute path on which to listen for Loki logs requests. Defaults to /loki/api/v1/push, which will (in this example) expand as: 'http://<your-upstream-URL>:<your-port>/loki/api/v1/push'."""
 
     auth_type: Annotated[
-        Optional[InputLokiAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[InputLokiAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = InputLokiAuthenticationType.NONE
     r"""Loki logs authentication type"""
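The relaxation also reaches the aliased TLS fields, which validate by their wire names (minVersion, maxVersion). A final hedged example, assuming InputLokiTLSSettingsServerSide is re-exported from cribl_control_plane.models and that its other fields are optional; "TLSv1.4" is a deliberately unknown version string:

from cribl_control_plane.models import InputLokiTLSSettingsServerSide

# Unknown TLS version: carried through rather than rejected (assumed behavior).
tls = InputLokiTLSSettingsServerSide.model_validate({"minVersion": "TLSv1.4"})
print(tls.min_version)  # "TLSv1.4"

# Known version: resolves to the enum member.
tls = InputLokiTLSSettingsServerSide.model_validate({"maxVersion": "TLSv1.3"})
print(tls.max_version)  # InputLokiMaximumTLSVersion.TL_SV1_3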