cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4124 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
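
Every model hunk below follows a single pattern: the regenerated code drops the SDK's open-enum machinery (the metaclass=utils.OpenEnumMeta argument on enum classes and the PlainValidator(validate_open_enum(False)) wrappers on fields), leaving plain, closed str enums. Assuming standard pydantic v2 semantics, the practical effect is that enum-typed fields now reject values outside the declared members, where the open-enum wrappers previously existed to let unrecognized strings pass through validation. A minimal sketch of the new behavior, using stand-in names (PersistenceMode and Pq are illustrations, not SDK classes):

from enum import Enum
from typing import Optional

from pydantic import BaseModel, ValidationError


class PersistenceMode(str, Enum):  # stand-in for e.g. InputHTTPRawMode below
    SMART = "smart"
    ALWAYS = "always"


class Pq(BaseModel):  # stand-in for e.g. InputHTTPRawPq below
    mode: Optional[PersistenceMode] = PersistenceMode.ALWAYS


print(Pq(mode="smart").mode)  # PersistenceMode.SMART: known values coerce

try:
    Pq(mode="turbo")  # a closed enum now rejects undeclared values
except ValidationError as err:
    print(err.errors()[0]["type"])  # "enum"

If stored configuration relies on enum values the 0.0.17 models no longer declare, this change would presumably surface as a ValidationError at model-construction time rather than a silently passed-through string.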
cribl_control_plane/models/inputhttpraw.py

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import Any, List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class InputHTTPRawType(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputHTTPRawType(str, Enum):
     HTTP_RAW = "http_raw"
 
 
@@ -26,14 +23,14 @@ class InputHTTPRawConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputHTTPRawMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputHTTPRawMode(str, Enum):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputHTTPRawCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputHTTPRawCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -58,9 +55,7 @@ class InputHTTPRawPqTypedDict(TypedDict):
 
 
 class InputHTTPRawPq(BaseModel):
-    mode: Annotated[
-        Optional[InputHTTPRawMode], PlainValidator(validate_open_enum(False))
-    ] = InputHTTPRawMode.ALWAYS
+    mode: Optional[InputHTTPRawMode] = InputHTTPRawMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -84,20 +79,18 @@ class InputHTTPRawPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Annotated[
-        Optional[InputHTTPRawCompression], PlainValidator(validate_open_enum(False))
-    ] = InputHTTPRawCompression.NONE
+    compress: Optional[InputHTTPRawCompression] = InputHTTPRawCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
 
-class InputHTTPRawMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputHTTPRawMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class InputHTTPRawMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputHTTPRawMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -156,19 +149,11 @@ class InputHTTPRawTLSSettingsServerSide(BaseModel):
     ] = None
 
     min_version: Annotated[
-        Annotated[
-            Optional[InputHTTPRawMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[InputHTTPRawMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[InputHTTPRawMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[InputHTTPRawMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None
 
 
@@ -285,9 +270,7 @@ class InputHTTPRaw(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this input"""
 
-    type: Annotated[
-        Optional[InputHTTPRawType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[InputHTTPRawType] = None
 
     disabled: Optional[bool] = False
 
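Because the regenerated enums still subclass str, code that compares field values against raw wire strings keeps working after this change. A hedged sketch against the regenerated inputhttpraw module (names are taken from the diff above; fields of InputHTTPRawPq not shown in these hunks are assumed to keep generated defaults):

from cribl_control_plane.models.inputhttpraw import (
    InputHTTPRawMode,
    InputHTTPRawPq,
)

# mode now validates as a closed enum, but keeps its ALWAYS default.
pq = InputHTTPRawPq(mode="smart")
assert pq.mode is InputHTTPRawMode.SMART
assert pq.mode == "smart"  # str-subclass comparison still holds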
cribl_control_plane/models/inputjournalfiles.py

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class InputJournalFilesType(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputJournalFilesType(str, Enum):
     JOURNAL_FILES = "journal_files"
 
 
@@ -26,14 +23,14 @@ class InputJournalFilesConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputJournalFilesMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputJournalFilesMode(str, Enum):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputJournalFilesCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputJournalFilesCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -58,9 +55,7 @@ class InputJournalFilesPqTypedDict(TypedDict):
 
 
 class InputJournalFilesPq(BaseModel):
-    mode: Annotated[
-        Optional[InputJournalFilesMode], PlainValidator(validate_open_enum(False))
-    ] = InputJournalFilesMode.ALWAYS
+    mode: Optional[InputJournalFilesMode] = InputJournalFilesMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -84,10 +79,7 @@ class InputJournalFilesPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Annotated[
-        Optional[InputJournalFilesCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = InputJournalFilesCompression.NONE
+    compress: Optional[InputJournalFilesCompression] = InputJournalFilesCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
 
@@ -164,9 +156,7 @@ class InputJournalFiles(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this input"""
 
-    type: Annotated[
-        Optional[InputJournalFilesType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[InputJournalFilesType] = None
 
     disabled: Optional[bool] = False
 
cribl_control_plane/models/inputkafka.py

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class InputKafkaType(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKafkaType(str, Enum):
     KAFKA = "kafka"
 
 
@@ -26,14 +23,14 @@ class InputKafkaConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKafkaMode(str, Enum):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKafkaCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -58,9 +55,7 @@ class InputKafkaPqTypedDict(TypedDict):
 
 
 class InputKafkaPq(BaseModel):
-    mode: Annotated[
-        Optional[InputKafkaMode], PlainValidator(validate_open_enum(False))
-    ] = InputKafkaMode.ALWAYS
+    mode: Optional[InputKafkaMode] = InputKafkaMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -84,9 +79,7 @@ class InputKafkaPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Annotated[
-        Optional[InputKafkaCompression], PlainValidator(validate_open_enum(False))
-    ] = InputKafkaCompression.NONE
+    compress: Optional[InputKafkaCompression] = InputKafkaCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
 
@@ -109,18 +102,14 @@ class InputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class InputKafkaKafkaSchemaRegistryMinimumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class InputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class InputKafkaKafkaSchemaRegistryMaximumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class InputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -180,18 +169,12 @@ class InputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[InputKafkaKafkaSchemaRegistryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[InputKafkaKafkaSchemaRegistryMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[InputKafkaKafkaSchemaRegistryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[InputKafkaKafkaSchemaRegistryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -238,7 +221,7 @@ class InputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     tls: Optional[InputKafkaKafkaSchemaRegistryTLSSettingsClientSide] = None
 
 
-class InputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKafkaSASLMechanism(str, Enum):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -257,19 +240,17 @@ class InputKafkaAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    mechanism: Annotated[
-        Optional[InputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
-    ] = InputKafkaSASLMechanism.PLAIN
+    mechanism: Optional[InputKafkaSASLMechanism] = InputKafkaSASLMechanism.PLAIN
 
 
-class InputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKafkaMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class InputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKafkaMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -329,19 +310,11 @@ class InputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[InputKafkaMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[InputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[InputKafkaMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[InputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None
 
 
@@ -448,9 +421,7 @@ class InputKafka(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this input"""
 
-    type: Annotated[
-        Optional[InputKafkaType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[InputKafkaType] = None
 
     disabled: Optional[bool] = False
 
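Note what the Kafka TLS hunks above do not change: minVersion and maxVersion keep their Annotated[..., pydantic.Field(alias=...)] shape, so alias-based population and serialization are untouched; only the inner open-enum Annotated layer disappears. A self-contained sketch of the surviving pattern (stand-in model on plain pydantic.BaseModel; the SDK's own BaseModel may configure additional behavior):

from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class MinimumTLSVersion(str, Enum):  # stand-in for the TLS enums above
    TL_SV1_2 = "TLSv1.2"
    TL_SV1_3 = "TLSv1.3"


class TLSSettings(pydantic.BaseModel):  # stand-in for the TLS settings models
    min_version: Annotated[
        Optional[MinimumTLSVersion], pydantic.Field(alias="minVersion")
    ] = None


tls = TLSSettings(minVersion="TLSv1.2")  # populated by the wire alias
print(tls.min_version)                   # MinimumTLSVersion.TL_SV1_2
print(tls.model_dump(by_alias=True))     # {'minVersion': <MinimumTLSVersion.TL_SV1_2: 'TLSv1.2'>}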
cribl_control_plane/models/inputkinesis.py

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class InputKinesisType(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKinesisType(str, Enum):
     KINESIS = "kinesis"
 
 
@@ -26,14 +23,14 @@ class InputKinesisConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKinesisMode(str, Enum):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKinesisCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -58,9 +55,7 @@ class InputKinesisPqTypedDict(TypedDict):
 
 
 class InputKinesisPq(BaseModel):
-    mode: Annotated[
-        Optional[InputKinesisMode], PlainValidator(validate_open_enum(False))
-    ] = InputKinesisMode.ALWAYS
+    mode: Optional[InputKinesisMode] = InputKinesisMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -84,20 +79,18 @@ class InputKinesisPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Annotated[
-        Optional[InputKinesisCompression], PlainValidator(validate_open_enum(False))
-    ] = InputKinesisCompression.NONE
+    compress: Optional[InputKinesisCompression] = InputKinesisCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
 
-class ShardIteratorStart(str, Enum, metaclass=utils.OpenEnumMeta):
+class ShardIteratorStart(str, Enum):
     r"""Location at which to start reading a shard for the first time"""
 
     TRIM_HORIZON = "TRIM_HORIZON"
     LATEST = "LATEST"
 
 
-class InputKinesisRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKinesisRecordDataFormat(str, Enum):
     r"""Format of data inside the Kinesis Stream records. Gzip compression is automatically detected."""
 
     CRIBL = "cribl"
@@ -106,14 +99,14 @@ class InputKinesisRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     LINE = "line"
 
 
-class ShardLoadBalancing(str, Enum, metaclass=utils.OpenEnumMeta):
+class ShardLoadBalancing(str, Enum):
     r"""The load-balancing algorithm to use for spreading out shards across Workers and Worker Processes"""
 
     CONSISTENT_HASHING = "ConsistentHashing"
     ROUND_ROBIN = "RoundRobin"
 
 
-class InputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKinesisAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -121,7 +114,7 @@ class InputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"
 
 
-class InputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKinesisSignatureVersion(str, Enum):
     r"""Signature version to use for signing Kinesis stream requests"""
 
     V2 = "v2"
@@ -218,9 +211,7 @@ class InputKinesis(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this input"""
 
-    type: Annotated[
-        Optional[InputKinesisType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[InputKinesisType] = None
 
     disabled: Optional[bool] = False
 
@@ -255,19 +246,12 @@ class InputKinesis(BaseModel):
     r"""A JavaScript expression to be called with each shardId for the stream. If the expression evaluates to a truthy value, the shard will be processed."""
 
     shard_iterator_type: Annotated[
-        Annotated[
-            Optional[ShardIteratorStart], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="shardIteratorType"),
+        Optional[ShardIteratorStart], pydantic.Field(alias="shardIteratorType")
     ] = ShardIteratorStart.TRIM_HORIZON
     r"""Location at which to start reading a shard for the first time"""
 
     payload_format: Annotated[
-        Annotated[
-            Optional[InputKinesisRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="payloadFormat"),
+        Optional[InputKinesisRecordDataFormat], pydantic.Field(alias="payloadFormat")
     ] = InputKinesisRecordDataFormat.CRIBL
     r"""Format of data inside the Kinesis Stream records. Gzip compression is automatically detected."""
 
@@ -282,18 +266,12 @@ class InputKinesis(BaseModel):
     r"""Maximum number of records, across all shards, to pull down at once per Worker Process"""
 
     load_balancing_algorithm: Annotated[
-        Annotated[
-            Optional[ShardLoadBalancing], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="loadBalancingAlgorithm"),
+        Optional[ShardLoadBalancing], pydantic.Field(alias="loadBalancingAlgorithm")
     ] = ShardLoadBalancing.CONSISTENT_HASHING
     r"""The load-balancing algorithm to use for spreading out shards across Workers and Worker Processes"""
 
     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[InputKinesisAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[InputKinesisAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = InputKinesisAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -306,11 +284,7 @@ class InputKinesis(BaseModel):
     r"""Kinesis stream service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Kinesis stream-compatible endpoint."""
 
     signature_version: Annotated[
-        Annotated[
-            Optional[InputKinesisSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="signatureVersion"),
+        Optional[InputKinesisSignatureVersion], pydantic.Field(alias="signatureVersion")
     ] = InputKinesisSignatureVersion.V4
     r"""Signature version to use for signing Kinesis stream requests"""
 
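Where a field default references an enum member (InputKinesisSignatureVersion.V4 and ShardIteratorStart.TRIM_HORIZON above), the member itself is unchanged; what goes away is the tolerance for arbitrary strings. One way to pre-check a raw config value against a now-closed enum (module path per the file list above; the "v4" wire value is inferred from the V2 = "v2" member and the V4 default, so treat it as an assumption):

from cribl_control_plane.models.inputkinesis import InputKinesisSignatureVersion

raw = "v4"  # assumed wire value for the V4 member
sig = InputKinesisSignatureVersion(raw)  # raises ValueError for undeclared values
print(sig is InputKinesisSignatureVersion.V4)  # True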
cribl_control_plane/models/inputkubeevents.py

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class InputKubeEventsType(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKubeEventsType(str, Enum):
     KUBE_EVENTS = "kube_events"
 
 
@@ -26,14 +23,14 @@ class InputKubeEventsConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKubeEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKubeEventsMode(str, Enum):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
     ALWAYS = "always"
 
 
-class InputKubeEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKubeEventsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -58,9 +55,7 @@ class InputKubeEventsPqTypedDict(TypedDict):
 
 
 class InputKubeEventsPq(BaseModel):
-    mode: Annotated[
-        Optional[InputKubeEventsMode], PlainValidator(validate_open_enum(False))
-    ] = InputKubeEventsMode.ALWAYS
+    mode: Optional[InputKubeEventsMode] = InputKubeEventsMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -84,9 +79,7 @@ class InputKubeEventsPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Annotated[
-        Optional[InputKubeEventsCompression], PlainValidator(validate_open_enum(False))
-    ] = InputKubeEventsCompression.NONE
+    compress: Optional[InputKubeEventsCompression] = InputKubeEventsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
 
@@ -147,7 +140,7 @@ class InputKubeEvents(BaseModel):
     id: str
     r"""Unique ID for this input"""
 
-    type: Annotated[InputKubeEventsType, PlainValidator(validate_open_enum(False))]
+    type: InputKubeEventsType
 
     disabled: Optional[bool] = False
 
cribl_control_plane/models/inputkubelogs.py

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class InputKubeLogsType(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKubeLogsType(str, Enum):
     KUBE_LOGS = "kube_logs"
 
 
@@ -26,14 +23,14 @@ class InputKubeLogsConnection(BaseModel):
     pipeline: Optional[str] = None
 
 
-class InputKubeLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKubeLogsMode(str, Enum):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     SMART = "smart"
    ALWAYS = "always"
 
 
-class InputKubeLogsPqCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKubeLogsPqCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
@@ -58,9 +55,7 @@ class InputKubeLogsPqTypedDict(TypedDict):
 
 
 class InputKubeLogsPq(BaseModel):
-    mode: Annotated[
-        Optional[InputKubeLogsMode], PlainValidator(validate_open_enum(False))
-    ] = InputKubeLogsMode.ALWAYS
+    mode: Optional[InputKubeLogsMode] = InputKubeLogsMode.ALWAYS
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
     max_buffer_size: Annotated[
@@ -84,9 +79,7 @@ class InputKubeLogsPq(BaseModel):
     path: Optional[str] = "$CRIBL_HOME/state/queues"
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
 
-    compress: Annotated[
-        Optional[InputKubeLogsPqCompression], PlainValidator(validate_open_enum(False))
-    ] = InputKubeLogsPqCompression.NONE
+    compress: Optional[InputKubeLogsPqCompression] = InputKubeLogsPqCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
 
@@ -118,7 +111,7 @@ class InputKubeLogsMetadatum(BaseModel):
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
 
 
-class InputKubeLogsPersistenceCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class InputKubeLogsPersistenceCompression(str, Enum):
     r"""Data compression format. Default is gzip."""
 
     NONE = "none"
@@ -151,10 +144,9 @@ class InputKubeLogsDiskSpooling(BaseModel):
     max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
     r"""Maximum amount of time to retain data before older buckets are deleted. Examples: 2h, 4d. Default is 24h."""
 
-    compress: Annotated[
-        Optional[InputKubeLogsPersistenceCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = InputKubeLogsPersistenceCompression.GZIP
+    compress: Optional[InputKubeLogsPersistenceCompression] = (
+        InputKubeLogsPersistenceCompression.GZIP
+    )
     r"""Data compression format. Default is gzip."""
 
 
@@ -198,7 +190,7 @@ class InputKubeLogs(BaseModel):
     id: str
     r"""Unique ID for this input"""
 
-    type: Annotated[InputKubeLogsType, PlainValidator(validate_open_enum(False))]
+    type: InputKubeLogsType
 
     disabled: Optional[bool] = False
 
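On the two Kubernetes inputs, type is required (alongside id: str), and its annotation collapses from an Annotated validator wrapper to the bare enum. A small hedged check (the remaining required fields of InputKubeLogs are not shown in this diff, so the full model is not constructed here):

from cribl_control_plane.models.inputkubelogs import InputKubeLogsType

t = InputKubeLogsType("kube_logs")  # value lookup on the closed enum
print(t is InputKubeLogsType.KUBE_LOGS)  # True
print(t == "kube_logs")                  # True: still a str subclass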