cribl-control-plane 0.0.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic.
- cribl_control_plane/__init__.py +17 -0
- cribl_control_plane/_hooks/__init__.py +5 -0
- cribl_control_plane/_hooks/clientcredentials.py +211 -0
- cribl_control_plane/_hooks/registration.py +13 -0
- cribl_control_plane/_hooks/sdkhooks.py +81 -0
- cribl_control_plane/_hooks/types.py +112 -0
- cribl_control_plane/_version.py +15 -0
- cribl_control_plane/auth_sdk.py +184 -0
- cribl_control_plane/basesdk.py +358 -0
- cribl_control_plane/errors/__init__.py +60 -0
- cribl_control_plane/errors/apierror.py +38 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
- cribl_control_plane/errors/error.py +24 -0
- cribl_control_plane/errors/healthstatus_error.py +38 -0
- cribl_control_plane/errors/no_response_error.py +13 -0
- cribl_control_plane/errors/responsevalidationerror.py +25 -0
- cribl_control_plane/health.py +166 -0
- cribl_control_plane/httpclient.py +126 -0
- cribl_control_plane/models/__init__.py +7305 -0
- cribl_control_plane/models/addhectokenrequest.py +34 -0
- cribl_control_plane/models/authtoken.py +13 -0
- cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
- cribl_control_plane/models/createinputop.py +24 -0
- cribl_control_plane/models/createoutputop.py +24 -0
- cribl_control_plane/models/createoutputtestbyidop.py +46 -0
- cribl_control_plane/models/criblevent.py +14 -0
- cribl_control_plane/models/deleteinputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getinputbyidop.py +37 -0
- cribl_control_plane/models/getoutputbyidop.py +37 -0
- cribl_control_plane/models/getoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
- cribl_control_plane/models/healthstatus.py +36 -0
- cribl_control_plane/models/input.py +199 -0
- cribl_control_plane/models/inputappscope.py +448 -0
- cribl_control_plane/models/inputazureblob.py +308 -0
- cribl_control_plane/models/inputcollection.py +208 -0
- cribl_control_plane/models/inputconfluentcloud.py +585 -0
- cribl_control_plane/models/inputcribl.py +165 -0
- cribl_control_plane/models/inputcriblhttp.py +341 -0
- cribl_control_plane/models/inputcribllakehttp.py +342 -0
- cribl_control_plane/models/inputcriblmetrics.py +175 -0
- cribl_control_plane/models/inputcribltcp.py +299 -0
- cribl_control_plane/models/inputcrowdstrike.py +410 -0
- cribl_control_plane/models/inputdatadogagent.py +364 -0
- cribl_control_plane/models/inputdatagen.py +180 -0
- cribl_control_plane/models/inputedgeprometheus.py +551 -0
- cribl_control_plane/models/inputelastic.py +494 -0
- cribl_control_plane/models/inputeventhub.py +360 -0
- cribl_control_plane/models/inputexec.py +213 -0
- cribl_control_plane/models/inputfile.py +259 -0
- cribl_control_plane/models/inputfirehose.py +341 -0
- cribl_control_plane/models/inputgooglepubsub.py +247 -0
- cribl_control_plane/models/inputgrafana_union.py +1247 -0
- cribl_control_plane/models/inputhttp.py +403 -0
- cribl_control_plane/models/inputhttpraw.py +407 -0
- cribl_control_plane/models/inputjournalfiles.py +208 -0
- cribl_control_plane/models/inputkafka.py +581 -0
- cribl_control_plane/models/inputkinesis.py +363 -0
- cribl_control_plane/models/inputkubeevents.py +182 -0
- cribl_control_plane/models/inputkubelogs.py +256 -0
- cribl_control_plane/models/inputkubemetrics.py +233 -0
- cribl_control_plane/models/inputloki.py +468 -0
- cribl_control_plane/models/inputmetrics.py +290 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
- cribl_control_plane/models/inputmsk.py +654 -0
- cribl_control_plane/models/inputnetflow.py +224 -0
- cribl_control_plane/models/inputoffice365mgmt.py +384 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
- cribl_control_plane/models/inputoffice365service.py +377 -0
- cribl_control_plane/models/inputopentelemetry.py +516 -0
- cribl_control_plane/models/inputprometheus.py +464 -0
- cribl_control_plane/models/inputprometheusrw.py +470 -0
- cribl_control_plane/models/inputrawudp.py +207 -0
- cribl_control_plane/models/inputs3.py +416 -0
- cribl_control_plane/models/inputs3inventory.py +440 -0
- cribl_control_plane/models/inputsecuritylake.py +425 -0
- cribl_control_plane/models/inputsnmp.py +274 -0
- cribl_control_plane/models/inputsplunk.py +387 -0
- cribl_control_plane/models/inputsplunkhec.py +478 -0
- cribl_control_plane/models/inputsplunksearch.py +537 -0
- cribl_control_plane/models/inputsqs.py +320 -0
- cribl_control_plane/models/inputsyslog_union.py +759 -0
- cribl_control_plane/models/inputsystemmetrics.py +533 -0
- cribl_control_plane/models/inputsystemstate.py +417 -0
- cribl_control_plane/models/inputtcp.py +359 -0
- cribl_control_plane/models/inputtcpjson.py +334 -0
- cribl_control_plane/models/inputwef.py +498 -0
- cribl_control_plane/models/inputwindowsmetrics.py +457 -0
- cribl_control_plane/models/inputwineventlogs.py +222 -0
- cribl_control_plane/models/inputwiz.py +334 -0
- cribl_control_plane/models/inputzscalerhec.py +439 -0
- cribl_control_plane/models/listinputop.py +24 -0
- cribl_control_plane/models/listoutputop.py +24 -0
- cribl_control_plane/models/logininfo.py +16 -0
- cribl_control_plane/models/output.py +229 -0
- cribl_control_plane/models/outputazureblob.py +471 -0
- cribl_control_plane/models/outputazuredataexplorer.py +660 -0
- cribl_control_plane/models/outputazureeventhub.py +321 -0
- cribl_control_plane/models/outputazurelogs.py +386 -0
- cribl_control_plane/models/outputclickhouse.py +650 -0
- cribl_control_plane/models/outputcloudwatch.py +273 -0
- cribl_control_plane/models/outputconfluentcloud.py +591 -0
- cribl_control_plane/models/outputcriblhttp.py +494 -0
- cribl_control_plane/models/outputcribllake.py +396 -0
- cribl_control_plane/models/outputcribltcp.py +387 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
- cribl_control_plane/models/outputdatadog.py +472 -0
- cribl_control_plane/models/outputdataset.py +437 -0
- cribl_control_plane/models/outputdefault.py +55 -0
- cribl_control_plane/models/outputdevnull.py +50 -0
- cribl_control_plane/models/outputdiskspool.py +89 -0
- cribl_control_plane/models/outputdls3.py +560 -0
- cribl_control_plane/models/outputdynatracehttp.py +454 -0
- cribl_control_plane/models/outputdynatraceotlp.py +486 -0
- cribl_control_plane/models/outputelastic.py +494 -0
- cribl_control_plane/models/outputelasticcloud.py +407 -0
- cribl_control_plane/models/outputexabeam.py +297 -0
- cribl_control_plane/models/outputfilesystem.py +357 -0
- cribl_control_plane/models/outputgooglechronicle.py +486 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
- cribl_control_plane/models/outputgooglepubsub.py +274 -0
- cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
- cribl_control_plane/models/outputgraphite.py +225 -0
- cribl_control_plane/models/outputhoneycomb.py +369 -0
- cribl_control_plane/models/outputhumiohec.py +389 -0
- cribl_control_plane/models/outputinfluxdb.py +523 -0
- cribl_control_plane/models/outputkafka.py +581 -0
- cribl_control_plane/models/outputkinesis.py +312 -0
- cribl_control_plane/models/outputloki.py +425 -0
- cribl_control_plane/models/outputminio.py +512 -0
- cribl_control_plane/models/outputmsk.py +654 -0
- cribl_control_plane/models/outputnetflow.py +80 -0
- cribl_control_plane/models/outputnewrelic.py +424 -0
- cribl_control_plane/models/outputnewrelicevents.py +401 -0
- cribl_control_plane/models/outputopentelemetry.py +669 -0
- cribl_control_plane/models/outputprometheus.py +485 -0
- cribl_control_plane/models/outputring.py +121 -0
- cribl_control_plane/models/outputrouter.py +83 -0
- cribl_control_plane/models/outputs3.py +556 -0
- cribl_control_plane/models/outputsamplesresponse.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +505 -0
- cribl_control_plane/models/outputsentinel.py +488 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
- cribl_control_plane/models/outputservicenow.py +543 -0
- cribl_control_plane/models/outputsignalfx.py +369 -0
- cribl_control_plane/models/outputsnmp.py +80 -0
- cribl_control_plane/models/outputsns.py +274 -0
- cribl_control_plane/models/outputsplunk.py +383 -0
- cribl_control_plane/models/outputsplunkhec.py +434 -0
- cribl_control_plane/models/outputsplunklb.py +558 -0
- cribl_control_plane/models/outputsqs.py +328 -0
- cribl_control_plane/models/outputstatsd.py +224 -0
- cribl_control_plane/models/outputstatsdext.py +225 -0
- cribl_control_plane/models/outputsumologic.py +378 -0
- cribl_control_plane/models/outputsyslog.py +415 -0
- cribl_control_plane/models/outputtcpjson.py +413 -0
- cribl_control_plane/models/outputtestrequest.py +15 -0
- cribl_control_plane/models/outputtestresponse.py +29 -0
- cribl_control_plane/models/outputwavefront.py +369 -0
- cribl_control_plane/models/outputwebhook.py +689 -0
- cribl_control_plane/models/outputxsiam.py +415 -0
- cribl_control_plane/models/schemeclientoauth.py +24 -0
- cribl_control_plane/models/security.py +36 -0
- cribl_control_plane/models/updatehectokenrequest.py +31 -0
- cribl_control_plane/models/updateinputbyidop.py +44 -0
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
- cribl_control_plane/models/updateoutputbyidop.py +44 -0
- cribl_control_plane/outputs.py +1615 -0
- cribl_control_plane/py.typed +1 -0
- cribl_control_plane/sdk.py +164 -0
- cribl_control_plane/sdkconfiguration.py +36 -0
- cribl_control_plane/sources.py +1355 -0
- cribl_control_plane/types/__init__.py +21 -0
- cribl_control_plane/types/basemodel.py +39 -0
- cribl_control_plane/utils/__init__.py +187 -0
- cribl_control_plane/utils/annotations.py +55 -0
- cribl_control_plane/utils/datetimes.py +23 -0
- cribl_control_plane/utils/enums.py +74 -0
- cribl_control_plane/utils/eventstreaming.py +238 -0
- cribl_control_plane/utils/forms.py +223 -0
- cribl_control_plane/utils/headers.py +136 -0
- cribl_control_plane/utils/logger.py +27 -0
- cribl_control_plane/utils/metadata.py +118 -0
- cribl_control_plane/utils/queryparams.py +205 -0
- cribl_control_plane/utils/requestbodies.py +66 -0
- cribl_control_plane/utils/retries.py +217 -0
- cribl_control_plane/utils/security.py +207 -0
- cribl_control_plane/utils/serializers.py +249 -0
- cribl_control_plane/utils/unmarshal_json_response.py +24 -0
- cribl_control_plane/utils/url.py +155 -0
- cribl_control_plane/utils/values.py +137 -0
- cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
- cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
- cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
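Every file in this release is newly added (each manifest entry shows -0 removals), and the two hunks below reproduce two of the new Speakeasy-generated model modules. For orientation, here is a minimal sketch of pulling this exact version and inspecting one of the generated models. It assumes the wheel is installable from PyPI under the name shown above and that the shared BaseModel in cribl_control_plane/types/basemodel.py subclasses Pydantic v2's BaseModel (the pydantic.functional_validators import in the code below is v2-only):

# pip install cribl-control-plane==0.0.13   (assumption: published to PyPI under this name)

# Import straight from the module path listed in the RECORD above.
from cribl_control_plane.models.inputkinesis import InputKinesis

# Pydantic v2 exposes the generated fields and their camelCase wire aliases.
for name, field in InputKinesis.model_fields.items():
    print(name, field.alias)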
cribl_control_plane/models/inputkinesis.py
@@ -0,0 +1,363 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputKinesisType(str, Enum, metaclass=utils.OpenEnumMeta):
+    KINESIS = "kinesis"
+
+
+class InputKinesisConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputKinesisConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputKinesisPqTypedDict(TypedDict):
+    mode: NotRequired[InputKinesisMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputKinesisCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputKinesisPq(BaseModel):
+    mode: Annotated[
+        Optional[InputKinesisMode], PlainValidator(validate_open_enum(False))
+    ] = InputKinesisMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputKinesisCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKinesisCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class ShardIteratorStart(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Location at which to start reading a shard for the first time"""
+
+    TRIM_HORIZON = "TRIM_HORIZON"
+    LATEST = "LATEST"
+
+
+class InputKinesisRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Format of data inside the Kinesis Stream records. Gzip compression is automatically detected."""
+
+    CRIBL = "cribl"
+    NDJSON = "ndjson"
+    CLOUDWATCH = "cloudwatch"
+    LINE = "line"
+
+
+class ShardLoadBalancing(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The load-balancing algorithm to use for spreading out shards across Workers and Worker Processes"""
+
+    CONSISTENT_HASHING = "ConsistentHashing"
+    ROUND_ROBIN = "RoundRobin"
+
+
+class InputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+
+    AUTO = "auto"
+    MANUAL = "manual"
+    SECRET = "secret"
+
+
+class InputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Signature version to use for signing Kinesis stream requests"""
+
+    V2 = "v2"
+    V4 = "v4"
+
+
+class InputKinesisMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputKinesisMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputKinesisTypedDict(TypedDict):
+    type: InputKinesisType
+    stream_name: str
+    r"""Kinesis Data Stream to read data from"""
+    region: str
+    r"""Region where the Kinesis stream is located"""
+    id: NotRequired[str]
+    r"""Unique ID for this input"""
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+    send_to_routes: NotRequired[bool]
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputKinesisConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputKinesisPqTypedDict]
+    service_interval: NotRequired[float]
+    r"""Time interval in minutes between consecutive service calls"""
+    shard_expr: NotRequired[str]
+    r"""A JavaScript expression to be called with each shardId for the stream. If the expression evaluates to a truthy value, the shard will be processed."""
+    shard_iterator_type: NotRequired[ShardIteratorStart]
+    r"""Location at which to start reading a shard for the first time"""
+    payload_format: NotRequired[InputKinesisRecordDataFormat]
+    r"""Format of data inside the Kinesis Stream records. Gzip compression is automatically detected."""
+    get_records_limit: NotRequired[float]
+    r"""Maximum number of records per getRecords call"""
+    get_records_limit_total: NotRequired[float]
+    r"""Maximum number of records, across all shards, to pull down at once per Worker Process"""
+    load_balancing_algorithm: NotRequired[ShardLoadBalancing]
+    r"""The load-balancing algorithm to use for spreading out shards across Workers and Worker Processes"""
+    aws_authentication_method: NotRequired[InputKinesisAuthenticationMethod]
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+    aws_secret_key: NotRequired[str]
+    endpoint: NotRequired[str]
+    r"""Kinesis stream service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Kinesis stream-compatible endpoint."""
+    signature_version: NotRequired[InputKinesisSignatureVersion]
+    r"""Signature version to use for signing Kinesis stream requests"""
+    reuse_connections: NotRequired[bool]
+    r"""Reuse connections between requests, which can improve performance"""
+    reject_unauthorized: NotRequired[bool]
+    r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
+    enable_assume_role: NotRequired[bool]
+    r"""Use Assume Role credentials to access Kinesis stream"""
+    assume_role_arn: NotRequired[str]
+    r"""Amazon Resource Name (ARN) of the role to assume"""
+    assume_role_external_id: NotRequired[str]
+    r"""External ID to use when assuming role"""
+    duration_seconds: NotRequired[float]
+    r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
+    verify_kpl_check_sums: NotRequired[bool]
+    r"""Verify Kinesis Producer Library (KPL) event checksums"""
+    avoid_duplicates: NotRequired[bool]
+    r"""When resuming streaming from a stored state, Stream will read the next available record, rather than rereading the last-read record. Enabling this setting can cause data loss after a Worker Node's unexpected shutdown or restart."""
+    metadata: NotRequired[List[InputKinesisMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    description: NotRequired[str]
+    aws_api_key: NotRequired[str]
+    aws_secret: NotRequired[str]
+    r"""Select or create a stored secret that references your access key and secret key"""
+
+
+class InputKinesis(BaseModel):
+    type: Annotated[InputKinesisType, PlainValidator(validate_open_enum(False))]
+
+    stream_name: Annotated[str, pydantic.Field(alias="streamName")]
+    r"""Kinesis Data Stream to read data from"""
+
+    region: str
+    r"""Region where the Kinesis stream is located"""
+
+    id: Optional[str] = None
+    r"""Unique ID for this input"""
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputKinesisConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputKinesisPq] = None
+
+    service_interval: Annotated[
+        Optional[float], pydantic.Field(alias="serviceInterval")
+    ] = 1
+    r"""Time interval in minutes between consecutive service calls"""
+
+    shard_expr: Annotated[Optional[str], pydantic.Field(alias="shardExpr")] = "true"
+    r"""A JavaScript expression to be called with each shardId for the stream. If the expression evaluates to a truthy value, the shard will be processed."""
+
+    shard_iterator_type: Annotated[
+        Annotated[
+            Optional[ShardIteratorStart], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="shardIteratorType"),
+    ] = ShardIteratorStart.TRIM_HORIZON
+    r"""Location at which to start reading a shard for the first time"""
+
+    payload_format: Annotated[
+        Annotated[
+            Optional[InputKinesisRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="payloadFormat"),
+    ] = InputKinesisRecordDataFormat.CRIBL
+    r"""Format of data inside the Kinesis Stream records. Gzip compression is automatically detected."""
+
+    get_records_limit: Annotated[
+        Optional[float], pydantic.Field(alias="getRecordsLimit")
+    ] = 5000
+    r"""Maximum number of records per getRecords call"""
+
+    get_records_limit_total: Annotated[
+        Optional[float], pydantic.Field(alias="getRecordsLimitTotal")
+    ] = 20000
+    r"""Maximum number of records, across all shards, to pull down at once per Worker Process"""
+
+    load_balancing_algorithm: Annotated[
+        Annotated[
+            Optional[ShardLoadBalancing], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="loadBalancingAlgorithm"),
+    ] = ShardLoadBalancing.CONSISTENT_HASHING
+    r"""The load-balancing algorithm to use for spreading out shards across Workers and Worker Processes"""
+
+    aws_authentication_method: Annotated[
+        Annotated[
+            Optional[InputKinesisAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="awsAuthenticationMethod"),
+    ] = InputKinesisAuthenticationMethod.AUTO
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+
+    aws_secret_key: Annotated[Optional[str], pydantic.Field(alias="awsSecretKey")] = (
+        None
+    )
+
+    endpoint: Optional[str] = None
+    r"""Kinesis stream service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Kinesis stream-compatible endpoint."""
+
+    signature_version: Annotated[
+        Annotated[
+            Optional[InputKinesisSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
+    ] = InputKinesisSignatureVersion.V4
+    r"""Signature version to use for signing Kinesis stream requests"""
+
+    reuse_connections: Annotated[
+        Optional[bool], pydantic.Field(alias="reuseConnections")
+    ] = True
+    r"""Reuse connections between requests, which can improve performance"""
+
+    reject_unauthorized: Annotated[
+        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+    ] = True
+    r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
+
+    enable_assume_role: Annotated[
+        Optional[bool], pydantic.Field(alias="enableAssumeRole")
+    ] = False
+    r"""Use Assume Role credentials to access Kinesis stream"""
+
+    assume_role_arn: Annotated[Optional[str], pydantic.Field(alias="assumeRoleArn")] = (
+        None
+    )
+    r"""Amazon Resource Name (ARN) of the role to assume"""
+
+    assume_role_external_id: Annotated[
+        Optional[str], pydantic.Field(alias="assumeRoleExternalId")
+    ] = None
+    r"""External ID to use when assuming role"""
+
+    duration_seconds: Annotated[
+        Optional[float], pydantic.Field(alias="durationSeconds")
+    ] = 3600
+    r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
+
+    verify_kpl_check_sums: Annotated[
+        Optional[bool], pydantic.Field(alias="verifyKPLCheckSums")
+    ] = False
+    r"""Verify Kinesis Producer Library (KPL) event checksums"""
+
+    avoid_duplicates: Annotated[
+        Optional[bool], pydantic.Field(alias="avoidDuplicates")
+    ] = False
+    r"""When resuming streaming from a stored state, Stream will read the next available record, rather than rereading the last-read record. Enabling this setting can cause data loss after a Worker Node's unexpected shutdown or restart."""
+
+    metadata: Optional[List[InputKinesisMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    description: Optional[str] = None
+
+    aws_api_key: Annotated[Optional[str], pydantic.Field(alias="awsApiKey")] = None
+
+    aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
+    r"""Select or create a stored secret that references your access key and secret key"""
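To make the shape of this module concrete, here is a hedged sketch of building an InputKinesis config and serializing it back to the camelCase wire format. Only type, streamName, and region are required; the stream name and region below are placeholders, and model_validate/model_dump are standard Pydantic v2 calls that apply only on the assumption that the shared BaseModel is a Pydantic v2 subclass:

from cribl_control_plane.models.inputkinesis import InputKinesis

# Validation accepts the camelCase aliases defined via pydantic.Field(alias=...).
source = InputKinesis.model_validate({
    "type": "kinesis",
    "streamName": "example-stream",  # placeholder
    "region": "us-east-1",           # placeholder
    "shardIteratorType": "LATEST",   # overrides the TRIM_HORIZON default
})

# Dumping by alias restores the wire names; generated defaults ride along.
payload = source.model_dump(by_alias=True, exclude_none=True)
assert payload["streamName"] == "example-stream"
assert payload["getRecordsLimit"] == 5000  # default from the model above

Note that every enum field pairs metaclass=utils.OpenEnumMeta with PlainValidator(validate_open_enum(False)), which suggests the SDK tolerates enum values it does not know about rather than rejecting them; that behavior is inferred from the naming, not verified here.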
cribl_control_plane/models/inputkubeevents.py
@@ -0,0 +1,182 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputKubeEventsType(str, Enum, metaclass=utils.OpenEnumMeta):
+    KUBE_EVENTS = "kube_events"
+
+
+class InputKubeEventsConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputKubeEventsConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputKubeEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputKubeEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputKubeEventsPqTypedDict(TypedDict):
+    mode: NotRequired[InputKubeEventsMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputKubeEventsCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputKubeEventsPq(BaseModel):
+    mode: Annotated[
+        Optional[InputKubeEventsMode], PlainValidator(validate_open_enum(False))
+    ] = InputKubeEventsMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputKubeEventsCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKubeEventsCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputKubeEventsRuleTypedDict(TypedDict):
+    filter_: str
+    r"""JavaScript expression applied to Kubernetes objects. Return 'true' to include it."""
+    description: NotRequired[str]
+    r"""Optional description of this rule's purpose"""
+
+
+class InputKubeEventsRule(BaseModel):
+    filter_: Annotated[str, pydantic.Field(alias="filter")]
+    r"""JavaScript expression applied to Kubernetes objects. Return 'true' to include it."""
+
+    description: Optional[str] = None
+    r"""Optional description of this rule's purpose"""
+
+
+class InputKubeEventsMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputKubeEventsMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputKubeEventsTypedDict(TypedDict):
+    id: str
+    r"""Unique ID for this input"""
+    type: InputKubeEventsType
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+    send_to_routes: NotRequired[bool]
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputKubeEventsConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputKubeEventsPqTypedDict]
+    rules: NotRequired[List[InputKubeEventsRuleTypedDict]]
+    r"""Filtering on event fields"""
+    metadata: NotRequired[List[InputKubeEventsMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    description: NotRequired[str]
+
+
+class InputKubeEvents(BaseModel):
+    id: str
+    r"""Unique ID for this input"""
+
+    type: Annotated[InputKubeEventsType, PlainValidator(validate_open_enum(False))]
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputKubeEventsConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputKubeEventsPq] = None
+
+    rules: Optional[List[InputKubeEventsRule]] = None
+    r"""Filtering on event fields"""
+
+    metadata: Optional[List[InputKubeEventsMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    description: Optional[str] = None
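And an equivalent hedged sketch for the Kubernetes Events model above, mainly to show the filter_/filter aliasing on rules. The ID and rule expression are placeholders, and the same Pydantic v2 assumption about the shared BaseModel applies:

from cribl_control_plane.models.inputkubeevents import InputKubeEvents

kube_events = InputKubeEvents.model_validate({
    "id": "kube-events-demo",  # placeholder ID
    "type": "kube_events",
    "rules": [
        # "filter" is the wire name for the Python-side attribute `filter_`
        {"filter": "true", "description": "include every Kubernetes object"},
    ],
})

assert kube_events.rules is not None
assert kube_events.rules[0].filter_ == "true"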