cribl-control-plane 0.0.13 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of cribl-control-plane might be problematic.
- cribl_control_plane/__init__.py +17 -0
- cribl_control_plane/_hooks/__init__.py +5 -0
- cribl_control_plane/_hooks/clientcredentials.py +211 -0
- cribl_control_plane/_hooks/registration.py +13 -0
- cribl_control_plane/_hooks/sdkhooks.py +81 -0
- cribl_control_plane/_hooks/types.py +112 -0
- cribl_control_plane/_version.py +15 -0
- cribl_control_plane/auth_sdk.py +184 -0
- cribl_control_plane/basesdk.py +358 -0
- cribl_control_plane/errors/__init__.py +60 -0
- cribl_control_plane/errors/apierror.py +38 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
- cribl_control_plane/errors/error.py +24 -0
- cribl_control_plane/errors/healthstatus_error.py +38 -0
- cribl_control_plane/errors/no_response_error.py +13 -0
- cribl_control_plane/errors/responsevalidationerror.py +25 -0
- cribl_control_plane/health.py +166 -0
- cribl_control_plane/httpclient.py +126 -0
- cribl_control_plane/models/__init__.py +7305 -0
- cribl_control_plane/models/addhectokenrequest.py +34 -0
- cribl_control_plane/models/authtoken.py +13 -0
- cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
- cribl_control_plane/models/createinputop.py +24 -0
- cribl_control_plane/models/createoutputop.py +24 -0
- cribl_control_plane/models/createoutputtestbyidop.py +46 -0
- cribl_control_plane/models/criblevent.py +14 -0
- cribl_control_plane/models/deleteinputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getinputbyidop.py +37 -0
- cribl_control_plane/models/getoutputbyidop.py +37 -0
- cribl_control_plane/models/getoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
- cribl_control_plane/models/healthstatus.py +36 -0
- cribl_control_plane/models/input.py +199 -0
- cribl_control_plane/models/inputappscope.py +448 -0
- cribl_control_plane/models/inputazureblob.py +308 -0
- cribl_control_plane/models/inputcollection.py +208 -0
- cribl_control_plane/models/inputconfluentcloud.py +585 -0
- cribl_control_plane/models/inputcribl.py +165 -0
- cribl_control_plane/models/inputcriblhttp.py +341 -0
- cribl_control_plane/models/inputcribllakehttp.py +342 -0
- cribl_control_plane/models/inputcriblmetrics.py +175 -0
- cribl_control_plane/models/inputcribltcp.py +299 -0
- cribl_control_plane/models/inputcrowdstrike.py +410 -0
- cribl_control_plane/models/inputdatadogagent.py +364 -0
- cribl_control_plane/models/inputdatagen.py +180 -0
- cribl_control_plane/models/inputedgeprometheus.py +551 -0
- cribl_control_plane/models/inputelastic.py +494 -0
- cribl_control_plane/models/inputeventhub.py +360 -0
- cribl_control_plane/models/inputexec.py +213 -0
- cribl_control_plane/models/inputfile.py +259 -0
- cribl_control_plane/models/inputfirehose.py +341 -0
- cribl_control_plane/models/inputgooglepubsub.py +247 -0
- cribl_control_plane/models/inputgrafana_union.py +1247 -0
- cribl_control_plane/models/inputhttp.py +403 -0
- cribl_control_plane/models/inputhttpraw.py +407 -0
- cribl_control_plane/models/inputjournalfiles.py +208 -0
- cribl_control_plane/models/inputkafka.py +581 -0
- cribl_control_plane/models/inputkinesis.py +363 -0
- cribl_control_plane/models/inputkubeevents.py +182 -0
- cribl_control_plane/models/inputkubelogs.py +256 -0
- cribl_control_plane/models/inputkubemetrics.py +233 -0
- cribl_control_plane/models/inputloki.py +468 -0
- cribl_control_plane/models/inputmetrics.py +290 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
- cribl_control_plane/models/inputmsk.py +654 -0
- cribl_control_plane/models/inputnetflow.py +224 -0
- cribl_control_plane/models/inputoffice365mgmt.py +384 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
- cribl_control_plane/models/inputoffice365service.py +377 -0
- cribl_control_plane/models/inputopentelemetry.py +516 -0
- cribl_control_plane/models/inputprometheus.py +464 -0
- cribl_control_plane/models/inputprometheusrw.py +470 -0
- cribl_control_plane/models/inputrawudp.py +207 -0
- cribl_control_plane/models/inputs3.py +416 -0
- cribl_control_plane/models/inputs3inventory.py +440 -0
- cribl_control_plane/models/inputsecuritylake.py +425 -0
- cribl_control_plane/models/inputsnmp.py +274 -0
- cribl_control_plane/models/inputsplunk.py +387 -0
- cribl_control_plane/models/inputsplunkhec.py +478 -0
- cribl_control_plane/models/inputsplunksearch.py +537 -0
- cribl_control_plane/models/inputsqs.py +320 -0
- cribl_control_plane/models/inputsyslog_union.py +759 -0
- cribl_control_plane/models/inputsystemmetrics.py +533 -0
- cribl_control_plane/models/inputsystemstate.py +417 -0
- cribl_control_plane/models/inputtcp.py +359 -0
- cribl_control_plane/models/inputtcpjson.py +334 -0
- cribl_control_plane/models/inputwef.py +498 -0
- cribl_control_plane/models/inputwindowsmetrics.py +457 -0
- cribl_control_plane/models/inputwineventlogs.py +222 -0
- cribl_control_plane/models/inputwiz.py +334 -0
- cribl_control_plane/models/inputzscalerhec.py +439 -0
- cribl_control_plane/models/listinputop.py +24 -0
- cribl_control_plane/models/listoutputop.py +24 -0
- cribl_control_plane/models/logininfo.py +16 -0
- cribl_control_plane/models/output.py +229 -0
- cribl_control_plane/models/outputazureblob.py +471 -0
- cribl_control_plane/models/outputazuredataexplorer.py +660 -0
- cribl_control_plane/models/outputazureeventhub.py +321 -0
- cribl_control_plane/models/outputazurelogs.py +386 -0
- cribl_control_plane/models/outputclickhouse.py +650 -0
- cribl_control_plane/models/outputcloudwatch.py +273 -0
- cribl_control_plane/models/outputconfluentcloud.py +591 -0
- cribl_control_plane/models/outputcriblhttp.py +494 -0
- cribl_control_plane/models/outputcribllake.py +396 -0
- cribl_control_plane/models/outputcribltcp.py +387 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
- cribl_control_plane/models/outputdatadog.py +472 -0
- cribl_control_plane/models/outputdataset.py +437 -0
- cribl_control_plane/models/outputdefault.py +55 -0
- cribl_control_plane/models/outputdevnull.py +50 -0
- cribl_control_plane/models/outputdiskspool.py +89 -0
- cribl_control_plane/models/outputdls3.py +560 -0
- cribl_control_plane/models/outputdynatracehttp.py +454 -0
- cribl_control_plane/models/outputdynatraceotlp.py +486 -0
- cribl_control_plane/models/outputelastic.py +494 -0
- cribl_control_plane/models/outputelasticcloud.py +407 -0
- cribl_control_plane/models/outputexabeam.py +297 -0
- cribl_control_plane/models/outputfilesystem.py +357 -0
- cribl_control_plane/models/outputgooglechronicle.py +486 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
- cribl_control_plane/models/outputgooglepubsub.py +274 -0
- cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
- cribl_control_plane/models/outputgraphite.py +225 -0
- cribl_control_plane/models/outputhoneycomb.py +369 -0
- cribl_control_plane/models/outputhumiohec.py +389 -0
- cribl_control_plane/models/outputinfluxdb.py +523 -0
- cribl_control_plane/models/outputkafka.py +581 -0
- cribl_control_plane/models/outputkinesis.py +312 -0
- cribl_control_plane/models/outputloki.py +425 -0
- cribl_control_plane/models/outputminio.py +512 -0
- cribl_control_plane/models/outputmsk.py +654 -0
- cribl_control_plane/models/outputnetflow.py +80 -0
- cribl_control_plane/models/outputnewrelic.py +424 -0
- cribl_control_plane/models/outputnewrelicevents.py +401 -0
- cribl_control_plane/models/outputopentelemetry.py +669 -0
- cribl_control_plane/models/outputprometheus.py +485 -0
- cribl_control_plane/models/outputring.py +121 -0
- cribl_control_plane/models/outputrouter.py +83 -0
- cribl_control_plane/models/outputs3.py +556 -0
- cribl_control_plane/models/outputsamplesresponse.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +505 -0
- cribl_control_plane/models/outputsentinel.py +488 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
- cribl_control_plane/models/outputservicenow.py +543 -0
- cribl_control_plane/models/outputsignalfx.py +369 -0
- cribl_control_plane/models/outputsnmp.py +80 -0
- cribl_control_plane/models/outputsns.py +274 -0
- cribl_control_plane/models/outputsplunk.py +383 -0
- cribl_control_plane/models/outputsplunkhec.py +434 -0
- cribl_control_plane/models/outputsplunklb.py +558 -0
- cribl_control_plane/models/outputsqs.py +328 -0
- cribl_control_plane/models/outputstatsd.py +224 -0
- cribl_control_plane/models/outputstatsdext.py +225 -0
- cribl_control_plane/models/outputsumologic.py +378 -0
- cribl_control_plane/models/outputsyslog.py +415 -0
- cribl_control_plane/models/outputtcpjson.py +413 -0
- cribl_control_plane/models/outputtestrequest.py +15 -0
- cribl_control_plane/models/outputtestresponse.py +29 -0
- cribl_control_plane/models/outputwavefront.py +369 -0
- cribl_control_plane/models/outputwebhook.py +689 -0
- cribl_control_plane/models/outputxsiam.py +415 -0
- cribl_control_plane/models/schemeclientoauth.py +24 -0
- cribl_control_plane/models/security.py +36 -0
- cribl_control_plane/models/updatehectokenrequest.py +31 -0
- cribl_control_plane/models/updateinputbyidop.py +44 -0
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
- cribl_control_plane/models/updateoutputbyidop.py +44 -0
- cribl_control_plane/outputs.py +1615 -0
- cribl_control_plane/py.typed +1 -0
- cribl_control_plane/sdk.py +164 -0
- cribl_control_plane/sdkconfiguration.py +36 -0
- cribl_control_plane/sources.py +1355 -0
- cribl_control_plane/types/__init__.py +21 -0
- cribl_control_plane/types/basemodel.py +39 -0
- cribl_control_plane/utils/__init__.py +187 -0
- cribl_control_plane/utils/annotations.py +55 -0
- cribl_control_plane/utils/datetimes.py +23 -0
- cribl_control_plane/utils/enums.py +74 -0
- cribl_control_plane/utils/eventstreaming.py +238 -0
- cribl_control_plane/utils/forms.py +223 -0
- cribl_control_plane/utils/headers.py +136 -0
- cribl_control_plane/utils/logger.py +27 -0
- cribl_control_plane/utils/metadata.py +118 -0
- cribl_control_plane/utils/queryparams.py +205 -0
- cribl_control_plane/utils/requestbodies.py +66 -0
- cribl_control_plane/utils/retries.py +217 -0
- cribl_control_plane/utils/security.py +207 -0
- cribl_control_plane/utils/serializers.py +249 -0
- cribl_control_plane/utils/unmarshal_json_response.py +24 -0
- cribl_control_plane/utils/url.py +155 -0
- cribl_control_plane/utils/values.py +137 -0
- cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
- cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
- cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
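For orientation before the per-file diffs: Speakeasy-generated packages like this one typically expose a root client (sdk.py) plus per-resource modules such as sources.py and outputs.py listed above. The sketch below is hypothetical; the client class name, constructor arguments, and method names are assumptions inferred from the file names in the manifest, not confirmed by this diff.

# Hypothetical usage sketch; all names below are inferred from the file list above.
from cribl_control_plane import CriblControlPlane  # assumed root-client export from sdk.py

client = CriblControlPlane(
    server_url="https://cribl.example.com/api/v1",  # placeholder URL
    # authentication omitted; see models/security.py and _hooks/clientcredentials.py
)
inputs = client.sources.list_input()  # assumed method backed by models/listinputop.py
print(inputs)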
cribl_control_plane/models/inputazureblob.py
@@ -0,0 +1,308 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputAzureBlobType(str, Enum, metaclass=utils.OpenEnumMeta):
+    AZURE_BLOB = "azure_blob"
+
+
+class InputAzureBlobConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputAzureBlobConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputAzureBlobMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputAzureBlobCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputAzureBlobPqTypedDict(TypedDict):
+    mode: NotRequired[InputAzureBlobMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputAzureBlobCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputAzureBlobPq(BaseModel):
+    mode: Annotated[
+        Optional[InputAzureBlobMode], PlainValidator(validate_open_enum(False))
+    ] = InputAzureBlobMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputAzureBlobCompression], PlainValidator(validate_open_enum(False))
+    ] = InputAzureBlobCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputAzureBlobMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputAzureBlobMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputAzureBlobAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    MANUAL = "manual"
+    SECRET = "secret"
+    CLIENT_SECRET = "clientSecret"
+    CLIENT_CERT = "clientCert"
+
+
+class InputAzureBlobCertificateTypedDict(TypedDict):
+    certificate_name: str
+    r"""The certificate you registered as credentials for your app in the Azure portal"""
+
+
+class InputAzureBlobCertificate(BaseModel):
+    certificate_name: Annotated[str, pydantic.Field(alias="certificateName")]
+    r"""The certificate you registered as credentials for your app in the Azure portal"""
+
+
+class InputAzureBlobTypedDict(TypedDict):
+    type: InputAzureBlobType
+    queue_name: str
+    r"""The storage account queue name blob notifications will be read from. Value must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at initialization time. Example referencing a Global Variable: `myQueue-${C.vars.myVar}`"""
+    id: NotRequired[str]
+    r"""Unique ID for this input"""
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+    send_to_routes: NotRequired[bool]
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputAzureBlobConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputAzureBlobPqTypedDict]
+    file_filter: NotRequired[str]
+    r"""Regex matching file names to download and process. Defaults to: .*"""
+    visibility_timeout: NotRequired[float]
+    r"""The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request."""
+    num_receivers: NotRequired[float]
+    r"""How many receiver processes to run. The higher the number, the better the throughput - at the expense of CPU overhead."""
+    max_messages: NotRequired[float]
+    r"""The maximum number of messages to return in a poll request. Azure storage queues never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 32."""
+    service_period_secs: NotRequired[float]
+    r"""The duration (in seconds) which pollers should be validated and restarted if exited"""
+    skip_on_error: NotRequired[bool]
+    r"""Skip files that trigger a processing error. Disabled by default, which allows retries after processing errors."""
+    metadata: NotRequired[List[InputAzureBlobMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    breaker_rulesets: NotRequired[List[str]]
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+    stale_channel_flush_ms: NotRequired[float]
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+    parquet_chunk_size_mb: NotRequired[float]
+    r"""Maximum file size for each Parquet chunk"""
+    parquet_chunk_download_timeout: NotRequired[float]
+    r"""The maximum time allowed for downloading a Parquet chunk. Processing will stop if a chunk cannot be downloaded within the time specified."""
+    auth_type: NotRequired[InputAzureBlobAuthenticationMethod]
+    description: NotRequired[str]
+    connection_string: NotRequired[str]
+    r"""Enter your Azure Storage account connection string. If left blank, Stream will fall back to env.AZURE_STORAGE_CONNECTION_STRING."""
+    text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    storage_account_name: NotRequired[str]
+    r"""The name of your Azure storage account"""
+    tenant_id: NotRequired[str]
+    r"""The service principal's tenant ID"""
+    client_id: NotRequired[str]
+    r"""The service principal's client ID"""
+    azure_cloud: NotRequired[str]
+    r"""The Azure cloud to use. Defaults to Azure Public Cloud."""
+    endpoint_suffix: NotRequired[str]
+    r"""Endpoint suffix for the service URL. Takes precedence over the Azure Cloud setting. Defaults to core.windows.net."""
+    client_text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    certificate: NotRequired[InputAzureBlobCertificateTypedDict]
+
+
+class InputAzureBlob(BaseModel):
+    type: Annotated[InputAzureBlobType, PlainValidator(validate_open_enum(False))]
+
+    queue_name: Annotated[str, pydantic.Field(alias="queueName")]
+    r"""The storage account queue name blob notifications will be read from. Value must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at initialization time. Example referencing a Global Variable: `myQueue-${C.vars.myVar}`"""
+
+    id: Optional[str] = None
+    r"""Unique ID for this input"""
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputAzureBlobConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputAzureBlobPq] = None
+
+    file_filter: Annotated[Optional[str], pydantic.Field(alias="fileFilter")] = "/.*/"
+    r"""Regex matching file names to download and process. Defaults to: .*"""
+
+    visibility_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="visibilityTimeout")
+    ] = 600
+    r"""The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request."""
+
+    num_receivers: Annotated[Optional[float], pydantic.Field(alias="numReceivers")] = 1
+    r"""How many receiver processes to run. The higher the number, the better the throughput - at the expense of CPU overhead."""
+
+    max_messages: Annotated[Optional[float], pydantic.Field(alias="maxMessages")] = 1
+    r"""The maximum number of messages to return in a poll request. Azure storage queues never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 32."""
+
+    service_period_secs: Annotated[
+        Optional[float], pydantic.Field(alias="servicePeriodSecs")
+    ] = 5
+    r"""The duration (in seconds) which pollers should be validated and restarted if exited"""
+
+    skip_on_error: Annotated[Optional[bool], pydantic.Field(alias="skipOnError")] = (
+        False
+    )
+    r"""Skip files that trigger a processing error. Disabled by default, which allows retries after processing errors."""
+
+    metadata: Optional[List[InputAzureBlobMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    breaker_rulesets: Annotated[
+        Optional[List[str]], pydantic.Field(alias="breakerRulesets")
+    ] = None
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+
+    stale_channel_flush_ms: Annotated[
+        Optional[float], pydantic.Field(alias="staleChannelFlushMs")
+    ] = 10000
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+
+    parquet_chunk_size_mb: Annotated[
+        Optional[float], pydantic.Field(alias="parquetChunkSizeMB")
+    ] = 5
+    r"""Maximum file size for each Parquet chunk"""
+
+    parquet_chunk_download_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="parquetChunkDownloadTimeout")
+    ] = 600
+    r"""The maximum time allowed for downloading a Parquet chunk. Processing will stop if a chunk cannot be downloaded within the time specified."""
+
+    auth_type: Annotated[
+        Annotated[
+            Optional[InputAzureBlobAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
+    ] = InputAzureBlobAuthenticationMethod.MANUAL
+
+    description: Optional[str] = None
+
+    connection_string: Annotated[
+        Optional[str], pydantic.Field(alias="connectionString")
+    ] = None
+    r"""Enter your Azure Storage account connection string. If left blank, Stream will fall back to env.AZURE_STORAGE_CONNECTION_STRING."""
+
+    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
+    r"""Select or create a stored text secret"""
+
+    storage_account_name: Annotated[
+        Optional[str], pydantic.Field(alias="storageAccountName")
+    ] = None
+    r"""The name of your Azure storage account"""
+
+    tenant_id: Annotated[Optional[str], pydantic.Field(alias="tenantId")] = None
+    r"""The service principal's tenant ID"""
+
+    client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
+    r"""The service principal's client ID"""
+
+    azure_cloud: Annotated[Optional[str], pydantic.Field(alias="azureCloud")] = None
+    r"""The Azure cloud to use. Defaults to Azure Public Cloud."""
+
+    endpoint_suffix: Annotated[
+        Optional[str], pydantic.Field(alias="endpointSuffix")
+    ] = None
+    r"""Endpoint suffix for the service URL. Takes precedence over the Azure Cloud setting. Defaults to core.windows.net."""
+
+    client_text_secret: Annotated[
+        Optional[str], pydantic.Field(alias="clientTextSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
+
+    certificate: Optional[InputAzureBlobCertificate] = None
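The generated model above is a pydantic v2 BaseModel whose fields carry camelCase wire aliases and open-enum validators. A minimal construction sketch, assuming pydantic v2 semantics (the constructor accepts alias names wherever an alias is defined); the specific values are illustrative only:

from cribl_control_plane.models.inputazureblob import (
    InputAzureBlob,
    InputAzureBlobAuthenticationMethod,
    InputAzureBlobType,
)

# Aliased fields are passed by their wire names (queueName, authType, ...);
# `type` has no alias, so it is passed by field name.
source = InputAzureBlob(
    type=InputAzureBlobType.AZURE_BLOB,
    queueName="`myQueue-${C.vars.myVar}`",  # JavaScript expression, per the field docstring
    authType=InputAzureBlobAuthenticationMethod.CLIENT_SECRET,
    storageAccountName="examplestorage",  # hypothetical account name
)

# Unset optionals keep the generated defaults (visibilityTimeout=600, maxMessages=1, ...);
# mode="json" plus by_alias=True serializes back to the camelCase wire shape.
print(source.model_dump(mode="json", by_alias=True, exclude_none=True))

Note the utils.OpenEnumMeta metaclass and validate_open_enum(False) validators on the enum fields: these appear designed to tolerate enum values the SDK does not yet know about, rather than rejecting them at validation time.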
cribl_control_plane/models/inputcollection.py
@@ -0,0 +1,208 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputCollectionType(str, Enum, metaclass=utils.OpenEnumMeta):
+    COLLECTION = "collection"
+
+
+class InputCollectionConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputCollectionConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputCollectionMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputCollectionCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputCollectionPqTypedDict(TypedDict):
+    mode: NotRequired[InputCollectionMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputCollectionCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputCollectionPq(BaseModel):
+    mode: Annotated[
+        Optional[InputCollectionMode], PlainValidator(validate_open_enum(False))
+    ] = InputCollectionMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputCollectionCompression], PlainValidator(validate_open_enum(False))
+    ] = InputCollectionCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputCollectionPreprocessTypedDict(TypedDict):
+    disabled: NotRequired[bool]
+    command: NotRequired[str]
+    r"""Command to feed the data through (via stdin) and process its output (stdout)"""
+    args: NotRequired[List[str]]
+    r"""Arguments to be added to the custom command"""
+
+
+class InputCollectionPreprocess(BaseModel):
+    disabled: Optional[bool] = True
+
+    command: Optional[str] = None
+    r"""Command to feed the data through (via stdin) and process its output (stdout)"""
+
+    args: Optional[List[str]] = None
+    r"""Arguments to be added to the custom command"""
+
+
+class InputCollectionMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputCollectionMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputCollectionTypedDict(TypedDict):
+    id: str
+    r"""Unique ID for this input"""
+    type: NotRequired[InputCollectionType]
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process results"""
+    send_to_routes: NotRequired[bool]
+    r"""Send events to normal routing and event processing. Disable to select a specific Pipeline/Destination combination."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputCollectionConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputCollectionPqTypedDict]
+    breaker_rulesets: NotRequired[List[str]]
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+    stale_channel_flush_ms: NotRequired[float]
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+    preprocess: NotRequired[InputCollectionPreprocessTypedDict]
+    throttle_rate_per_sec: NotRequired[str]
+    r"""Rate (in bytes per second) to throttle while writing to an output. Accepts values with multiple-byte units, such as KB, MB, and GB. (Example: 42 MB) Default value of 0 specifies no throttling."""
+    metadata: NotRequired[List[InputCollectionMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    output: NotRequired[str]
+    r"""Destination to send results to"""
+
+
+class InputCollection(BaseModel):
+    id: str
+    r"""Unique ID for this input"""
+
+    type: Annotated[
+        Optional[InputCollectionType], PlainValidator(validate_open_enum(False))
+    ] = InputCollectionType.COLLECTION
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process results"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Send events to normal routing and event processing. Disable to select a specific Pipeline/Destination combination."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputCollectionConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputCollectionPq] = None
+
+    breaker_rulesets: Annotated[
+        Optional[List[str]], pydantic.Field(alias="breakerRulesets")
+    ] = None
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+
+    stale_channel_flush_ms: Annotated[
+        Optional[float], pydantic.Field(alias="staleChannelFlushMs")
+    ] = 10000
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+
+    preprocess: Optional[InputCollectionPreprocess] = None
+
+    throttle_rate_per_sec: Annotated[
+        Optional[str], pydantic.Field(alias="throttleRatePerSec")
+    ] = "0"
+    r"""Rate (in bytes per second) to throttle while writing to an output. Accepts values with multiple-byte units, such as KB, MB, and GB. (Example: 42 MB) Default value of 0 specifies no throttling."""
+
+    metadata: Optional[List[InputCollectionMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    output: Optional[str] = None
+    r"""Destination to send results to"""
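As with the Azure Blob model, each generated model ships with a parallel TypedDict for dict-shaped payloads. A sketch of round-tripping one through InputCollection, assuming the generated BaseModel (types/basemodel.py) enables populate_by_name so the snake_case TypedDict keys validate against the aliased fields, as Speakeasy-generated base models typically do:

from cribl_control_plane.models.inputcollection import (
    InputCollection,
    InputCollectionTypedDict,
)

# Dict payload matching InputCollectionTypedDict; only `id` is required.
payload: InputCollectionTypedDict = {
    "id": "my-collection-input",        # hypothetical input ID
    "throttle_rate_per_sec": "42 MB",   # per the docstring, "0" means no throttling
}

collection = InputCollection.model_validate(payload)
print(collection.type)  # defaults to InputCollectionType.COLLECTION
print(collection.model_dump(mode="json", by_alias=True, exclude_none=True))  # camelCase wire shape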