cribl-control-plane 0.0.13 (cribl_control_plane-0.0.13-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/__init__.py +17 -0
- cribl_control_plane/_hooks/__init__.py +5 -0
- cribl_control_plane/_hooks/clientcredentials.py +211 -0
- cribl_control_plane/_hooks/registration.py +13 -0
- cribl_control_plane/_hooks/sdkhooks.py +81 -0
- cribl_control_plane/_hooks/types.py +112 -0
- cribl_control_plane/_version.py +15 -0
- cribl_control_plane/auth_sdk.py +184 -0
- cribl_control_plane/basesdk.py +358 -0
- cribl_control_plane/errors/__init__.py +60 -0
- cribl_control_plane/errors/apierror.py +38 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
- cribl_control_plane/errors/error.py +24 -0
- cribl_control_plane/errors/healthstatus_error.py +38 -0
- cribl_control_plane/errors/no_response_error.py +13 -0
- cribl_control_plane/errors/responsevalidationerror.py +25 -0
- cribl_control_plane/health.py +166 -0
- cribl_control_plane/httpclient.py +126 -0
- cribl_control_plane/models/__init__.py +7305 -0
- cribl_control_plane/models/addhectokenrequest.py +34 -0
- cribl_control_plane/models/authtoken.py +13 -0
- cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
- cribl_control_plane/models/createinputop.py +24 -0
- cribl_control_plane/models/createoutputop.py +24 -0
- cribl_control_plane/models/createoutputtestbyidop.py +46 -0
- cribl_control_plane/models/criblevent.py +14 -0
- cribl_control_plane/models/deleteinputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getinputbyidop.py +37 -0
- cribl_control_plane/models/getoutputbyidop.py +37 -0
- cribl_control_plane/models/getoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
- cribl_control_plane/models/healthstatus.py +36 -0
- cribl_control_plane/models/input.py +199 -0
- cribl_control_plane/models/inputappscope.py +448 -0
- cribl_control_plane/models/inputazureblob.py +308 -0
- cribl_control_plane/models/inputcollection.py +208 -0
- cribl_control_plane/models/inputconfluentcloud.py +585 -0
- cribl_control_plane/models/inputcribl.py +165 -0
- cribl_control_plane/models/inputcriblhttp.py +341 -0
- cribl_control_plane/models/inputcribllakehttp.py +342 -0
- cribl_control_plane/models/inputcriblmetrics.py +175 -0
- cribl_control_plane/models/inputcribltcp.py +299 -0
- cribl_control_plane/models/inputcrowdstrike.py +410 -0
- cribl_control_plane/models/inputdatadogagent.py +364 -0
- cribl_control_plane/models/inputdatagen.py +180 -0
- cribl_control_plane/models/inputedgeprometheus.py +551 -0
- cribl_control_plane/models/inputelastic.py +494 -0
- cribl_control_plane/models/inputeventhub.py +360 -0
- cribl_control_plane/models/inputexec.py +213 -0
- cribl_control_plane/models/inputfile.py +259 -0
- cribl_control_plane/models/inputfirehose.py +341 -0
- cribl_control_plane/models/inputgooglepubsub.py +247 -0
- cribl_control_plane/models/inputgrafana_union.py +1247 -0
- cribl_control_plane/models/inputhttp.py +403 -0
- cribl_control_plane/models/inputhttpraw.py +407 -0
- cribl_control_plane/models/inputjournalfiles.py +208 -0
- cribl_control_plane/models/inputkafka.py +581 -0
- cribl_control_plane/models/inputkinesis.py +363 -0
- cribl_control_plane/models/inputkubeevents.py +182 -0
- cribl_control_plane/models/inputkubelogs.py +256 -0
- cribl_control_plane/models/inputkubemetrics.py +233 -0
- cribl_control_plane/models/inputloki.py +468 -0
- cribl_control_plane/models/inputmetrics.py +290 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
- cribl_control_plane/models/inputmsk.py +654 -0
- cribl_control_plane/models/inputnetflow.py +224 -0
- cribl_control_plane/models/inputoffice365mgmt.py +384 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
- cribl_control_plane/models/inputoffice365service.py +377 -0
- cribl_control_plane/models/inputopentelemetry.py +516 -0
- cribl_control_plane/models/inputprometheus.py +464 -0
- cribl_control_plane/models/inputprometheusrw.py +470 -0
- cribl_control_plane/models/inputrawudp.py +207 -0
- cribl_control_plane/models/inputs3.py +416 -0
- cribl_control_plane/models/inputs3inventory.py +440 -0
- cribl_control_plane/models/inputsecuritylake.py +425 -0
- cribl_control_plane/models/inputsnmp.py +274 -0
- cribl_control_plane/models/inputsplunk.py +387 -0
- cribl_control_plane/models/inputsplunkhec.py +478 -0
- cribl_control_plane/models/inputsplunksearch.py +537 -0
- cribl_control_plane/models/inputsqs.py +320 -0
- cribl_control_plane/models/inputsyslog_union.py +759 -0
- cribl_control_plane/models/inputsystemmetrics.py +533 -0
- cribl_control_plane/models/inputsystemstate.py +417 -0
- cribl_control_plane/models/inputtcp.py +359 -0
- cribl_control_plane/models/inputtcpjson.py +334 -0
- cribl_control_plane/models/inputwef.py +498 -0
- cribl_control_plane/models/inputwindowsmetrics.py +457 -0
- cribl_control_plane/models/inputwineventlogs.py +222 -0
- cribl_control_plane/models/inputwiz.py +334 -0
- cribl_control_plane/models/inputzscalerhec.py +439 -0
- cribl_control_plane/models/listinputop.py +24 -0
- cribl_control_plane/models/listoutputop.py +24 -0
- cribl_control_plane/models/logininfo.py +16 -0
- cribl_control_plane/models/output.py +229 -0
- cribl_control_plane/models/outputazureblob.py +471 -0
- cribl_control_plane/models/outputazuredataexplorer.py +660 -0
- cribl_control_plane/models/outputazureeventhub.py +321 -0
- cribl_control_plane/models/outputazurelogs.py +386 -0
- cribl_control_plane/models/outputclickhouse.py +650 -0
- cribl_control_plane/models/outputcloudwatch.py +273 -0
- cribl_control_plane/models/outputconfluentcloud.py +591 -0
- cribl_control_plane/models/outputcriblhttp.py +494 -0
- cribl_control_plane/models/outputcribllake.py +396 -0
- cribl_control_plane/models/outputcribltcp.py +387 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
- cribl_control_plane/models/outputdatadog.py +472 -0
- cribl_control_plane/models/outputdataset.py +437 -0
- cribl_control_plane/models/outputdefault.py +55 -0
- cribl_control_plane/models/outputdevnull.py +50 -0
- cribl_control_plane/models/outputdiskspool.py +89 -0
- cribl_control_plane/models/outputdls3.py +560 -0
- cribl_control_plane/models/outputdynatracehttp.py +454 -0
- cribl_control_plane/models/outputdynatraceotlp.py +486 -0
- cribl_control_plane/models/outputelastic.py +494 -0
- cribl_control_plane/models/outputelasticcloud.py +407 -0
- cribl_control_plane/models/outputexabeam.py +297 -0
- cribl_control_plane/models/outputfilesystem.py +357 -0
- cribl_control_plane/models/outputgooglechronicle.py +486 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
- cribl_control_plane/models/outputgooglepubsub.py +274 -0
- cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
- cribl_control_plane/models/outputgraphite.py +225 -0
- cribl_control_plane/models/outputhoneycomb.py +369 -0
- cribl_control_plane/models/outputhumiohec.py +389 -0
- cribl_control_plane/models/outputinfluxdb.py +523 -0
- cribl_control_plane/models/outputkafka.py +581 -0
- cribl_control_plane/models/outputkinesis.py +312 -0
- cribl_control_plane/models/outputloki.py +425 -0
- cribl_control_plane/models/outputminio.py +512 -0
- cribl_control_plane/models/outputmsk.py +654 -0
- cribl_control_plane/models/outputnetflow.py +80 -0
- cribl_control_plane/models/outputnewrelic.py +424 -0
- cribl_control_plane/models/outputnewrelicevents.py +401 -0
- cribl_control_plane/models/outputopentelemetry.py +669 -0
- cribl_control_plane/models/outputprometheus.py +485 -0
- cribl_control_plane/models/outputring.py +121 -0
- cribl_control_plane/models/outputrouter.py +83 -0
- cribl_control_plane/models/outputs3.py +556 -0
- cribl_control_plane/models/outputsamplesresponse.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +505 -0
- cribl_control_plane/models/outputsentinel.py +488 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
- cribl_control_plane/models/outputservicenow.py +543 -0
- cribl_control_plane/models/outputsignalfx.py +369 -0
- cribl_control_plane/models/outputsnmp.py +80 -0
- cribl_control_plane/models/outputsns.py +274 -0
- cribl_control_plane/models/outputsplunk.py +383 -0
- cribl_control_plane/models/outputsplunkhec.py +434 -0
- cribl_control_plane/models/outputsplunklb.py +558 -0
- cribl_control_plane/models/outputsqs.py +328 -0
- cribl_control_plane/models/outputstatsd.py +224 -0
- cribl_control_plane/models/outputstatsdext.py +225 -0
- cribl_control_plane/models/outputsumologic.py +378 -0
- cribl_control_plane/models/outputsyslog.py +415 -0
- cribl_control_plane/models/outputtcpjson.py +413 -0
- cribl_control_plane/models/outputtestrequest.py +15 -0
- cribl_control_plane/models/outputtestresponse.py +29 -0
- cribl_control_plane/models/outputwavefront.py +369 -0
- cribl_control_plane/models/outputwebhook.py +689 -0
- cribl_control_plane/models/outputxsiam.py +415 -0
- cribl_control_plane/models/schemeclientoauth.py +24 -0
- cribl_control_plane/models/security.py +36 -0
- cribl_control_plane/models/updatehectokenrequest.py +31 -0
- cribl_control_plane/models/updateinputbyidop.py +44 -0
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
- cribl_control_plane/models/updateoutputbyidop.py +44 -0
- cribl_control_plane/outputs.py +1615 -0
- cribl_control_plane/py.typed +1 -0
- cribl_control_plane/sdk.py +164 -0
- cribl_control_plane/sdkconfiguration.py +36 -0
- cribl_control_plane/sources.py +1355 -0
- cribl_control_plane/types/__init__.py +21 -0
- cribl_control_plane/types/basemodel.py +39 -0
- cribl_control_plane/utils/__init__.py +187 -0
- cribl_control_plane/utils/annotations.py +55 -0
- cribl_control_plane/utils/datetimes.py +23 -0
- cribl_control_plane/utils/enums.py +74 -0
- cribl_control_plane/utils/eventstreaming.py +238 -0
- cribl_control_plane/utils/forms.py +223 -0
- cribl_control_plane/utils/headers.py +136 -0
- cribl_control_plane/utils/logger.py +27 -0
- cribl_control_plane/utils/metadata.py +118 -0
- cribl_control_plane/utils/queryparams.py +205 -0
- cribl_control_plane/utils/requestbodies.py +66 -0
- cribl_control_plane/utils/retries.py +217 -0
- cribl_control_plane/utils/security.py +207 -0
- cribl_control_plane/utils/serializers.py +249 -0
- cribl_control_plane/utils/unmarshal_json_response.py +24 -0
- cribl_control_plane/utils/url.py +155 -0
- cribl_control_plane/utils/values.py +137 -0
- cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
- cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
- cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
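For orientation, the listing above is the wheel's RECORD: generated models live under cribl_control_plane/models/, one module per Source or Destination type. A minimal sketch of pinning this release and importing the model whose diff follows — the pip spelling and module path are taken from the listing above; nothing else about usage is implied by the diff:

# Pin the exact release shown above:
#   pip install cribl-control-plane==0.0.13
# Then import the generated S3 Inventory Source model added in this version:
from cribl_control_plane.models.inputs3inventory import InputS3Inventory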
cribl_control_plane/models/inputs3inventory.py
@@ -0,0 +1,440 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputS3InventoryType(str, Enum, metaclass=utils.OpenEnumMeta):
+    S3_INVENTORY = "s3_inventory"
+
+
+class InputS3InventoryConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputS3InventoryConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputS3InventoryMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputS3InventoryCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputS3InventoryPqTypedDict(TypedDict):
+    mode: NotRequired[InputS3InventoryMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputS3InventoryCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputS3InventoryPq(BaseModel):
+    mode: Annotated[
+        Optional[InputS3InventoryMode], PlainValidator(validate_open_enum(False))
+    ] = InputS3InventoryMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputS3InventoryCompression], PlainValidator(validate_open_enum(False))
+    ] = InputS3InventoryCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputS3InventoryAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+
+    AUTO = "auto"
+    MANUAL = "manual"
+    SECRET = "secret"
+
+
+class InputS3InventorySignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Signature version to use for signing S3 requests"""
+
+    V2 = "v2"
+    V4 = "v4"
+
+
+class InputS3InventoryPreprocessTypedDict(TypedDict):
+    disabled: NotRequired[bool]
+    command: NotRequired[str]
+    r"""Command to feed the data through (via stdin) and process its output (stdout)"""
+    args: NotRequired[List[str]]
+    r"""Arguments to be added to the custom command"""
+
+
+class InputS3InventoryPreprocess(BaseModel):
+    disabled: Optional[bool] = True
+
+    command: Optional[str] = None
+    r"""Command to feed the data through (via stdin) and process its output (stdout)"""
+
+    args: Optional[List[str]] = None
+    r"""Arguments to be added to the custom command"""
+
+
+class InputS3InventoryMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputS3InventoryMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputS3InventoryCheckpointingTypedDict(TypedDict):
+    enabled: NotRequired[bool]
+    r"""Resume processing files after an interruption"""
+    retries: NotRequired[float]
+    r"""The number of times to retry processing when a processing error occurs. If Skip file on error is enabled, this setting is ignored."""
+
+
+class InputS3InventoryCheckpointing(BaseModel):
+    enabled: Optional[bool] = False
+    r"""Resume processing files after an interruption"""
+
+    retries: Optional[float] = 5
+    r"""The number of times to retry processing when a processing error occurs. If Skip file on error is enabled, this setting is ignored."""
+
+
+class InputS3InventoryTagAfterProcessing(str, Enum, metaclass=utils.OpenEnumMeta):
+    FALSE = "false"
+    TRUE = "true"
+
+
+class InputS3InventoryTypedDict(TypedDict):
+    type: InputS3InventoryType
+    queue_name: str
+    r"""The name, URL, or ARN of the SQS queue to read notifications from. When a non-AWS URL is specified, format must be: '{url}/myQueueName'. Example: 'https://host:port/myQueueName'. Value must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `https://host:port/myQueue-${C.vars.myVar}`."""
+    id: NotRequired[str]
+    r"""Unique ID for this input"""
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+    send_to_routes: NotRequired[bool]
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputS3InventoryConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputS3InventoryPqTypedDict]
+    file_filter: NotRequired[str]
+    r"""Regex matching file names to download and process. Defaults to: .*"""
+    aws_account_id: NotRequired[str]
+    r"""SQS queue owner's AWS account ID. Leave empty if SQS queue is in same AWS account."""
+    aws_authentication_method: NotRequired[InputS3InventoryAuthenticationMethod]
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+    aws_secret_key: NotRequired[str]
+    region: NotRequired[str]
+    r"""AWS Region where the S3 bucket and SQS queue are located. Required, unless the Queue entry is a URL or ARN that includes a Region."""
+    endpoint: NotRequired[str]
+    r"""S3 service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to S3-compatible endpoint."""
+    signature_version: NotRequired[InputS3InventorySignatureVersion]
+    r"""Signature version to use for signing S3 requests"""
+    reuse_connections: NotRequired[bool]
+    r"""Reuse connections between requests, which can improve performance"""
+    reject_unauthorized: NotRequired[bool]
+    r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
+    breaker_rulesets: NotRequired[List[str]]
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+    stale_channel_flush_ms: NotRequired[float]
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+    max_messages: NotRequired[float]
+    r"""The maximum number of messages SQS should return in a poll request. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 10."""
+    visibility_timeout: NotRequired[float]
+    r"""After messages are retrieved by a ReceiveMessage request, @{product} will hide them from subsequent retrieve requests for at least this duration. You can set this as high as 43200 sec. (12 hours)."""
+    num_receivers: NotRequired[float]
+    r"""How many receiver processes to run. The higher the number, the better the throughput - at the expense of CPU overhead."""
+    socket_timeout: NotRequired[float]
+    r"""Socket inactivity timeout (in seconds). Increase this value if timeouts occur due to backpressure."""
+    skip_on_error: NotRequired[bool]
+    r"""Skip files that trigger a processing error. Disabled by default, which allows retries after processing errors."""
+    enable_assume_role: NotRequired[bool]
+    r"""Use Assume Role credentials to access Amazon S3"""
+    assume_role_arn: NotRequired[str]
+    r"""Amazon Resource Name (ARN) of the role to assume"""
+    assume_role_external_id: NotRequired[str]
+    r"""External ID to use when assuming role"""
+    duration_seconds: NotRequired[float]
+    r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
+    enable_sqs_assume_role: NotRequired[bool]
+    r"""Use Assume Role credentials when accessing Amazon SQS"""
+    preprocess: NotRequired[InputS3InventoryPreprocessTypedDict]
+    metadata: NotRequired[List[InputS3InventoryMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    parquet_chunk_size_mb: NotRequired[float]
+    r"""Maximum file size for each Parquet chunk"""
+    parquet_chunk_download_timeout: NotRequired[float]
+    r"""The maximum time allowed for downloading a Parquet chunk. Processing will stop if a chunk cannot be downloaded within the time specified."""
+    checkpointing: NotRequired[InputS3InventoryCheckpointingTypedDict]
+    poll_timeout: NotRequired[float]
+    r"""How long to wait for events before trying polling again. The lower the number the higher the AWS bill. The higher the number the longer it will take for the source to react to configuration changes and system restarts."""
+    checksum_suffix: NotRequired[str]
+    r"""Filename suffix of the manifest checksum file. If a filename matching this suffix is received in the queue, the matching manifest file will be downloaded and validated against its value. Defaults to \"checksum\" """
+    max_manifest_size_kb: NotRequired[int]
+    r"""Maximum download size (KB) of each manifest or checksum file. Manifest files larger than this size will not be read. Defaults to 4096."""
+    validate_inventory_files: NotRequired[bool]
+    r"""If set to Yes, each inventory file in the manifest will be validated against its checksum. Defaults to false"""
+    description: NotRequired[str]
+    aws_api_key: NotRequired[str]
+    aws_secret: NotRequired[str]
+    r"""Select or create a stored secret that references your access key and secret key"""
+    tag_after_processing: NotRequired[InputS3InventoryTagAfterProcessing]
+    processed_tag_key: NotRequired[str]
+    r"""The key for the S3 object tag applied after processing. This field accepts an expression for dynamic generation."""
+    processed_tag_value: NotRequired[str]
+    r"""The value for the S3 object tag applied after processing. This field accepts an expression for dynamic generation."""
+
+
+class InputS3Inventory(BaseModel):
+    type: Annotated[InputS3InventoryType, PlainValidator(validate_open_enum(False))]
+
+    queue_name: Annotated[str, pydantic.Field(alias="queueName")]
+    r"""The name, URL, or ARN of the SQS queue to read notifications from. When a non-AWS URL is specified, format must be: '{url}/myQueueName'. Example: 'https://host:port/myQueueName'. Value must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `https://host:port/myQueue-${C.vars.myVar}`."""
+
+    id: Optional[str] = None
+    r"""Unique ID for this input"""
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputS3InventoryConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputS3InventoryPq] = None
+
+    file_filter: Annotated[Optional[str], pydantic.Field(alias="fileFilter")] = "/.*/"
+    r"""Regex matching file names to download and process. Defaults to: .*"""
+
+    aws_account_id: Annotated[Optional[str], pydantic.Field(alias="awsAccountId")] = (
+        None
+    )
+    r"""SQS queue owner's AWS account ID. Leave empty if SQS queue is in same AWS account."""
+
+    aws_authentication_method: Annotated[
+        Annotated[
+            Optional[InputS3InventoryAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="awsAuthenticationMethod"),
+    ] = InputS3InventoryAuthenticationMethod.AUTO
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+
+    aws_secret_key: Annotated[Optional[str], pydantic.Field(alias="awsSecretKey")] = (
+        None
+    )
+
+    region: Optional[str] = None
+    r"""AWS Region where the S3 bucket and SQS queue are located. Required, unless the Queue entry is a URL or ARN that includes a Region."""
+
+    endpoint: Optional[str] = None
+    r"""S3 service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to S3-compatible endpoint."""
+
+    signature_version: Annotated[
+        Annotated[
+            Optional[InputS3InventorySignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
+    ] = InputS3InventorySignatureVersion.V4
+    r"""Signature version to use for signing S3 requests"""
+
+    reuse_connections: Annotated[
+        Optional[bool], pydantic.Field(alias="reuseConnections")
+    ] = True
+    r"""Reuse connections between requests, which can improve performance"""
+
+    reject_unauthorized: Annotated[
+        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+    ] = True
+    r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
+
+    breaker_rulesets: Annotated[
+        Optional[List[str]], pydantic.Field(alias="breakerRulesets")
+    ] = None
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+
+    stale_channel_flush_ms: Annotated[
+        Optional[float], pydantic.Field(alias="staleChannelFlushMs")
+    ] = 10000
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+
+    max_messages: Annotated[Optional[float], pydantic.Field(alias="maxMessages")] = 1
+    r"""The maximum number of messages SQS should return in a poll request. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 10."""
+
+    visibility_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="visibilityTimeout")
+    ] = 600
+    r"""After messages are retrieved by a ReceiveMessage request, @{product} will hide them from subsequent retrieve requests for at least this duration. You can set this as high as 43200 sec. (12 hours)."""
+
+    num_receivers: Annotated[Optional[float], pydantic.Field(alias="numReceivers")] = 1
+    r"""How many receiver processes to run. The higher the number, the better the throughput - at the expense of CPU overhead."""
+
+    socket_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="socketTimeout")
+    ] = 300
+    r"""Socket inactivity timeout (in seconds). Increase this value if timeouts occur due to backpressure."""
+
+    skip_on_error: Annotated[Optional[bool], pydantic.Field(alias="skipOnError")] = (
+        False
+    )
+    r"""Skip files that trigger a processing error. Disabled by default, which allows retries after processing errors."""
+
+    enable_assume_role: Annotated[
+        Optional[bool], pydantic.Field(alias="enableAssumeRole")
+    ] = True
+    r"""Use Assume Role credentials to access Amazon S3"""
+
+    assume_role_arn: Annotated[Optional[str], pydantic.Field(alias="assumeRoleArn")] = (
+        None
+    )
+    r"""Amazon Resource Name (ARN) of the role to assume"""
+
+    assume_role_external_id: Annotated[
+        Optional[str], pydantic.Field(alias="assumeRoleExternalId")
+    ] = None
+    r"""External ID to use when assuming role"""
+
+    duration_seconds: Annotated[
+        Optional[float], pydantic.Field(alias="durationSeconds")
+    ] = 3600
+    r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
+
+    enable_sqs_assume_role: Annotated[
+        Optional[bool], pydantic.Field(alias="enableSQSAssumeRole")
+    ] = False
+    r"""Use Assume Role credentials when accessing Amazon SQS"""
+
+    preprocess: Optional[InputS3InventoryPreprocess] = None
+
+    metadata: Optional[List[InputS3InventoryMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    parquet_chunk_size_mb: Annotated[
+        Optional[float], pydantic.Field(alias="parquetChunkSizeMB")
+    ] = 5
+    r"""Maximum file size for each Parquet chunk"""
+
+    parquet_chunk_download_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="parquetChunkDownloadTimeout")
+    ] = 600
+    r"""The maximum time allowed for downloading a Parquet chunk. Processing will stop if a chunk cannot be downloaded within the time specified."""
+
+    checkpointing: Optional[InputS3InventoryCheckpointing] = None
+
+    poll_timeout: Annotated[Optional[float], pydantic.Field(alias="pollTimeout")] = 10
+    r"""How long to wait for events before trying polling again. The lower the number the higher the AWS bill. The higher the number the longer it will take for the source to react to configuration changes and system restarts."""
+
+    checksum_suffix: Annotated[
+        Optional[str], pydantic.Field(alias="checksumSuffix")
+    ] = "checksum"
+    r"""Filename suffix of the manifest checksum file. If a filename matching this suffix is received in the queue, the matching manifest file will be downloaded and validated against its value. Defaults to \"checksum\" """
+
+    max_manifest_size_kb: Annotated[
+        Optional[int], pydantic.Field(alias="maxManifestSizeKB")
+    ] = 4096
+    r"""Maximum download size (KB) of each manifest or checksum file. Manifest files larger than this size will not be read. Defaults to 4096."""
+
+    validate_inventory_files: Annotated[
+        Optional[bool], pydantic.Field(alias="validateInventoryFiles")
+    ] = False
+    r"""If set to Yes, each inventory file in the manifest will be validated against its checksum. Defaults to false"""
+
+    description: Optional[str] = None
+
+    aws_api_key: Annotated[Optional[str], pydantic.Field(alias="awsApiKey")] = None
+
+    aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
+    r"""Select or create a stored secret that references your access key and secret key"""
+
+    tag_after_processing: Annotated[
+        Annotated[
+            Optional[InputS3InventoryTagAfterProcessing],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="tagAfterProcessing"),
+    ] = None
+
+    processed_tag_key: Annotated[
+        Optional[str], pydantic.Field(alias="processedTagKey")
+    ] = None
+    r"""The key for the S3 object tag applied after processing. This field accepts an expression for dynamic generation."""
+
+    processed_tag_value: Annotated[
+        Optional[str], pydantic.Field(alias="processedTagValue")
+    ] = None
+    r"""The value for the S3 object tag applied after processing. This field accepts an expression for dynamic generation."""