cribl-control-plane 0.0.13 (cribl_control_plane-0.0.13-py3-none-any.whl)
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/__init__.py +17 -0
- cribl_control_plane/_hooks/__init__.py +5 -0
- cribl_control_plane/_hooks/clientcredentials.py +211 -0
- cribl_control_plane/_hooks/registration.py +13 -0
- cribl_control_plane/_hooks/sdkhooks.py +81 -0
- cribl_control_plane/_hooks/types.py +112 -0
- cribl_control_plane/_version.py +15 -0
- cribl_control_plane/auth_sdk.py +184 -0
- cribl_control_plane/basesdk.py +358 -0
- cribl_control_plane/errors/__init__.py +60 -0
- cribl_control_plane/errors/apierror.py +38 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
- cribl_control_plane/errors/error.py +24 -0
- cribl_control_plane/errors/healthstatus_error.py +38 -0
- cribl_control_plane/errors/no_response_error.py +13 -0
- cribl_control_plane/errors/responsevalidationerror.py +25 -0
- cribl_control_plane/health.py +166 -0
- cribl_control_plane/httpclient.py +126 -0
- cribl_control_plane/models/__init__.py +7305 -0
- cribl_control_plane/models/addhectokenrequest.py +34 -0
- cribl_control_plane/models/authtoken.py +13 -0
- cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
- cribl_control_plane/models/createinputop.py +24 -0
- cribl_control_plane/models/createoutputop.py +24 -0
- cribl_control_plane/models/createoutputtestbyidop.py +46 -0
- cribl_control_plane/models/criblevent.py +14 -0
- cribl_control_plane/models/deleteinputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getinputbyidop.py +37 -0
- cribl_control_plane/models/getoutputbyidop.py +37 -0
- cribl_control_plane/models/getoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
- cribl_control_plane/models/healthstatus.py +36 -0
- cribl_control_plane/models/input.py +199 -0
- cribl_control_plane/models/inputappscope.py +448 -0
- cribl_control_plane/models/inputazureblob.py +308 -0
- cribl_control_plane/models/inputcollection.py +208 -0
- cribl_control_plane/models/inputconfluentcloud.py +585 -0
- cribl_control_plane/models/inputcribl.py +165 -0
- cribl_control_plane/models/inputcriblhttp.py +341 -0
- cribl_control_plane/models/inputcribllakehttp.py +342 -0
- cribl_control_plane/models/inputcriblmetrics.py +175 -0
- cribl_control_plane/models/inputcribltcp.py +299 -0
- cribl_control_plane/models/inputcrowdstrike.py +410 -0
- cribl_control_plane/models/inputdatadogagent.py +364 -0
- cribl_control_plane/models/inputdatagen.py +180 -0
- cribl_control_plane/models/inputedgeprometheus.py +551 -0
- cribl_control_plane/models/inputelastic.py +494 -0
- cribl_control_plane/models/inputeventhub.py +360 -0
- cribl_control_plane/models/inputexec.py +213 -0
- cribl_control_plane/models/inputfile.py +259 -0
- cribl_control_plane/models/inputfirehose.py +341 -0
- cribl_control_plane/models/inputgooglepubsub.py +247 -0
- cribl_control_plane/models/inputgrafana_union.py +1247 -0
- cribl_control_plane/models/inputhttp.py +403 -0
- cribl_control_plane/models/inputhttpraw.py +407 -0
- cribl_control_plane/models/inputjournalfiles.py +208 -0
- cribl_control_plane/models/inputkafka.py +581 -0
- cribl_control_plane/models/inputkinesis.py +363 -0
- cribl_control_plane/models/inputkubeevents.py +182 -0
- cribl_control_plane/models/inputkubelogs.py +256 -0
- cribl_control_plane/models/inputkubemetrics.py +233 -0
- cribl_control_plane/models/inputloki.py +468 -0
- cribl_control_plane/models/inputmetrics.py +290 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
- cribl_control_plane/models/inputmsk.py +654 -0
- cribl_control_plane/models/inputnetflow.py +224 -0
- cribl_control_plane/models/inputoffice365mgmt.py +384 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
- cribl_control_plane/models/inputoffice365service.py +377 -0
- cribl_control_plane/models/inputopentelemetry.py +516 -0
- cribl_control_plane/models/inputprometheus.py +464 -0
- cribl_control_plane/models/inputprometheusrw.py +470 -0
- cribl_control_plane/models/inputrawudp.py +207 -0
- cribl_control_plane/models/inputs3.py +416 -0
- cribl_control_plane/models/inputs3inventory.py +440 -0
- cribl_control_plane/models/inputsecuritylake.py +425 -0
- cribl_control_plane/models/inputsnmp.py +274 -0
- cribl_control_plane/models/inputsplunk.py +387 -0
- cribl_control_plane/models/inputsplunkhec.py +478 -0
- cribl_control_plane/models/inputsplunksearch.py +537 -0
- cribl_control_plane/models/inputsqs.py +320 -0
- cribl_control_plane/models/inputsyslog_union.py +759 -0
- cribl_control_plane/models/inputsystemmetrics.py +533 -0
- cribl_control_plane/models/inputsystemstate.py +417 -0
- cribl_control_plane/models/inputtcp.py +359 -0
- cribl_control_plane/models/inputtcpjson.py +334 -0
- cribl_control_plane/models/inputwef.py +498 -0
- cribl_control_plane/models/inputwindowsmetrics.py +457 -0
- cribl_control_plane/models/inputwineventlogs.py +222 -0
- cribl_control_plane/models/inputwiz.py +334 -0
- cribl_control_plane/models/inputzscalerhec.py +439 -0
- cribl_control_plane/models/listinputop.py +24 -0
- cribl_control_plane/models/listoutputop.py +24 -0
- cribl_control_plane/models/logininfo.py +16 -0
- cribl_control_plane/models/output.py +229 -0
- cribl_control_plane/models/outputazureblob.py +471 -0
- cribl_control_plane/models/outputazuredataexplorer.py +660 -0
- cribl_control_plane/models/outputazureeventhub.py +321 -0
- cribl_control_plane/models/outputazurelogs.py +386 -0
- cribl_control_plane/models/outputclickhouse.py +650 -0
- cribl_control_plane/models/outputcloudwatch.py +273 -0
- cribl_control_plane/models/outputconfluentcloud.py +591 -0
- cribl_control_plane/models/outputcriblhttp.py +494 -0
- cribl_control_plane/models/outputcribllake.py +396 -0
- cribl_control_plane/models/outputcribltcp.py +387 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
- cribl_control_plane/models/outputdatadog.py +472 -0
- cribl_control_plane/models/outputdataset.py +437 -0
- cribl_control_plane/models/outputdefault.py +55 -0
- cribl_control_plane/models/outputdevnull.py +50 -0
- cribl_control_plane/models/outputdiskspool.py +89 -0
- cribl_control_plane/models/outputdls3.py +560 -0
- cribl_control_plane/models/outputdynatracehttp.py +454 -0
- cribl_control_plane/models/outputdynatraceotlp.py +486 -0
- cribl_control_plane/models/outputelastic.py +494 -0
- cribl_control_plane/models/outputelasticcloud.py +407 -0
- cribl_control_plane/models/outputexabeam.py +297 -0
- cribl_control_plane/models/outputfilesystem.py +357 -0
- cribl_control_plane/models/outputgooglechronicle.py +486 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
- cribl_control_plane/models/outputgooglepubsub.py +274 -0
- cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
- cribl_control_plane/models/outputgraphite.py +225 -0
- cribl_control_plane/models/outputhoneycomb.py +369 -0
- cribl_control_plane/models/outputhumiohec.py +389 -0
- cribl_control_plane/models/outputinfluxdb.py +523 -0
- cribl_control_plane/models/outputkafka.py +581 -0
- cribl_control_plane/models/outputkinesis.py +312 -0
- cribl_control_plane/models/outputloki.py +425 -0
- cribl_control_plane/models/outputminio.py +512 -0
- cribl_control_plane/models/outputmsk.py +654 -0
- cribl_control_plane/models/outputnetflow.py +80 -0
- cribl_control_plane/models/outputnewrelic.py +424 -0
- cribl_control_plane/models/outputnewrelicevents.py +401 -0
- cribl_control_plane/models/outputopentelemetry.py +669 -0
- cribl_control_plane/models/outputprometheus.py +485 -0
- cribl_control_plane/models/outputring.py +121 -0
- cribl_control_plane/models/outputrouter.py +83 -0
- cribl_control_plane/models/outputs3.py +556 -0
- cribl_control_plane/models/outputsamplesresponse.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +505 -0
- cribl_control_plane/models/outputsentinel.py +488 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
- cribl_control_plane/models/outputservicenow.py +543 -0
- cribl_control_plane/models/outputsignalfx.py +369 -0
- cribl_control_plane/models/outputsnmp.py +80 -0
- cribl_control_plane/models/outputsns.py +274 -0
- cribl_control_plane/models/outputsplunk.py +383 -0
- cribl_control_plane/models/outputsplunkhec.py +434 -0
- cribl_control_plane/models/outputsplunklb.py +558 -0
- cribl_control_plane/models/outputsqs.py +328 -0
- cribl_control_plane/models/outputstatsd.py +224 -0
- cribl_control_plane/models/outputstatsdext.py +225 -0
- cribl_control_plane/models/outputsumologic.py +378 -0
- cribl_control_plane/models/outputsyslog.py +415 -0
- cribl_control_plane/models/outputtcpjson.py +413 -0
- cribl_control_plane/models/outputtestrequest.py +15 -0
- cribl_control_plane/models/outputtestresponse.py +29 -0
- cribl_control_plane/models/outputwavefront.py +369 -0
- cribl_control_plane/models/outputwebhook.py +689 -0
- cribl_control_plane/models/outputxsiam.py +415 -0
- cribl_control_plane/models/schemeclientoauth.py +24 -0
- cribl_control_plane/models/security.py +36 -0
- cribl_control_plane/models/updatehectokenrequest.py +31 -0
- cribl_control_plane/models/updateinputbyidop.py +44 -0
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
- cribl_control_plane/models/updateoutputbyidop.py +44 -0
- cribl_control_plane/outputs.py +1615 -0
- cribl_control_plane/py.typed +1 -0
- cribl_control_plane/sdk.py +164 -0
- cribl_control_plane/sdkconfiguration.py +36 -0
- cribl_control_plane/sources.py +1355 -0
- cribl_control_plane/types/__init__.py +21 -0
- cribl_control_plane/types/basemodel.py +39 -0
- cribl_control_plane/utils/__init__.py +187 -0
- cribl_control_plane/utils/annotations.py +55 -0
- cribl_control_plane/utils/datetimes.py +23 -0
- cribl_control_plane/utils/enums.py +74 -0
- cribl_control_plane/utils/eventstreaming.py +238 -0
- cribl_control_plane/utils/forms.py +223 -0
- cribl_control_plane/utils/headers.py +136 -0
- cribl_control_plane/utils/logger.py +27 -0
- cribl_control_plane/utils/metadata.py +118 -0
- cribl_control_plane/utils/queryparams.py +205 -0
- cribl_control_plane/utils/requestbodies.py +66 -0
- cribl_control_plane/utils/retries.py +217 -0
- cribl_control_plane/utils/security.py +207 -0
- cribl_control_plane/utils/serializers.py +249 -0
- cribl_control_plane/utils/unmarshal_json_response.py +24 -0
- cribl_control_plane/utils/url.py +155 -0
- cribl_control_plane/utils/values.py +137 -0
- cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
- cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
- cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/inputfile.py
@@ -0,0 +1,259 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputFileType(str, Enum, metaclass=utils.OpenEnumMeta):
+    FILE = "file"
+
+
+class InputFileConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputFileConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputFilePqMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputFileCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputFilePqTypedDict(TypedDict):
+    mode: NotRequired[InputFilePqMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputFileCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputFilePq(BaseModel):
+    mode: Annotated[
+        Optional[InputFilePqMode], PlainValidator(validate_open_enum(False))
+    ] = InputFilePqMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputFileCompression], PlainValidator(validate_open_enum(False))
+    ] = InputFileCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputFileMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Choose how to discover files to monitor"""
+
+    AUTO = "auto"
+    MANUAL = "manual"
+
+
+class InputFileMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputFileMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputFileTypedDict(TypedDict):
+    id: str
+    r"""Unique ID for this input"""
+    type: InputFileType
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+    send_to_routes: NotRequired[bool]
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputFileConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputFilePqTypedDict]
+    mode: NotRequired[InputFileMode]
+    r"""Choose how to discover files to monitor"""
+    interval: NotRequired[float]
+    r"""Time, in seconds, between scanning for files"""
+    filenames: NotRequired[List[str]]
+    r"""The full path of discovered files are matched against this wildcard list"""
+    tail_only: NotRequired[bool]
+    r"""Read only new entries at the end of all files discovered at next startup. @{product} will then read newly discovered files from the head. Disable this to resume reading all files from head."""
+    idle_timeout: NotRequired[float]
+    r"""Time, in seconds, before an idle file is closed"""
+    max_age_dur: NotRequired[str]
+    r"""The maximum age of files to monitor. Format examples: 60s, 4h, 3d, 1w. Age is relative to file modification time. Leave empty to apply no age filters."""
+    check_file_mod_time: NotRequired[bool]
+    r"""Skip files with modification times earlier than the maximum age duration"""
+    force_text: NotRequired[bool]
+    r"""Forces files containing binary data to be streamed as text"""
+    hash_len: NotRequired[float]
+    r"""Length of file header bytes to use in hash for unique file identification"""
+    metadata: NotRequired[List[InputFileMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    breaker_rulesets: NotRequired[List[str]]
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+    stale_channel_flush_ms: NotRequired[float]
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+    description: NotRequired[str]
+    path: NotRequired[str]
+    r"""Directory path to search for files. Environment variables will be resolved, e.g. $CRIBL_HOME/log/."""
+    depth: NotRequired[float]
+    r"""Set how many subdirectories deep to search. Use 0 to search only files in the given path, 1 to also look in its immediate subdirectories, etc. Leave it empty for unlimited depth."""
+    suppress_missing_path_errors: NotRequired[bool]
+    delete_files: NotRequired[bool]
+    r"""Delete files after they have been collected"""
+    include_unidentifiable_binary: NotRequired[bool]
+    r"""Stream binary files as Base64-encoded chunks."""
+
+
+class InputFile(BaseModel):
+    id: str
+    r"""Unique ID for this input"""
+
+    type: Annotated[InputFileType, PlainValidator(validate_open_enum(False))]
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputFileConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputFilePq] = None
+
+    mode: Annotated[
+        Optional[InputFileMode], PlainValidator(validate_open_enum(False))
+    ] = InputFileMode.AUTO
+    r"""Choose how to discover files to monitor"""
+
+    interval: Optional[float] = 10
+    r"""Time, in seconds, between scanning for files"""
+
+    filenames: Optional[List[str]] = None
+    r"""The full path of discovered files are matched against this wildcard list"""
+
+    tail_only: Annotated[Optional[bool], pydantic.Field(alias="tailOnly")] = False
+    r"""Read only new entries at the end of all files discovered at next startup. @{product} will then read newly discovered files from the head. Disable this to resume reading all files from head."""
+
+    idle_timeout: Annotated[Optional[float], pydantic.Field(alias="idleTimeout")] = 300
+    r"""Time, in seconds, before an idle file is closed"""
+
+    max_age_dur: Annotated[Optional[str], pydantic.Field(alias="maxAgeDur")] = None
+    r"""The maximum age of files to monitor. Format examples: 60s, 4h, 3d, 1w. Age is relative to file modification time. Leave empty to apply no age filters."""
+
+    check_file_mod_time: Annotated[
+        Optional[bool], pydantic.Field(alias="checkFileModTime")
+    ] = False
+    r"""Skip files with modification times earlier than the maximum age duration"""
+
+    force_text: Annotated[Optional[bool], pydantic.Field(alias="forceText")] = False
+    r"""Forces files containing binary data to be streamed as text"""
+
+    hash_len: Annotated[Optional[float], pydantic.Field(alias="hashLen")] = 256
+    r"""Length of file header bytes to use in hash for unique file identification"""
+
+    metadata: Optional[List[InputFileMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    breaker_rulesets: Annotated[
+        Optional[List[str]], pydantic.Field(alias="breakerRulesets")
+    ] = None
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+
+    stale_channel_flush_ms: Annotated[
+        Optional[float], pydantic.Field(alias="staleChannelFlushMs")
+    ] = 10000
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+
+    description: Optional[str] = None
+
+    path: Optional[str] = None
+    r"""Directory path to search for files. Environment variables will be resolved, e.g. $CRIBL_HOME/log/."""
+
+    depth: Optional[float] = None
+    r"""Set how many subdirectories deep to search. Use 0 to search only files in the given path, 1 to also look in its immediate subdirectories, etc. Leave it empty for unlimited depth."""
+
+    suppress_missing_path_errors: Annotated[
+        Optional[bool], pydantic.Field(alias="suppressMissingPathErrors")
+    ] = False
+
+    delete_files: Annotated[Optional[bool], pydantic.Field(alias="deleteFiles")] = False
+    r"""Delete files after they have been collected"""
+
+    include_unidentifiable_binary: Annotated[
+        Optional[bool], pydantic.Field(alias="includeUnidentifiableBinary")
+    ] = False
+    r"""Stream binary files as Base64-encoded chunks."""
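The generated file above follows the usual Speakeasy pattern: an open enum for the type discriminator, a TypedDict mirror for each pydantic model, snake_case fields with camelCase wire aliases, and defaults baked into the class. A minimal usage sketch for InputFile, assuming the generated models/__init__.py re-exports these classes and that the SDK's BaseModel keeps standard pydantic v2 validation and serialization:

from cribl_control_plane.models import InputFile, InputFilePq

# Validate a wire-format payload. Keys use the camelCase aliases declared
# via pydantic.Field above; "type" and "mode" go through the open-enum
# validator, which presumably tolerates values outside the enum.
src = InputFile.model_validate({
    "id": "in_file_logs",
    "type": "file",
    "mode": "manual",
    "filenames": ["$CRIBL_HOME/log/*.log"],
    "pq": {"mode": "smart", "maxBufferSize": 5000},
})
assert isinstance(src.pq, InputFilePq) and src.pq.max_buffer_size == 5000

# Serialize back out with wire-format keys, dropping unset optionals.
payload = src.model_dump(by_alias=True, exclude_none=True)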
cribl_control_plane/models/inputfirehose.py
@@ -0,0 +1,341 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import Any, List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputFirehoseType(str, Enum, metaclass=utils.OpenEnumMeta):
+    FIREHOSE = "firehose"
+
+
+class InputFirehoseConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputFirehoseConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputFirehoseMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputFirehoseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputFirehosePqTypedDict(TypedDict):
+    mode: NotRequired[InputFirehoseMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputFirehoseCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputFirehosePq(BaseModel):
+    mode: Annotated[
+        Optional[InputFirehoseMode], PlainValidator(validate_open_enum(False))
+    ] = InputFirehoseMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputFirehoseCompression], PlainValidator(validate_open_enum(False))
+    ] = InputFirehoseCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputFirehoseMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    TL_SV1 = "TLSv1"
+    TL_SV1_1 = "TLSv1.1"
+    TL_SV1_2 = "TLSv1.2"
+    TL_SV1_3 = "TLSv1.3"
+
+
+class InputFirehoseMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    TL_SV1 = "TLSv1"
+    TL_SV1_1 = "TLSv1.1"
+    TL_SV1_2 = "TLSv1.2"
+    TL_SV1_3 = "TLSv1.3"
+
+
+class InputFirehoseTLSSettingsServerSideTypedDict(TypedDict):
+    disabled: NotRequired[bool]
+    certificate_name: NotRequired[str]
+    r"""The name of the predefined certificate"""
+    priv_key_path: NotRequired[str]
+    r"""Path on server containing the private key to use. PEM format. Can reference $ENV_VARS."""
+    passphrase: NotRequired[str]
+    r"""Passphrase to use to decrypt private key"""
+    cert_path: NotRequired[str]
+    r"""Path on server containing certificates to use. PEM format. Can reference $ENV_VARS."""
+    ca_path: NotRequired[str]
+    r"""Path on server containing CA certificates to use. PEM format. Can reference $ENV_VARS."""
+    request_cert: NotRequired[bool]
+    r"""Require clients to present their certificates. Used to perform client authentication using SSL certs."""
+    reject_unauthorized: NotRequired[Any]
+    common_name_regex: NotRequired[Any]
+    min_version: NotRequired[InputFirehoseMinimumTLSVersion]
+    max_version: NotRequired[InputFirehoseMaximumTLSVersion]
+
+
+class InputFirehoseTLSSettingsServerSide(BaseModel):
+    disabled: Optional[bool] = True
+
+    certificate_name: Annotated[
+        Optional[str], pydantic.Field(alias="certificateName")
+    ] = None
+    r"""The name of the predefined certificate"""
+
+    priv_key_path: Annotated[Optional[str], pydantic.Field(alias="privKeyPath")] = None
+    r"""Path on server containing the private key to use. PEM format. Can reference $ENV_VARS."""
+
+    passphrase: Optional[str] = None
+    r"""Passphrase to use to decrypt private key"""
+
+    cert_path: Annotated[Optional[str], pydantic.Field(alias="certPath")] = None
+    r"""Path on server containing certificates to use. PEM format. Can reference $ENV_VARS."""
+
+    ca_path: Annotated[Optional[str], pydantic.Field(alias="caPath")] = None
+    r"""Path on server containing CA certificates to use. PEM format. Can reference $ENV_VARS."""
+
+    request_cert: Annotated[Optional[bool], pydantic.Field(alias="requestCert")] = False
+    r"""Require clients to present their certificates. Used to perform client authentication using SSL certs."""
+
+    reject_unauthorized: Annotated[
+        Optional[Any], pydantic.Field(alias="rejectUnauthorized")
+    ] = None
+
+    common_name_regex: Annotated[
+        Optional[Any], pydantic.Field(alias="commonNameRegex")
+    ] = None
+
+    min_version: Annotated[
+        Annotated[
+            Optional[InputFirehoseMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
+    ] = None
+
+    max_version: Annotated[
+        Annotated[
+            Optional[InputFirehoseMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
+    ] = None
+
+
+class InputFirehoseMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputFirehoseMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputFirehoseTypedDict(TypedDict):
+    type: InputFirehoseType
+    port: float
+    r"""Port to listen on"""
+    id: NotRequired[str]
+    r"""Unique ID for this input"""
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+    send_to_routes: NotRequired[bool]
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputFirehoseConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputFirehosePqTypedDict]
+    host: NotRequired[str]
+    r"""Address to bind on. Defaults to 0.0.0.0 (all addresses)."""
+    auth_tokens: NotRequired[List[str]]
+    r"""Shared secrets to be provided by any client (Authorization: <token>). If empty, unauthorized access is permitted."""
+    tls: NotRequired[InputFirehoseTLSSettingsServerSideTypedDict]
+    max_active_req: NotRequired[float]
+    r"""Maximum number of active requests allowed per Worker Process. Set to 0 for unlimited. Caution: Increasing the limit above the default value, or setting it to unlimited, may degrade performance and reduce throughput."""
+    max_requests_per_socket: NotRequired[int]
+    r"""Maximum number of requests per socket before @{product} instructs the client to close the connection. Default is 0 (unlimited)."""
+    enable_proxy_header: NotRequired[bool]
+    r"""Extract the client IP and port from PROXY protocol v1/v2. When enabled, the X-Forwarded-For header is ignored. Disable to use the X-Forwarded-For header for client IP extraction."""
+    capture_headers: NotRequired[bool]
+    r"""Add request headers to events, in the __headers field"""
+    activity_log_sample_rate: NotRequired[float]
+    r"""How often request activity is logged at the `info` level. A value of 1 would log every request, 10 every 10th request, etc."""
+    request_timeout: NotRequired[float]
+    r"""How long to wait for an incoming request to complete before aborting it. Use 0 to disable."""
+    socket_timeout: NotRequired[float]
+    r"""How long @{product} should wait before assuming that an inactive socket has timed out. To wait forever, set to 0."""
+    keep_alive_timeout: NotRequired[float]
+    r"""After the last response is sent, @{product} will wait this long for additional data before closing the socket connection. Minimum 1 second, maximum 600 seconds (10 minutes)."""
+    enable_health_check: NotRequired[bool]
+    r"""Expose the /cribl_health endpoint, which returns 200 OK when this Source is healthy"""
+    ip_allowlist_regex: NotRequired[str]
+    r"""Messages from matched IP addresses will be processed, unless also matched by the denylist"""
+    ip_denylist_regex: NotRequired[str]
+    r"""Messages from matched IP addresses will be ignored. This takes precedence over the allowlist."""
+    metadata: NotRequired[List[InputFirehoseMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    description: NotRequired[str]
+
+
+class InputFirehose(BaseModel):
+    type: Annotated[InputFirehoseType, PlainValidator(validate_open_enum(False))]
+
+    port: float
+    r"""Port to listen on"""
+
+    id: Optional[str] = None
+    r"""Unique ID for this input"""
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputFirehoseConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputFirehosePq] = None
+
+    host: Optional[str] = "0.0.0.0"
+    r"""Address to bind on. Defaults to 0.0.0.0 (all addresses)."""
+
+    auth_tokens: Annotated[Optional[List[str]], pydantic.Field(alias="authTokens")] = (
+        None
+    )
+    r"""Shared secrets to be provided by any client (Authorization: <token>). If empty, unauthorized access is permitted."""
+
+    tls: Optional[InputFirehoseTLSSettingsServerSide] = None
+
+    max_active_req: Annotated[Optional[float], pydantic.Field(alias="maxActiveReq")] = (
+        256
+    )
+    r"""Maximum number of active requests allowed per Worker Process. Set to 0 for unlimited. Caution: Increasing the limit above the default value, or setting it to unlimited, may degrade performance and reduce throughput."""
+
+    max_requests_per_socket: Annotated[
+        Optional[int], pydantic.Field(alias="maxRequestsPerSocket")
+    ] = 0
+    r"""Maximum number of requests per socket before @{product} instructs the client to close the connection. Default is 0 (unlimited)."""
+
+    enable_proxy_header: Annotated[
+        Optional[bool], pydantic.Field(alias="enableProxyHeader")
+    ] = False
+    r"""Extract the client IP and port from PROXY protocol v1/v2. When enabled, the X-Forwarded-For header is ignored. Disable to use the X-Forwarded-For header for client IP extraction."""
+
+    capture_headers: Annotated[
+        Optional[bool], pydantic.Field(alias="captureHeaders")
+    ] = False
+    r"""Add request headers to events, in the __headers field"""
+
+    activity_log_sample_rate: Annotated[
+        Optional[float], pydantic.Field(alias="activityLogSampleRate")
+    ] = 100
+    r"""How often request activity is logged at the `info` level. A value of 1 would log every request, 10 every 10th request, etc."""
+
+    request_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="requestTimeout")
+    ] = 0
+    r"""How long to wait for an incoming request to complete before aborting it. Use 0 to disable."""
+
+    socket_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="socketTimeout")
+    ] = 0
+    r"""How long @{product} should wait before assuming that an inactive socket has timed out. To wait forever, set to 0."""
+
+    keep_alive_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="keepAliveTimeout")
+    ] = 5
+    r"""After the last response is sent, @{product} will wait this long for additional data before closing the socket connection. Minimum 1 second, maximum 600 seconds (10 minutes)."""
+
+    enable_health_check: Annotated[
+        Optional[bool], pydantic.Field(alias="enableHealthCheck")
+    ] = False
+    r"""Expose the /cribl_health endpoint, which returns 200 OK when this Source is healthy"""
+
+    ip_allowlist_regex: Annotated[
+        Optional[str], pydantic.Field(alias="ipAllowlistRegex")
+    ] = "/.*/"
+    r"""Messages from matched IP addresses will be processed, unless also matched by the denylist"""
+
+    ip_denylist_regex: Annotated[
+        Optional[str], pydantic.Field(alias="ipDenylistRegex")
+    ] = "/^$/"
+    r"""Messages from matched IP addresses will be ignored. This takes precedence over the allowlist."""
+
+    metadata: Optional[List[InputFirehoseMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    description: Optional[str] = None
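The Firehose model adds HTTP-listener knobs (port, TLS, auth tokens) on top of the same connection/PQ scaffolding. A companion sketch, under the same assumptions as above (re-exported models, standard pydantic v2 behavior):

from cribl_control_plane.models import InputFirehose

# A TLS-enabled listener. Anything left unset falls back to the generated
# defaults visible above: host="0.0.0.0", keepAliveTimeout=5,
# maxActiveReq=256, ipAllowlistRegex="/.*/", and so on.
src = InputFirehose.model_validate({
    "id": "in_firehose",
    "type": "firehose",
    "port": 9443,
    "authTokens": ["example-shared-secret"],
    "tls": {
        "disabled": False,
        "certPath": "$CRIBL_HOME/certs/server.pem",
        "privKeyPath": "$CRIBL_HOME/certs/server.key",
        "minVersion": "TLSv1.2",
    },
})
assert src.host == "0.0.0.0" and src.keep_alive_timeout == 5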