cribl-control-plane 0.1.0b2__py3-none-any.whl → 0.2.0a1__py3-none-any.whl
This diff shows the changes between package versions as published to their public registries and is provided for informational purposes only.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_hooks/clientcredentials.py +91 -41
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/apierror.py +1 -1
- cribl_control_plane/errors/criblcontrolplaneerror.py +1 -1
- cribl_control_plane/errors/error.py +1 -1
- cribl_control_plane/errors/healthstatus_error.py +1 -1
- cribl_control_plane/errors/no_response_error.py +1 -1
- cribl_control_plane/errors/responsevalidationerror.py +1 -1
- cribl_control_plane/groups_sdk.py +4 -4
- cribl_control_plane/httpclient.py +0 -1
- cribl_control_plane/lakedatasets.py +12 -12
- cribl_control_plane/models/__init__.py +106 -42
- cribl_control_plane/models/appmode.py +14 -0
- cribl_control_plane/models/configgroup.py +2 -17
- cribl_control_plane/models/cribllakedatasetupdate.py +81 -0
- cribl_control_plane/models/gitinfo.py +14 -3
- cribl_control_plane/models/hbcriblinfo.py +3 -14
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/inputconfluentcloud.py +18 -0
- cribl_control_plane/models/inputkafka.py +17 -0
- cribl_control_plane/models/inputmsk.py +17 -0
- cribl_control_plane/models/inputsqs.py +8 -10
- cribl_control_plane/models/nodeprovidedinfo.py +0 -3
- cribl_control_plane/models/output.py +25 -25
- cribl_control_plane/models/outputchronicle.py +431 -0
- cribl_control_plane/models/outputconfluentcloud.py +18 -0
- cribl_control_plane/models/outputgooglechronicle.py +5 -4
- cribl_control_plane/models/outputgooglecloudlogging.py +9 -4
- cribl_control_plane/models/outputkafka.py +17 -0
- cribl_control_plane/models/outputmsk.py +17 -0
- cribl_control_plane/models/outputsqs.py +8 -10
- cribl_control_plane/models/routecloneconf.py +13 -0
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +9 -5
- {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.2.0a1.dist-info}/METADATA +1 -8
- {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.2.0a1.dist-info}/RECORD +37 -34
- cribl_control_plane/models/outputdatabricks.py +0 -282
- {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.2.0a1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputchronicle.py
@@ -0,0 +1,431 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputChronicleType(str, Enum):
+    CHRONICLE = "chronicle"
+
+
+class OutputChronicleAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    SERVICE_ACCOUNT = "serviceAccount"
+    SERVICE_ACCOUNT_SECRET = "serviceAccountSecret"
+
+
+class OutputChronicleResponseRetrySettingTypedDict(TypedDict):
+    http_status: float
+    r"""The HTTP response status code that will trigger retries"""
+    initial_backoff: NotRequired[float]
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+    backoff_rate: NotRequired[float]
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+    max_backoff: NotRequired[float]
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleResponseRetrySetting(BaseModel):
+    http_status: Annotated[float, pydantic.Field(alias="httpStatus")]
+    r"""The HTTP response status code that will trigger retries"""
+
+    initial_backoff: Annotated[
+        Optional[float], pydantic.Field(alias="initialBackoff")
+    ] = 1000
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+
+    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+
+    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleTimeoutRetrySettingsTypedDict(TypedDict):
+    timeout_retry: NotRequired[bool]
+    initial_backoff: NotRequired[float]
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+    backoff_rate: NotRequired[float]
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+    max_backoff: NotRequired[float]
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleTimeoutRetrySettings(BaseModel):
+    timeout_retry: Annotated[Optional[bool], pydantic.Field(alias="timeoutRetry")] = (
+        False
+    )
+
+    initial_backoff: Annotated[
+        Optional[float], pydantic.Field(alias="initialBackoff")
+    ] = 1000
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+
+    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+
+    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleExtraHTTPHeaderTypedDict(TypedDict):
+    value: str
+    name: NotRequired[str]
+
+
+class OutputChronicleExtraHTTPHeader(BaseModel):
+    value: str
+
+    name: Optional[str] = None
+
+
+class OutputChronicleFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+
+    PAYLOAD = "payload"
+    PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    NONE = "none"
+
+
+class OutputChronicleBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    BLOCK = "block"
+    DROP = "drop"
+    QUEUE = "queue"
+
+
+class OutputChronicleCustomLabelTypedDict(TypedDict):
+    key: str
+    value: str
+
+
+class OutputChronicleCustomLabel(BaseModel):
+    key: str
+
+    value: str
+
+
+class OutputChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class OutputChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    BLOCK = "block"
+    DROP = "drop"
+
+
+class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
+class OutputChroniclePqControlsTypedDict(TypedDict):
+    pass
+
+
+class OutputChroniclePqControls(BaseModel):
+    pass
+
+
+class OutputChronicleTypedDict(TypedDict):
+    type: OutputChronicleType
+    region: str
+    r"""Regional endpoint to send events to"""
+    log_type: str
+    r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
+    gcp_project_id: str
+    r"""The Google Cloud Platform (GCP) project ID to send events to"""
+    gcp_instance: str
+    r"""The Google Cloud Platform (GCP) instance to send events to. This is the Chronicle customer uuid."""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    api_version: NotRequired[str]
+    authentication_method: NotRequired[OutputChronicleAuthenticationMethod]
+    response_retry_settings: NotRequired[
+        List[OutputChronicleResponseRetrySettingTypedDict]
+    ]
+    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
+    timeout_retry_settings: NotRequired[OutputChronicleTimeoutRetrySettingsTypedDict]
+    response_honor_retry_after_header: NotRequired[bool]
+    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+    concurrency: NotRequired[float]
+    r"""Maximum number of ongoing requests before blocking"""
+    max_payload_size_kb: NotRequired[float]
+    r"""Maximum size, in KB, of the request body"""
+    max_payload_events: NotRequired[float]
+    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
+    compress: NotRequired[bool]
+    r"""Compress the payload body before sending"""
+    reject_unauthorized: NotRequired[bool]
+    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
+    Enabled by default. When this setting is also present in TLS Settings (Client Side),
+    that value will take precedence.
+    """
+    timeout_sec: NotRequired[float]
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+    flush_period_sec: NotRequired[float]
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
+    extra_http_headers: NotRequired[List[OutputChronicleExtraHTTPHeaderTypedDict]]
+    r"""Headers to add to all events"""
+    failed_request_logging_mode: NotRequired[OutputChronicleFailedRequestLoggingMode]
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+    safe_headers: NotRequired[List[str]]
+    r"""List of headers that are safe to log in plain text"""
+    use_round_robin_dns: NotRequired[bool]
+    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
+    on_backpressure: NotRequired[OutputChronicleBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    total_memory_limit_kb: NotRequired[float]
+    r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
+    ingestion_method: NotRequired[str]
+    namespace: NotRequired[str]
+    r"""User-configured environment namespace to identify the data domain the logs originated from. This namespace is used as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
+    log_text_field: NotRequired[str]
+    r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
+    custom_labels: NotRequired[List[OutputChronicleCustomLabelTypedDict]]
+    r"""Custom labels to be added to every event"""
+    description: NotRequired[str]
+    service_account_credentials: NotRequired[str]
+    r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
+    service_account_credentials_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    pq_max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+    pq_max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    pq_path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+    pq_compress: NotRequired[OutputChronicleCompression]
+    r"""Codec to use to compress the persisted data"""
+    pq_on_backpressure: NotRequired[OutputChronicleQueueFullBehavior]
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputChronicleMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_controls: NotRequired[OutputChroniclePqControlsTypedDict]
+
+
+class OutputChronicle(BaseModel):
+    type: OutputChronicleType
+
+    region: str
+    r"""Regional endpoint to send events to"""
+
+    log_type: Annotated[str, pydantic.Field(alias="logType")]
+    r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
+
+    gcp_project_id: Annotated[str, pydantic.Field(alias="gcpProjectId")]
+    r"""The Google Cloud Platform (GCP) project ID to send events to"""
+
+    gcp_instance: Annotated[str, pydantic.Field(alias="gcpInstance")]
+    r"""The Google Cloud Platform (GCP) instance to send events to. This is the Chronicle customer uuid."""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    api_version: Annotated[Optional[str], pydantic.Field(alias="apiVersion")] = (
+        "v1alpha"
+    )
+
+    authentication_method: Annotated[
+        Annotated[
+            Optional[OutputChronicleAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authenticationMethod"),
+    ] = OutputChronicleAuthenticationMethod.SERVICE_ACCOUNT
+
+    response_retry_settings: Annotated[
+        Optional[List[OutputChronicleResponseRetrySetting]],
+        pydantic.Field(alias="responseRetrySettings"),
+    ] = None
+    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
+
+    timeout_retry_settings: Annotated[
+        Optional[OutputChronicleTimeoutRetrySettings],
+        pydantic.Field(alias="timeoutRetrySettings"),
+    ] = None
+
+    response_honor_retry_after_header: Annotated[
+        Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
+    ] = True
+    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+
+    concurrency: Optional[float] = 5
+    r"""Maximum number of ongoing requests before blocking"""
+
+    max_payload_size_kb: Annotated[
+        Optional[float], pydantic.Field(alias="maxPayloadSizeKB")
+    ] = 1024
+    r"""Maximum size, in KB, of the request body"""
+
+    max_payload_events: Annotated[
+        Optional[float], pydantic.Field(alias="maxPayloadEvents")
+    ] = 0
+    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
+
+    compress: Optional[bool] = True
+    r"""Compress the payload body before sending"""
+
+    reject_unauthorized: Annotated[
+        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+    ] = True
+    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
+    Enabled by default. When this setting is also present in TLS Settings (Client Side),
+    that value will take precedence.
+    """
+
+    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 90
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+
+    flush_period_sec: Annotated[
+        Optional[float], pydantic.Field(alias="flushPeriodSec")
+    ] = 1
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
+
+    extra_http_headers: Annotated[
+        Optional[List[OutputChronicleExtraHTTPHeader]],
+        pydantic.Field(alias="extraHttpHeaders"),
+    ] = None
+    r"""Headers to add to all events"""
+
+    failed_request_logging_mode: Annotated[
+        Annotated[
+            Optional[OutputChronicleFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="failedRequestLoggingMode"),
+    ] = OutputChronicleFailedRequestLoggingMode.NONE
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+
+    safe_headers: Annotated[
+        Optional[List[str]], pydantic.Field(alias="safeHeaders")
+    ] = None
+    r"""List of headers that are safe to log in plain text"""
+
+    use_round_robin_dns: Annotated[
+        Optional[bool], pydantic.Field(alias="useRoundRobinDns")
+    ] = False
+    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputChronicleBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputChronicleBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    total_memory_limit_kb: Annotated[
+        Optional[float], pydantic.Field(alias="totalMemoryLimitKB")
+    ] = None
+    r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
+
+    ingestion_method: Annotated[
+        Optional[str], pydantic.Field(alias="ingestionMethod")
+    ] = "ImportLogs"
+
+    namespace: Optional[str] = None
+    r"""User-configured environment namespace to identify the data domain the logs originated from. This namespace is used as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
+
+    log_text_field: Annotated[Optional[str], pydantic.Field(alias="logTextField")] = (
+        None
+    )
+    r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
+
+    custom_labels: Annotated[
+        Optional[List[OutputChronicleCustomLabel]], pydantic.Field(alias="customLabels")
+    ] = None
+    r"""Custom labels to be added to every event"""
+
+    description: Optional[str] = None
+
+    service_account_credentials: Annotated[
+        Optional[str], pydantic.Field(alias="serviceAccountCredentials")
+    ] = None
+    r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
+
+    service_account_credentials_secret: Annotated[
+        Optional[str], pydantic.Field(alias="serviceAccountCredentialsSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
+
+    pq_max_file_size: Annotated[
+        Optional[str], pydantic.Field(alias="pqMaxFileSize")
+    ] = "1 MB"
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+
+    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
+        "$CRIBL_HOME/state/queues"
+    )
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+
+    pq_compress: Annotated[
+        Annotated[
+            Optional[OutputChronicleCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
+    ] = OutputChronicleCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+    pq_on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputChronicleQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
+    ] = OutputChronicleQueueFullBehavior.BLOCK
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputChronicleMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_controls: Annotated[
+        Optional[OutputChroniclePqControls], pydantic.Field(alias="pqControls")
+    ] = None
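For orientation, here is a minimal usage sketch (not part of the diff) of the new OutputChronicle model. It assumes the class is re-exported from cribl_control_plane.models, as the accompanying models/__init__.py changes suggest, and every identifier value below is a placeholder.

# Sketch only: constructing the new Chronicle destination model from its
# wire-format (camelCase) keys. All values are placeholders.
from cribl_control_plane.models import OutputChronicle

dest = OutputChronicle.model_validate(
    {
        "type": "chronicle",
        "region": "us",  # placeholder regional endpoint
        "logType": "CRIBL_LOGS",  # placeholder default log type
        "gcpProjectId": "my-gcp-project",  # placeholder GCP project ID
        "gcpInstance": "00000000-0000-0000-0000-000000000000",  # placeholder customer UUID
    }
)

# Optional fields fall back to the generated defaults:
assert dest.api_version == "v1alpha"
assert dest.ingestion_method == "ImportLogs"

# Serializing by alias reproduces the camelCase field names.
payload = dest.model_dump(by_alias=True, exclude_none=True)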
cribl_control_plane/models/outputconfluentcloud.py
@@ -123,6 +123,13 @@ class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     LZ4 = "lz4"
 
 
+class OutputConfluentCloudSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class OutputConfluentCloudAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -233,6 +240,8 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[OutputConfluentCloudSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -258,6 +267,15 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Annotated[
+            Optional[OutputConfluentCloudSchemaType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="schemaType"),
+    ] = OutputConfluentCloudSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
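A minimal sketch of how the new schemaType field behaves on the Confluent Cloud schema-registry settings model. It assumes the classes are re-exported from cribl_control_plane.models and that the model's remaining fields are optional, as the NotRequired hints in this hunk suggest.

# Sketch only: exercising the new schemaType field.
from cribl_control_plane.models import (
    OutputConfluentCloudKafkaSchemaRegistryAuthentication,
    OutputConfluentCloudSchemaType,
)

# Wire name is "schemaType"; omitting it keeps the generated default, Avro.
registry = OutputConfluentCloudKafkaSchemaRegistryAuthentication.model_validate(
    {"schemaType": "json"}
)
assert registry.schema_type == OutputConfluentCloudSchemaType.JSON
assert registry.schema_registry_url == "http://localhost:8081"  # default unchanged by this release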
cribl_control_plane/models/outputgooglechronicle.py
@@ -130,12 +130,12 @@ class ExtraLogType(BaseModel):
     description: Optional[str] = None
 
 
-class
+class OutputGoogleChronicleCustomLabelTypedDict(TypedDict):
     key: str
     value: str
 
 
-class
+class OutputGoogleChronicleCustomLabel(BaseModel):
     key: str
 
     value: str
@@ -239,7 +239,7 @@ class OutputGoogleChronicleTypedDict(TypedDict):
     r"""A unique identifier (UUID) for your Google SecOps instance. This is provided by your Google representative and is required for API V2 authentication."""
     namespace: NotRequired[str]
     r"""User-configured environment namespace to identify the data domain the logs originated from. Use namespace as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
-    custom_labels: NotRequired[List[
+    custom_labels: NotRequired[List[OutputGoogleChronicleCustomLabelTypedDict]]
     r"""Custom labels to be added to every batch"""
     api_key: NotRequired[str]
     r"""Organization's API key in Google SecOps"""
@@ -417,7 +417,8 @@ class OutputGoogleChronicle(BaseModel):
     r"""User-configured environment namespace to identify the data domain the logs originated from. Use namespace as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
 
     custom_labels: Annotated[
-        Optional[List[
+        Optional[List[OutputGoogleChronicleCustomLabel]],
+        pydantic.Field(alias="customLabels"),
     ] = None
     r"""Custom labels to be added to every batch"""
 
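The hunks above rename the custom-label helper classes so they carry the OutputGoogleChronicle prefix (the previous names are truncated in this diff view, shown only as "class"). A minimal sketch of building labels with the renamed classes, assuming they are re-exported from cribl_control_plane.models:

# Sketch only: the renamed custom-label helpers.
from cribl_control_plane.models import OutputGoogleChronicleCustomLabel

labels = [
    OutputGoogleChronicleCustomLabel(key="env", value="prod"),
    OutputGoogleChronicleCustomLabel(key="team", value="secops"),
]
# These populate OutputGoogleChronicle.custom_labels (wire name "customLabels").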
cribl_control_plane/models/outputgooglecloudlogging.py
@@ -115,9 +115,9 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
     type: OutputGoogleCloudLoggingType
     log_location_type: LogLocationType
     log_name_expression: str
-    r"""JavaScript expression to compute the value of the log name."""
+    r"""JavaScript expression to compute the value of the log name. If Validate and correct log name is enabled, invalid characters (characters other than alphanumerics, forward-slashes, underscores, hyphens, and periods) will be replaced with an underscore."""
     log_location_expression: str
-    r"""JavaScript expression to compute the value of the folder ID with which log entries should be associated."""
+    r"""JavaScript expression to compute the value of the folder ID with which log entries should be associated. If Validate and correct log name is enabled, invalid characters (characters other than alphanumerics, forward-slashes, underscores, hyphens, and periods) will be replaced with an underscore."""
     id: NotRequired[str]
     r"""Unique ID for this output"""
     pipeline: NotRequired[str]
@@ -128,6 +128,7 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
     streamtags: NotRequired[List[str]]
     r"""Tags for filtering and grouping in @{product}"""
+    sanitize_log_names: NotRequired[bool]
     payload_format: NotRequired[PayloadFormat]
     r"""Format to use when sending payload. Defaults to Text."""
     log_labels: NotRequired[List[LogLabelTypedDict]]
@@ -247,12 +248,12 @@ class OutputGoogleCloudLogging(BaseModel):
     ]
 
     log_name_expression: Annotated[str, pydantic.Field(alias="logNameExpression")]
-    r"""JavaScript expression to compute the value of the log name."""
+    r"""JavaScript expression to compute the value of the log name. If Validate and correct log name is enabled, invalid characters (characters other than alphanumerics, forward-slashes, underscores, hyphens, and periods) will be replaced with an underscore."""
 
     log_location_expression: Annotated[
         str, pydantic.Field(alias="logLocationExpression")
     ]
-    r"""JavaScript expression to compute the value of the folder ID with which log entries should be associated."""
+    r"""JavaScript expression to compute the value of the folder ID with which log entries should be associated. If Validate and correct log name is enabled, invalid characters (characters other than alphanumerics, forward-slashes, underscores, hyphens, and periods) will be replaced with an underscore."""
 
     id: Optional[str] = None
     r"""Unique ID for this output"""
@@ -271,6 +272,10 @@ class OutputGoogleCloudLogging(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
+    sanitize_log_names: Annotated[
+        Optional[bool], pydantic.Field(alias="sanitizeLogNames")
+    ] = False
+
     payload_format: Annotated[
         Annotated[Optional[PayloadFormat], PlainValidator(validate_open_enum(False))],
         pydantic.Field(alias="payloadFormat"),
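The new sanitize_log_names flag (wire name sanitizeLogNames, default False) pairs with the expanded docstrings above. As an illustration only of the documented replacement rule, which is applied by Cribl Stream rather than by this SDK, the behavior described reads roughly as:

# Illustration only, not SDK code: when "Validate and correct log name"
# (sanitizeLogNames) is enabled, characters other than alphanumerics,
# forward-slashes, underscores, hyphens, and periods are replaced with "_".
import re

def sanitize_log_name(name: str) -> str:
    return re.sub(r"[^A-Za-z0-9/_.\-]", "_", name)

assert sanitize_log_name("my log:name") == "my_log_name"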
cribl_control_plane/models/outputkafka.py
@@ -40,6 +40,13 @@ class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     LZ4 = "lz4"
 
 
+class OutputKafkaSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class OutputKafkaAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -150,6 +157,8 @@ class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[OutputKafkaSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -173,6 +182,14 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Annotated[
+            Optional[OutputKafkaSchemaType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="schemaType"),
+    ] = OutputKafkaSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
    ] = 30000
cribl_control_plane/models/outputmsk.py
@@ -40,6 +40,13 @@ class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     LZ4 = "lz4"
 
 
+class OutputMskSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class OutputMskAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -150,6 +157,8 @@ class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[OutputMskSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -173,6 +182,14 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Annotated[
+            Optional[OutputMskSchemaType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="schemaType"),
+    ] = OutputMskSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
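The Kafka and MSK hunks mirror the Confluent Cloud change: each Destination gains its own schema-type open enum with the same wire values and the same avro default. A small sketch, assuming the enums are re-exported from cribl_control_plane.models:

# Sketch only: the three schema-type enums added in 0.2.0a1 share the same
# wire values.
from cribl_control_plane.models import (
    OutputConfluentCloudSchemaType,
    OutputKafkaSchemaType,
    OutputMskSchemaType,
)

for enum_cls in (OutputConfluentCloudSchemaType, OutputKafkaSchemaType, OutputMskSchemaType):
    assert {member.value for member in enum_cls} == {"avro", "json"}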