cribl-control-plane 0.2.1rc4__py3-none-any.whl → 0.2.1rc6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/groups_sdk.py +2 -2
- cribl_control_plane/models/__init__.py +3 -99
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -2
- cribl_control_plane/models/input.py +14 -14
- cribl_control_plane/models/inputappscope.py +16 -20
- cribl_control_plane/models/inputconfluentcloud.py +0 -110
- cribl_control_plane/models/inputcriblhttp.py +16 -20
- cribl_control_plane/models/inputcribllakehttp.py +16 -20
- cribl_control_plane/models/inputcribltcp.py +16 -20
- cribl_control_plane/models/inputdatadogagent.py +16 -20
- cribl_control_plane/models/inputedgeprometheus.py +36 -44
- cribl_control_plane/models/inputelastic.py +27 -44
- cribl_control_plane/models/inputeventhub.py +0 -118
- cribl_control_plane/models/inputfirehose.py +16 -20
- cribl_control_plane/models/inputgrafana.py +31 -39
- cribl_control_plane/models/inputhttp.py +16 -20
- cribl_control_plane/models/inputhttpraw.py +16 -20
- cribl_control_plane/models/inputkafka.py +0 -108
- cribl_control_plane/models/inputloki.py +16 -20
- cribl_control_plane/models/inputmetrics.py +16 -20
- cribl_control_plane/models/inputmodeldriventelemetry.py +16 -20
- cribl_control_plane/models/inputopentelemetry.py +15 -19
- cribl_control_plane/models/inputprometheus.py +36 -44
- cribl_control_plane/models/inputprometheusrw.py +16 -20
- cribl_control_plane/models/inputsplunk.py +16 -20
- cribl_control_plane/models/inputsplunkhec.py +15 -19
- cribl_control_plane/models/inputsyslog.py +31 -39
- cribl_control_plane/models/inputsystemmetrics.py +10 -20
- cribl_control_plane/models/inputtcp.py +16 -30
- cribl_control_plane/models/inputtcpjson.py +16 -20
- cribl_control_plane/models/inputwindowsmetrics.py +10 -20
- cribl_control_plane/models/inputwineventlogs.py +0 -14
- cribl_control_plane/models/inputwizwebhook.py +16 -20
- cribl_control_plane/models/inputzscalerhec.py +15 -19
- cribl_control_plane/models/output.py +22 -22
- cribl_control_plane/models/outputazureblob.py +0 -7
- cribl_control_plane/models/outputazuredataexplorer.py +93 -283
- cribl_control_plane/models/outputazureeventhub.py +21 -169
- cribl_control_plane/models/outputazurelogs.py +21 -49
- cribl_control_plane/models/outputchronicle.py +21 -49
- cribl_control_plane/models/outputclickhouse.py +21 -49
- cribl_control_plane/models/outputcloudwatch.py +21 -49
- cribl_control_plane/models/outputconfluentcloud.py +22 -167
- cribl_control_plane/models/outputcriblhttp.py +21 -49
- cribl_control_plane/models/outputcribltcp.py +21 -49
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +22 -50
- cribl_control_plane/models/outputdatabricks.py +0 -7
- cribl_control_plane/models/outputdatadog.py +21 -49
- cribl_control_plane/models/outputdataset.py +21 -49
- cribl_control_plane/models/outputdls3.py +0 -7
- cribl_control_plane/models/outputdynatracehttp.py +21 -49
- cribl_control_plane/models/outputdynatraceotlp.py +21 -49
- cribl_control_plane/models/outputelastic.py +21 -74
- cribl_control_plane/models/outputelasticcloud.py +21 -74
- cribl_control_plane/models/outputfilesystem.py +0 -7
- cribl_control_plane/models/outputgooglechronicle.py +22 -65
- cribl_control_plane/models/outputgooglecloudlogging.py +22 -50
- cribl_control_plane/models/outputgooglecloudstorage.py +0 -7
- cribl_control_plane/models/outputgooglepubsub.py +21 -49
- cribl_control_plane/models/outputgrafanacloud.py +42 -98
- cribl_control_plane/models/outputgraphite.py +21 -49
- cribl_control_plane/models/outputhoneycomb.py +21 -49
- cribl_control_plane/models/outputhumiohec.py +21 -49
- cribl_control_plane/models/outputinfluxdb.py +21 -49
- cribl_control_plane/models/outputkafka.py +19 -162
- cribl_control_plane/models/outputkinesis.py +21 -56
- cribl_control_plane/models/outputloki.py +19 -47
- cribl_control_plane/models/outputminio.py +0 -7
- cribl_control_plane/models/outputmsk.py +19 -54
- cribl_control_plane/models/outputnewrelic.py +21 -49
- cribl_control_plane/models/outputnewrelicevents.py +22 -50
- cribl_control_plane/models/outputopentelemetry.py +21 -49
- cribl_control_plane/models/outputprometheus.py +21 -49
- cribl_control_plane/models/outputs3.py +0 -7
- cribl_control_plane/models/outputsentinel.py +21 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +22 -50
- cribl_control_plane/models/outputservicenow.py +21 -49
- cribl_control_plane/models/outputsignalfx.py +21 -49
- cribl_control_plane/models/outputsns.py +19 -47
- cribl_control_plane/models/outputsplunk.py +21 -49
- cribl_control_plane/models/outputsplunkhec.py +21 -49
- cribl_control_plane/models/outputsplunklb.py +21 -49
- cribl_control_plane/models/outputsqs.py +19 -47
- cribl_control_plane/models/outputstatsd.py +21 -49
- cribl_control_plane/models/outputstatsdext.py +21 -49
- cribl_control_plane/models/outputsumologic.py +21 -49
- cribl_control_plane/models/outputsyslog.py +99 -129
- cribl_control_plane/models/outputtcpjson.py +21 -49
- cribl_control_plane/models/outputwavefront.py +21 -49
- cribl_control_plane/models/outputwebhook.py +21 -49
- cribl_control_plane/models/outputxsiam.py +19 -47
- cribl_control_plane/models/runnablejobcollection.py +8 -12
- cribl_control_plane/models/runnablejobexecutor.py +8 -12
- cribl_control_plane/models/runnablejobscheduledsearch.py +8 -12
- {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc6.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc6.dist-info}/RECORD +98 -98
- {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc6.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputazuredataexplorer.py
@@ -22,9 +22,7 @@ class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
     STREAMING = "streaming"
 
 
-class OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -57,68 +55,6 @@ class OutputAzureDataExplorerCertificate(BaseModel):
     r"""The certificate you registered as credentials for your app in the Azure portal"""
 
 
-class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Format of the output data"""
-
-    # JSON
-    JSON = "json"
-    # Raw
-    RAW = "raw"
-    # Parquet
-    PARQUET = "parquet"
-
-
-class OutputAzureDataExplorerCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-
-    NONE = "none"
-    GZIP = "gzip"
-
-
-class OutputAzureDataExplorerCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Compression level to apply before moving files to final destination"""
-
-    # Best Speed
-    BEST_SPEED = "best_speed"
-    # Normal
-    NORMAL = "normal"
-    # Best Compression
-    BEST_COMPRESSION = "best_compression"
-
-
-class OutputAzureDataExplorerParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Determines which data types are supported and how they are represented"""
-
-    # 1.0
-    PARQUET_1_0 = "PARQUET_1_0"
-    # 2.4
-    PARQUET_2_4 = "PARQUET_2_4"
-    # 2.6
-    PARQUET_2_6 = "PARQUET_2_6"
-
-
-class OutputAzureDataExplorerDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
-
-    # V1
-    DATA_PAGE_V1 = "DATA_PAGE_V1"
-    # V2
-    DATA_PAGE_V2 = "DATA_PAGE_V2"
-
-
-class OutputAzureDataExplorerKeyValueMetadatumTypedDict(TypedDict):
-    value: str
-    key: NotRequired[str]
-
-
-class OutputAzureDataExplorerKeyValueMetadatum(BaseModel):
-    value: str
-
-    key: Optional[str] = ""
-
-
 class OutputAzureDataExplorerBackpressureBehavior(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
@@ -132,6 +68,17 @@ class OutputAzureDataExplorerBackpressureBehavior(
     QUEUE = "queue"
 
 
+class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Format of the output data"""
+
+    # JSON
+    JSON = "json"
+    # Raw
+    RAW = "raw"
+    # Parquet
+    PARQUET = "parquet"
+
+
 class OutputAzureDataExplorerDiskSpaceProtection(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
@@ -258,15 +205,13 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+class OutputAzureDataExplorerCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
+    r"""Data compression format to apply to HTTP content before it is delivered"""
 
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
+    NONE = "none"
+    GZIP = "gzip"
 
 
 class OutputAzureDataExplorerPqCompressCompression(
@@ -289,6 +234,17 @@ class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
+class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputAzureDataExplorerPqControlsTypedDict(TypedDict):
     pass
 
@@ -324,9 +280,7 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     validate_database_settings: NotRequired[bool]
     r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
     ingest_mode: NotRequired[IngestionMode]
-    oauth_endpoint: NotRequired[
-        OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint
-    ]
+    oauth_endpoint: NotRequired[MicrosoftEntraIDAuthenticationEndpoint]
     r"""Endpoint used to acquire authentication tokens from Azure"""
     oauth_type: NotRequired[OutputAzureDataExplorerAuthenticationMethod]
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -336,56 +290,14 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     text_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
     certificate: NotRequired[OutputAzureDataExplorerCertificateTypedDict]
-    format_: NotRequired[OutputAzureDataExplorerDataFormat]
-    r"""Format of the output data"""
-    compress: NotRequired[OutputAzureDataExplorerCompressCompression]
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-    compression_level: NotRequired[OutputAzureDataExplorerCompressionLevel]
-    r"""Compression level to apply before moving files to final destination"""
-    automatic_schema: NotRequired[bool]
-    r"""Automatically calculate the schema based on the events of each Parquet file generated"""
-    parquet_schema: NotRequired[str]
-    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
-    parquet_version: NotRequired[OutputAzureDataExplorerParquetVersion]
-    r"""Determines which data types are supported and how they are represented"""
-    parquet_data_page_version: NotRequired[OutputAzureDataExplorerDataPageVersion]
-    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
-    parquet_row_group_length: NotRequired[float]
-    r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
-    parquet_page_size: NotRequired[str]
-    r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
-    should_log_invalid_rows: NotRequired[bool]
-    r"""Log up to 3 rows that @{product} skips due to data mismatch"""
-    key_value_metadata: NotRequired[
-        List[OutputAzureDataExplorerKeyValueMetadatumTypedDict]
-    ]
-    r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
-    enable_statistics: NotRequired[bool]
-    r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
-    enable_write_page_index: NotRequired[bool]
-    r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
-    enable_page_checksum: NotRequired[bool]
-    r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
-    remove_empty_dirs: NotRequired[bool]
-    r"""Remove empty staging directories after moving files"""
-    empty_dir_cleanup_sec: NotRequired[float]
-    r"""How frequently, in seconds, to clean up empty directories"""
-    deadletter_enabled: NotRequired[bool]
-    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
-    deadletter_path: NotRequired[str]
-    r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
-    max_retry_num: NotRequired[float]
-    r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
-    is_mapping_obj: NotRequired[bool]
-    r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
-    mapping_obj: NotRequired[str]
-    r"""Enter a JSON object that defines your desired data mapping"""
-    mapping_ref: NotRequired[str]
-    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
     ingest_url: NotRequired[str]
     r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
     on_backpressure: NotRequired[OutputAzureDataExplorerBackpressureBehavior]
     r"""How to handle events when all receivers are exerting backpressure"""
+    is_mapping_obj: NotRequired[bool]
+    r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
+    format_: NotRequired[OutputAzureDataExplorerDataFormat]
+    r"""Format of the output data"""
     stage_path: NotRequired[str]
     r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant and stable storage."""
     file_name_suffix: NotRequired[str]
@@ -404,6 +316,10 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
     add_id_to_stage_path: NotRequired[bool]
    r"""Add the Output ID value to staging location"""
+    remove_empty_dirs: NotRequired[bool]
+    r"""Remove empty staging directories after moving files"""
+    deadletter_enabled: NotRequired[bool]
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     timeout_sec: NotRequired[float]
     r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
     flush_immediately: NotRequired[bool]
@@ -429,6 +345,10 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     ]
     response_honor_retry_after_header: NotRequired[bool]
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+    compress: NotRequired[OutputAzureDataExplorerCompressCompression]
+    r"""Data compression format to apply to HTTP content before it is delivered"""
+    mapping_ref: NotRequired[str]
+    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
     concurrency: NotRequired[float]
     r"""Maximum number of ongoing requests before blocking"""
     max_payload_size_kb: NotRequired[float]
@@ -446,16 +366,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
     keep_alive: NotRequired[bool]
     r"""Disable to close the connection immediately after sending the outgoing request"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputAzureDataExplorerMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -466,7 +376,11 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputAzureDataExplorerQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputAzureDataExplorerMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputAzureDataExplorerPqControlsTypedDict]
+    empty_dir_cleanup_sec: NotRequired[float]
+    r"""How frequently, in seconds, to clean up empty directories"""
 
 
 class OutputAzureDataExplorer(BaseModel):
@@ -519,11 +433,11 @@ class OutputAzureDataExplorer(BaseModel):
 
     oauth_endpoint: Annotated[
         Annotated[
-            Optional[OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint],
+            Optional[MicrosoftEntraIDAuthenticationEndpoint],
             PlainValidator(validate_open_enum(False)),
         ],
         pydantic.Field(alias="oauthEndpoint"),
-    ] = OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
+    ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     oauth_type: Annotated[
@@ -545,139 +459,31 @@ class OutputAzureDataExplorer(BaseModel):
 
     certificate: Optional[OutputAzureDataExplorerCertificate] = None
 
-    format_: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
-    ] = OutputAzureDataExplorerDataFormat.JSON
-    r"""Format of the output data"""
-
-    compress: Annotated[
-        Optional[OutputAzureDataExplorerCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureDataExplorerCompressCompression.GZIP
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-
-    compression_level: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerCompressionLevel],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="compressionLevel"),
-    ] = OutputAzureDataExplorerCompressionLevel.BEST_SPEED
-    r"""Compression level to apply before moving files to final destination"""
-
-    automatic_schema: Annotated[
-        Optional[bool], pydantic.Field(alias="automaticSchema")
-    ] = False
-    r"""Automatically calculate the schema based on the events of each Parquet file generated"""
-
-    parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
-        None
-    )
-    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
-
-    parquet_version: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerParquetVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="parquetVersion"),
-    ] = OutputAzureDataExplorerParquetVersion.PARQUET_2_6
-    r"""Determines which data types are supported and how they are represented"""
+    ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
+    r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
 
-    parquet_data_page_version: Annotated[
+    on_backpressure: Annotated[
         Annotated[
-            Optional[OutputAzureDataExplorerDataPageVersion],
+            Optional[OutputAzureDataExplorerBackpressureBehavior],
             PlainValidator(validate_open_enum(False)),
         ],
-        pydantic.Field(alias="parquetDataPageVersion"),
-    ] = OutputAzureDataExplorerDataPageVersion.DATA_PAGE_V1
-    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
-
-    parquet_row_group_length: Annotated[
-        Optional[float], pydantic.Field(alias="parquetRowGroupLength")
-    ] = 10000
-    r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
-
-    parquet_page_size: Annotated[
-        Optional[str], pydantic.Field(alias="parquetPageSize")
-    ] = "1MB"
-    r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
-
-    should_log_invalid_rows: Annotated[
-        Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
-    ] = None
-    r"""Log up to 3 rows that @{product} skips due to data mismatch"""
-
-    key_value_metadata: Annotated[
-        Optional[List[OutputAzureDataExplorerKeyValueMetadatum]],
-        pydantic.Field(alias="keyValueMetadata"),
-    ] = None
-    r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
-
-    enable_statistics: Annotated[
-        Optional[bool], pydantic.Field(alias="enableStatistics")
-    ] = True
-    r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
-
-    enable_write_page_index: Annotated[
-        Optional[bool], pydantic.Field(alias="enableWritePageIndex")
-    ] = True
-    r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
-
-    enable_page_checksum: Annotated[
-        Optional[bool], pydantic.Field(alias="enablePageChecksum")
-    ] = False
-    r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
-
-    remove_empty_dirs: Annotated[
-        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
-    ] = True
-    r"""Remove empty staging directories after moving files"""
-
-    empty_dir_cleanup_sec: Annotated[
-        Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
-    ] = 300
-    r"""How frequently, in seconds, to clean up empty directories"""
-
-    deadletter_enabled: Annotated[
-        Optional[bool], pydantic.Field(alias="deadletterEnabled")
-    ] = False
-    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
-
-    deadletter_path: Annotated[
-        Optional[str], pydantic.Field(alias="deadletterPath")
-    ] = "$CRIBL_HOME/state/outputs/dead-letter"
-    r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
-
-    max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
-    r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
 
     is_mapping_obj: Annotated[Optional[bool], pydantic.Field(alias="isMappingObj")] = (
         False
     )
     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
 
-    mapping_obj: Annotated[Optional[str], pydantic.Field(alias="mappingObj")] = None
-    r"""Enter a JSON object that defines your desired data mapping"""
-
-    mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
-    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
-
-    ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
-    r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
-
-    on_backpressure: Annotated[
+    format_: Annotated[
         Annotated[
-            Optional[OutputAzureDataExplorerBackpressureBehavior],
+            Optional[OutputAzureDataExplorerDataFormat],
             PlainValidator(validate_open_enum(False)),
         ],
-        pydantic.Field(alias="onBackpressure"),
-    ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
-    r"""How to handle events when all receivers are exerting backpressure"""
+        pydantic.Field(alias="format"),
+    ] = OutputAzureDataExplorerDataFormat.JSON
+    r"""Format of the output data"""
 
     stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
         "$CRIBL_HOME/state/outputs/staging"
@@ -728,6 +534,16 @@ class OutputAzureDataExplorer(BaseModel):
     ] = True
     r"""Add the Output ID value to staging location"""
 
+    remove_empty_dirs: Annotated[
+        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+    ] = True
+    r"""Remove empty staging directories after moving files"""
+
+    deadletter_enabled: Annotated[
+        Optional[bool], pydantic.Field(alias="deadletterEnabled")
+    ] = False
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
     timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 30
     r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
 
@@ -784,6 +600,15 @@ class OutputAzureDataExplorer(BaseModel):
     ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
+    compress: Annotated[
+        Optional[OutputAzureDataExplorerCompressCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputAzureDataExplorerCompressCompression.GZIP
+    r"""Data compression format to apply to HTTP content before it is delivered"""
+
+    mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
+    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
+
     concurrency: Optional[float] = 5
     r"""Maximum number of ongoing requests before blocking"""
 
@@ -818,35 +643,6 @@ class OutputAzureDataExplorer(BaseModel):
     keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
     r"""Disable to close the connection immediately after sending the outgoing request"""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputAzureDataExplorerMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -878,6 +674,20 @@ class OutputAzureDataExplorer(BaseModel):
     ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputAzureDataExplorerMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputAzureDataExplorerMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputAzureDataExplorerPqControls], pydantic.Field(alias="pqControls")
     ] = None
+
+    empty_dir_cleanup_sec: Annotated[
+        Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
+    ] = 300
+    r"""How frequently, in seconds, to clean up empty directories"""