cribl-control-plane 0.2.1rc6__py3-none-any.whl → 0.2.1rc8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic.

Files changed (103)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/lakedatasets.py +28 -0
  3. cribl_control_plane/models/__init__.py +124 -5
  4. cribl_control_plane/models/cribllakedataset.py +4 -0
  5. cribl_control_plane/models/cribllakedatasetupdate.py +4 -0
  6. cribl_control_plane/models/input.py +15 -15
  7. cribl_control_plane/models/inputappscope.py +20 -16
  8. cribl_control_plane/models/inputconfluentcloud.py +110 -0
  9. cribl_control_plane/models/inputcriblhttp.py +20 -16
  10. cribl_control_plane/models/inputcribllakehttp.py +20 -16
  11. cribl_control_plane/models/inputcribltcp.py +20 -16
  12. cribl_control_plane/models/inputdatadogagent.py +20 -16
  13. cribl_control_plane/models/inputedgeprometheus.py +44 -36
  14. cribl_control_plane/models/inputelastic.py +44 -27
  15. cribl_control_plane/models/inputeventhub.py +118 -0
  16. cribl_control_plane/models/inputfile.py +10 -5
  17. cribl_control_plane/models/inputfirehose.py +20 -16
  18. cribl_control_plane/models/inputgrafana.py +39 -31
  19. cribl_control_plane/models/inputhttp.py +20 -16
  20. cribl_control_plane/models/inputhttpraw.py +20 -16
  21. cribl_control_plane/models/inputkafka.py +108 -0
  22. cribl_control_plane/models/inputloki.py +20 -16
  23. cribl_control_plane/models/inputmetrics.py +20 -16
  24. cribl_control_plane/models/inputmodeldriventelemetry.py +20 -16
  25. cribl_control_plane/models/inputopentelemetry.py +19 -15
  26. cribl_control_plane/models/inputprometheus.py +44 -36
  27. cribl_control_plane/models/inputprometheusrw.py +20 -16
  28. cribl_control_plane/models/inputsplunk.py +20 -16
  29. cribl_control_plane/models/inputsplunkhec.py +19 -15
  30. cribl_control_plane/models/inputsyslog.py +39 -31
  31. cribl_control_plane/models/inputsystemmetrics.py +20 -10
  32. cribl_control_plane/models/inputtcp.py +30 -16
  33. cribl_control_plane/models/inputtcpjson.py +20 -16
  34. cribl_control_plane/models/inputwindowsmetrics.py +20 -10
  35. cribl_control_plane/models/inputwineventlogs.py +14 -0
  36. cribl_control_plane/models/inputwizwebhook.py +20 -16
  37. cribl_control_plane/models/inputzscalerhec.py +19 -15
  38. cribl_control_plane/models/jobinfo.py +10 -4
  39. cribl_control_plane/models/jobstatus.py +24 -3
  40. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  41. cribl_control_plane/models/output.py +21 -21
  42. cribl_control_plane/models/outputazureblob.py +7 -0
  43. cribl_control_plane/models/outputazuredataexplorer.py +283 -93
  44. cribl_control_plane/models/outputazureeventhub.py +169 -21
  45. cribl_control_plane/models/outputazurelogs.py +49 -21
  46. cribl_control_plane/models/outputchronicle.py +49 -21
  47. cribl_control_plane/models/outputclickhouse.py +49 -21
  48. cribl_control_plane/models/outputcloudwatch.py +49 -21
  49. cribl_control_plane/models/outputconfluentcloud.py +169 -22
  50. cribl_control_plane/models/outputcriblhttp.py +49 -21
  51. cribl_control_plane/models/outputcribltcp.py +49 -21
  52. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -22
  53. cribl_control_plane/models/outputdatabricks.py +7 -0
  54. cribl_control_plane/models/outputdatadog.py +49 -21
  55. cribl_control_plane/models/outputdataset.py +49 -21
  56. cribl_control_plane/models/outputdls3.py +7 -0
  57. cribl_control_plane/models/outputdynatracehttp.py +49 -21
  58. cribl_control_plane/models/outputdynatraceotlp.py +49 -21
  59. cribl_control_plane/models/outputelastic.py +74 -21
  60. cribl_control_plane/models/outputelasticcloud.py +74 -21
  61. cribl_control_plane/models/outputfilesystem.py +7 -0
  62. cribl_control_plane/models/outputgooglechronicle.py +65 -22
  63. cribl_control_plane/models/outputgooglecloudlogging.py +50 -22
  64. cribl_control_plane/models/outputgooglecloudstorage.py +7 -0
  65. cribl_control_plane/models/outputgooglepubsub.py +49 -21
  66. cribl_control_plane/models/outputgrafanacloud.py +98 -42
  67. cribl_control_plane/models/outputgraphite.py +49 -21
  68. cribl_control_plane/models/outputhoneycomb.py +49 -21
  69. cribl_control_plane/models/outputhumiohec.py +49 -21
  70. cribl_control_plane/models/outputinfluxdb.py +49 -21
  71. cribl_control_plane/models/outputkafka.py +164 -19
  72. cribl_control_plane/models/outputkinesis.py +56 -21
  73. cribl_control_plane/models/outputloki.py +47 -19
  74. cribl_control_plane/models/outputminio.py +7 -0
  75. cribl_control_plane/models/outputmsk.py +56 -19
  76. cribl_control_plane/models/outputnewrelic.py +49 -21
  77. cribl_control_plane/models/outputnewrelicevents.py +50 -22
  78. cribl_control_plane/models/outputopentelemetry.py +49 -21
  79. cribl_control_plane/models/outputprometheus.py +49 -21
  80. cribl_control_plane/models/outputs3.py +7 -0
  81. cribl_control_plane/models/outputsentinel.py +49 -21
  82. cribl_control_plane/models/outputsentineloneaisiem.py +50 -22
  83. cribl_control_plane/models/outputservicenow.py +49 -21
  84. cribl_control_plane/models/outputsignalfx.py +49 -21
  85. cribl_control_plane/models/outputsns.py +47 -19
  86. cribl_control_plane/models/outputsplunk.py +49 -21
  87. cribl_control_plane/models/outputsplunkhec.py +124 -21
  88. cribl_control_plane/models/outputsplunklb.py +49 -21
  89. cribl_control_plane/models/outputsqs.py +47 -19
  90. cribl_control_plane/models/outputstatsd.py +49 -21
  91. cribl_control_plane/models/outputstatsdext.py +49 -21
  92. cribl_control_plane/models/outputsumologic.py +49 -21
  93. cribl_control_plane/models/outputsyslog.py +129 -99
  94. cribl_control_plane/models/outputtcpjson.py +49 -21
  95. cribl_control_plane/models/outputwavefront.py +49 -21
  96. cribl_control_plane/models/outputwebhook.py +49 -21
  97. cribl_control_plane/models/outputxsiam.py +47 -19
  98. cribl_control_plane/models/runnablejobcollection.py +12 -8
  99. cribl_control_plane/models/runnablejobexecutor.py +12 -8
  100. cribl_control_plane/models/runnablejobscheduledsearch.py +12 -8
  101. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/METADATA +25 -7
  102. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/RECORD +103 -102
  103. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/WHEEL +0 -0
--- a/cribl_control_plane/models/outputazuredataexplorer.py
+++ b/cribl_control_plane/models/outputazuredataexplorer.py
@@ -22,7 +22,9 @@ class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
     STREAMING = "streaming"
 
 
-class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -55,6 +57,68 @@ class OutputAzureDataExplorerCertificate(BaseModel):
     r"""The certificate you registered as credentials for your app in the Azure portal"""
 
 
+class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Format of the output data"""
+
+    # JSON
+    JSON = "json"
+    # Raw
+    RAW = "raw"
+    # Parquet
+    PARQUET = "parquet"
+
+
+class OutputAzureDataExplorerCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
+    r"""Data compression format to apply to HTTP content before it is delivered"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class OutputAzureDataExplorerCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Compression level to apply before moving files to final destination"""
+
+    # Best Speed
+    BEST_SPEED = "best_speed"
+    # Normal
+    NORMAL = "normal"
+    # Best Compression
+    BEST_COMPRESSION = "best_compression"
+
+
+class OutputAzureDataExplorerParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Determines which data types are supported and how they are represented"""
+
+    # 1.0
+    PARQUET_1_0 = "PARQUET_1_0"
+    # 2.4
+    PARQUET_2_4 = "PARQUET_2_4"
+    # 2.6
+    PARQUET_2_6 = "PARQUET_2_6"
+
+
+class OutputAzureDataExplorerDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+    # V1
+    DATA_PAGE_V1 = "DATA_PAGE_V1"
+    # V2
+    DATA_PAGE_V2 = "DATA_PAGE_V2"
+
+
+class OutputAzureDataExplorerKeyValueMetadatumTypedDict(TypedDict):
+    value: str
+    key: NotRequired[str]
+
+
+class OutputAzureDataExplorerKeyValueMetadatum(BaseModel):
+    value: str
+
+    key: Optional[str] = ""
+
+
 class OutputAzureDataExplorerBackpressureBehavior(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
@@ -68,17 +132,6 @@ class OutputAzureDataExplorerBackpressureBehavior(
     QUEUE = "queue"
 
 
-class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Format of the output data"""
-
-    # JSON
-    JSON = "json"
-    # Raw
-    RAW = "raw"
-    # Parquet
-    PARQUET = "parquet"
-
-
 class OutputAzureDataExplorerDiskSpaceProtection(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
@@ -205,13 +258,15 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputAzureDataExplorerCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
-    r"""Data compression format to apply to HTTP content before it is delivered"""
+class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
-    NONE = "none"
-    GZIP = "gzip"
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
 
 
 class OutputAzureDataExplorerPqCompressCompression(
@@ -234,17 +289,6 @@ class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEn
     DROP = "drop"
 
 
-class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputAzureDataExplorerPqControlsTypedDict(TypedDict):
     pass
 
@@ -280,7 +324,9 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     validate_database_settings: NotRequired[bool]
     r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
     ingest_mode: NotRequired[IngestionMode]
-    oauth_endpoint: NotRequired[MicrosoftEntraIDAuthenticationEndpoint]
+    oauth_endpoint: NotRequired[
+        OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint
+    ]
     r"""Endpoint used to acquire authentication tokens from Azure"""
     oauth_type: NotRequired[OutputAzureDataExplorerAuthenticationMethod]
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -290,14 +336,56 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     text_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
     certificate: NotRequired[OutputAzureDataExplorerCertificateTypedDict]
+    format_: NotRequired[OutputAzureDataExplorerDataFormat]
+    r"""Format of the output data"""
+    compress: NotRequired[OutputAzureDataExplorerCompressCompression]
+    r"""Data compression format to apply to HTTP content before it is delivered"""
+    compression_level: NotRequired[OutputAzureDataExplorerCompressionLevel]
+    r"""Compression level to apply before moving files to final destination"""
+    automatic_schema: NotRequired[bool]
+    r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+    parquet_schema: NotRequired[str]
+    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
+    parquet_version: NotRequired[OutputAzureDataExplorerParquetVersion]
+    r"""Determines which data types are supported and how they are represented"""
+    parquet_data_page_version: NotRequired[OutputAzureDataExplorerDataPageVersion]
+    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+    parquet_row_group_length: NotRequired[float]
+    r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+    parquet_page_size: NotRequired[str]
+    r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+    should_log_invalid_rows: NotRequired[bool]
+    r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+    key_value_metadata: NotRequired[
+        List[OutputAzureDataExplorerKeyValueMetadatumTypedDict]
+    ]
+    r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+    enable_statistics: NotRequired[bool]
+    r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+    enable_write_page_index: NotRequired[bool]
+    r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+    enable_page_checksum: NotRequired[bool]
+    r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+    remove_empty_dirs: NotRequired[bool]
+    r"""Remove empty staging directories after moving files"""
+    empty_dir_cleanup_sec: NotRequired[float]
+    r"""How frequently, in seconds, to clean up empty directories"""
+    deadletter_enabled: NotRequired[bool]
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+    deadletter_path: NotRequired[str]
+    r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+    max_retry_num: NotRequired[float]
+    r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+    is_mapping_obj: NotRequired[bool]
+    r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
+    mapping_obj: NotRequired[str]
+    r"""Enter a JSON object that defines your desired data mapping"""
+    mapping_ref: NotRequired[str]
+    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
     ingest_url: NotRequired[str]
     r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
     on_backpressure: NotRequired[OutputAzureDataExplorerBackpressureBehavior]
     r"""How to handle events when all receivers are exerting backpressure"""
-    is_mapping_obj: NotRequired[bool]
-    r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
-    format_: NotRequired[OutputAzureDataExplorerDataFormat]
-    r"""Format of the output data"""
     stage_path: NotRequired[str]
     r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant and stable storage."""
     file_name_suffix: NotRequired[str]
@@ -316,10 +404,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
     add_id_to_stage_path: NotRequired[bool]
    r"""Add the Output ID value to staging location"""
-    remove_empty_dirs: NotRequired[bool]
-    r"""Remove empty staging directories after moving files"""
-    deadletter_enabled: NotRequired[bool]
-    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     timeout_sec: NotRequired[float]
     r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
     flush_immediately: NotRequired[bool]
@@ -345,10 +429,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     ]
     response_honor_retry_after_header: NotRequired[bool]
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
-    compress: NotRequired[OutputAzureDataExplorerCompressCompression]
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-    mapping_ref: NotRequired[str]
-    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
     concurrency: NotRequired[float]
     r"""Maximum number of ongoing requests before blocking"""
     max_payload_size_kb: NotRequired[float]
@@ -366,6 +446,16 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
     keep_alive: NotRequired[bool]
     r"""Disable to close the connection immediately after sending the outgoing request"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputAzureDataExplorerMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -376,11 +466,7 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputAzureDataExplorerQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputAzureDataExplorerMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputAzureDataExplorerPqControlsTypedDict]
-    empty_dir_cleanup_sec: NotRequired[float]
-    r"""How frequently, in seconds, to clean up empty directories"""
 
 
 class OutputAzureDataExplorer(BaseModel):
@@ -433,11 +519,11 @@ class OutputAzureDataExplorer(BaseModel):
 
     oauth_endpoint: Annotated[
         Annotated[
-            Optional[MicrosoftEntraIDAuthenticationEndpoint],
+            Optional[OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint],
             PlainValidator(validate_open_enum(False)),
         ],
         pydantic.Field(alias="oauthEndpoint"),
-    ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
+    ] = OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
     r"""Endpoint used to acquire authentication tokens from Azure"""
 
     oauth_type: Annotated[
@@ -459,31 +545,139 @@ class OutputAzureDataExplorer(BaseModel):
 
     certificate: Optional[OutputAzureDataExplorerCertificate] = None
 
-    ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
-    r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
+    format_: Annotated[
+        Annotated[
+            Optional[OutputAzureDataExplorerDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
+    ] = OutputAzureDataExplorerDataFormat.JSON
+    r"""Format of the output data"""
 
-    on_backpressure: Annotated[
+    compress: Annotated[
+        Optional[OutputAzureDataExplorerCompressCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputAzureDataExplorerCompressCompression.GZIP
+    r"""Data compression format to apply to HTTP content before it is delivered"""
+
+    compression_level: Annotated[
         Annotated[
-            Optional[OutputAzureDataExplorerBackpressureBehavior],
+            Optional[OutputAzureDataExplorerCompressionLevel],
             PlainValidator(validate_open_enum(False)),
         ],
-        pydantic.Field(alias="onBackpressure"),
-    ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
-    r"""How to handle events when all receivers are exerting backpressure"""
+        pydantic.Field(alias="compressionLevel"),
+    ] = OutputAzureDataExplorerCompressionLevel.BEST_SPEED
+    r"""Compression level to apply before moving files to final destination"""
+
+    automatic_schema: Annotated[
+        Optional[bool], pydantic.Field(alias="automaticSchema")
+    ] = False
+    r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+
+    parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
+        None
+    )
+    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
+
+    parquet_version: Annotated[
+        Annotated[
+            Optional[OutputAzureDataExplorerParquetVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="parquetVersion"),
+    ] = OutputAzureDataExplorerParquetVersion.PARQUET_2_6
+    r"""Determines which data types are supported and how they are represented"""
+
+    parquet_data_page_version: Annotated[
+        Annotated[
+            Optional[OutputAzureDataExplorerDataPageVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="parquetDataPageVersion"),
+    ] = OutputAzureDataExplorerDataPageVersion.DATA_PAGE_V2
+    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+    parquet_row_group_length: Annotated[
+        Optional[float], pydantic.Field(alias="parquetRowGroupLength")
+    ] = 10000
+    r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+
+    parquet_page_size: Annotated[
+        Optional[str], pydantic.Field(alias="parquetPageSize")
+    ] = "1MB"
+    r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+
+    should_log_invalid_rows: Annotated[
+        Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
+    ] = None
+    r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+
+    key_value_metadata: Annotated[
+        Optional[List[OutputAzureDataExplorerKeyValueMetadatum]],
+        pydantic.Field(alias="keyValueMetadata"),
+    ] = None
+    r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+
+    enable_statistics: Annotated[
+        Optional[bool], pydantic.Field(alias="enableStatistics")
+    ] = True
+    r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+
+    enable_write_page_index: Annotated[
+        Optional[bool], pydantic.Field(alias="enableWritePageIndex")
+    ] = True
+    r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+
+    enable_page_checksum: Annotated[
+        Optional[bool], pydantic.Field(alias="enablePageChecksum")
+    ] = False
+    r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+
+    remove_empty_dirs: Annotated[
+        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+    ] = True
+    r"""Remove empty staging directories after moving files"""
+
+    empty_dir_cleanup_sec: Annotated[
+        Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
+    ] = 300
+    r"""How frequently, in seconds, to clean up empty directories"""
+
+    deadletter_enabled: Annotated[
+        Optional[bool], pydantic.Field(alias="deadletterEnabled")
+    ] = False
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
+    deadletter_path: Annotated[
+        Optional[str], pydantic.Field(alias="deadletterPath")
+    ] = "$CRIBL_HOME/state/outputs/dead-letter"
+    r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+
+    max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
+    r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
 
     is_mapping_obj: Annotated[Optional[bool], pydantic.Field(alias="isMappingObj")] = (
         False
     )
     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
 
-    format_: Annotated[
+    mapping_obj: Annotated[Optional[str], pydantic.Field(alias="mappingObj")] = None
+    r"""Enter a JSON object that defines your desired data mapping"""
+
+    mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
+    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
+
+    ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
+    r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
+
+    on_backpressure: Annotated[
         Annotated[
-            Optional[OutputAzureDataExplorerDataFormat],
+            Optional[OutputAzureDataExplorerBackpressureBehavior],
             PlainValidator(validate_open_enum(False)),
         ],
-        pydantic.Field(alias="format"),
-    ] = OutputAzureDataExplorerDataFormat.JSON
-    r"""Format of the output data"""
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
 
     stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
         "$CRIBL_HOME/state/outputs/staging"
@@ -534,16 +728,6 @@ class OutputAzureDataExplorer(BaseModel):
     ] = True
     r"""Add the Output ID value to staging location"""
 
-    remove_empty_dirs: Annotated[
-        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
-    ] = True
-    r"""Remove empty staging directories after moving files"""
-
-    deadletter_enabled: Annotated[
-        Optional[bool], pydantic.Field(alias="deadletterEnabled")
-    ] = False
-    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
-
     timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 30
     r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
 
@@ -600,15 +784,6 @@ class OutputAzureDataExplorer(BaseModel):
     ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
-    compress: Annotated[
-        Optional[OutputAzureDataExplorerCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureDataExplorerCompressCompression.GZIP
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-
-    mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
-    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
-
     concurrency: Optional[float] = 5
     r"""Maximum number of ongoing requests before blocking"""
 
@@ -643,6 +818,35 @@ class OutputAzureDataExplorer(BaseModel):
     keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
     r"""Disable to close the connection immediately after sending the outgoing request"""
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputAzureDataExplorerMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputAzureDataExplorerMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -674,20 +878,6 @@ class OutputAzureDataExplorer(BaseModel):
     ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureDataExplorerMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputAzureDataExplorerMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputAzureDataExplorerPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    empty_dir_cleanup_sec: Annotated[
-        Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
-    ] = 300
-    r"""How frequently, in seconds, to clean up empty directories"""