cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic.

Files changed (158)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputazuredataexplorer.py

@@ -1,11 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import utils
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -22,7 +23,9 @@ class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
  STREAMING = "streaming"


- class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint(
+ str, Enum, metaclass=utils.OpenEnumMeta
+ ):
  r"""Endpoint used to acquire authentication tokens from Azure"""

  HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -55,6 +58,68 @@ class OutputAzureDataExplorerCertificate(BaseModel):
  r"""The certificate you registered as credentials for your app in the Azure portal"""


+ class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""Format of the output data"""
+
+ # JSON
+ JSON = "json"
+ # Raw
+ RAW = "raw"
+ # Parquet
+ PARQUET = "parquet"
+
+
+ class OutputAzureDataExplorerCompressCompression(
+ str, Enum, metaclass=utils.OpenEnumMeta
+ ):
+ r"""Data compression format to apply to HTTP content before it is delivered"""
+
+ NONE = "none"
+ GZIP = "gzip"
+
+
+ class OutputAzureDataExplorerCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""Compression level to apply before moving files to final destination"""
+
+ # Best Speed
+ BEST_SPEED = "best_speed"
+ # Normal
+ NORMAL = "normal"
+ # Best Compression
+ BEST_COMPRESSION = "best_compression"
+
+
+ class OutputAzureDataExplorerParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""Determines which data types are supported and how they are represented"""
+
+ # 1.0
+ PARQUET_1_0 = "PARQUET_1_0"
+ # 2.4
+ PARQUET_2_4 = "PARQUET_2_4"
+ # 2.6
+ PARQUET_2_6 = "PARQUET_2_6"
+
+
+ class OutputAzureDataExplorerDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+ # V1
+ DATA_PAGE_V1 = "DATA_PAGE_V1"
+ # V2
+ DATA_PAGE_V2 = "DATA_PAGE_V2"
+
+
+ class OutputAzureDataExplorerKeyValueMetadatumTypedDict(TypedDict):
+ value: str
+ key: NotRequired[str]
+
+
+ class OutputAzureDataExplorerKeyValueMetadatum(BaseModel):
+ value: str
+
+ key: Optional[str] = ""
+
+
  class OutputAzureDataExplorerBackpressureBehavior(
  str, Enum, metaclass=utils.OpenEnumMeta
  ):
@@ -68,17 +133,6 @@ class OutputAzureDataExplorerBackpressureBehavior(
  QUEUE = "queue"


- class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""Format of the output data"""
-
- # JSON
- JSON = "json"
- # Raw
- RAW = "raw"
- # Parquet
- PARQUET = "parquet"
-
-
  class OutputAzureDataExplorerDiskSpaceProtection(
  str, Enum, metaclass=utils.OpenEnumMeta
  ):
@@ -109,6 +163,15 @@ class ExtentTag(BaseModel):
  Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
  ] = None

+ @field_serializer("prefix")
+ def serialize_prefix(self, value):
+ if isinstance(value, str):
+ try:
+ return models.PrefixOptional(value)
+ except ValueError:
+ return value
+ return value
+

  class IngestIfNotExistTypedDict(TypedDict):
  value: str
@@ -205,13 +268,15 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
  r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


- class OutputAzureDataExplorerCompressCompression(
- str, Enum, metaclass=utils.OpenEnumMeta
- ):
- r"""Data compression format to apply to HTTP content before it is delivered"""
+ class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

- NONE = "none"
- GZIP = "gzip"
+ # Error
+ ERROR = "error"
+ # Backpressure
+ ALWAYS = "always"
+ # Always On
+ BACKPRESSURE = "backpressure"


  class OutputAzureDataExplorerPqCompressCompression(
@@ -234,17 +299,6 @@ class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEn
  DROP = "drop"


- class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
- # Error
- ERROR = "error"
- # Backpressure
- BACKPRESSURE = "backpressure"
- # Always On
- ALWAYS = "always"
-
-
  class OutputAzureDataExplorerPqControlsTypedDict(TypedDict):
  pass

@@ -280,7 +334,9 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  validate_database_settings: NotRequired[bool]
  r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
  ingest_mode: NotRequired[IngestionMode]
- oauth_endpoint: NotRequired[MicrosoftEntraIDAuthenticationEndpoint]
+ oauth_endpoint: NotRequired[
+ OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint
+ ]
  r"""Endpoint used to acquire authentication tokens from Azure"""
  oauth_type: NotRequired[OutputAzureDataExplorerAuthenticationMethod]
  r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -290,14 +346,56 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  text_secret: NotRequired[str]
  r"""Select or create a stored text secret"""
  certificate: NotRequired[OutputAzureDataExplorerCertificateTypedDict]
+ format_: NotRequired[OutputAzureDataExplorerDataFormat]
+ r"""Format of the output data"""
+ compress: NotRequired[OutputAzureDataExplorerCompressCompression]
+ r"""Data compression format to apply to HTTP content before it is delivered"""
+ compression_level: NotRequired[OutputAzureDataExplorerCompressionLevel]
+ r"""Compression level to apply before moving files to final destination"""
+ automatic_schema: NotRequired[bool]
+ r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+ parquet_schema: NotRequired[str]
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
+ parquet_version: NotRequired[OutputAzureDataExplorerParquetVersion]
+ r"""Determines which data types are supported and how they are represented"""
+ parquet_data_page_version: NotRequired[OutputAzureDataExplorerDataPageVersion]
+ r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+ parquet_row_group_length: NotRequired[float]
+ r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+ parquet_page_size: NotRequired[str]
+ r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+ should_log_invalid_rows: NotRequired[bool]
+ r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+ key_value_metadata: NotRequired[
+ List[OutputAzureDataExplorerKeyValueMetadatumTypedDict]
+ ]
+ r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+ enable_statistics: NotRequired[bool]
+ r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+ enable_write_page_index: NotRequired[bool]
+ r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+ enable_page_checksum: NotRequired[bool]
+ r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+ remove_empty_dirs: NotRequired[bool]
+ r"""Remove empty staging directories after moving files"""
+ empty_dir_cleanup_sec: NotRequired[float]
+ r"""How frequently, in seconds, to clean up empty directories"""
+ deadletter_enabled: NotRequired[bool]
+ r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+ deadletter_path: NotRequired[str]
+ r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+ max_retry_num: NotRequired[float]
+ r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+ is_mapping_obj: NotRequired[bool]
+ r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
+ mapping_obj: NotRequired[str]
+ r"""Enter a JSON object that defines your desired data mapping"""
+ mapping_ref: NotRequired[str]
+ r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
  ingest_url: NotRequired[str]
  r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
  on_backpressure: NotRequired[OutputAzureDataExplorerBackpressureBehavior]
  r"""How to handle events when all receivers are exerting backpressure"""
- is_mapping_obj: NotRequired[bool]
- r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
- format_: NotRequired[OutputAzureDataExplorerDataFormat]
- r"""Format of the output data"""
  stage_path: NotRequired[str]
  r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant and stable storage."""
  file_name_suffix: NotRequired[str]
@@ -316,10 +414,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
  add_id_to_stage_path: NotRequired[bool]
  r"""Add the Output ID value to staging location"""
- remove_empty_dirs: NotRequired[bool]
- r"""Remove empty staging directories after moving files"""
- deadletter_enabled: NotRequired[bool]
- r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
  timeout_sec: NotRequired[float]
  r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
  flush_immediately: NotRequired[bool]
@@ -345,10 +439,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  ]
  response_honor_retry_after_header: NotRequired[bool]
  r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
- compress: NotRequired[OutputAzureDataExplorerCompressCompression]
- r"""Data compression format to apply to HTTP content before it is delivered"""
- mapping_ref: NotRequired[str]
- r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
  concurrency: NotRequired[float]
  r"""Maximum number of ongoing requests before blocking"""
  max_payload_size_kb: NotRequired[float]
@@ -366,6 +456,16 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
  keep_alive: NotRequired[bool]
  r"""Disable to close the connection immediately after sending the outgoing request"""
+ pq_strict_ordering: NotRequired[bool]
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+ pq_rate_per_sec: NotRequired[float]
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+ pq_mode: NotRequired[OutputAzureDataExplorerMode]
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+ pq_max_buffer_size: NotRequired[float]
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
+ pq_max_backpressure_sec: NotRequired[float]
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
  pq_max_file_size: NotRequired[str]
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
  pq_max_size: NotRequired[str]
@@ -376,11 +476,7 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  r"""Codec to use to compress the persisted data"""
  pq_on_backpressure: NotRequired[OutputAzureDataExplorerQueueFullBehavior]
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
- pq_mode: NotRequired[OutputAzureDataExplorerMode]
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
  pq_controls: NotRequired[OutputAzureDataExplorerPqControlsTypedDict]
- empty_dir_cleanup_sec: NotRequired[float]
- r"""How frequently, in seconds, to clean up empty directories"""


  class OutputAzureDataExplorer(BaseModel):
@@ -433,11 +529,11 @@ class OutputAzureDataExplorer(BaseModel):

  oauth_endpoint: Annotated[
  Annotated[
- Optional[MicrosoftEntraIDAuthenticationEndpoint],
+ Optional[OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint],
  PlainValidator(validate_open_enum(False)),
  ],
  pydantic.Field(alias="oauthEndpoint"),
- ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
+ ] = OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
  r"""Endpoint used to acquire authentication tokens from Azure"""

  oauth_type: Annotated[
@@ -459,31 +555,139 @@ class OutputAzureDataExplorer(BaseModel):

  certificate: Optional[OutputAzureDataExplorerCertificate] = None

- ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
- r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
+ format_: Annotated[
+ Annotated[
+ Optional[OutputAzureDataExplorerDataFormat],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="format"),
+ ] = OutputAzureDataExplorerDataFormat.JSON
+ r"""Format of the output data"""

- on_backpressure: Annotated[
+ compress: Annotated[
+ Optional[OutputAzureDataExplorerCompressCompression],
+ PlainValidator(validate_open_enum(False)),
+ ] = OutputAzureDataExplorerCompressCompression.GZIP
+ r"""Data compression format to apply to HTTP content before it is delivered"""
+
+ compression_level: Annotated[
  Annotated[
- Optional[OutputAzureDataExplorerBackpressureBehavior],
+ Optional[OutputAzureDataExplorerCompressionLevel],
  PlainValidator(validate_open_enum(False)),
  ],
- pydantic.Field(alias="onBackpressure"),
- ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
- r"""How to handle events when all receivers are exerting backpressure"""
+ pydantic.Field(alias="compressionLevel"),
+ ] = OutputAzureDataExplorerCompressionLevel.BEST_SPEED
+ r"""Compression level to apply before moving files to final destination"""
+
+ automatic_schema: Annotated[
+ Optional[bool], pydantic.Field(alias="automaticSchema")
+ ] = False
+ r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+
+ parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
+ None
+ )
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
+
+ parquet_version: Annotated[
+ Annotated[
+ Optional[OutputAzureDataExplorerParquetVersion],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="parquetVersion"),
+ ] = OutputAzureDataExplorerParquetVersion.PARQUET_2_6
+ r"""Determines which data types are supported and how they are represented"""
+
+ parquet_data_page_version: Annotated[
+ Annotated[
+ Optional[OutputAzureDataExplorerDataPageVersion],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="parquetDataPageVersion"),
+ ] = OutputAzureDataExplorerDataPageVersion.DATA_PAGE_V2
+ r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+ parquet_row_group_length: Annotated[
+ Optional[float], pydantic.Field(alias="parquetRowGroupLength")
+ ] = 10000
+ r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+
+ parquet_page_size: Annotated[
+ Optional[str], pydantic.Field(alias="parquetPageSize")
+ ] = "1MB"
+ r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+
+ should_log_invalid_rows: Annotated[
+ Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
+ ] = None
+ r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+
+ key_value_metadata: Annotated[
+ Optional[List[OutputAzureDataExplorerKeyValueMetadatum]],
+ pydantic.Field(alias="keyValueMetadata"),
+ ] = None
+ r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+
+ enable_statistics: Annotated[
+ Optional[bool], pydantic.Field(alias="enableStatistics")
+ ] = True
+ r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+
+ enable_write_page_index: Annotated[
+ Optional[bool], pydantic.Field(alias="enableWritePageIndex")
+ ] = True
+ r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+
+ enable_page_checksum: Annotated[
+ Optional[bool], pydantic.Field(alias="enablePageChecksum")
+ ] = False
+ r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+
+ remove_empty_dirs: Annotated[
+ Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+ ] = True
+ r"""Remove empty staging directories after moving files"""
+
+ empty_dir_cleanup_sec: Annotated[
+ Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
+ ] = 300
+ r"""How frequently, in seconds, to clean up empty directories"""
+
+ deadletter_enabled: Annotated[
+ Optional[bool], pydantic.Field(alias="deadletterEnabled")
+ ] = False
+ r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
+ deadletter_path: Annotated[
+ Optional[str], pydantic.Field(alias="deadletterPath")
+ ] = "$CRIBL_HOME/state/outputs/dead-letter"
+ r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+
+ max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
+ r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""

  is_mapping_obj: Annotated[Optional[bool], pydantic.Field(alias="isMappingObj")] = (
  False
  )
  r"""Send a JSON mapping object instead of specifying an existing named data mapping"""

- format_: Annotated[
+ mapping_obj: Annotated[Optional[str], pydantic.Field(alias="mappingObj")] = None
+ r"""Enter a JSON object that defines your desired data mapping"""
+
+ mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
+ r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
+
+ ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
+ r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
+
+ on_backpressure: Annotated[
  Annotated[
- Optional[OutputAzureDataExplorerDataFormat],
+ Optional[OutputAzureDataExplorerBackpressureBehavior],
  PlainValidator(validate_open_enum(False)),
  ],
- pydantic.Field(alias="format"),
- ] = OutputAzureDataExplorerDataFormat.JSON
- r"""Format of the output data"""
+ pydantic.Field(alias="onBackpressure"),
+ ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
+ r"""How to handle events when all receivers are exerting backpressure"""

  stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
  "$CRIBL_HOME/state/outputs/staging"
@@ -534,16 +738,6 @@ class OutputAzureDataExplorer(BaseModel):
  ] = True
  r"""Add the Output ID value to staging location"""

- remove_empty_dirs: Annotated[
- Optional[bool], pydantic.Field(alias="removeEmptyDirs")
- ] = True
- r"""Remove empty staging directories after moving files"""
-
- deadletter_enabled: Annotated[
- Optional[bool], pydantic.Field(alias="deadletterEnabled")
- ] = False
- r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
-
  timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 30
  r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""

@@ -600,15 +794,6 @@ class OutputAzureDataExplorer(BaseModel):
  ] = True
  r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

- compress: Annotated[
- Optional[OutputAzureDataExplorerCompressCompression],
- PlainValidator(validate_open_enum(False)),
- ] = OutputAzureDataExplorerCompressCompression.GZIP
- r"""Data compression format to apply to HTTP content before it is delivered"""
-
- mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
- r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
-
  concurrency: Optional[float] = 5
  r"""Maximum number of ongoing requests before blocking"""

@@ -643,6 +828,35 @@ class OutputAzureDataExplorer(BaseModel):
  keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
  r"""Disable to close the connection immediately after sending the outgoing request"""

+ pq_strict_ordering: Annotated[
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+ ] = True
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+ pq_rate_per_sec: Annotated[
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
+ ] = 0
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+ pq_mode: Annotated[
+ Annotated[
+ Optional[OutputAzureDataExplorerMode],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="pqMode"),
+ ] = OutputAzureDataExplorerMode.ERROR
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+ pq_max_buffer_size: Annotated[
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+ ] = 42
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+ pq_max_backpressure_sec: Annotated[
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+ ] = 30
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
  pq_max_file_size: Annotated[
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
  ] = "1 MB"
@@ -674,20 +888,143 @@ class OutputAzureDataExplorer(BaseModel):
  ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

- pq_mode: Annotated[
- Annotated[
- Optional[OutputAzureDataExplorerMode],
- PlainValidator(validate_open_enum(False)),
- ],
- pydantic.Field(alias="pqMode"),
- ] = OutputAzureDataExplorerMode.ERROR
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
  pq_controls: Annotated[
  Optional[OutputAzureDataExplorerPqControls], pydantic.Field(alias="pqControls")
  ] = None

- empty_dir_cleanup_sec: Annotated[
- Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
- ] = 300
- r"""How frequently, in seconds, to clean up empty directories"""
+ @field_serializer("ingest_mode")
+ def serialize_ingest_mode(self, value):
+ if isinstance(value, str):
+ try:
+ return models.IngestionMode(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("oauth_endpoint")
+ def serialize_oauth_endpoint(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint(
+ value
+ )
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("oauth_type")
+ def serialize_oauth_type(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerAuthenticationMethod(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("format_")
+ def serialize_format_(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerDataFormat(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("compress")
+ def serialize_compress(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerCompressCompression(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("compression_level")
+ def serialize_compression_level(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerCompressionLevel(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("parquet_version")
+ def serialize_parquet_version(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerParquetVersion(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("parquet_data_page_version")
+ def serialize_parquet_data_page_version(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerDataPageVersion(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("on_backpressure")
+ def serialize_on_backpressure(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerBackpressureBehavior(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("on_disk_full_backpressure")
+ def serialize_on_disk_full_backpressure(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerDiskSpaceProtection(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("report_level")
+ def serialize_report_level(self, value):
+ if isinstance(value, str):
+ try:
+ return models.ReportLevel(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("report_method")
+ def serialize_report_method(self, value):
+ if isinstance(value, str):
+ try:
+ return models.ReportMethod(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_mode")
+ def serialize_pq_mode(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerMode(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_compress")
+ def serialize_pq_compress(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerPqCompressCompression(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_on_backpressure")
+ def serialize_pq_on_backpressure(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputAzureDataExplorerQueueFullBehavior(value)
+ except ValueError:
+ return value
+ return value
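
The most mechanical change in this diff is the pair of open-enum hooks that now accompany each enum-typed field: validation via PlainValidator(validate_open_enum(False)) lets unknown strings through, and the new @field_serializer methods coerce known strings back into the matching enum member while passing unrecognized values through unchanged. Below is a minimal, standalone sketch of that pattern in plain pydantic v2; the Compression enum, ExampleOutput model, and field name are illustrative stand-ins, not part of the cribl-control-plane SDK.

from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel, field_serializer, field_validator


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


class ExampleOutput(BaseModel):
    # Open enum: accept either a known member or an arbitrary string.
    compress: Optional[Union[Compression, str]] = Compression.GZIP

    @field_validator("compress", mode="plain")
    @classmethod
    def _accept_unknown(cls, value):
        # Stand-in for validate_open_enum(False): keep unknown strings instead of raising.
        if isinstance(value, str):
            try:
                return Compression(value)
            except ValueError:
                return value
        return value

    @field_serializer("compress")
    def _serialize_compress(self, value):
        # Same shape as the serializers added in this diff: coerce known strings
        # back to the enum member, pass unrecognized values through unchanged.
        if isinstance(value, str):
            try:
                return Compression(value)
            except ValueError:
                return value
        return value


print(ExampleOutput(compress="gzip").model_dump())  # {'compress': <Compression.GZIP: 'gzip'>}
print(ExampleOutput(compress="zstd").model_dump())  # {'compress': 'zstd'} (unknown value preserved)

Under these assumptions, a known value round-trips as the enum member while an unknown value survives serialization as a plain string instead of raising, which is the forward-compatibility behavior the generated serializers above appear to provide.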