cribl-control-plane: cribl_control_plane-0.2.1rc7-py3-none-any.whl → cribl_control_plane-0.3.0a1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.
Files changed (179)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/errors/__init__.py +5 -8
  3. cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
  4. cribl_control_plane/groups_sdk.py +28 -52
  5. cribl_control_plane/health.py +16 -22
  6. cribl_control_plane/models/__init__.py +54 -217
  7. cribl_control_plane/models/appmode.py +14 -0
  8. cribl_control_plane/models/authtoken.py +1 -5
  9. cribl_control_plane/models/cacheconnection.py +0 -20
  10. cribl_control_plane/models/configgroup.py +7 -55
  11. cribl_control_plane/models/configgroupcloud.py +1 -11
  12. cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
  13. cribl_control_plane/models/createroutesappendbyidop.py +2 -2
  14. cribl_control_plane/models/createversionundoop.py +3 -3
  15. cribl_control_plane/models/cribllakedataset.py +1 -11
  16. cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
  17. cribl_control_plane/models/datasetmetadata.py +1 -11
  18. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
  19. cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
  20. cribl_control_plane/models/distributedsummary.py +0 -6
  21. cribl_control_plane/models/error.py +16 -0
  22. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
  23. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
  24. cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
  25. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
  26. cribl_control_plane/models/gethealthinfoop.py +17 -0
  27. cribl_control_plane/models/getsummaryop.py +0 -11
  28. cribl_control_plane/models/hbcriblinfo.py +3 -24
  29. cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
  30. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  31. cribl_control_plane/models/input.py +78 -80
  32. cribl_control_plane/models/inputappscope.py +17 -80
  33. cribl_control_plane/models/inputazureblob.py +1 -33
  34. cribl_control_plane/models/inputcollection.py +1 -24
  35. cribl_control_plane/models/inputconfluentcloud.py +18 -195
  36. cribl_control_plane/models/inputcribl.py +1 -24
  37. cribl_control_plane/models/inputcriblhttp.py +17 -62
  38. cribl_control_plane/models/inputcribllakehttp.py +17 -62
  39. cribl_control_plane/models/inputcriblmetrics.py +1 -24
  40. cribl_control_plane/models/inputcribltcp.py +17 -62
  41. cribl_control_plane/models/inputcrowdstrike.py +1 -54
  42. cribl_control_plane/models/inputdatadogagent.py +17 -62
  43. cribl_control_plane/models/inputdatagen.py +1 -24
  44. cribl_control_plane/models/inputedgeprometheus.py +34 -147
  45. cribl_control_plane/models/inputelastic.py +27 -119
  46. cribl_control_plane/models/inputeventhub.py +1 -182
  47. cribl_control_plane/models/inputexec.py +1 -33
  48. cribl_control_plane/models/inputfile.py +3 -42
  49. cribl_control_plane/models/inputfirehose.py +17 -62
  50. cribl_control_plane/models/inputgooglepubsub.py +1 -36
  51. cribl_control_plane/models/inputgrafana.py +32 -157
  52. cribl_control_plane/models/inputhttp.py +17 -62
  53. cribl_control_plane/models/inputhttpraw.py +17 -62
  54. cribl_control_plane/models/inputjournalfiles.py +1 -24
  55. cribl_control_plane/models/inputkafka.py +17 -189
  56. cribl_control_plane/models/inputkinesis.py +1 -80
  57. cribl_control_plane/models/inputkubeevents.py +1 -24
  58. cribl_control_plane/models/inputkubelogs.py +1 -33
  59. cribl_control_plane/models/inputkubemetrics.py +1 -33
  60. cribl_control_plane/models/inputloki.py +17 -71
  61. cribl_control_plane/models/inputmetrics.py +17 -62
  62. cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
  63. cribl_control_plane/models/inputmsk.py +18 -81
  64. cribl_control_plane/models/inputnetflow.py +1 -24
  65. cribl_control_plane/models/inputoffice365mgmt.py +1 -67
  66. cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
  67. cribl_control_plane/models/inputoffice365service.py +1 -67
  68. cribl_control_plane/models/inputopentelemetry.py +16 -92
  69. cribl_control_plane/models/inputprometheus.py +34 -138
  70. cribl_control_plane/models/inputprometheusrw.py +17 -71
  71. cribl_control_plane/models/inputrawudp.py +1 -24
  72. cribl_control_plane/models/inputs3.py +1 -45
  73. cribl_control_plane/models/inputs3inventory.py +1 -54
  74. cribl_control_plane/models/inputsecuritylake.py +1 -54
  75. cribl_control_plane/models/inputsnmp.py +1 -40
  76. cribl_control_plane/models/inputsplunk.py +17 -85
  77. cribl_control_plane/models/inputsplunkhec.py +16 -70
  78. cribl_control_plane/models/inputsplunksearch.py +1 -63
  79. cribl_control_plane/models/inputsqs.py +1 -56
  80. cribl_control_plane/models/inputsyslog.py +32 -121
  81. cribl_control_plane/models/inputsystemmetrics.py +9 -142
  82. cribl_control_plane/models/inputsystemstate.py +1 -33
  83. cribl_control_plane/models/inputtcp.py +17 -81
  84. cribl_control_plane/models/inputtcpjson.py +17 -71
  85. cribl_control_plane/models/inputwef.py +1 -71
  86. cribl_control_plane/models/inputwindowsmetrics.py +9 -129
  87. cribl_control_plane/models/inputwineventlogs.py +1 -60
  88. cribl_control_plane/models/inputwiz.py +1 -45
  89. cribl_control_plane/models/inputwizwebhook.py +17 -62
  90. cribl_control_plane/models/inputzscalerhec.py +16 -70
  91. cribl_control_plane/models/jobinfo.py +1 -4
  92. cribl_control_plane/models/jobstatus.py +3 -34
  93. cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
  94. cribl_control_plane/models/logininfo.py +3 -3
  95. cribl_control_plane/models/masterworkerentry.py +1 -11
  96. cribl_control_plane/models/nodeprovidedinfo.py +1 -11
  97. cribl_control_plane/models/nodeupgradestatus.py +0 -38
  98. cribl_control_plane/models/output.py +88 -93
  99. cribl_control_plane/models/outputazureblob.py +1 -110
  100. cribl_control_plane/models/outputazuredataexplorer.py +87 -452
  101. cribl_control_plane/models/outputazureeventhub.py +19 -281
  102. cribl_control_plane/models/outputazurelogs.py +19 -115
  103. cribl_control_plane/models/outputchronicle.py +19 -115
  104. cribl_control_plane/models/outputclickhouse.py +19 -155
  105. cribl_control_plane/models/outputcloudwatch.py +19 -106
  106. cribl_control_plane/models/outputconfluentcloud.py +38 -311
  107. cribl_control_plane/models/outputcriblhttp.py +19 -135
  108. cribl_control_plane/models/outputcribllake.py +1 -97
  109. cribl_control_plane/models/outputcribltcp.py +19 -132
  110. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
  111. cribl_control_plane/models/outputdatadog.py +19 -159
  112. cribl_control_plane/models/outputdataset.py +19 -143
  113. cribl_control_plane/models/outputdiskspool.py +1 -11
  114. cribl_control_plane/models/outputdls3.py +1 -152
  115. cribl_control_plane/models/outputdynatracehttp.py +19 -160
  116. cribl_control_plane/models/outputdynatraceotlp.py +19 -160
  117. cribl_control_plane/models/outputelastic.py +19 -163
  118. cribl_control_plane/models/outputelasticcloud.py +19 -140
  119. cribl_control_plane/models/outputexabeam.py +1 -61
  120. cribl_control_plane/models/outputfilesystem.py +1 -87
  121. cribl_control_plane/models/outputgooglechronicle.py +20 -166
  122. cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
  123. cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
  124. cribl_control_plane/models/outputgooglepubsub.py +19 -106
  125. cribl_control_plane/models/outputgrafanacloud.py +37 -288
  126. cribl_control_plane/models/outputgraphite.py +19 -105
  127. cribl_control_plane/models/outputhoneycomb.py +19 -115
  128. cribl_control_plane/models/outputhumiohec.py +19 -126
  129. cribl_control_plane/models/outputinfluxdb.py +19 -130
  130. cribl_control_plane/models/outputkafka.py +34 -302
  131. cribl_control_plane/models/outputkinesis.py +19 -133
  132. cribl_control_plane/models/outputloki.py +17 -129
  133. cribl_control_plane/models/outputminio.py +1 -145
  134. cribl_control_plane/models/outputmsk.py +34 -193
  135. cribl_control_plane/models/outputnewrelic.py +19 -136
  136. cribl_control_plane/models/outputnewrelicevents.py +20 -128
  137. cribl_control_plane/models/outputopentelemetry.py +19 -178
  138. cribl_control_plane/models/outputprometheus.py +19 -115
  139. cribl_control_plane/models/outputring.py +1 -31
  140. cribl_control_plane/models/outputs3.py +1 -152
  141. cribl_control_plane/models/outputsecuritylake.py +1 -114
  142. cribl_control_plane/models/outputsentinel.py +19 -135
  143. cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
  144. cribl_control_plane/models/outputservicenow.py +19 -168
  145. cribl_control_plane/models/outputsignalfx.py +19 -115
  146. cribl_control_plane/models/outputsns.py +17 -113
  147. cribl_control_plane/models/outputsplunk.py +19 -153
  148. cribl_control_plane/models/outputsplunkhec.py +19 -208
  149. cribl_control_plane/models/outputsplunklb.py +19 -182
  150. cribl_control_plane/models/outputsqs.py +17 -124
  151. cribl_control_plane/models/outputstatsd.py +19 -105
  152. cribl_control_plane/models/outputstatsdext.py +19 -105
  153. cribl_control_plane/models/outputsumologic.py +19 -117
  154. cribl_control_plane/models/outputsyslog.py +96 -259
  155. cribl_control_plane/models/outputtcpjson.py +19 -141
  156. cribl_control_plane/models/outputwavefront.py +19 -115
  157. cribl_control_plane/models/outputwebhook.py +19 -161
  158. cribl_control_plane/models/outputxsiam.py +17 -113
  159. cribl_control_plane/models/packinfo.py +5 -8
  160. cribl_control_plane/models/packinstallinfo.py +5 -8
  161. cribl_control_plane/models/resourcepolicy.py +0 -11
  162. cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
  163. cribl_control_plane/models/routeconf.py +4 -3
  164. cribl_control_plane/models/runnablejobcollection.py +9 -72
  165. cribl_control_plane/models/runnablejobexecutor.py +9 -32
  166. cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
  167. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
  168. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
  169. cribl_control_plane/packs.py +7 -202
  170. cribl_control_plane/routes_sdk.py +6 -6
  171. cribl_control_plane/tokens.py +15 -23
  172. {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
  173. cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
  174. cribl_control_plane/models/groupcreaterequest.py +0 -171
  175. cribl_control_plane/models/outpostnodeinfo.py +0 -16
  176. cribl_control_plane/models/outputdatabricks.py +0 -482
  177. cribl_control_plane/models/updatepacksop.py +0 -25
  178. cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
  179. {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
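The file-level diff below evidently belongs to cribl_control_plane/models/outputazuredataexplorer.py (entry 100 above), judging by the OutputAzureDataExplorer classes it touches. Before depending on any of the renamed or removed symbols, it can help to confirm which release is actually installed. A minimal sketch using only the standard library, assuming the distribution is published under the name cribl-control-plane as the dist-info entries above indicate:

from importlib.metadata import version

# Query the installed distribution's version string; expect "0.2.1rc7" before
# the upgrade and "0.3.0a1" after it.
installed = version("cribl-control-plane")
print(installed)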
@@ -1,12 +1,11 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import models, utils
+ from cribl_control_plane import utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
- from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -17,15 +16,11 @@ class OutputAzureDataExplorerType(str, Enum):


  class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
- # Batching
  BATCHING = "batching"
- # Streaming
  STREAMING = "streaming"


- class OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint(
- str, Enum, metaclass=utils.OpenEnumMeta
- ):
+ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Endpoint used to acquire authentication tokens from Azure"""

  HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -38,11 +33,8 @@ class OutputAzureDataExplorerAuthenticationMethod(
  ):
  r"""The type of OAuth 2.0 client credentials grant flow to use"""

- # Client secret
  CLIENT_SECRET = "clientSecret"
- # Client secret (text secret)
  CLIENT_TEXT_SECRET = "clientTextSecret"
- # Certificate
  CERTIFICATE = "certificate"


@@ -58,96 +50,35 @@ class OutputAzureDataExplorerCertificate(BaseModel):
  r"""The certificate you registered as credentials for your app in the Azure portal"""


- class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""Format of the output data"""
-
- # JSON
- JSON = "json"
- # Raw
- RAW = "raw"
- # Parquet
- PARQUET = "parquet"
-
-
- class OutputAzureDataExplorerCompressCompression(
- str, Enum, metaclass=utils.OpenEnumMeta
- ):
- r"""Data compression format to apply to HTTP content before it is delivered"""
-
- NONE = "none"
- GZIP = "gzip"
-
-
- class OutputAzureDataExplorerCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""Compression level to apply before moving files to final destination"""
-
- # Best Speed
- BEST_SPEED = "best_speed"
- # Normal
- NORMAL = "normal"
- # Best Compression
- BEST_COMPRESSION = "best_compression"
-
-
- class OutputAzureDataExplorerParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""Determines which data types are supported and how they are represented"""
-
- # 1.0
- PARQUET_1_0 = "PARQUET_1_0"
- # 2.4
- PARQUET_2_4 = "PARQUET_2_4"
- # 2.6
- PARQUET_2_6 = "PARQUET_2_6"
-
-
- class OutputAzureDataExplorerDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
-
- # V1
- DATA_PAGE_V1 = "DATA_PAGE_V1"
- # V2
- DATA_PAGE_V2 = "DATA_PAGE_V2"
-
-
- class OutputAzureDataExplorerKeyValueMetadatumTypedDict(TypedDict):
- value: str
- key: NotRequired[str]
-
-
- class OutputAzureDataExplorerKeyValueMetadatum(BaseModel):
- value: str
-
- key: Optional[str] = ""
-
-
  class OutputAzureDataExplorerBackpressureBehavior(
  str, Enum, metaclass=utils.OpenEnumMeta
  ):
  r"""How to handle events when all receivers are exerting backpressure"""

- # Block
  BLOCK = "block"
- # Drop
  DROP = "drop"
- # Persistent Queue
  QUEUE = "queue"


+ class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""Format of the output data"""
+
+ JSON = "json"
+ RAW = "raw"
+ PARQUET = "parquet"
+
+
  class OutputAzureDataExplorerDiskSpaceProtection(
  str, Enum, metaclass=utils.OpenEnumMeta
  ):
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

- # Block
  BLOCK = "block"
- # Drop
  DROP = "drop"


  class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
- # drop-by
  DROP_BY = "dropBy"
- # ingest-by
  INGEST_BY = "ingestBy"


@@ -163,15 +94,6 @@ class ExtentTag(BaseModel):
  Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
  ] = None

- @field_serializer("prefix")
- def serialize_prefix(self, value):
- if isinstance(value, str):
- try:
- return models.PrefixOptional(value)
- except ValueError:
- return value
- return value
-

  class IngestIfNotExistTypedDict(TypedDict):
  value: str
@@ -184,22 +106,16 @@ class IngestIfNotExist(BaseModel):
  class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Level of ingestion status reporting. Defaults to FailuresOnly."""

- # FailuresOnly
  FAILURES_ONLY = "failuresOnly"
- # DoNotReport
  DO_NOT_REPORT = "doNotReport"
- # FailuresAndSuccesses
  FAILURES_AND_SUCCESSES = "failuresAndSuccesses"


  class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Target of the ingestion status reporting. Defaults to Queue."""

- # Queue
  QUEUE = "queue"
- # Table
  TABLE = "table"
- # QueueAndTable
  QUEUE_AND_TABLE = "queueAndTable"


@@ -268,15 +184,13 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
  r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


- class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+ class OutputAzureDataExplorerCompressCompression(
+ str, Enum, metaclass=utils.OpenEnumMeta
+ ):
+ r"""Data compression format to apply to HTTP content before it is delivered"""

- # Error
- ERROR = "error"
- # Backpressure
- ALWAYS = "always"
- # Always On
- BACKPRESSURE = "backpressure"
+ NONE = "none"
+ GZIP = "gzip"


  class OutputAzureDataExplorerPqCompressCompression(
@@ -284,21 +198,25 @@ class OutputAzureDataExplorerPqCompressCompression(
  ):
  r"""Codec to use to compress the persisted data"""

- # None
  NONE = "none"
- # Gzip
  GZIP = "gzip"


  class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

- # Block
  BLOCK = "block"
- # Drop new data
  DROP = "drop"


+ class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+ ERROR = "error"
+ BACKPRESSURE = "backpressure"
+ ALWAYS = "always"
+
+
  class OutputAzureDataExplorerPqControlsTypedDict(TypedDict):
  pass

@@ -334,9 +252,7 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  validate_database_settings: NotRequired[bool]
  r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
  ingest_mode: NotRequired[IngestionMode]
- oauth_endpoint: NotRequired[
- OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint
- ]
+ oauth_endpoint: NotRequired[MicrosoftEntraIDAuthenticationEndpoint]
  r"""Endpoint used to acquire authentication tokens from Azure"""
  oauth_type: NotRequired[OutputAzureDataExplorerAuthenticationMethod]
  r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -346,56 +262,14 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  text_secret: NotRequired[str]
  r"""Select or create a stored text secret"""
  certificate: NotRequired[OutputAzureDataExplorerCertificateTypedDict]
- format_: NotRequired[OutputAzureDataExplorerDataFormat]
- r"""Format of the output data"""
- compress: NotRequired[OutputAzureDataExplorerCompressCompression]
- r"""Data compression format to apply to HTTP content before it is delivered"""
- compression_level: NotRequired[OutputAzureDataExplorerCompressionLevel]
- r"""Compression level to apply before moving files to final destination"""
- automatic_schema: NotRequired[bool]
- r"""Automatically calculate the schema based on the events of each Parquet file generated"""
- parquet_schema: NotRequired[str]
- r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
- parquet_version: NotRequired[OutputAzureDataExplorerParquetVersion]
- r"""Determines which data types are supported and how they are represented"""
- parquet_data_page_version: NotRequired[OutputAzureDataExplorerDataPageVersion]
- r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
- parquet_row_group_length: NotRequired[float]
- r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
- parquet_page_size: NotRequired[str]
- r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
- should_log_invalid_rows: NotRequired[bool]
- r"""Log up to 3 rows that @{product} skips due to data mismatch"""
- key_value_metadata: NotRequired[
- List[OutputAzureDataExplorerKeyValueMetadatumTypedDict]
- ]
- r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
- enable_statistics: NotRequired[bool]
- r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
- enable_write_page_index: NotRequired[bool]
- r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
- enable_page_checksum: NotRequired[bool]
- r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
- remove_empty_dirs: NotRequired[bool]
- r"""Remove empty staging directories after moving files"""
- empty_dir_cleanup_sec: NotRequired[float]
- r"""How frequently, in seconds, to clean up empty directories"""
- deadletter_enabled: NotRequired[bool]
- r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
- deadletter_path: NotRequired[str]
- r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
- max_retry_num: NotRequired[float]
- r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
- is_mapping_obj: NotRequired[bool]
- r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
- mapping_obj: NotRequired[str]
- r"""Enter a JSON object that defines your desired data mapping"""
- mapping_ref: NotRequired[str]
- r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
  ingest_url: NotRequired[str]
  r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
  on_backpressure: NotRequired[OutputAzureDataExplorerBackpressureBehavior]
  r"""How to handle events when all receivers are exerting backpressure"""
+ is_mapping_obj: NotRequired[bool]
+ r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
+ format_: NotRequired[OutputAzureDataExplorerDataFormat]
+ r"""Format of the output data"""
  stage_path: NotRequired[str]
  r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant and stable storage."""
  file_name_suffix: NotRequired[str]
@@ -414,6 +288,10 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
  add_id_to_stage_path: NotRequired[bool]
  r"""Add the Output ID value to staging location"""
+ remove_empty_dirs: NotRequired[bool]
+ r"""Remove empty staging directories after moving files"""
+ deadletter_enabled: NotRequired[bool]
+ r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
  timeout_sec: NotRequired[float]
  r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
  flush_immediately: NotRequired[bool]
@@ -439,6 +317,10 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  ]
  response_honor_retry_after_header: NotRequired[bool]
  r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+ compress: NotRequired[OutputAzureDataExplorerCompressCompression]
+ r"""Data compression format to apply to HTTP content before it is delivered"""
+ mapping_ref: NotRequired[str]
+ r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
  concurrency: NotRequired[float]
  r"""Maximum number of ongoing requests before blocking"""
  max_payload_size_kb: NotRequired[float]
@@ -456,16 +338,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
  keep_alive: NotRequired[bool]
  r"""Disable to close the connection immediately after sending the outgoing request"""
- pq_strict_ordering: NotRequired[bool]
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
- pq_rate_per_sec: NotRequired[float]
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
- pq_mode: NotRequired[OutputAzureDataExplorerMode]
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
- pq_max_buffer_size: NotRequired[float]
- r"""The maximum number of events to hold in memory before writing the events to disk"""
- pq_max_backpressure_sec: NotRequired[float]
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
  pq_max_file_size: NotRequired[str]
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
  pq_max_size: NotRequired[str]
@@ -476,7 +348,11 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
  r"""Codec to use to compress the persisted data"""
  pq_on_backpressure: NotRequired[OutputAzureDataExplorerQueueFullBehavior]
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+ pq_mode: NotRequired[OutputAzureDataExplorerMode]
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
  pq_controls: NotRequired[OutputAzureDataExplorerPqControlsTypedDict]
+ empty_dir_cleanup_sec: NotRequired[float]
+ r"""How frequently, in seconds, to clean up empty directories"""


  class OutputAzureDataExplorer(BaseModel):
@@ -529,11 +405,11 @@ class OutputAzureDataExplorer(BaseModel):

  oauth_endpoint: Annotated[
  Annotated[
- Optional[OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint],
+ Optional[MicrosoftEntraIDAuthenticationEndpoint],
  PlainValidator(validate_open_enum(False)),
  ],
  pydantic.Field(alias="oauthEndpoint"),
- ] = OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
+ ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
  r"""Endpoint used to acquire authentication tokens from Azure"""

  oauth_type: Annotated[
@@ -555,139 +431,31 @@ class OutputAzureDataExplorer(BaseModel):

  certificate: Optional[OutputAzureDataExplorerCertificate] = None

- format_: Annotated[
- Annotated[
- Optional[OutputAzureDataExplorerDataFormat],
- PlainValidator(validate_open_enum(False)),
- ],
- pydantic.Field(alias="format"),
- ] = OutputAzureDataExplorerDataFormat.JSON
- r"""Format of the output data"""
-
- compress: Annotated[
- Optional[OutputAzureDataExplorerCompressCompression],
- PlainValidator(validate_open_enum(False)),
- ] = OutputAzureDataExplorerCompressCompression.GZIP
- r"""Data compression format to apply to HTTP content before it is delivered"""
-
- compression_level: Annotated[
- Annotated[
- Optional[OutputAzureDataExplorerCompressionLevel],
- PlainValidator(validate_open_enum(False)),
- ],
- pydantic.Field(alias="compressionLevel"),
- ] = OutputAzureDataExplorerCompressionLevel.BEST_SPEED
- r"""Compression level to apply before moving files to final destination"""
-
- automatic_schema: Annotated[
- Optional[bool], pydantic.Field(alias="automaticSchema")
- ] = False
- r"""Automatically calculate the schema based on the events of each Parquet file generated"""
-
- parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
- None
- )
- r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
-
- parquet_version: Annotated[
- Annotated[
- Optional[OutputAzureDataExplorerParquetVersion],
- PlainValidator(validate_open_enum(False)),
- ],
- pydantic.Field(alias="parquetVersion"),
- ] = OutputAzureDataExplorerParquetVersion.PARQUET_2_6
- r"""Determines which data types are supported and how they are represented"""
+ ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
+ r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""

- parquet_data_page_version: Annotated[
+ on_backpressure: Annotated[
  Annotated[
- Optional[OutputAzureDataExplorerDataPageVersion],
+ Optional[OutputAzureDataExplorerBackpressureBehavior],
  PlainValidator(validate_open_enum(False)),
  ],
- pydantic.Field(alias="parquetDataPageVersion"),
- ] = OutputAzureDataExplorerDataPageVersion.DATA_PAGE_V2
- r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
-
- parquet_row_group_length: Annotated[
- Optional[float], pydantic.Field(alias="parquetRowGroupLength")
- ] = 10000
- r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
-
- parquet_page_size: Annotated[
- Optional[str], pydantic.Field(alias="parquetPageSize")
- ] = "1MB"
- r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
-
- should_log_invalid_rows: Annotated[
- Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
- ] = None
- r"""Log up to 3 rows that @{product} skips due to data mismatch"""
-
- key_value_metadata: Annotated[
- Optional[List[OutputAzureDataExplorerKeyValueMetadatum]],
- pydantic.Field(alias="keyValueMetadata"),
- ] = None
- r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
-
- enable_statistics: Annotated[
- Optional[bool], pydantic.Field(alias="enableStatistics")
- ] = True
- r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
-
- enable_write_page_index: Annotated[
- Optional[bool], pydantic.Field(alias="enableWritePageIndex")
- ] = True
- r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
-
- enable_page_checksum: Annotated[
- Optional[bool], pydantic.Field(alias="enablePageChecksum")
- ] = False
- r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
-
- remove_empty_dirs: Annotated[
- Optional[bool], pydantic.Field(alias="removeEmptyDirs")
- ] = True
- r"""Remove empty staging directories after moving files"""
-
- empty_dir_cleanup_sec: Annotated[
- Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
- ] = 300
- r"""How frequently, in seconds, to clean up empty directories"""
-
- deadletter_enabled: Annotated[
- Optional[bool], pydantic.Field(alias="deadletterEnabled")
- ] = False
- r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
-
- deadletter_path: Annotated[
- Optional[str], pydantic.Field(alias="deadletterPath")
- ] = "$CRIBL_HOME/state/outputs/dead-letter"
- r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
-
- max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
- r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+ pydantic.Field(alias="onBackpressure"),
+ ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
+ r"""How to handle events when all receivers are exerting backpressure"""

  is_mapping_obj: Annotated[Optional[bool], pydantic.Field(alias="isMappingObj")] = (
  False
  )
  r"""Send a JSON mapping object instead of specifying an existing named data mapping"""

- mapping_obj: Annotated[Optional[str], pydantic.Field(alias="mappingObj")] = None
- r"""Enter a JSON object that defines your desired data mapping"""
-
- mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
- r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
-
- ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
- r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
-
- on_backpressure: Annotated[
+ format_: Annotated[
  Annotated[
- Optional[OutputAzureDataExplorerBackpressureBehavior],
+ Optional[OutputAzureDataExplorerDataFormat],
  PlainValidator(validate_open_enum(False)),
  ],
- pydantic.Field(alias="onBackpressure"),
- ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
- r"""How to handle events when all receivers are exerting backpressure"""
+ pydantic.Field(alias="format"),
+ ] = OutputAzureDataExplorerDataFormat.JSON
+ r"""Format of the output data"""

  stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
  "$CRIBL_HOME/state/outputs/staging"
@@ -738,6 +506,16 @@ class OutputAzureDataExplorer(BaseModel):
  ] = True
  r"""Add the Output ID value to staging location"""

+ remove_empty_dirs: Annotated[
+ Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+ ] = True
+ r"""Remove empty staging directories after moving files"""
+
+ deadletter_enabled: Annotated[
+ Optional[bool], pydantic.Field(alias="deadletterEnabled")
+ ] = False
+ r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
  timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 30
  r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""

@@ -794,6 +572,15 @@ class OutputAzureDataExplorer(BaseModel):
  ] = True
  r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

+ compress: Annotated[
+ Optional[OutputAzureDataExplorerCompressCompression],
+ PlainValidator(validate_open_enum(False)),
+ ] = OutputAzureDataExplorerCompressCompression.GZIP
+ r"""Data compression format to apply to HTTP content before it is delivered"""
+
+ mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
+ r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
+
  concurrency: Optional[float] = 5
  r"""Maximum number of ongoing requests before blocking"""

@@ -828,35 +615,6 @@ class OutputAzureDataExplorer(BaseModel):
  keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
  r"""Disable to close the connection immediately after sending the outgoing request"""

- pq_strict_ordering: Annotated[
- Optional[bool], pydantic.Field(alias="pqStrictOrdering")
- ] = True
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
- pq_rate_per_sec: Annotated[
- Optional[float], pydantic.Field(alias="pqRatePerSec")
- ] = 0
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
- pq_mode: Annotated[
- Annotated[
- Optional[OutputAzureDataExplorerMode],
- PlainValidator(validate_open_enum(False)),
- ],
- pydantic.Field(alias="pqMode"),
- ] = OutputAzureDataExplorerMode.ERROR
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
- pq_max_buffer_size: Annotated[
- Optional[float], pydantic.Field(alias="pqMaxBufferSize")
- ] = 42
- r"""The maximum number of events to hold in memory before writing the events to disk"""
-
- pq_max_backpressure_sec: Annotated[
- Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
- ] = 30
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
  pq_max_file_size: Annotated[
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
  ] = "1 MB"
@@ -888,143 +646,20 @@ class OutputAzureDataExplorer(BaseModel):
  ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+ pq_mode: Annotated[
+ Annotated[
+ Optional[OutputAzureDataExplorerMode],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="pqMode"),
+ ] = OutputAzureDataExplorerMode.ERROR
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
  pq_controls: Annotated[
  Optional[OutputAzureDataExplorerPqControls], pydantic.Field(alias="pqControls")
  ] = None

- @field_serializer("ingest_mode")
- def serialize_ingest_mode(self, value):
- if isinstance(value, str):
- try:
- return models.IngestionMode(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("oauth_endpoint")
- def serialize_oauth_endpoint(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint(
- value
- )
- except ValueError:
- return value
- return value
-
- @field_serializer("oauth_type")
- def serialize_oauth_type(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerAuthenticationMethod(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("format_")
- def serialize_format_(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerDataFormat(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("compress")
- def serialize_compress(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerCompressCompression(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("compression_level")
- def serialize_compression_level(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerCompressionLevel(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("parquet_version")
- def serialize_parquet_version(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerParquetVersion(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("parquet_data_page_version")
- def serialize_parquet_data_page_version(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerDataPageVersion(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("on_backpressure")
- def serialize_on_backpressure(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerBackpressureBehavior(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("on_disk_full_backpressure")
- def serialize_on_disk_full_backpressure(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerDiskSpaceProtection(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("report_level")
- def serialize_report_level(self, value):
- if isinstance(value, str):
- try:
- return models.ReportLevel(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("report_method")
- def serialize_report_method(self, value):
- if isinstance(value, str):
- try:
- return models.ReportMethod(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("pq_mode")
- def serialize_pq_mode(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerMode(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("pq_compress")
- def serialize_pq_compress(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerPqCompressCompression(value)
- except ValueError:
- return value
- return value
-
- @field_serializer("pq_on_backpressure")
- def serialize_pq_on_backpressure(self, value):
- if isinstance(value, str):
- try:
- return models.OutputAzureDataExplorerQueueFullBehavior(value)
- except ValueError:
- return value
- return value
+ empty_dir_cleanup_sec: Annotated[
+ Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
+ ] = 300
+ r"""How frequently, in seconds, to clean up empty directories"""