cribl-control-plane 0.2.1rc11__py3-none-any.whl → 0.4.0a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/acl.py +4 -4
- cribl_control_plane/branches.py +4 -4
- cribl_control_plane/commits.py +28 -28
- cribl_control_plane/commits_files.py +8 -8
- cribl_control_plane/configs_versions.py +8 -4
- cribl_control_plane/destinations.py +20 -20
- cribl_control_plane/destinations_pq.py +8 -8
- cribl_control_plane/groups_sdk.py +48 -24
- cribl_control_plane/hectokens.py +16 -8
- cribl_control_plane/lakedatasets.py +40 -20
- cribl_control_plane/models/__init__.py +568 -214
- cribl_control_plane/models/createconfiggroupbyproductop.py +20 -1
- cribl_control_plane/models/createcribllakedatasetbylakeidop.py +19 -1
- cribl_control_plane/models/createinputhectokenbyidop.py +20 -1
- cribl_control_plane/models/{countedlistgitdiffresult.py → createinputop.py} +9 -5
- cribl_control_plane/models/{countedlistgitshowresult.py → createoutputop.py} +9 -5
- cribl_control_plane/models/createoutputtestbyidop.py +20 -1
- cribl_control_plane/models/{countedlistpackinstallinfo.py → createpacksop.py} +6 -2
- cribl_control_plane/models/createpipelineop.py +24 -0
- cribl_control_plane/models/createroutesappendbyidop.py +20 -2
- cribl_control_plane/models/createversioncommitop.py +19 -1
- cribl_control_plane/models/{countedliststring.py → createversionpushop.py} +6 -2
- cribl_control_plane/models/createversionrevertop.py +19 -1
- cribl_control_plane/models/createversionundoop.py +18 -1
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +20 -1
- cribl_control_plane/models/deletecribllakedatasetbylakeidandidop.py +20 -1
- cribl_control_plane/models/deleteinputbyidop.py +20 -1
- cribl_control_plane/models/deleteoutputbyidop.py +20 -1
- cribl_control_plane/models/deleteoutputpqbyidop.py +19 -1
- cribl_control_plane/models/deletepacksbyidop.py +20 -1
- cribl_control_plane/models/deletepipelinebyidop.py +20 -1
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +19 -1
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +19 -1
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +19 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +19 -1
- cribl_control_plane/models/getcribllakedatasetbylakeidandidop.py +20 -1
- cribl_control_plane/models/getcribllakedatasetbylakeidop.py +20 -1
- cribl_control_plane/models/getinputbyidop.py +20 -1
- cribl_control_plane/models/getmasterworkerentryop.py +18 -1
- cribl_control_plane/models/getoutputbyidop.py +20 -1
- cribl_control_plane/models/getoutputpqbyidop.py +20 -1
- cribl_control_plane/models/getoutputsamplesbyidop.py +20 -1
- cribl_control_plane/models/getpacksbyidop.py +20 -1
- cribl_control_plane/models/getpacksop.py +19 -1
- cribl_control_plane/models/getpipelinebyidop.py +20 -1
- cribl_control_plane/models/getroutesbyidop.py +20 -1
- cribl_control_plane/models/getsummaryop.py +19 -1
- cribl_control_plane/models/{countedlistbranchinfo.py → getversionbranchop.py} +6 -2
- cribl_control_plane/models/getversioncountop.py +19 -1
- cribl_control_plane/models/getversiondiffop.py +19 -1
- cribl_control_plane/models/getversionfilesop.py +19 -1
- cribl_control_plane/models/{countedlistgitinfo.py → getversioninfoop.py} +6 -2
- cribl_control_plane/models/getversionop.py +19 -1
- cribl_control_plane/models/getversionshowop.py +19 -1
- cribl_control_plane/models/getversionstatusop.py +19 -1
- cribl_control_plane/models/input.py +18 -15
- cribl_control_plane/models/inputcloudflarehec.py +513 -0
- cribl_control_plane/models/inputfile.py +7 -0
- cribl_control_plane/models/listconfiggroupbyproductop.py +19 -1
- cribl_control_plane/models/{countedlistinput.py → listinputop.py} +6 -2
- cribl_control_plane/models/listmasterworkerentryop.py +19 -1
- cribl_control_plane/models/{countedlistoutput.py → listoutputop.py} +6 -2
- cribl_control_plane/models/{countedlistpipeline.py → listpipelineop.py} +6 -2
- cribl_control_plane/models/{countedlistroutes.py → listroutesop.py} +6 -2
- cribl_control_plane/models/output.py +23 -17
- cribl_control_plane/models/outputazureblob.py +14 -0
- cribl_control_plane/models/outputazuredataexplorer.py +7 -0
- cribl_control_plane/models/outputchronicle.py +5 -0
- cribl_control_plane/models/outputcloudflarer2.py +632 -0
- cribl_control_plane/models/outputcribllake.py +14 -0
- cribl_control_plane/models/outputdatabricks.py +19 -0
- cribl_control_plane/models/outputdls3.py +14 -0
- cribl_control_plane/models/outputexabeam.py +7 -0
- cribl_control_plane/models/outputfilesystem.py +14 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +14 -0
- cribl_control_plane/models/outputmicrosoftfabric.py +540 -0
- cribl_control_plane/models/outputminio.py +19 -4
- cribl_control_plane/models/outputs3.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +14 -0
- cribl_control_plane/models/outputsyslog.py +7 -0
- cribl_control_plane/models/runnablejobcollection.py +0 -8
- cribl_control_plane/models/runnablejobexecutor.py +0 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +0 -4
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +19 -1
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +20 -1
- cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +20 -1
- cribl_control_plane/models/updateinputbyidop.py +19 -1
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +20 -1
- cribl_control_plane/models/updateoutputbyidop.py +19 -1
- cribl_control_plane/models/updatepacksbyidop.py +20 -1
- cribl_control_plane/models/updatepipelinebyidop.py +19 -1
- cribl_control_plane/models/updateroutesbyidop.py +19 -1
- cribl_control_plane/nodes.py +12 -8
- cribl_control_plane/packs.py +20 -20
- cribl_control_plane/pipelines.py +20 -20
- cribl_control_plane/routes_sdk.py +20 -16
- cribl_control_plane/samples.py +8 -8
- cribl_control_plane/sources.py +20 -20
- cribl_control_plane/statuses.py +4 -4
- cribl_control_plane/summaries.py +4 -8
- cribl_control_plane/teams.py +4 -4
- cribl_control_plane/utils/retries.py +69 -5
- cribl_control_plane/utils/unmarshal_json_response.py +15 -1
- cribl_control_plane/versions_configs.py +4 -4
- {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a6.dist-info}/METADATA +6 -14
- {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a6.dist-info}/RECORD +109 -123
- cribl_control_plane-0.4.0a6.dist-info/licenses/LICENSE +201 -0
- cribl_control_plane/models/countedlistconfiggroup.py +0 -20
- cribl_control_plane/models/countedlistcribllakedataset.py +0 -20
- cribl_control_plane/models/countedlistdistributedsummary.py +0 -20
- cribl_control_plane/models/countedlistgitcommitsummary.py +0 -20
- cribl_control_plane/models/countedlistgitcountresult.py +0 -20
- cribl_control_plane/models/countedlistgitfilesresponse.py +0 -20
- cribl_control_plane/models/countedlistgitlogresult.py +0 -20
- cribl_control_plane/models/countedlistgitrevertresult.py +0 -20
- cribl_control_plane/models/countedlistgitstatusresult.py +0 -20
- cribl_control_plane/models/countedlistinputsplunkhec.py +0 -20
- cribl_control_plane/models/countedlistjobinfo.py +0 -20
- cribl_control_plane/models/countedlistmasterworkerentry.py +0 -20
- cribl_control_plane/models/countedlistnumber.py +0 -19
- cribl_control_plane/models/countedlistobject.py +0 -19
- cribl_control_plane/models/countedlistoutputsamplesresponse.py +0 -20
- cribl_control_plane/models/countedlistoutputtestresponse.py +0 -20
- cribl_control_plane/models/countedlistpackinfo.py +0 -20
- cribl_control_plane/models/countedlistteamaccesscontrollist.py +0 -20
- cribl_control_plane/models/countedlistuseraccesscontrollist.py +0 -20
- {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a6.dist-info}/WHEEL +0 -0
--- a/cribl_control_plane/models/outputdatabricks.py
+++ b/cribl_control_plane/models/outputdatabricks.py
@@ -146,6 +146,8 @@ class OutputDatabricksTypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputDatabricksDiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     scope: NotRequired[str]
     r"""OAuth scope for Unity Catalog authentication"""
     catalog: NotRequired[str]
@@ -154,6 +156,8 @@ class OutputDatabricksTypedDict(TypedDict):
     r"""Name of the catalog schema to use for the output"""
     events_volume_name: NotRequired[str]
     r"""Name of the events volume in Databricks"""
+    timeout_sec: NotRequired[float]
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
     description: NotRequired[str]
     compress: NotRequired[OutputDatabricksCompression]
     r"""Data compression format to apply to HTTP content before it is delivered"""
@@ -183,6 +187,8 @@ class OutputDatabricksTypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -311,6 +317,11 @@ class OutputDatabricks(BaseModel):
     ] = OutputDatabricksDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     scope: Optional[str] = "all-apis"
     r"""OAuth scope for Unity Catalog authentication"""
 
@@ -325,6 +336,9 @@ class OutputDatabricks(BaseModel):
     ] = "events"
     r"""Name of the events volume in Databricks"""
 
+    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 60
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+
     description: Optional[str] = None
 
     compress: Annotated[
@@ -410,6 +424,11 @@ class OutputDatabricks(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""
 
+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
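Taken together, the OutputDatabricks hunks add three optional knobs: force_close_on_shutdown (default False, wire name forceCloseOnShutdown), timeout_sec (default 60, wire name timeoutSec), and directory_batch_size (default 1000, wire name directoryBatchSize). A minimal sketch of how this alias pattern behaves, using a hypothetical stand-in model (DatabricksKnobs) rather than the real OutputDatabricks, whose required fields are not shown in this diff:

```python
from typing import Annotated, Optional

import pydantic
from pydantic import BaseModel


class DatabricksKnobs(BaseModel):
    """Hypothetical stand-in mirroring the fields this diff adds to OutputDatabricks."""

    # Accept either the Python field name or the camelCase alias on input.
    model_config = pydantic.ConfigDict(populate_by_name=True)

    force_close_on_shutdown: Annotated[
        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
    ] = False
    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 60
    directory_batch_size: Annotated[
        Optional[float], pydantic.Field(alias="directoryBatchSize")
    ] = 1000


# Parse a camelCase API payload into snake_case attributes...
knobs = DatabricksKnobs.model_validate({"forceCloseOnShutdown": True, "timeoutSec": 120})
assert knobs.force_close_on_shutdown is True
assert knobs.timeout_sec == 120.0

# ...and serialize back out under the wire names the API expects.
print(knobs.model_dump(by_alias=True))
# {'forceCloseOnShutdown': True, 'timeoutSec': 120.0, 'directoryBatchSize': 1000}
```

Whether the real model also accepts the Python field names on input depends on its model_config, which this diff does not show; the stand-in enables populate_by_name explicitly.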
--- a/cribl_control_plane/models/outputdls3.py
+++ b/cribl_control_plane/models/outputdls3.py
@@ -234,6 +234,8 @@ class OutputDlS3TypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputDlS3DiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     max_file_open_time_sec: NotRequired[float]
     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
     max_file_idle_time_sec: NotRequired[float]
@@ -279,6 +281,8 @@ class OutputDlS3TypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -471,6 +475,11 @@ class OutputDlS3(BaseModel):
     ] = OutputDlS3DiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     max_file_open_time_sec: Annotated[
         Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
     ] = 300
@@ -592,6 +601,11 @@ class OutputDlS3(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""
 
+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
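The same two fields also land on the TypedDict side. Because they are NotRequired, existing dict literals typed as OutputDlS3TypedDict keep type-checking, and new payloads can opt in key by key. A sketch with a mirrored mini-TypedDict (DlS3Knobs is a hypothetical name, not part of the SDK):

```python
from typing_extensions import NotRequired, TypedDict


class DlS3Knobs(TypedDict):
    """Hypothetical mirror of the keys this diff adds to OutputDlS3TypedDict."""

    force_close_on_shutdown: NotRequired[bool]
    directory_batch_size: NotRequired[float]


# Both keys are NotRequired, so an empty dict and a partial dict both satisfy
# the type checker; payloads written against the old schema keep working.
unchanged: DlS3Knobs = {}
tuned: DlS3Knobs = {"force_close_on_shutdown": True, "directory_batch_size": 100}
```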
--- a/cribl_control_plane/models/outputexabeam.py
+++ b/cribl_control_plane/models/outputexabeam.py
@@ -137,6 +137,8 @@ class OutputExabeamTypedDict(TypedDict):
     description: NotRequired[str]
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -301,6 +303,11 @@ class OutputExabeam(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""
 
+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
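The directory_batch_size docstring documents bounds (minimum 10, maximum 10000) that nothing in this diff appears to enforce at the model level. Callers that compute the value may want to clamp it before sending; clamp_batch_size below is a hypothetical helper, not an SDK function:

```python
def clamp_batch_size(value: float, lo: float = 10, hi: float = 10_000) -> float:
    """Clamp a directory_batch_size value into the range the docstring documents."""
    return max(lo, min(hi, value))


assert clamp_batch_size(5) == 10            # below the documented minimum
assert clamp_batch_size(500) == 500         # in range, unchanged
assert clamp_batch_size(50_000) == 10_000   # above the documented maximum
```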
--- a/cribl_control_plane/models/outputfilesystem.py
+++ b/cribl_control_plane/models/outputfilesystem.py
@@ -140,6 +140,8 @@ class OutputFilesystemTypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputFilesystemDiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     description: NotRequired[str]
     compress: NotRequired[OutputFilesystemCompression]
     r"""Data compression format to apply to HTTP content before it is delivered"""
@@ -169,6 +171,8 @@ class OutputFilesystemTypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -286,6 +290,11 @@ class OutputFilesystem(BaseModel):
     ] = OutputFilesystemDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     description: Optional[str] = None
 
     compress: Annotated[
@@ -371,6 +380,11 @@ class OutputFilesystem(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""
 
+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
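Because each new field carries its default and alias in pydantic Field metadata, the wire behavior can be confirmed at runtime instead of by reading the source. A sketch, assuming OutputFilesystem is re-exported from cribl_control_plane.models (that import path is an assumption based on the models/__init__.py changes above, not something this diff shows directly):

```python
# Import path is an assumption: the models package re-exports heavily per the
# __init__.py changes above, but this exact name is not shown in the diff.
from cribl_control_plane.models import OutputFilesystem

for name in ("force_close_on_shutdown", "directory_batch_size"):
    field = OutputFilesystem.model_fields[name]
    print(f"{name}: alias={field.alias!r} default={field.default!r}")
# Expected, per this diff:
#   force_close_on_shutdown: alias='forceCloseOnShutdown' default=False
#   directory_batch_size: alias='directoryBatchSize' default=1000
```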
--- a/cribl_control_plane/models/outputgooglecloudstorage.py
+++ b/cribl_control_plane/models/outputgooglecloudstorage.py
@@ -211,6 +211,8 @@ class OutputGoogleCloudStorageTypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputGoogleCloudStorageDiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     description: NotRequired[str]
     compress: NotRequired[OutputGoogleCloudStorageCompression]
     r"""Data compression format to apply to HTTP content before it is delivered"""
@@ -242,6 +244,8 @@ class OutputGoogleCloudStorageTypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -426,6 +430,11 @@ class OutputGoogleCloudStorage(BaseModel):
     ] = OutputGoogleCloudStorageDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     description: Optional[str] = None
 
     compress: Annotated[
@@ -512,6 +521,11 @@ class OutputGoogleCloudStorage(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""
 
+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
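Since the same force_close_on_shutdown/directory_batch_size pair lands in all five of these file-based destinations (Databricks, Data Lake S3, Exabeam, Filesystem, Google Cloud Storage), raw JSON configs can be tuned uniformly with one loop. The dicts below are illustrative (the ids are invented); only the camelCase keys come from the pydantic aliases in this diff:

```python
# Illustrative raw output configs; only the camelCase keys added in the loop
# are taken from this diff.
output_configs = [
    {"id": "my-filesystem-out"},
    {"id": "my-gcs-out"},
]

for cfg in output_configs:
    cfg["forceCloseOnShutdown"] = True  # flush staged files on orderly shutdown
    cfg["directoryBatchSize"] = 500     # stays within the documented 10-10000 range
```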