cribl-control-plane 0.2.1rc11__py3-none-any.whl → 0.4.0a12__py3-none-any.whl

This diff shows the content of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.

This version of cribl-control-plane has been flagged as potentially problematic.

Files changed (136)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/acl.py +6 -4
  3. cribl_control_plane/basesdk.py +6 -0
  4. cribl_control_plane/branches.py +8 -4
  5. cribl_control_plane/commits.py +42 -28
  6. cribl_control_plane/commits_files.py +12 -8
  7. cribl_control_plane/configs_versions.py +10 -4
  8. cribl_control_plane/destinations.py +30 -20
  9. cribl_control_plane/destinations_pq.py +12 -8
  10. cribl_control_plane/groups_sdk.py +60 -24
  11. cribl_control_plane/health.py +2 -0
  12. cribl_control_plane/hectokens.py +32 -8
  13. cribl_control_plane/lakedatasets.py +86 -20
  14. cribl_control_plane/models/__init__.py +568 -214
  15. cribl_control_plane/models/addhectokenrequest.py +7 -1
  16. cribl_control_plane/models/createconfiggroupbyproductop.py +20 -1
  17. cribl_control_plane/models/createcribllakedatasetbylakeidop.py +19 -1
  18. cribl_control_plane/models/createinputhectokenbyidop.py +20 -1
  19. cribl_control_plane/models/{countedlistgitdiffresult.py → createinputop.py} +9 -5
  20. cribl_control_plane/models/{countedlistgitshowresult.py → createoutputop.py} +9 -5
  21. cribl_control_plane/models/createoutputtestbyidop.py +20 -1
  22. cribl_control_plane/models/{countedlistpackinstallinfo.py → createpacksop.py} +6 -2
  23. cribl_control_plane/models/createpipelineop.py +24 -0
  24. cribl_control_plane/models/createroutesappendbyidop.py +20 -2
  25. cribl_control_plane/models/createversioncommitop.py +19 -1
  26. cribl_control_plane/models/{countedliststring.py → createversionpushop.py} +6 -2
  27. cribl_control_plane/models/createversionrevertop.py +19 -1
  28. cribl_control_plane/models/createversionundoop.py +18 -1
  29. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +20 -1
  30. cribl_control_plane/models/deletecribllakedatasetbylakeidandidop.py +20 -1
  31. cribl_control_plane/models/deleteinputbyidop.py +20 -1
  32. cribl_control_plane/models/deleteoutputbyidop.py +20 -1
  33. cribl_control_plane/models/deleteoutputpqbyidop.py +19 -1
  34. cribl_control_plane/models/deletepacksbyidop.py +20 -1
  35. cribl_control_plane/models/deletepipelinebyidop.py +20 -1
  36. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +19 -1
  37. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +19 -1
  38. cribl_control_plane/models/getconfiggroupbyproductandidop.py +19 -1
  39. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +19 -1
  40. cribl_control_plane/models/getcribllakedatasetbylakeidandidop.py +20 -1
  41. cribl_control_plane/models/getcribllakedatasetbylakeidop.py +79 -2
  42. cribl_control_plane/models/getinputbyidop.py +20 -1
  43. cribl_control_plane/models/getmasterworkerentryop.py +18 -1
  44. cribl_control_plane/models/getoutputbyidop.py +20 -1
  45. cribl_control_plane/models/getoutputpqbyidop.py +20 -1
  46. cribl_control_plane/models/getoutputsamplesbyidop.py +20 -1
  47. cribl_control_plane/models/getpacksbyidop.py +20 -1
  48. cribl_control_plane/models/getpacksop.py +19 -1
  49. cribl_control_plane/models/getpipelinebyidop.py +20 -1
  50. cribl_control_plane/models/getroutesbyidop.py +20 -1
  51. cribl_control_plane/models/getsummaryop.py +19 -1
  52. cribl_control_plane/models/{countedlistbranchinfo.py → getversionbranchop.py} +6 -2
  53. cribl_control_plane/models/getversioncountop.py +19 -1
  54. cribl_control_plane/models/getversiondiffop.py +19 -1
  55. cribl_control_plane/models/getversionfilesop.py +19 -1
  56. cribl_control_plane/models/{countedlistgitinfo.py → getversioninfoop.py} +6 -2
  57. cribl_control_plane/models/getversionop.py +19 -1
  58. cribl_control_plane/models/getversionshowop.py +19 -1
  59. cribl_control_plane/models/getversionstatusop.py +19 -1
  60. cribl_control_plane/models/input.py +18 -15
  61. cribl_control_plane/models/inputcloudflarehec.py +513 -0
  62. cribl_control_plane/models/inputfile.py +7 -0
  63. cribl_control_plane/models/listconfiggroupbyproductop.py +19 -1
  64. cribl_control_plane/models/{countedlistinput.py → listinputop.py} +6 -2
  65. cribl_control_plane/models/listmasterworkerentryop.py +19 -1
  66. cribl_control_plane/models/{countedlistoutput.py → listoutputop.py} +6 -2
  67. cribl_control_plane/models/{countedlistpipeline.py → listpipelineop.py} +6 -2
  68. cribl_control_plane/models/{countedlistroutes.py → listroutesop.py} +6 -2
  69. cribl_control_plane/models/output.py +23 -17
  70. cribl_control_plane/models/outputazureblob.py +14 -0
  71. cribl_control_plane/models/outputazuredataexplorer.py +7 -0
  72. cribl_control_plane/models/outputchronicle.py +5 -0
  73. cribl_control_plane/models/outputcloudflarer2.py +632 -0
  74. cribl_control_plane/models/outputcribllake.py +14 -0
  75. cribl_control_plane/models/outputdatabricks.py +19 -0
  76. cribl_control_plane/models/outputdls3.py +14 -0
  77. cribl_control_plane/models/outputexabeam.py +7 -0
  78. cribl_control_plane/models/outputfilesystem.py +14 -0
  79. cribl_control_plane/models/outputgooglecloudstorage.py +14 -0
  80. cribl_control_plane/models/outputmicrosoftfabric.py +540 -0
  81. cribl_control_plane/models/outputminio.py +19 -4
  82. cribl_control_plane/models/outputnetflow.py +7 -0
  83. cribl_control_plane/models/outputs3.py +14 -0
  84. cribl_control_plane/models/outputsecuritylake.py +14 -0
  85. cribl_control_plane/models/outputsyslog.py +7 -0
  86. cribl_control_plane/models/runnablejobcollection.py +0 -8
  87. cribl_control_plane/models/runnablejobexecutor.py +0 -4
  88. cribl_control_plane/models/runnablejobscheduledsearch.py +0 -4
  89. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +19 -1
  90. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +20 -1
  91. cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +20 -1
  92. cribl_control_plane/models/updatehectokenrequest.py +7 -1
  93. cribl_control_plane/models/updateinputbyidop.py +19 -1
  94. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +20 -1
  95. cribl_control_plane/models/updateoutputbyidop.py +19 -1
  96. cribl_control_plane/models/updatepacksbyidop.py +20 -1
  97. cribl_control_plane/models/updatepipelinebyidop.py +19 -1
  98. cribl_control_plane/models/updateroutesbyidop.py +19 -1
  99. cribl_control_plane/nodes.py +16 -8
  100. cribl_control_plane/packs.py +32 -20
  101. cribl_control_plane/pipelines.py +30 -20
  102. cribl_control_plane/routes_sdk.py +28 -16
  103. cribl_control_plane/samples.py +12 -8
  104. cribl_control_plane/sources.py +30 -20
  105. cribl_control_plane/statuses.py +6 -4
  106. cribl_control_plane/summaries.py +6 -8
  107. cribl_control_plane/teams.py +6 -4
  108. cribl_control_plane/tokens.py +2 -0
  109. cribl_control_plane/utils/forms.py +21 -10
  110. cribl_control_plane/utils/queryparams.py +14 -2
  111. cribl_control_plane/utils/retries.py +69 -5
  112. cribl_control_plane/utils/unmarshal_json_response.py +15 -1
  113. cribl_control_plane/versions_configs.py +6 -4
  114. {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a12.dist-info}/METADATA +28 -36
  115. {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a12.dist-info}/RECORD +117 -131
  116. cribl_control_plane-0.4.0a12.dist-info/licenses/LICENSE +201 -0
  117. cribl_control_plane/models/countedlistconfiggroup.py +0 -20
  118. cribl_control_plane/models/countedlistcribllakedataset.py +0 -20
  119. cribl_control_plane/models/countedlistdistributedsummary.py +0 -20
  120. cribl_control_plane/models/countedlistgitcommitsummary.py +0 -20
  121. cribl_control_plane/models/countedlistgitcountresult.py +0 -20
  122. cribl_control_plane/models/countedlistgitfilesresponse.py +0 -20
  123. cribl_control_plane/models/countedlistgitlogresult.py +0 -20
  124. cribl_control_plane/models/countedlistgitrevertresult.py +0 -20
  125. cribl_control_plane/models/countedlistgitstatusresult.py +0 -20
  126. cribl_control_plane/models/countedlistinputsplunkhec.py +0 -20
  127. cribl_control_plane/models/countedlistjobinfo.py +0 -20
  128. cribl_control_plane/models/countedlistmasterworkerentry.py +0 -20
  129. cribl_control_plane/models/countedlistnumber.py +0 -19
  130. cribl_control_plane/models/countedlistobject.py +0 -19
  131. cribl_control_plane/models/countedlistoutputsamplesresponse.py +0 -20
  132. cribl_control_plane/models/countedlistoutputtestresponse.py +0 -20
  133. cribl_control_plane/models/countedlistpackinfo.py +0 -20
  134. cribl_control_plane/models/countedlistteamaccesscontrollist.py +0 -20
  135. cribl_control_plane/models/countedlistuseraccesscontrollist.py +0 -20
  136. {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a12.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputdatabricks.py

@@ -146,6 +146,8 @@ class OutputDatabricksTypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputDatabricksDiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     scope: NotRequired[str]
     r"""OAuth scope for Unity Catalog authentication"""
     catalog: NotRequired[str]
@@ -154,6 +156,8 @@ class OutputDatabricksTypedDict(TypedDict):
     r"""Name of the catalog schema to use for the output"""
     events_volume_name: NotRequired[str]
     r"""Name of the events volume in Databricks"""
+    timeout_sec: NotRequired[float]
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
     description: NotRequired[str]
     compress: NotRequired[OutputDatabricksCompression]
     r"""Data compression format to apply to HTTP content before it is delivered"""
@@ -183,6 +187,8 @@ class OutputDatabricksTypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -311,6 +317,11 @@ class OutputDatabricks(BaseModel):
     ] = OutputDatabricksDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     scope: Optional[str] = "all-apis"
     r"""OAuth scope for Unity Catalog authentication"""

@@ -325,6 +336,9 @@ class OutputDatabricks(BaseModel):
     ] = "events"
     r"""Name of the events volume in Databricks"""

+    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 60
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+
     description: Optional[str] = None

     compress: Annotated[
@@ -410,6 +424,11 @@ class OutputDatabricks(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""

+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
cribl_control_plane/models/outputdls3.py

@@ -234,6 +234,8 @@ class OutputDlS3TypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputDlS3DiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     max_file_open_time_sec: NotRequired[float]
     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
     max_file_idle_time_sec: NotRequired[float]
@@ -279,6 +281,8 @@ class OutputDlS3TypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -471,6 +475,11 @@ class OutputDlS3(BaseModel):
     ] = OutputDlS3DiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     max_file_open_time_sec: Annotated[
         Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
     ] = 300
@@ -592,6 +601,11 @@ class OutputDlS3(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""

+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
cribl_control_plane/models/outputexabeam.py

@@ -137,6 +137,8 @@ class OutputExabeamTypedDict(TypedDict):
     description: NotRequired[str]
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -301,6 +303,11 @@ class OutputExabeam(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""

+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
cribl_control_plane/models/outputfilesystem.py

@@ -140,6 +140,8 @@ class OutputFilesystemTypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputFilesystemDiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     description: NotRequired[str]
     compress: NotRequired[OutputFilesystemCompression]
     r"""Data compression format to apply to HTTP content before it is delivered"""
@@ -169,6 +171,8 @@ class OutputFilesystemTypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -286,6 +290,11 @@ class OutputFilesystem(BaseModel):
     ] = OutputFilesystemDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     description: Optional[str] = None

     compress: Annotated[
@@ -371,6 +380,11 @@ class OutputFilesystem(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""

+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
cribl_control_plane/models/outputgooglecloudstorage.py

@@ -211,6 +211,8 @@ class OutputGoogleCloudStorageTypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputGoogleCloudStorageDiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     description: NotRequired[str]
     compress: NotRequired[OutputGoogleCloudStorageCompression]
     r"""Data compression format to apply to HTTP content before it is delivered"""
@@ -242,6 +244,8 @@ class OutputGoogleCloudStorageTypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -426,6 +430,11 @@ class OutputGoogleCloudStorage(BaseModel):
     ] = OutputGoogleCloudStorageDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     description: Optional[str] = None

     compress: Annotated[
@@ -512,6 +521,11 @@ class OutputGoogleCloudStorage(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""

+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"