cribl-control-plane 0.2.1rc2__py3-none-any.whl → 0.2.1rc4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic; consult the registry's advisory page for more details.

Files changed (96)
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/models/__init__.py +99 -3
  3. cribl_control_plane/models/input.py +14 -14
  4. cribl_control_plane/models/inputappscope.py +20 -16
  5. cribl_control_plane/models/inputconfluentcloud.py +110 -0
  6. cribl_control_plane/models/inputcriblhttp.py +20 -16
  7. cribl_control_plane/models/inputcribllakehttp.py +20 -16
  8. cribl_control_plane/models/inputcribltcp.py +20 -16
  9. cribl_control_plane/models/inputdatadogagent.py +20 -16
  10. cribl_control_plane/models/inputedgeprometheus.py +44 -36
  11. cribl_control_plane/models/inputelastic.py +44 -27
  12. cribl_control_plane/models/inputeventhub.py +118 -0
  13. cribl_control_plane/models/inputfirehose.py +20 -16
  14. cribl_control_plane/models/inputgrafana.py +39 -31
  15. cribl_control_plane/models/inputhttp.py +20 -16
  16. cribl_control_plane/models/inputhttpraw.py +20 -16
  17. cribl_control_plane/models/inputkafka.py +108 -0
  18. cribl_control_plane/models/inputloki.py +20 -16
  19. cribl_control_plane/models/inputmetrics.py +20 -16
  20. cribl_control_plane/models/inputmodeldriventelemetry.py +20 -16
  21. cribl_control_plane/models/inputopentelemetry.py +19 -15
  22. cribl_control_plane/models/inputprometheus.py +44 -36
  23. cribl_control_plane/models/inputprometheusrw.py +20 -16
  24. cribl_control_plane/models/inputsplunk.py +20 -16
  25. cribl_control_plane/models/inputsplunkhec.py +19 -15
  26. cribl_control_plane/models/inputsyslog.py +39 -31
  27. cribl_control_plane/models/inputsystemmetrics.py +20 -10
  28. cribl_control_plane/models/inputtcp.py +30 -16
  29. cribl_control_plane/models/inputtcpjson.py +20 -16
  30. cribl_control_plane/models/inputwindowsmetrics.py +20 -10
  31. cribl_control_plane/models/inputwineventlogs.py +14 -0
  32. cribl_control_plane/models/inputwizwebhook.py +20 -16
  33. cribl_control_plane/models/inputzscalerhec.py +19 -15
  34. cribl_control_plane/models/output.py +22 -22
  35. cribl_control_plane/models/outputazureblob.py +7 -0
  36. cribl_control_plane/models/outputazuredataexplorer.py +283 -93
  37. cribl_control_plane/models/outputazureeventhub.py +169 -21
  38. cribl_control_plane/models/outputazurelogs.py +49 -21
  39. cribl_control_plane/models/outputchronicle.py +49 -21
  40. cribl_control_plane/models/outputclickhouse.py +49 -21
  41. cribl_control_plane/models/outputcloudwatch.py +49 -21
  42. cribl_control_plane/models/outputconfluentcloud.py +167 -22
  43. cribl_control_plane/models/outputcriblhttp.py +49 -21
  44. cribl_control_plane/models/outputcribltcp.py +49 -21
  45. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -22
  46. cribl_control_plane/models/outputdatabricks.py +7 -0
  47. cribl_control_plane/models/outputdatadog.py +49 -21
  48. cribl_control_plane/models/outputdataset.py +49 -21
  49. cribl_control_plane/models/outputdls3.py +7 -0
  50. cribl_control_plane/models/outputdynatracehttp.py +49 -21
  51. cribl_control_plane/models/outputdynatraceotlp.py +49 -21
  52. cribl_control_plane/models/outputelastic.py +74 -21
  53. cribl_control_plane/models/outputelasticcloud.py +74 -21
  54. cribl_control_plane/models/outputfilesystem.py +7 -0
  55. cribl_control_plane/models/outputgooglechronicle.py +65 -22
  56. cribl_control_plane/models/outputgooglecloudlogging.py +50 -22
  57. cribl_control_plane/models/outputgooglecloudstorage.py +7 -0
  58. cribl_control_plane/models/outputgooglepubsub.py +49 -21
  59. cribl_control_plane/models/outputgrafanacloud.py +98 -42
  60. cribl_control_plane/models/outputgraphite.py +49 -21
  61. cribl_control_plane/models/outputhoneycomb.py +49 -21
  62. cribl_control_plane/models/outputhumiohec.py +49 -21
  63. cribl_control_plane/models/outputinfluxdb.py +49 -21
  64. cribl_control_plane/models/outputkafka.py +162 -19
  65. cribl_control_plane/models/outputkinesis.py +56 -21
  66. cribl_control_plane/models/outputloki.py +47 -19
  67. cribl_control_plane/models/outputminio.py +7 -0
  68. cribl_control_plane/models/outputmsk.py +54 -19
  69. cribl_control_plane/models/outputnewrelic.py +49 -21
  70. cribl_control_plane/models/outputnewrelicevents.py +50 -22
  71. cribl_control_plane/models/outputopentelemetry.py +49 -21
  72. cribl_control_plane/models/outputprometheus.py +49 -21
  73. cribl_control_plane/models/outputs3.py +7 -0
  74. cribl_control_plane/models/outputsentinel.py +49 -21
  75. cribl_control_plane/models/outputsentineloneaisiem.py +50 -22
  76. cribl_control_plane/models/outputservicenow.py +49 -21
  77. cribl_control_plane/models/outputsignalfx.py +49 -21
  78. cribl_control_plane/models/outputsns.py +47 -19
  79. cribl_control_plane/models/outputsplunk.py +49 -21
  80. cribl_control_plane/models/outputsplunkhec.py +49 -21
  81. cribl_control_plane/models/outputsplunklb.py +49 -21
  82. cribl_control_plane/models/outputsqs.py +47 -19
  83. cribl_control_plane/models/outputstatsd.py +49 -21
  84. cribl_control_plane/models/outputstatsdext.py +49 -21
  85. cribl_control_plane/models/outputsumologic.py +49 -21
  86. cribl_control_plane/models/outputsyslog.py +129 -99
  87. cribl_control_plane/models/outputtcpjson.py +49 -21
  88. cribl_control_plane/models/outputwavefront.py +49 -21
  89. cribl_control_plane/models/outputwebhook.py +49 -21
  90. cribl_control_plane/models/outputxsiam.py +47 -19
  91. cribl_control_plane/models/runnablejobcollection.py +12 -8
  92. cribl_control_plane/models/runnablejobexecutor.py +12 -8
  93. cribl_control_plane/models/runnablejobscheduledsearch.py +12 -8
  94. {cribl_control_plane-0.2.1rc2.dist-info → cribl_control_plane-0.2.1rc4.dist-info}/METADATA +1 -1
  95. {cribl_control_plane-0.2.1rc2.dist-info → cribl_control_plane-0.2.1rc4.dist-info}/RECORD +96 -96
  96. {cribl_control_plane-0.2.1rc2.dist-info → cribl_control_plane-0.2.1rc4.dist-info}/WHEEL +0 -0
@@ -109,6 +109,17 @@ class OutputWavefrontBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMet
109
109
  QUEUE = "queue"
110
110
 
111
111
 
112
+ class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
113
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
114
+
115
+ # Error
116
+ ERROR = "error"
117
+ # Backpressure
118
+ ALWAYS = "always"
119
+ # Always On
120
+ BACKPRESSURE = "backpressure"
121
+
122
+
112
123
  class OutputWavefrontCompression(str, Enum, metaclass=utils.OpenEnumMeta):
113
124
  r"""Codec to use to compress the persisted data"""
114
125
 
@@ -127,17 +138,6 @@ class OutputWavefrontQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
127
138
  DROP = "drop"
128
139
 
129
140
 
130
- class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
131
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
132
-
133
- # Error
134
- ERROR = "error"
135
- # Backpressure
136
- BACKPRESSURE = "backpressure"
137
- # Always On
138
- ALWAYS = "always"
139
-
140
-
141
141
  class OutputWavefrontPqControlsTypedDict(TypedDict):
142
142
  pass
143
143
 
@@ -201,6 +201,16 @@ class OutputWavefrontTypedDict(TypedDict):
201
201
  r"""WaveFront API authentication token (see [here](https://docs.wavefront.com/wavefront_api.html#generating-an-api-token))"""
202
202
  text_secret: NotRequired[str]
203
203
  r"""Select or create a stored text secret"""
204
+ pq_strict_ordering: NotRequired[bool]
205
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
206
+ pq_rate_per_sec: NotRequired[float]
207
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
208
+ pq_mode: NotRequired[OutputWavefrontMode]
209
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
210
+ pq_max_buffer_size: NotRequired[float]
211
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
212
+ pq_max_backpressure_sec: NotRequired[float]
213
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
204
214
  pq_max_file_size: NotRequired[str]
205
215
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
206
216
  pq_max_size: NotRequired[str]
@@ -211,8 +221,6 @@ class OutputWavefrontTypedDict(TypedDict):
211
221
  r"""Codec to use to compress the persisted data"""
212
222
  pq_on_backpressure: NotRequired[OutputWavefrontQueueFullBehavior]
213
223
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
214
- pq_mode: NotRequired[OutputWavefrontMode]
215
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
216
224
  pq_controls: NotRequired[OutputWavefrontPqControlsTypedDict]
217
225
 
218
226
 
@@ -338,6 +346,34 @@ class OutputWavefront(BaseModel):
338
346
  text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
339
347
  r"""Select or create a stored text secret"""
340
348
 
349
+ pq_strict_ordering: Annotated[
350
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
351
+ ] = True
352
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
353
+
354
+ pq_rate_per_sec: Annotated[
355
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
356
+ ] = 0
357
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
358
+
359
+ pq_mode: Annotated[
360
+ Annotated[
361
+ Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
362
+ ],
363
+ pydantic.Field(alias="pqMode"),
364
+ ] = OutputWavefrontMode.ERROR
365
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
366
+
367
+ pq_max_buffer_size: Annotated[
368
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
369
+ ] = 42
370
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
371
+
372
+ pq_max_backpressure_sec: Annotated[
373
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
374
+ ] = 30
375
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
376
+
341
377
  pq_max_file_size: Annotated[
342
378
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
343
379
  ] = "1 MB"
@@ -369,14 +405,6 @@ class OutputWavefront(BaseModel):
369
405
  ] = OutputWavefrontQueueFullBehavior.BLOCK
370
406
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
371
407
 
372
- pq_mode: Annotated[
373
- Annotated[
374
- Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
375
- ],
376
- pydantic.Field(alias="pqMode"),
377
- ] = OutputWavefrontMode.ERROR
378
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
379
-
380
408
  pq_controls: Annotated[
381
409
  Optional[OutputWavefrontPqControls], pydantic.Field(alias="pqControls")
382
410
  ] = None
@@ -212,6 +212,17 @@ class OutputWebhookTLSSettingsClientSide(BaseModel):
212
212
  ] = None
213
213
 
214
214
 
215
+ class OutputWebhookMode(str, Enum, metaclass=utils.OpenEnumMeta):
216
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
217
+
218
+ # Error
219
+ ERROR = "error"
220
+ # Backpressure
221
+ ALWAYS = "always"
222
+ # Always On
223
+ BACKPRESSURE = "backpressure"
224
+
225
+
215
226
  class OutputWebhookCompression(str, Enum, metaclass=utils.OpenEnumMeta):
216
227
  r"""Codec to use to compress the persisted data"""
217
228
 
@@ -230,17 +241,6 @@ class OutputWebhookQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
230
241
  DROP = "drop"
231
242
 
232
243
 
233
- class OutputWebhookMode(str, Enum, metaclass=utils.OpenEnumMeta):
234
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
235
-
236
- # Error
237
- ERROR = "error"
238
- # Backpressure
239
- BACKPRESSURE = "backpressure"
240
- # Always On
241
- ALWAYS = "always"
242
-
243
-
244
244
  class OutputWebhookPqControlsTypedDict(TypedDict):
245
245
  pass
246
246
 
@@ -370,6 +370,16 @@ class OutputWebhookTypedDict(TypedDict):
370
370
  r"""Custom JavaScript code to format incoming event data accessible through the __e variable. The formatted content is added to (__e['__eventOut']) if available. Otherwise, the original event is serialized as JSON. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""
371
371
  format_payload_code: NotRequired[str]
372
372
  r"""Optional JavaScript code to format the payload sent to the Destination. The payload, containing a batch of formatted events, is accessible through the __e['payload'] variable. The formatted payload is returned in the __e['__payloadOut'] variable. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""
373
+ pq_strict_ordering: NotRequired[bool]
374
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
375
+ pq_rate_per_sec: NotRequired[float]
376
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
377
+ pq_mode: NotRequired[OutputWebhookMode]
378
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
379
+ pq_max_buffer_size: NotRequired[float]
380
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
381
+ pq_max_backpressure_sec: NotRequired[float]
382
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
373
383
  pq_max_file_size: NotRequired[str]
374
384
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
375
385
  pq_max_size: NotRequired[str]
@@ -380,8 +390,6 @@ class OutputWebhookTypedDict(TypedDict):
380
390
  r"""Codec to use to compress the persisted data"""
381
391
  pq_on_backpressure: NotRequired[OutputWebhookQueueFullBehavior]
382
392
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
383
- pq_mode: NotRequired[OutputWebhookMode]
384
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
385
393
  pq_controls: NotRequired[OutputWebhookPqControlsTypedDict]
386
394
  username: NotRequired[str]
387
395
  password: NotRequired[str]
@@ -599,6 +607,34 @@ class OutputWebhook(BaseModel):
599
607
  ] = None
600
608
  r"""Optional JavaScript code to format the payload sent to the Destination. The payload, containing a batch of formatted events, is accessible through the __e['payload'] variable. The formatted payload is returned in the __e['__payloadOut'] variable. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""
601
609
 
610
+ pq_strict_ordering: Annotated[
611
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
612
+ ] = True
613
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
614
+
615
+ pq_rate_per_sec: Annotated[
616
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
617
+ ] = 0
618
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
619
+
620
+ pq_mode: Annotated[
621
+ Annotated[
622
+ Optional[OutputWebhookMode], PlainValidator(validate_open_enum(False))
623
+ ],
624
+ pydantic.Field(alias="pqMode"),
625
+ ] = OutputWebhookMode.ERROR
626
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
627
+
628
+ pq_max_buffer_size: Annotated[
629
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
630
+ ] = 42
631
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
632
+
633
+ pq_max_backpressure_sec: Annotated[
634
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
635
+ ] = 30
636
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
637
+
602
638
  pq_max_file_size: Annotated[
603
639
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
604
640
  ] = "1 MB"
@@ -630,14 +666,6 @@ class OutputWebhook(BaseModel):
630
666
  ] = OutputWebhookQueueFullBehavior.BLOCK
631
667
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
632
668
 
633
- pq_mode: Annotated[
634
- Annotated[
635
- Optional[OutputWebhookMode], PlainValidator(validate_open_enum(False))
636
- ],
637
- pydantic.Field(alias="pqMode"),
638
- ] = OutputWebhookMode.ERROR
639
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
640
-
641
669
  pq_controls: Annotated[
642
670
  Optional[OutputWebhookPqControls], pydantic.Field(alias="pqControls")
643
671
  ] = None
@@ -122,6 +122,17 @@ class OutputXsiamURL(BaseModel):
122
122
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
123
123
 
124
124
 
125
+ class OutputXsiamMode(str, Enum, metaclass=utils.OpenEnumMeta):
126
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
127
+
128
+ # Error
129
+ ERROR = "error"
130
+ # Backpressure
131
+ ALWAYS = "always"
132
+ # Always On
133
+ BACKPRESSURE = "backpressure"
134
+
135
+
125
136
  class OutputXsiamCompression(str, Enum, metaclass=utils.OpenEnumMeta):
126
137
  r"""Codec to use to compress the persisted data"""
127
138
 
@@ -140,17 +151,6 @@ class OutputXsiamQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
140
151
  DROP = "drop"
141
152
 
142
153
 
143
- class OutputXsiamMode(str, Enum, metaclass=utils.OpenEnumMeta):
144
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
145
-
146
- # Error
147
- ERROR = "error"
148
- # Backpressure
149
- BACKPRESSURE = "backpressure"
150
- # Always On
151
- ALWAYS = "always"
152
-
153
-
154
154
  class OutputXsiamPqControlsTypedDict(TypedDict):
155
155
  pass
156
156
 
@@ -223,6 +223,16 @@ class OutputXsiamTypedDict(TypedDict):
223
223
  r"""XSIAM authentication token"""
224
224
  text_secret: NotRequired[str]
225
225
  r"""Select or create a stored text secret"""
226
+ pq_strict_ordering: NotRequired[bool]
227
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
228
+ pq_rate_per_sec: NotRequired[float]
229
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
230
+ pq_mode: NotRequired[OutputXsiamMode]
231
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
232
+ pq_max_buffer_size: NotRequired[float]
233
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
234
+ pq_max_backpressure_sec: NotRequired[float]
235
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
226
236
  pq_max_file_size: NotRequired[str]
227
237
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
228
238
  pq_max_size: NotRequired[str]
@@ -233,8 +243,6 @@ class OutputXsiamTypedDict(TypedDict):
233
243
  r"""Codec to use to compress the persisted data"""
234
244
  pq_on_backpressure: NotRequired[OutputXsiamQueueFullBehavior]
235
245
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
236
- pq_mode: NotRequired[OutputXsiamMode]
237
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
238
246
  pq_controls: NotRequired[OutputXsiamPqControlsTypedDict]
239
247
 
240
248
 
@@ -387,6 +395,32 @@ class OutputXsiam(BaseModel):
387
395
  text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
388
396
  r"""Select or create a stored text secret"""
389
397
 
398
+ pq_strict_ordering: Annotated[
399
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
400
+ ] = True
401
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
402
+
403
+ pq_rate_per_sec: Annotated[
404
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
405
+ ] = 0
406
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
407
+
408
+ pq_mode: Annotated[
409
+ Annotated[Optional[OutputXsiamMode], PlainValidator(validate_open_enum(False))],
410
+ pydantic.Field(alias="pqMode"),
411
+ ] = OutputXsiamMode.ERROR
412
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
413
+
414
+ pq_max_buffer_size: Annotated[
415
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
416
+ ] = 42
417
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
418
+
419
+ pq_max_backpressure_sec: Annotated[
420
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
421
+ ] = 30
422
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
423
+
390
424
  pq_max_file_size: Annotated[
391
425
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
392
426
  ] = "1 MB"
@@ -417,12 +451,6 @@ class OutputXsiam(BaseModel):
417
451
  ] = OutputXsiamQueueFullBehavior.BLOCK
418
452
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
419
453
 
420
- pq_mode: Annotated[
421
- Annotated[Optional[OutputXsiamMode], PlainValidator(validate_open_enum(False))],
422
- pydantic.Field(alias="pqMode"),
423
- ] = OutputXsiamMode.ERROR
424
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
425
-
426
454
  pq_controls: Annotated[
427
455
  Optional[OutputXsiamPqControls], pydantic.Field(alias="pqControls")
428
456
  ] = None
@@ -175,13 +175,14 @@ class RunnableJobCollectionScheduleTypedDict(TypedDict):
175
175
 
176
176
  enabled: NotRequired[bool]
177
177
  r"""Enable to configure scheduling for this Collector"""
178
+ skippable: NotRequired[bool]
179
+ r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
180
+ resume_missed: NotRequired[bool]
181
+ r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
178
182
  cron_schedule: NotRequired[str]
179
183
  r"""A cron schedule on which to run this job"""
180
184
  max_concurrent_runs: NotRequired[float]
181
185
  r"""The maximum number of instances of this scheduled job that may be running at any time"""
182
- skippable: NotRequired[bool]
183
- r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
184
- resume_missed: NotRequired[Any]
185
186
  run: NotRequired[RunnableJobCollectionRunSettingsTypedDict]
186
187
 
187
188
 
@@ -191,6 +192,14 @@ class RunnableJobCollectionSchedule(BaseModel):
191
192
  enabled: Optional[bool] = None
192
193
  r"""Enable to configure scheduling for this Collector"""
193
194
 
195
+ skippable: Optional[bool] = True
196
+ r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
197
+
198
+ resume_missed: Annotated[Optional[bool], pydantic.Field(alias="resumeMissed")] = (
199
+ False
200
+ )
201
+ r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
202
+
194
203
  cron_schedule: Annotated[Optional[str], pydantic.Field(alias="cronSchedule")] = (
195
204
  "*/5 * * * *"
196
205
  )
@@ -201,11 +210,6 @@ class RunnableJobCollectionSchedule(BaseModel):
201
210
  ] = 1
202
211
  r"""The maximum number of instances of this scheduled job that may be running at any time"""
203
212
 
204
- skippable: Optional[bool] = True
205
- r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
206
-
207
- resume_missed: Annotated[Optional[Any], pydantic.Field(alias="resumeMissed")] = None
208
-
209
213
  run: Optional[RunnableJobCollectionRunSettings] = None
210
214
 
211
215
 
@@ -173,13 +173,14 @@ class RunnableJobExecutorScheduleTypedDict(TypedDict):
173
173
 
174
174
  enabled: NotRequired[bool]
175
175
  r"""Enable to configure scheduling for this Collector"""
176
+ skippable: NotRequired[bool]
177
+ r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
178
+ resume_missed: NotRequired[bool]
179
+ r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
176
180
  cron_schedule: NotRequired[str]
177
181
  r"""A cron schedule on which to run this job"""
178
182
  max_concurrent_runs: NotRequired[float]
179
183
  r"""The maximum number of instances of this scheduled job that may be running at any time"""
180
- skippable: NotRequired[bool]
181
- r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
182
- resume_missed: NotRequired[Any]
183
184
  run: NotRequired[RunnableJobExecutorRunSettingsTypedDict]
184
185
 
185
186
 
@@ -189,6 +190,14 @@ class RunnableJobExecutorSchedule(BaseModel):
189
190
  enabled: Optional[bool] = None
190
191
  r"""Enable to configure scheduling for this Collector"""
191
192
 
193
+ skippable: Optional[bool] = True
194
+ r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
195
+
196
+ resume_missed: Annotated[Optional[bool], pydantic.Field(alias="resumeMissed")] = (
197
+ False
198
+ )
199
+ r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
200
+
192
201
  cron_schedule: Annotated[Optional[str], pydantic.Field(alias="cronSchedule")] = (
193
202
  "*/5 * * * *"
194
203
  )
@@ -199,11 +208,6 @@ class RunnableJobExecutorSchedule(BaseModel):
199
208
  ] = 1
200
209
  r"""The maximum number of instances of this scheduled job that may be running at any time"""
201
210
 
202
- skippable: Optional[bool] = True
203
- r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
204
-
205
- resume_missed: Annotated[Optional[Any], pydantic.Field(alias="resumeMissed")] = None
206
-
207
211
  run: Optional[RunnableJobExecutorRunSettings] = None
208
212
 
209
213
 
@@ -174,13 +174,14 @@ class RunnableJobScheduledSearchScheduleTypedDict(TypedDict):
174
174
 
175
175
  enabled: NotRequired[bool]
176
176
  r"""Enable to configure scheduling for this Collector"""
177
+ skippable: NotRequired[bool]
178
+ r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
179
+ resume_missed: NotRequired[bool]
180
+ r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
177
181
  cron_schedule: NotRequired[str]
178
182
  r"""A cron schedule on which to run this job"""
179
183
  max_concurrent_runs: NotRequired[float]
180
184
  r"""The maximum number of instances of this scheduled job that may be running at any time"""
181
- skippable: NotRequired[bool]
182
- r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
183
- resume_missed: NotRequired[Any]
184
185
  run: NotRequired[RunnableJobScheduledSearchRunSettingsTypedDict]
185
186
 
186
187
 
@@ -190,6 +191,14 @@ class RunnableJobScheduledSearchSchedule(BaseModel):
190
191
  enabled: Optional[bool] = None
191
192
  r"""Enable to configure scheduling for this Collector"""
192
193
 
194
+ skippable: Optional[bool] = True
195
+ r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
196
+
197
+ resume_missed: Annotated[Optional[bool], pydantic.Field(alias="resumeMissed")] = (
198
+ False
199
+ )
200
+ r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
201
+
193
202
  cron_schedule: Annotated[Optional[str], pydantic.Field(alias="cronSchedule")] = (
194
203
  "*/5 * * * *"
195
204
  )
@@ -200,11 +209,6 @@ class RunnableJobScheduledSearchSchedule(BaseModel):
200
209
  ] = 1
201
210
  r"""The maximum number of instances of this scheduled job that may be running at any time"""
202
211
 
203
- skippable: Optional[bool] = True
204
- r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
205
-
206
- resume_missed: Annotated[Optional[Any], pydantic.Field(alias="resumeMissed")] = None
207
-
208
212
  run: Optional[RunnableJobScheduledSearchRunSettings] = None
209
213
 
210
214
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cribl-control-plane
3
- Version: 0.2.1rc2
3
+ Version: 0.2.1rc4
4
4
  Summary: Python Client SDK Generated by Speakeasy.
5
5
  Author: Speakeasy
6
6
  Requires-Python: >=3.9.2