cribl-control-plane 0.2.1rc4__py3-none-any.whl → 0.2.1rc5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (110)
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/groups_sdk.py +3 -0
  3. cribl_control_plane/mappings.py +1185 -0
  4. cribl_control_plane/models/__init__.py +149 -105
  5. cribl_control_plane/models/createadminproductsmappingsactivatebyproductop.py +52 -0
  6. cribl_control_plane/models/createadminproductsmappingsbyproductop.py +53 -0
  7. cribl_control_plane/models/deleteadminproductsmappingsbyproductandidop.py +51 -0
  8. cribl_control_plane/models/getadminproductsmappingsbyproductandidop.py +51 -0
  9. cribl_control_plane/models/getadminproductsmappingsbyproductop.py +44 -0
  10. cribl_control_plane/models/input.py +14 -14
  11. cribl_control_plane/models/inputappscope.py +16 -20
  12. cribl_control_plane/models/inputconfluentcloud.py +0 -110
  13. cribl_control_plane/models/inputcriblhttp.py +16 -20
  14. cribl_control_plane/models/inputcribllakehttp.py +16 -20
  15. cribl_control_plane/models/inputcribltcp.py +16 -20
  16. cribl_control_plane/models/inputdatadogagent.py +16 -20
  17. cribl_control_plane/models/inputedgeprometheus.py +36 -44
  18. cribl_control_plane/models/inputelastic.py +27 -44
  19. cribl_control_plane/models/inputeventhub.py +0 -118
  20. cribl_control_plane/models/inputfirehose.py +16 -20
  21. cribl_control_plane/models/inputgrafana.py +31 -39
  22. cribl_control_plane/models/inputhttp.py +16 -20
  23. cribl_control_plane/models/inputhttpraw.py +16 -20
  24. cribl_control_plane/models/inputkafka.py +0 -108
  25. cribl_control_plane/models/inputloki.py +16 -20
  26. cribl_control_plane/models/inputmetrics.py +16 -20
  27. cribl_control_plane/models/inputmodeldriventelemetry.py +16 -20
  28. cribl_control_plane/models/inputopentelemetry.py +15 -19
  29. cribl_control_plane/models/inputprometheus.py +36 -44
  30. cribl_control_plane/models/inputprometheusrw.py +16 -20
  31. cribl_control_plane/models/inputsplunk.py +16 -20
  32. cribl_control_plane/models/inputsplunkhec.py +15 -19
  33. cribl_control_plane/models/inputsyslog.py +31 -39
  34. cribl_control_plane/models/inputsystemmetrics.py +10 -20
  35. cribl_control_plane/models/inputtcp.py +16 -30
  36. cribl_control_plane/models/inputtcpjson.py +16 -20
  37. cribl_control_plane/models/inputwindowsmetrics.py +10 -20
  38. cribl_control_plane/models/inputwineventlogs.py +0 -14
  39. cribl_control_plane/models/inputwizwebhook.py +16 -20
  40. cribl_control_plane/models/inputzscalerhec.py +15 -19
  41. cribl_control_plane/models/mappingruleset.py +53 -0
  42. cribl_control_plane/models/mappingrulesetevalmappingfunction.py +71 -0
  43. cribl_control_plane/models/mappingrulesetgenericmappingfunction.py +29 -0
  44. cribl_control_plane/models/output.py +22 -22
  45. cribl_control_plane/models/outputazureblob.py +0 -7
  46. cribl_control_plane/models/outputazuredataexplorer.py +93 -283
  47. cribl_control_plane/models/outputazureeventhub.py +21 -169
  48. cribl_control_plane/models/outputazurelogs.py +21 -49
  49. cribl_control_plane/models/outputchronicle.py +21 -49
  50. cribl_control_plane/models/outputclickhouse.py +21 -49
  51. cribl_control_plane/models/outputcloudwatch.py +21 -49
  52. cribl_control_plane/models/outputconfluentcloud.py +22 -167
  53. cribl_control_plane/models/outputcriblhttp.py +21 -49
  54. cribl_control_plane/models/outputcribltcp.py +21 -49
  55. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +22 -50
  56. cribl_control_plane/models/outputdatabricks.py +0 -7
  57. cribl_control_plane/models/outputdatadog.py +21 -49
  58. cribl_control_plane/models/outputdataset.py +21 -49
  59. cribl_control_plane/models/outputdls3.py +0 -7
  60. cribl_control_plane/models/outputdynatracehttp.py +21 -49
  61. cribl_control_plane/models/outputdynatraceotlp.py +21 -49
  62. cribl_control_plane/models/outputelastic.py +21 -74
  63. cribl_control_plane/models/outputelasticcloud.py +21 -74
  64. cribl_control_plane/models/outputfilesystem.py +0 -7
  65. cribl_control_plane/models/outputgooglechronicle.py +22 -65
  66. cribl_control_plane/models/outputgooglecloudlogging.py +22 -50
  67. cribl_control_plane/models/outputgooglecloudstorage.py +0 -7
  68. cribl_control_plane/models/outputgooglepubsub.py +21 -49
  69. cribl_control_plane/models/outputgrafanacloud.py +42 -98
  70. cribl_control_plane/models/outputgraphite.py +21 -49
  71. cribl_control_plane/models/outputhoneycomb.py +21 -49
  72. cribl_control_plane/models/outputhumiohec.py +21 -49
  73. cribl_control_plane/models/outputinfluxdb.py +21 -49
  74. cribl_control_plane/models/outputkafka.py +19 -162
  75. cribl_control_plane/models/outputkinesis.py +21 -56
  76. cribl_control_plane/models/outputloki.py +19 -47
  77. cribl_control_plane/models/outputminio.py +0 -7
  78. cribl_control_plane/models/outputmsk.py +19 -54
  79. cribl_control_plane/models/outputnewrelic.py +21 -49
  80. cribl_control_plane/models/outputnewrelicevents.py +22 -50
  81. cribl_control_plane/models/outputopentelemetry.py +21 -49
  82. cribl_control_plane/models/outputprometheus.py +21 -49
  83. cribl_control_plane/models/outputs3.py +0 -7
  84. cribl_control_plane/models/outputsentinel.py +21 -49
  85. cribl_control_plane/models/outputsentineloneaisiem.py +22 -50
  86. cribl_control_plane/models/outputservicenow.py +21 -49
  87. cribl_control_plane/models/outputsignalfx.py +21 -49
  88. cribl_control_plane/models/outputsns.py +19 -47
  89. cribl_control_plane/models/outputsplunk.py +21 -49
  90. cribl_control_plane/models/outputsplunkhec.py +21 -49
  91. cribl_control_plane/models/outputsplunklb.py +21 -49
  92. cribl_control_plane/models/outputsqs.py +19 -47
  93. cribl_control_plane/models/outputstatsd.py +21 -49
  94. cribl_control_plane/models/outputstatsdext.py +21 -49
  95. cribl_control_plane/models/outputsumologic.py +21 -49
  96. cribl_control_plane/models/outputsyslog.py +99 -129
  97. cribl_control_plane/models/outputtcpjson.py +21 -49
  98. cribl_control_plane/models/outputwavefront.py +21 -49
  99. cribl_control_plane/models/outputwebhook.py +21 -49
  100. cribl_control_plane/models/outputxsiam.py +19 -47
  101. cribl_control_plane/models/pipeline.py +4 -4
  102. cribl_control_plane/models/rulesetid.py +13 -0
  103. cribl_control_plane/models/runnablejobcollection.py +8 -12
  104. cribl_control_plane/models/runnablejobexecutor.py +8 -12
  105. cribl_control_plane/models/runnablejobscheduledsearch.py +8 -12
  106. cribl_control_plane/models/updateadminproductsmappingsbyproductandidop.py +63 -0
  107. cribl_control_plane/pipelines.py +8 -8
  108. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/METADATA +11 -2
  109. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/RECORD +110 -99
  110. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputwavefront.py

@@ -109,17 +109,6 @@ class OutputWavefrontBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMet
     QUEUE = "queue"


-class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputWavefrontCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -138,6 +127,17 @@ class OutputWavefrontQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


+class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputWavefrontPqControlsTypedDict(TypedDict):
     pass

@@ -201,16 +201,6 @@ class OutputWavefrontTypedDict(TypedDict):
     r"""WaveFront API authentication token (see [here](https://docs.wavefront.com/wavefront_api.html#generating-an-api-token))"""
     text_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputWavefrontMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -221,6 +211,8 @@ class OutputWavefrontTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputWavefrontQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputWavefrontMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputWavefrontPqControlsTypedDict]


@@ -346,34 +338,6 @@ class OutputWavefront(BaseModel):
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputWavefrontMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -405,6 +369,14 @@
     ] = OutputWavefrontQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputWavefrontMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputWavefrontPqControls], pydantic.Field(alias="pqControls")
     ] = None
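The same change repeats verbatim for the Webhook and XSIAM Destinations below: four persistent-queue fields (pq_strict_ordering, pq_rate_per_sec, pq_max_buffer_size, pq_max_backpressure_sec) are removed outright, pq_mode is relocated after pq_on_backpressure, and the Mode enum is re-declared with its inline comments finally matching the members (rc4 annotated ALWAYS as "Backpressure" and BACKPRESSURE as "Always On"). The string values never changed, so serialized configs are unaffected by the reorder. A minimal sketch, assuming the enum is re-exported from cribl_control_plane.models as Speakeasy-generated packages normally do:

    # Sketch: member values are identical in rc4 and rc5; only the ordering
    # and the descriptive comments changed, so round-tripped configs keep
    # their meaning.
    from cribl_control_plane.models import OutputWavefrontMode

    assert OutputWavefrontMode.ERROR.value == "error"
    assert OutputWavefrontMode.BACKPRESSURE.value == "backpressure"
    assert OutputWavefrontMode.ALWAYS.value == "always"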
cribl_control_plane/models/outputwebhook.py

@@ -212,17 +212,6 @@ class OutputWebhookTLSSettingsClientSide(BaseModel):
     ] = None


-class OutputWebhookMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputWebhookCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -241,6 +230,17 @@ class OutputWebhookQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


+class OutputWebhookMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputWebhookPqControlsTypedDict(TypedDict):
     pass

@@ -370,16 +370,6 @@ class OutputWebhookTypedDict(TypedDict):
     r"""Custom JavaScript code to format incoming event data accessible through the __e variable. The formatted content is added to (__e['__eventOut']) if available. Otherwise, the original event is serialized as JSON. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""
     format_payload_code: NotRequired[str]
     r"""Optional JavaScript code to format the payload sent to the Destination. The payload, containing a batch of formatted events, is accessible through the __e['payload'] variable. The formatted payload is returned in the __e['__payloadOut'] variable. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputWebhookMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -390,6 +380,8 @@ class OutputWebhookTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputWebhookQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputWebhookMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputWebhookPqControlsTypedDict]
     username: NotRequired[str]
     password: NotRequired[str]
@@ -607,34 +599,6 @@ class OutputWebhook(BaseModel):
     ] = None
     r"""Optional JavaScript code to format the payload sent to the Destination. The payload, containing a batch of formatted events, is accessible through the __e['payload'] variable. The formatted payload is returned in the __e['__payloadOut'] variable. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputWebhookMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputWebhookMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -666,6 +630,14 @@ class OutputWebhook(BaseModel):
     ] = OutputWebhookQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputWebhookMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputWebhookMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputWebhookPqControls], pydantic.Field(alias="pqControls")
     ] = None
cribl_control_plane/models/outputxsiam.py

@@ -122,17 +122,6 @@ class OutputXsiamURL(BaseModel):
     r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


-class OutputXsiamMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputXsiamCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -151,6 +140,17 @@ class OutputXsiamQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


+class OutputXsiamMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputXsiamPqControlsTypedDict(TypedDict):
     pass

@@ -223,16 +223,6 @@ class OutputXsiamTypedDict(TypedDict):
     r"""XSIAM authentication token"""
     text_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputXsiamMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -243,6 +233,8 @@ class OutputXsiamTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputXsiamQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputXsiamMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputXsiamPqControlsTypedDict]


@@ -395,32 +387,6 @@ class OutputXsiam(BaseModel):
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[Optional[OutputXsiamMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputXsiamMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -451,6 +417,12 @@
     ] = OutputXsiamQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[Optional[OutputXsiamMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputXsiamMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputXsiamPqControls], pydantic.Field(alias="pqControls")
     ] = None
cribl_control_plane/models/pipeline.py

@@ -26,7 +26,7 @@ class PipelineGroups(BaseModel):
     r"""Whether this group is disabled"""


-class ConfTypedDict(TypedDict):
+class PipelineConfTypedDict(TypedDict):
     async_func_timeout: NotRequired[int]
     r"""Time (in ms) to wait for an async function to complete processing of a data item"""
     output: NotRequired[str]
@@ -39,7 +39,7 @@ class ConfTypedDict(TypedDict):
     groups: NotRequired[Dict[str, PipelineGroupsTypedDict]]


-class Conf(BaseModel):
+class PipelineConf(BaseModel):
     async_func_timeout: Annotated[
         Optional[int], pydantic.Field(alias="asyncFuncTimeout")
     ] = None
@@ -61,10 +61,10 @@ class Conf(BaseModel):

 class PipelineTypedDict(TypedDict):
     id: str
-    conf: ConfTypedDict
+    conf: PipelineConfTypedDict


 class Pipeline(BaseModel):
     id: str

-    conf: Conf
+    conf: PipelineConf
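Renaming Conf to PipelineConf removes an ambiguous top-level name from the models namespace; pipelines.py (at the end of this diff) is updated to match. A minimal construction sketch, assuming the SDK's BaseModel permits population by Python field name, as Speakeasy-generated models typically do for aliased fields such as asyncFuncTimeout:

    from cribl_control_plane import models

    # All PipelineConf fields shown in this diff are optional, so a partial
    # config validates; Pipeline itself requires both id and conf.
    conf = models.PipelineConf(async_func_timeout=1000, output="default")
    pipeline = models.Pipeline(id="my-pipeline", conf=conf)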
cribl_control_plane/models/rulesetid.py

@@ -0,0 +1,13 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class RulesetIDTypedDict(TypedDict):
+    id: str
+
+
+class RulesetID(BaseModel):
+    id: str
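RulesetID is a new one-field helper model. A sketch of the round trip, assuming the pydantic v2 API that the rest of the package already relies on:

    from cribl_control_plane.models import RulesetID

    ruleset = RulesetID(id="default")
    assert ruleset.model_dump() == {"id": "default"}  # pydantic v2 serialization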
cribl_control_plane/models/runnablejobcollection.py

@@ -175,14 +175,13 @@ class RunnableJobCollectionScheduleTypedDict(TypedDict):

     enabled: NotRequired[bool]
     r"""Enable to configure scheduling for this Collector"""
-    skippable: NotRequired[bool]
-    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
-    resume_missed: NotRequired[bool]
-    r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
     cron_schedule: NotRequired[str]
     r"""A cron schedule on which to run this job"""
     max_concurrent_runs: NotRequired[float]
     r"""The maximum number of instances of this scheduled job that may be running at any time"""
+    skippable: NotRequired[bool]
+    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
+    resume_missed: NotRequired[Any]
     run: NotRequired[RunnableJobCollectionRunSettingsTypedDict]


@@ -192,14 +191,6 @@ class RunnableJobCollectionSchedule(BaseModel):
     enabled: Optional[bool] = None
     r"""Enable to configure scheduling for this Collector"""

-    skippable: Optional[bool] = True
-    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
-
-    resume_missed: Annotated[Optional[bool], pydantic.Field(alias="resumeMissed")] = (
-        False
-    )
-    r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
-
     cron_schedule: Annotated[Optional[str], pydantic.Field(alias="cronSchedule")] = (
         "*/5 * * * *"
     )
@@ -210,6 +201,11 @@
     ] = 1
     r"""The maximum number of instances of this scheduled job that may be running at any time"""

+    skippable: Optional[bool] = True
+    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
+
+    resume_missed: Annotated[Optional[Any], pydantic.Field(alias="resumeMissed")] = None
+
     run: Optional[RunnableJobCollectionRunSettings] = None

cribl_control_plane/models/runnablejobexecutor.py

@@ -173,14 +173,13 @@ class RunnableJobExecutorScheduleTypedDict(TypedDict):

     enabled: NotRequired[bool]
     r"""Enable to configure scheduling for this Collector"""
-    skippable: NotRequired[bool]
-    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
-    resume_missed: NotRequired[bool]
-    r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
     cron_schedule: NotRequired[str]
     r"""A cron schedule on which to run this job"""
     max_concurrent_runs: NotRequired[float]
     r"""The maximum number of instances of this scheduled job that may be running at any time"""
+    skippable: NotRequired[bool]
+    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
+    resume_missed: NotRequired[Any]
     run: NotRequired[RunnableJobExecutorRunSettingsTypedDict]


@@ -190,14 +189,6 @@ class RunnableJobExecutorSchedule(BaseModel):
     enabled: Optional[bool] = None
     r"""Enable to configure scheduling for this Collector"""

-    skippable: Optional[bool] = True
-    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
-
-    resume_missed: Annotated[Optional[bool], pydantic.Field(alias="resumeMissed")] = (
-        False
-    )
-    r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
-
     cron_schedule: Annotated[Optional[str], pydantic.Field(alias="cronSchedule")] = (
         "*/5 * * * *"
     )
@@ -208,6 +199,11 @@
     ] = 1
     r"""The maximum number of instances of this scheduled job that may be running at any time"""

+    skippable: Optional[bool] = True
+    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
+
+    resume_missed: Annotated[Optional[Any], pydantic.Field(alias="resumeMissed")] = None
+
     run: Optional[RunnableJobExecutorRunSettings] = None

cribl_control_plane/models/runnablejobscheduledsearch.py

@@ -174,14 +174,13 @@ class RunnableJobScheduledSearchScheduleTypedDict(TypedDict):

     enabled: NotRequired[bool]
     r"""Enable to configure scheduling for this Collector"""
-    skippable: NotRequired[bool]
-    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
-    resume_missed: NotRequired[bool]
-    r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
     cron_schedule: NotRequired[str]
     r"""A cron schedule on which to run this job"""
     max_concurrent_runs: NotRequired[float]
     r"""The maximum number of instances of this scheduled job that may be running at any time"""
+    skippable: NotRequired[bool]
+    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
+    resume_missed: NotRequired[Any]
     run: NotRequired[RunnableJobScheduledSearchRunSettingsTypedDict]


@@ -191,14 +190,6 @@ class RunnableJobScheduledSearchSchedule(BaseModel):
     enabled: Optional[bool] = None
     r"""Enable to configure scheduling for this Collector"""

-    skippable: Optional[bool] = True
-    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
-
-    resume_missed: Annotated[Optional[bool], pydantic.Field(alias="resumeMissed")] = (
-        False
-    )
-    r"""If Stream Leader (or single instance) restarts, run all missed jobs according to their original schedules"""
-
     cron_schedule: Annotated[Optional[str], pydantic.Field(alias="cronSchedule")] = (
         "*/5 * * * *"
     )
@@ -209,6 +200,11 @@
     ] = 1
     r"""The maximum number of instances of this scheduled job that may be running at any time"""

+    skippable: Optional[bool] = True
+    r"""Skippable jobs can be delayed, up to their next run time, if the system is hitting concurrency limits"""
+
+    resume_missed: Annotated[Optional[Any], pydantic.Field(alias="resumeMissed")] = None
+
     run: Optional[RunnableJobScheduledSearchRunSettings] = None

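The same schedule reshuffle lands in all three runnable-job models: skippable and resume_missed move below max_concurrent_runs, and resume_missed loosens from Optional[bool] (default False, with a docstring) to an untyped Any defaulting to None. The practical effect, sketched under the assumption that the SDK's BaseModel accepts Python field names:

    from cribl_control_plane.models import RunnableJobCollectionSchedule

    # rc4 defaulted resume_missed to False and validated it as a bool;
    # rc5 leaves it as None and passes any value through unvalidated.
    schedule = RunnableJobCollectionSchedule(enabled=True, skippable=True)
    assert schedule.resume_missed is None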
cribl_control_plane/models/updateadminproductsmappingsbyproductandidop.py

@@ -0,0 +1,63 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .mappingruleset import MappingRuleset, MappingRulesetTypedDict
+from .productscore import ProductsCore
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import (
+    FieldMetadata,
+    PathParamMetadata,
+    RequestMetadata,
+    validate_open_enum,
+)
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class UpdateAdminProductsMappingsByProductAndIDRequestTypedDict(TypedDict):
+    product: ProductsCore
+    r"""Name of the Cribl product to update the Mapping Ruleset for"""
+    id_param: str
+    r"""The <code>id</code> of the Mapping Ruleset to update."""
+    mapping_ruleset: MappingRulesetTypedDict
+    r"""MappingRuleset object"""
+
+
+class UpdateAdminProductsMappingsByProductAndIDRequest(BaseModel):
+    product: Annotated[
+        Annotated[ProductsCore, PlainValidator(validate_open_enum(False))],
+        FieldMetadata(path=PathParamMetadata(style="simple", explode=False)),
+    ]
+    r"""Name of the Cribl product to update the Mapping Ruleset for"""
+
+    id_param: Annotated[
+        str,
+        pydantic.Field(alias="id"),
+        FieldMetadata(path=PathParamMetadata(style="simple", explode=False)),
+    ]
+    r"""The <code>id</code> of the Mapping Ruleset to update."""
+
+    mapping_ruleset: Annotated[
+        MappingRuleset,
+        FieldMetadata(request=RequestMetadata(media_type="application/json")),
+    ]
+    r"""MappingRuleset object"""
+
+
+class UpdateAdminProductsMappingsByProductAndIDResponseTypedDict(TypedDict):
+    r"""A list containing the updated Mapping Ruleset objects"""
+
+    count: NotRequired[int]
+    r"""number of items present in the items array"""
+    items: NotRequired[List[MappingRulesetTypedDict]]
+
+
+class UpdateAdminProductsMappingsByProductAndIDResponse(BaseModel):
+    r"""A list containing the updated Mapping Ruleset objects"""
+
+    count: Optional[int] = None
+    r"""number of items present in the items array"""
+
+    items: Optional[List[MappingRuleset]] = None
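The response model is fully specified by this new file, so a sketch of it is safe; the request side depends on ProductsCore members and MappingRuleset fields that this diff does not show, so it is omitted here:

    from cribl_control_plane.models import (
        UpdateAdminProductsMappingsByProductAndIDResponse,
    )

    # Both fields are optional; an empty result set validates.
    resp = UpdateAdminProductsMappingsByProductAndIDResponse(count=0, items=[])
    assert resp.count == 0 and resp.items == []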
cribl_control_plane/pipelines.py

@@ -176,7 +176,7 @@ class Pipelines(BaseSDK):
         self,
         *,
         id: str,
-        conf: Union[models.Conf, models.ConfTypedDict],
+        conf: Union[models.PipelineConf, models.PipelineConfTypedDict],
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -205,7 +205,7 @@

         request = models.Pipeline(
             id=id,
-            conf=utils.get_pydantic_model(conf, models.Conf),
+            conf=utils.get_pydantic_model(conf, models.PipelineConf),
         )

         req = self._build_request(
@@ -269,7 +269,7 @@
         self,
         *,
         id: str,
-        conf: Union[models.Conf, models.ConfTypedDict],
+        conf: Union[models.PipelineConf, models.PipelineConfTypedDict],
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -298,7 +298,7 @@

         request = models.Pipeline(
             id=id,
-            conf=utils.get_pydantic_model(conf, models.Conf),
+            conf=utils.get_pydantic_model(conf, models.PipelineConf),
         )

         req = self._build_request_async(
@@ -537,7 +537,7 @@
         *,
         id_param: str,
         id: str,
-        conf: Union[models.Conf, models.ConfTypedDict],
+        conf: Union[models.PipelineConf, models.PipelineConfTypedDict],
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -569,7 +569,7 @@
             id_param=id_param,
             pipeline=models.Pipeline(
                 id=id,
-                conf=utils.get_pydantic_model(conf, models.Conf),
+                conf=utils.get_pydantic_model(conf, models.PipelineConf),
             ),
         )

@@ -635,7 +635,7 @@
         *,
         id_param: str,
         id: str,
-        conf: Union[models.Conf, models.ConfTypedDict],
+        conf: Union[models.PipelineConf, models.PipelineConfTypedDict],
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -667,7 +667,7 @@
             id_param=id_param,
             pipeline=models.Pipeline(
                 id=id,
-                conf=utils.get_pydantic_model(conf, models.Conf),
+                conf=utils.get_pydantic_model(conf, models.PipelineConf),
             ),
         )

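Because conf is typed Union[models.PipelineConf, models.PipelineConfTypedDict] and normalized through utils.get_pydantic_model, callers passing plain dicts are unaffected by the rename; only code that referenced models.Conf by name must change. A sketch of the normalization step these methods perform, assuming utils.get_pydantic_model is importable as it is used above and that field-name keys are accepted alongside aliases:

    from cribl_control_plane import models, utils

    # The TypedDict (plain dict) form is coerced to the renamed model,
    # exactly as the request-building code above does.
    conf = utils.get_pydantic_model(
        {"async_func_timeout": 1000, "output": "default"},
        models.PipelineConf,
    )
    assert isinstance(conf, models.PipelineConf)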