cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic. See the registry's advisory page for more details.

Files changed (158)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -415,3 +416,77 @@ class OutputCriblLake(BaseModel):
415
416
 
416
417
  max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
417
418
  r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
419
+
420
+ @field_serializer("signature_version")
421
+ def serialize_signature_version(self, value):
422
+ if isinstance(value, str):
423
+ try:
424
+ return models.OutputCriblLakeSignatureVersion(value)
425
+ except ValueError:
426
+ return value
427
+ return value
428
+
429
+ @field_serializer("object_acl")
430
+ def serialize_object_acl(self, value):
431
+ if isinstance(value, str):
432
+ try:
433
+ return models.OutputCriblLakeObjectACL(value)
434
+ except ValueError:
435
+ return value
436
+ return value
437
+
438
+ @field_serializer("storage_class")
439
+ def serialize_storage_class(self, value):
440
+ if isinstance(value, str):
441
+ try:
442
+ return models.OutputCriblLakeStorageClass(value)
443
+ except ValueError:
444
+ return value
445
+ return value
446
+
447
+ @field_serializer("server_side_encryption")
448
+ def serialize_server_side_encryption(self, value):
449
+ if isinstance(value, str):
450
+ try:
451
+ return models.OutputCriblLakeServerSideEncryptionForUploadedObjects(
452
+ value
453
+ )
454
+ except ValueError:
455
+ return value
456
+ return value
457
+
458
+ @field_serializer("on_backpressure")
459
+ def serialize_on_backpressure(self, value):
460
+ if isinstance(value, str):
461
+ try:
462
+ return models.OutputCriblLakeBackpressureBehavior(value)
463
+ except ValueError:
464
+ return value
465
+ return value
466
+
467
+ @field_serializer("on_disk_full_backpressure")
468
+ def serialize_on_disk_full_backpressure(self, value):
469
+ if isinstance(value, str):
470
+ try:
471
+ return models.OutputCriblLakeDiskSpaceProtection(value)
472
+ except ValueError:
473
+ return value
474
+ return value
475
+
476
+ @field_serializer("aws_authentication_method")
477
+ def serialize_aws_authentication_method(self, value):
478
+ if isinstance(value, str):
479
+ try:
480
+ return models.AwsAuthenticationMethod(value)
481
+ except ValueError:
482
+ return value
483
+ return value
484
+
485
+ @field_serializer("format_")
486
+ def serialize_format_(self, value):
487
+ if isinstance(value, str):
488
+ try:
489
+ return models.OutputCriblLakeFormat(value)
490
+ except ValueError:
491
+ return value
492
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -106,6 +107,24 @@ class OutputCriblTCPTLSSettingsClientSide(BaseModel):
106
107
  pydantic.Field(alias="maxVersion"),
107
108
  ] = None
108
109
 
110
+ @field_serializer("min_version")
111
+ def serialize_min_version(self, value):
112
+ if isinstance(value, str):
113
+ try:
114
+ return models.OutputCriblTCPMinimumTLSVersion(value)
115
+ except ValueError:
116
+ return value
117
+ return value
118
+
119
+ @field_serializer("max_version")
120
+ def serialize_max_version(self, value):
121
+ if isinstance(value, str):
122
+ try:
123
+ return models.OutputCriblTCPMaximumTLSVersion(value)
124
+ except ValueError:
125
+ return value
126
+ return value
127
+
109
128
 
110
129
  class OutputCriblTCPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
111
130
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -156,6 +175,26 @@ class OutputCriblTCPHost(BaseModel):
156
175
  weight: Optional[float] = 1
157
176
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
158
177
 
178
+ @field_serializer("tls")
179
+ def serialize_tls(self, value):
180
+ if isinstance(value, str):
181
+ try:
182
+ return models.OutputCriblTCPTLS(value)
183
+ except ValueError:
184
+ return value
185
+ return value
186
+
187
+
188
+ class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
189
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
190
+
191
+ # Error
192
+ ERROR = "error"
193
+ # Always On
194
+ ALWAYS = "always"
195
+ # Backpressure
196
+ BACKPRESSURE = "backpressure"
197
+
159
198
 
160
199
  class OutputCriblTCPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
161
200
  r"""Codec to use to compress the persisted data"""
@@ -175,17 +214,6 @@ class OutputCriblTCPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
175
214
  DROP = "drop"
176
215
 
177
216
 
178
- class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
179
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
180
-
181
- # Error
182
- ERROR = "error"
183
- # Backpressure
184
- BACKPRESSURE = "backpressure"
185
- # Always On
186
- ALWAYS = "always"
187
-
188
-
189
217
  class OutputCriblTCPPqControlsTypedDict(TypedDict):
190
218
  pass
191
219
 
@@ -240,6 +268,16 @@ class OutputCriblTCPTypedDict(TypedDict):
240
268
  r"""How far back in time to keep traffic stats for load balancing purposes"""
241
269
  max_concurrent_senders: NotRequired[float]
242
270
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
271
+ pq_strict_ordering: NotRequired[bool]
272
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
273
+ pq_rate_per_sec: NotRequired[float]
274
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
275
+ pq_mode: NotRequired[OutputCriblTCPMode]
276
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
277
+ pq_max_buffer_size: NotRequired[float]
278
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
279
+ pq_max_backpressure_sec: NotRequired[float]
280
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
243
281
  pq_max_file_size: NotRequired[str]
244
282
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
245
283
  pq_max_size: NotRequired[str]
@@ -250,8 +288,6 @@ class OutputCriblTCPTypedDict(TypedDict):
250
288
  r"""Codec to use to compress the persisted data"""
251
289
  pq_on_backpressure: NotRequired[OutputCriblTCPQueueFullBehavior]
252
290
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
253
- pq_mode: NotRequired[OutputCriblTCPMode]
254
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
255
291
  pq_controls: NotRequired[OutputCriblTCPPqControlsTypedDict]
256
292
 
257
293
 
@@ -355,6 +391,34 @@ class OutputCriblTCP(BaseModel):
355
391
  ] = 0
356
392
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
357
393
 
394
+ pq_strict_ordering: Annotated[
395
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
396
+ ] = True
397
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
398
+
399
+ pq_rate_per_sec: Annotated[
400
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
401
+ ] = 0
402
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
403
+
404
+ pq_mode: Annotated[
405
+ Annotated[
406
+ Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
407
+ ],
408
+ pydantic.Field(alias="pqMode"),
409
+ ] = OutputCriblTCPMode.ERROR
410
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
411
+
412
+ pq_max_buffer_size: Annotated[
413
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
414
+ ] = 42
415
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
416
+
417
+ pq_max_backpressure_sec: Annotated[
418
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
419
+ ] = 30
420
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
421
+
358
422
  pq_max_file_size: Annotated[
359
423
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
360
424
  ] = "1 MB"
@@ -386,14 +450,51 @@ class OutputCriblTCP(BaseModel):
386
450
  ] = OutputCriblTCPQueueFullBehavior.BLOCK
387
451
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
388
452
 
389
- pq_mode: Annotated[
390
- Annotated[
391
- Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
392
- ],
393
- pydantic.Field(alias="pqMode"),
394
- ] = OutputCriblTCPMode.ERROR
395
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
396
-
397
453
  pq_controls: Annotated[
398
454
  Optional[OutputCriblTCPPqControls], pydantic.Field(alias="pqControls")
399
455
  ] = None
456
+
457
+ @field_serializer("compression")
458
+ def serialize_compression(self, value):
459
+ if isinstance(value, str):
460
+ try:
461
+ return models.OutputCriblTCPCompression(value)
462
+ except ValueError:
463
+ return value
464
+ return value
465
+
466
+ @field_serializer("on_backpressure")
467
+ def serialize_on_backpressure(self, value):
468
+ if isinstance(value, str):
469
+ try:
470
+ return models.OutputCriblTCPBackpressureBehavior(value)
471
+ except ValueError:
472
+ return value
473
+ return value
474
+
475
+ @field_serializer("pq_mode")
476
+ def serialize_pq_mode(self, value):
477
+ if isinstance(value, str):
478
+ try:
479
+ return models.OutputCriblTCPMode(value)
480
+ except ValueError:
481
+ return value
482
+ return value
483
+
484
+ @field_serializer("pq_compress")
485
+ def serialize_pq_compress(self, value):
486
+ if isinstance(value, str):
487
+ try:
488
+ return models.OutputCriblTCPPqCompressCompression(value)
489
+ except ValueError:
490
+ return value
491
+ return value
492
+
493
+ @field_serializer("pq_on_backpressure")
494
+ def serialize_pq_on_backpressure(self, value):
495
+ if isinstance(value, str):
496
+ try:
497
+ return models.OutputCriblTCPQueueFullBehavior(value)
498
+ except ValueError:
499
+ return value
500
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -126,6 +127,17 @@ class OutputCrowdstrikeNextGenSiemBackpressureBehavior(
126
127
  QUEUE = "queue"
127
128
 
128
129
 
130
+ class OutputCrowdstrikeNextGenSiemMode(str, Enum, metaclass=utils.OpenEnumMeta):
131
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
132
+
133
+ # Error
134
+ ERROR = "error"
135
+ # Backpressure
136
+ ALWAYS = "always"
137
+ # Always On
138
+ BACKPRESSURE = "backpressure"
139
+
140
+
129
141
  class OutputCrowdstrikeNextGenSiemCompression(str, Enum, metaclass=utils.OpenEnumMeta):
130
142
  r"""Codec to use to compress the persisted data"""
131
143
 
@@ -146,17 +158,6 @@ class OutputCrowdstrikeNextGenSiemQueueFullBehavior(
146
158
  DROP = "drop"
147
159
 
148
160
 
149
- class OutputCrowdstrikeNextGenSiemMode(str, Enum, metaclass=utils.OpenEnumMeta):
150
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
151
-
152
- # Error
153
- ERROR = "error"
154
- # Backpressure
155
- BACKPRESSURE = "backpressure"
156
- # Always On
157
- ALWAYS = "always"
158
-
159
-
160
161
  class OutputCrowdstrikeNextGenSiemPqControlsTypedDict(TypedDict):
161
162
  pass
162
163
 
@@ -229,6 +230,16 @@ class OutputCrowdstrikeNextGenSiemTypedDict(TypedDict):
229
230
  token: NotRequired[str]
230
231
  text_secret: NotRequired[str]
231
232
  r"""Select or create a stored text secret"""
233
+ pq_strict_ordering: NotRequired[bool]
234
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
235
+ pq_rate_per_sec: NotRequired[float]
236
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
237
+ pq_mode: NotRequired[OutputCrowdstrikeNextGenSiemMode]
238
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
239
+ pq_max_buffer_size: NotRequired[float]
240
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
241
+ pq_max_backpressure_sec: NotRequired[float]
242
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
232
243
  pq_max_file_size: NotRequired[str]
233
244
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
234
245
  pq_max_size: NotRequired[str]
@@ -239,8 +250,6 @@ class OutputCrowdstrikeNextGenSiemTypedDict(TypedDict):
239
250
  r"""Codec to use to compress the persisted data"""
240
251
  pq_on_backpressure: NotRequired[OutputCrowdstrikeNextGenSiemQueueFullBehavior]
241
252
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
242
- pq_mode: NotRequired[OutputCrowdstrikeNextGenSiemMode]
243
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
244
253
  pq_controls: NotRequired[OutputCrowdstrikeNextGenSiemPqControlsTypedDict]
245
254
 
246
255
 
@@ -376,6 +385,35 @@ class OutputCrowdstrikeNextGenSiem(BaseModel):
376
385
  text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
377
386
  r"""Select or create a stored text secret"""
378
387
 
388
+ pq_strict_ordering: Annotated[
389
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
390
+ ] = True
391
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
392
+
393
+ pq_rate_per_sec: Annotated[
394
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
395
+ ] = 0
396
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
397
+
398
+ pq_mode: Annotated[
399
+ Annotated[
400
+ Optional[OutputCrowdstrikeNextGenSiemMode],
401
+ PlainValidator(validate_open_enum(False)),
402
+ ],
403
+ pydantic.Field(alias="pqMode"),
404
+ ] = OutputCrowdstrikeNextGenSiemMode.ERROR
405
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
406
+
407
+ pq_max_buffer_size: Annotated[
408
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
409
+ ] = 42
410
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
411
+
412
+ pq_max_backpressure_sec: Annotated[
413
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
414
+ ] = 30
415
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
416
+
379
417
  pq_max_file_size: Annotated[
380
418
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
381
419
  ] = "1 MB"
@@ -407,16 +445,72 @@ class OutputCrowdstrikeNextGenSiem(BaseModel):
407
445
  ] = OutputCrowdstrikeNextGenSiemQueueFullBehavior.BLOCK
408
446
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
409
447
 
410
- pq_mode: Annotated[
411
- Annotated[
412
- Optional[OutputCrowdstrikeNextGenSiemMode],
413
- PlainValidator(validate_open_enum(False)),
414
- ],
415
- pydantic.Field(alias="pqMode"),
416
- ] = OutputCrowdstrikeNextGenSiemMode.ERROR
417
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
418
-
419
448
  pq_controls: Annotated[
420
449
  Optional[OutputCrowdstrikeNextGenSiemPqControls],
421
450
  pydantic.Field(alias="pqControls"),
422
451
  ] = None
452
+
453
+ @field_serializer("failed_request_logging_mode")
454
+ def serialize_failed_request_logging_mode(self, value):
455
+ if isinstance(value, str):
456
+ try:
457
+ return models.OutputCrowdstrikeNextGenSiemFailedRequestLoggingMode(
458
+ value
459
+ )
460
+ except ValueError:
461
+ return value
462
+ return value
463
+
464
+ @field_serializer("format_")
465
+ def serialize_format_(self, value):
466
+ if isinstance(value, str):
467
+ try:
468
+ return models.OutputCrowdstrikeNextGenSiemRequestFormat(value)
469
+ except ValueError:
470
+ return value
471
+ return value
472
+
473
+ @field_serializer("auth_type")
474
+ def serialize_auth_type(self, value):
475
+ if isinstance(value, str):
476
+ try:
477
+ return models.OutputCrowdstrikeNextGenSiemAuthenticationMethod(value)
478
+ except ValueError:
479
+ return value
480
+ return value
481
+
482
+ @field_serializer("on_backpressure")
483
+ def serialize_on_backpressure(self, value):
484
+ if isinstance(value, str):
485
+ try:
486
+ return models.OutputCrowdstrikeNextGenSiemBackpressureBehavior(value)
487
+ except ValueError:
488
+ return value
489
+ return value
490
+
491
+ @field_serializer("pq_mode")
492
+ def serialize_pq_mode(self, value):
493
+ if isinstance(value, str):
494
+ try:
495
+ return models.OutputCrowdstrikeNextGenSiemMode(value)
496
+ except ValueError:
497
+ return value
498
+ return value
499
+
500
+ @field_serializer("pq_compress")
501
+ def serialize_pq_compress(self, value):
502
+ if isinstance(value, str):
503
+ try:
504
+ return models.OutputCrowdstrikeNextGenSiemCompression(value)
505
+ except ValueError:
506
+ return value
507
+ return value
508
+
509
+ @field_serializer("pq_on_backpressure")
510
+ def serialize_pq_on_backpressure(self, value):
511
+ if isinstance(value, str):
512
+ try:
513
+ return models.OutputCrowdstrikeNextGenSiemQueueFullBehavior(value)
514
+ except ValueError:
515
+ return value
516
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -99,7 +100,7 @@ class OutputDatabricksTypedDict(TypedDict):
99
100
  r"""Databricks workspace ID"""
100
101
  client_id: str
101
102
  r"""OAuth client ID for Unity Catalog authentication"""
102
- client_secret: str
103
+ client_text_secret: str
103
104
  r"""OAuth client secret for Unity Catalog authentication"""
104
105
  id: NotRequired[str]
105
106
  r"""Unique ID for this output"""
@@ -112,7 +113,7 @@ class OutputDatabricksTypedDict(TypedDict):
112
113
  streamtags: NotRequired[List[str]]
113
114
  r"""Tags for filtering and grouping in @{product}"""
114
115
  dest_path: NotRequired[str]
115
- r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
116
+ r"""Optional path to prepend to files before uploading."""
116
117
  stage_path: NotRequired[str]
117
118
  r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
118
119
  add_id_to_stage_path: NotRequired[bool]
@@ -160,6 +161,8 @@ class OutputDatabricksTypedDict(TypedDict):
160
161
  r"""Compression level to apply before moving files to final destination"""
161
162
  automatic_schema: NotRequired[bool]
162
163
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
164
+ parquet_schema: NotRequired[str]
165
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
163
166
  parquet_version: NotRequired[OutputDatabricksParquetVersion]
164
167
  r"""Determines which data types are supported and how they are represented"""
165
168
  parquet_data_page_version: NotRequired[OutputDatabricksDataPageVersion]
@@ -195,7 +198,7 @@ class OutputDatabricks(BaseModel):
195
198
  client_id: Annotated[str, pydantic.Field(alias="clientId")]
196
199
  r"""OAuth client ID for Unity Catalog authentication"""
197
200
 
198
- client_secret: Annotated[str, pydantic.Field(alias="clientSecret")]
201
+ client_text_secret: Annotated[str, pydantic.Field(alias="clientTextSecret")]
199
202
  r"""OAuth client secret for Unity Catalog authentication"""
200
203
 
201
204
  id: Optional[str] = None
@@ -216,7 +219,7 @@ class OutputDatabricks(BaseModel):
216
219
  r"""Tags for filtering and grouping in @{product}"""
217
220
 
218
221
  dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = ""
219
- r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
222
+ r"""Optional path to prepend to files before uploading."""
220
223
 
221
224
  stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
222
225
  "$CRIBL_HOME/state/outputs/staging"
@@ -343,6 +346,11 @@ class OutputDatabricks(BaseModel):
343
346
  ] = False
344
347
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
345
348
 
349
+ parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
350
+ None
351
+ )
352
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
353
+
346
354
  parquet_version: Annotated[
347
355
  Annotated[
348
356
  Optional[OutputDatabricksParquetVersion],
@@ -409,3 +417,66 @@ class OutputDatabricks(BaseModel):
409
417
 
410
418
  max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
411
419
  r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
420
+
421
+ @field_serializer("format_")
422
+ def serialize_format_(self, value):
423
+ if isinstance(value, str):
424
+ try:
425
+ return models.OutputDatabricksDataFormat(value)
426
+ except ValueError:
427
+ return value
428
+ return value
429
+
430
+ @field_serializer("on_backpressure")
431
+ def serialize_on_backpressure(self, value):
432
+ if isinstance(value, str):
433
+ try:
434
+ return models.OutputDatabricksBackpressureBehavior(value)
435
+ except ValueError:
436
+ return value
437
+ return value
438
+
439
+ @field_serializer("on_disk_full_backpressure")
440
+ def serialize_on_disk_full_backpressure(self, value):
441
+ if isinstance(value, str):
442
+ try:
443
+ return models.OutputDatabricksDiskSpaceProtection(value)
444
+ except ValueError:
445
+ return value
446
+ return value
447
+
448
+ @field_serializer("compress")
449
+ def serialize_compress(self, value):
450
+ if isinstance(value, str):
451
+ try:
452
+ return models.OutputDatabricksCompression(value)
453
+ except ValueError:
454
+ return value
455
+ return value
456
+
457
+ @field_serializer("compression_level")
458
+ def serialize_compression_level(self, value):
459
+ if isinstance(value, str):
460
+ try:
461
+ return models.OutputDatabricksCompressionLevel(value)
462
+ except ValueError:
463
+ return value
464
+ return value
465
+
466
+ @field_serializer("parquet_version")
467
+ def serialize_parquet_version(self, value):
468
+ if isinstance(value, str):
469
+ try:
470
+ return models.OutputDatabricksParquetVersion(value)
471
+ except ValueError:
472
+ return value
473
+ return value
474
+
475
+ @field_serializer("parquet_data_page_version")
476
+ def serialize_parquet_data_page_version(self, value):
477
+ if isinstance(value, str):
478
+ try:
479
+ return models.OutputDatabricksDataPageVersion(value)
480
+ except ValueError:
481
+ return value
482
+ return value