cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic. See the package's registry page for more details.

Files changed (158)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -106,6 +107,24 @@ class OutputTcpjsonTLSSettingsClientSide(BaseModel):
106
107
  pydantic.Field(alias="maxVersion"),
107
108
  ] = None
108
109
 
110
+ @field_serializer("min_version")
111
+ def serialize_min_version(self, value):
112
+ if isinstance(value, str):
113
+ try:
114
+ return models.OutputTcpjsonMinimumTLSVersion(value)
115
+ except ValueError:
116
+ return value
117
+ return value
118
+
119
+ @field_serializer("max_version")
120
+ def serialize_max_version(self, value):
121
+ if isinstance(value, str):
122
+ try:
123
+ return models.OutputTcpjsonMaximumTLSVersion(value)
124
+ except ValueError:
125
+ return value
126
+ return value
127
+
109
128
 
110
129
  class OutputTcpjsonBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
111
130
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -163,6 +182,26 @@ class OutputTcpjsonHost(BaseModel):
163
182
  weight: Optional[float] = 1
164
183
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
165
184
 
185
+ @field_serializer("tls")
186
+ def serialize_tls(self, value):
187
+ if isinstance(value, str):
188
+ try:
189
+ return models.OutputTcpjsonTLS(value)
190
+ except ValueError:
191
+ return value
192
+ return value
193
+
194
+
195
+ class OutputTcpjsonMode(str, Enum, metaclass=utils.OpenEnumMeta):
196
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
197
+
198
+ # Error
199
+ ERROR = "error"
200
+ # Backpressure
201
+ ALWAYS = "always"
202
+ # Always On
203
+ BACKPRESSURE = "backpressure"
204
+
166
205
 
167
206
  class OutputTcpjsonPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
168
207
  r"""Codec to use to compress the persisted data"""
@@ -182,17 +221,6 @@ class OutputTcpjsonQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
182
221
  DROP = "drop"
183
222
 
184
223
 
185
- class OutputTcpjsonMode(str, Enum, metaclass=utils.OpenEnumMeta):
186
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
187
-
188
- # Error
189
- ERROR = "error"
190
- # Backpressure
191
- BACKPRESSURE = "backpressure"
192
- # Always On
193
- ALWAYS = "always"
194
-
195
-
196
224
  class OutputTcpjsonPqControlsTypedDict(TypedDict):
197
225
  pass
198
226
 
@@ -249,6 +277,16 @@ class OutputTcpjsonTypedDict(TypedDict):
249
277
  r"""How far back in time to keep traffic stats for load balancing purposes"""
250
278
  max_concurrent_senders: NotRequired[float]
251
279
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
280
+ pq_strict_ordering: NotRequired[bool]
281
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
282
+ pq_rate_per_sec: NotRequired[float]
283
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
284
+ pq_mode: NotRequired[OutputTcpjsonMode]
285
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
286
+ pq_max_buffer_size: NotRequired[float]
287
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
288
+ pq_max_backpressure_sec: NotRequired[float]
289
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
252
290
  pq_max_file_size: NotRequired[str]
253
291
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
254
292
  pq_max_size: NotRequired[str]
@@ -259,8 +297,6 @@ class OutputTcpjsonTypedDict(TypedDict):
259
297
  r"""Codec to use to compress the persisted data"""
260
298
  pq_on_backpressure: NotRequired[OutputTcpjsonQueueFullBehavior]
261
299
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
262
- pq_mode: NotRequired[OutputTcpjsonMode]
263
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
264
300
  pq_controls: NotRequired[OutputTcpjsonPqControlsTypedDict]
265
301
  auth_token: NotRequired[str]
266
302
  r"""Optional authentication token to include as part of the connection header"""
@@ -375,6 +411,34 @@ class OutputTcpjson(BaseModel):
375
411
  ] = 0
376
412
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
377
413
 
414
+ pq_strict_ordering: Annotated[
415
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
416
+ ] = True
417
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
418
+
419
+ pq_rate_per_sec: Annotated[
420
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
421
+ ] = 0
422
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
423
+
424
+ pq_mode: Annotated[
425
+ Annotated[
426
+ Optional[OutputTcpjsonMode], PlainValidator(validate_open_enum(False))
427
+ ],
428
+ pydantic.Field(alias="pqMode"),
429
+ ] = OutputTcpjsonMode.ERROR
430
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
431
+
432
+ pq_max_buffer_size: Annotated[
433
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
434
+ ] = 42
435
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
436
+
437
+ pq_max_backpressure_sec: Annotated[
438
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
439
+ ] = 30
440
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
441
+
378
442
  pq_max_file_size: Annotated[
379
443
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
380
444
  ] = "1 MB"
@@ -406,14 +470,6 @@ class OutputTcpjson(BaseModel):
406
470
  ] = OutputTcpjsonQueueFullBehavior.BLOCK
407
471
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
408
472
 
409
- pq_mode: Annotated[
410
- Annotated[
411
- Optional[OutputTcpjsonMode], PlainValidator(validate_open_enum(False))
412
- ],
413
- pydantic.Field(alias="pqMode"),
414
- ] = OutputTcpjsonMode.ERROR
415
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
416
-
417
473
  pq_controls: Annotated[
418
474
  Optional[OutputTcpjsonPqControls], pydantic.Field(alias="pqControls")
419
475
  ] = None
@@ -423,3 +479,57 @@ class OutputTcpjson(BaseModel):
423
479
 
424
480
  text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
425
481
  r"""Select or create a stored text secret"""
482
+
483
+ @field_serializer("compression")
484
+ def serialize_compression(self, value):
485
+ if isinstance(value, str):
486
+ try:
487
+ return models.OutputTcpjsonCompression(value)
488
+ except ValueError:
489
+ return value
490
+ return value
491
+
492
+ @field_serializer("on_backpressure")
493
+ def serialize_on_backpressure(self, value):
494
+ if isinstance(value, str):
495
+ try:
496
+ return models.OutputTcpjsonBackpressureBehavior(value)
497
+ except ValueError:
498
+ return value
499
+ return value
500
+
501
+ @field_serializer("auth_type")
502
+ def serialize_auth_type(self, value):
503
+ if isinstance(value, str):
504
+ try:
505
+ return models.OutputTcpjsonAuthenticationMethod(value)
506
+ except ValueError:
507
+ return value
508
+ return value
509
+
510
+ @field_serializer("pq_mode")
511
+ def serialize_pq_mode(self, value):
512
+ if isinstance(value, str):
513
+ try:
514
+ return models.OutputTcpjsonMode(value)
515
+ except ValueError:
516
+ return value
517
+ return value
518
+
519
+ @field_serializer("pq_compress")
520
+ def serialize_pq_compress(self, value):
521
+ if isinstance(value, str):
522
+ try:
523
+ return models.OutputTcpjsonPqCompressCompression(value)
524
+ except ValueError:
525
+ return value
526
+ return value
527
+
528
+ @field_serializer("pq_on_backpressure")
529
+ def serialize_pq_on_backpressure(self, value):
530
+ if isinstance(value, str):
531
+ try:
532
+ return models.OutputTcpjsonQueueFullBehavior(value)
533
+ except ValueError:
534
+ return value
535
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -109,6 +110,17 @@ class OutputWavefrontBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMet
109
110
  QUEUE = "queue"
110
111
 
111
112
 
113
+ class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
114
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
115
+
116
+ # Error
117
+ ERROR = "error"
118
+ # Backpressure
119
+ ALWAYS = "always"
120
+ # Always On
121
+ BACKPRESSURE = "backpressure"
122
+
123
+
112
124
  class OutputWavefrontCompression(str, Enum, metaclass=utils.OpenEnumMeta):
113
125
  r"""Codec to use to compress the persisted data"""
114
126
 
@@ -127,17 +139,6 @@ class OutputWavefrontQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
127
139
  DROP = "drop"
128
140
 
129
141
 
130
- class OutputWavefrontMode(str, Enum, metaclass=utils.OpenEnumMeta):
131
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
132
-
133
- # Error
134
- ERROR = "error"
135
- # Backpressure
136
- BACKPRESSURE = "backpressure"
137
- # Always On
138
- ALWAYS = "always"
139
-
140
-
141
142
  class OutputWavefrontPqControlsTypedDict(TypedDict):
142
143
  pass
143
144
 
@@ -201,6 +202,16 @@ class OutputWavefrontTypedDict(TypedDict):
201
202
  r"""WaveFront API authentication token (see [here](https://docs.wavefront.com/wavefront_api.html#generating-an-api-token))"""
202
203
  text_secret: NotRequired[str]
203
204
  r"""Select or create a stored text secret"""
205
+ pq_strict_ordering: NotRequired[bool]
206
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
207
+ pq_rate_per_sec: NotRequired[float]
208
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
209
+ pq_mode: NotRequired[OutputWavefrontMode]
210
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
211
+ pq_max_buffer_size: NotRequired[float]
212
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
213
+ pq_max_backpressure_sec: NotRequired[float]
214
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
204
215
  pq_max_file_size: NotRequired[str]
205
216
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
206
217
  pq_max_size: NotRequired[str]
@@ -211,8 +222,6 @@ class OutputWavefrontTypedDict(TypedDict):
211
222
  r"""Codec to use to compress the persisted data"""
212
223
  pq_on_backpressure: NotRequired[OutputWavefrontQueueFullBehavior]
213
224
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
214
- pq_mode: NotRequired[OutputWavefrontMode]
215
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
216
225
  pq_controls: NotRequired[OutputWavefrontPqControlsTypedDict]
217
226
 
218
227
 
@@ -338,6 +347,34 @@ class OutputWavefront(BaseModel):
338
347
  text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
339
348
  r"""Select or create a stored text secret"""
340
349
 
350
+ pq_strict_ordering: Annotated[
351
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
352
+ ] = True
353
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
354
+
355
+ pq_rate_per_sec: Annotated[
356
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
357
+ ] = 0
358
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
359
+
360
+ pq_mode: Annotated[
361
+ Annotated[
362
+ Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
363
+ ],
364
+ pydantic.Field(alias="pqMode"),
365
+ ] = OutputWavefrontMode.ERROR
366
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
367
+
368
+ pq_max_buffer_size: Annotated[
369
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
370
+ ] = 42
371
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
372
+
373
+ pq_max_backpressure_sec: Annotated[
374
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
375
+ ] = 30
376
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
377
+
341
378
  pq_max_file_size: Annotated[
342
379
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
343
380
  ] = "1 MB"
@@ -369,14 +406,60 @@ class OutputWavefront(BaseModel):
369
406
  ] = OutputWavefrontQueueFullBehavior.BLOCK
370
407
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
371
408
 
372
- pq_mode: Annotated[
373
- Annotated[
374
- Optional[OutputWavefrontMode], PlainValidator(validate_open_enum(False))
375
- ],
376
- pydantic.Field(alias="pqMode"),
377
- ] = OutputWavefrontMode.ERROR
378
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
379
-
380
409
  pq_controls: Annotated[
381
410
  Optional[OutputWavefrontPqControls], pydantic.Field(alias="pqControls")
382
411
  ] = None
412
+
413
+ @field_serializer("auth_type")
414
+ def serialize_auth_type(self, value):
415
+ if isinstance(value, str):
416
+ try:
417
+ return models.OutputWavefrontAuthenticationMethod(value)
418
+ except ValueError:
419
+ return value
420
+ return value
421
+
422
+ @field_serializer("failed_request_logging_mode")
423
+ def serialize_failed_request_logging_mode(self, value):
424
+ if isinstance(value, str):
425
+ try:
426
+ return models.OutputWavefrontFailedRequestLoggingMode(value)
427
+ except ValueError:
428
+ return value
429
+ return value
430
+
431
+ @field_serializer("on_backpressure")
432
+ def serialize_on_backpressure(self, value):
433
+ if isinstance(value, str):
434
+ try:
435
+ return models.OutputWavefrontBackpressureBehavior(value)
436
+ except ValueError:
437
+ return value
438
+ return value
439
+
440
+ @field_serializer("pq_mode")
441
+ def serialize_pq_mode(self, value):
442
+ if isinstance(value, str):
443
+ try:
444
+ return models.OutputWavefrontMode(value)
445
+ except ValueError:
446
+ return value
447
+ return value
448
+
449
+ @field_serializer("pq_compress")
450
+ def serialize_pq_compress(self, value):
451
+ if isinstance(value, str):
452
+ try:
453
+ return models.OutputWavefrontCompression(value)
454
+ except ValueError:
455
+ return value
456
+ return value
457
+
458
+ @field_serializer("pq_on_backpressure")
459
+ def serialize_pq_on_backpressure(self, value):
460
+ if isinstance(value, str):
461
+ try:
462
+ return models.OutputWavefrontQueueFullBehavior(value)
463
+ except ValueError:
464
+ return value
465
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -211,6 +212,35 @@ class OutputWebhookTLSSettingsClientSide(BaseModel):
211
212
  pydantic.Field(alias="maxVersion"),
212
213
  ] = None
213
214
 
215
+ @field_serializer("min_version")
216
+ def serialize_min_version(self, value):
217
+ if isinstance(value, str):
218
+ try:
219
+ return models.OutputWebhookMinimumTLSVersion(value)
220
+ except ValueError:
221
+ return value
222
+ return value
223
+
224
+ @field_serializer("max_version")
225
+ def serialize_max_version(self, value):
226
+ if isinstance(value, str):
227
+ try:
228
+ return models.OutputWebhookMaximumTLSVersion(value)
229
+ except ValueError:
230
+ return value
231
+ return value
232
+
233
+
234
+ class OutputWebhookMode(str, Enum, metaclass=utils.OpenEnumMeta):
235
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
236
+
237
+ # Error
238
+ ERROR = "error"
239
+ # Backpressure
240
+ ALWAYS = "always"
241
+ # Always On
242
+ BACKPRESSURE = "backpressure"
243
+
214
244
 
215
245
  class OutputWebhookCompression(str, Enum, metaclass=utils.OpenEnumMeta):
216
246
  r"""Codec to use to compress the persisted data"""
@@ -230,17 +260,6 @@ class OutputWebhookQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
230
260
  DROP = "drop"
231
261
 
232
262
 
233
- class OutputWebhookMode(str, Enum, metaclass=utils.OpenEnumMeta):
234
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
235
-
236
- # Error
237
- ERROR = "error"
238
- # Backpressure
239
- BACKPRESSURE = "backpressure"
240
- # Always On
241
- ALWAYS = "always"
242
-
243
-
244
263
  class OutputWebhookPqControlsTypedDict(TypedDict):
245
264
  pass
246
265
 
@@ -370,6 +389,16 @@ class OutputWebhookTypedDict(TypedDict):
370
389
  r"""Custom JavaScript code to format incoming event data accessible through the __e variable. The formatted content is added to (__e['__eventOut']) if available. Otherwise, the original event is serialized as JSON. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""
371
390
  format_payload_code: NotRequired[str]
372
391
  r"""Optional JavaScript code to format the payload sent to the Destination. The payload, containing a batch of formatted events, is accessible through the __e['payload'] variable. The formatted payload is returned in the __e['__payloadOut'] variable. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""
392
+ pq_strict_ordering: NotRequired[bool]
393
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
394
+ pq_rate_per_sec: NotRequired[float]
395
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
396
+ pq_mode: NotRequired[OutputWebhookMode]
397
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
398
+ pq_max_buffer_size: NotRequired[float]
399
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
400
+ pq_max_backpressure_sec: NotRequired[float]
401
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
373
402
  pq_max_file_size: NotRequired[str]
374
403
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
375
404
  pq_max_size: NotRequired[str]
@@ -380,8 +409,6 @@ class OutputWebhookTypedDict(TypedDict):
380
409
  r"""Codec to use to compress the persisted data"""
381
410
  pq_on_backpressure: NotRequired[OutputWebhookQueueFullBehavior]
382
411
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
383
- pq_mode: NotRequired[OutputWebhookMode]
384
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
385
412
  pq_controls: NotRequired[OutputWebhookPqControlsTypedDict]
386
413
  username: NotRequired[str]
387
414
  password: NotRequired[str]
@@ -599,6 +626,34 @@ class OutputWebhook(BaseModel):
599
626
  ] = None
600
627
  r"""Optional JavaScript code to format the payload sent to the Destination. The payload, containing a batch of formatted events, is accessible through the __e['payload'] variable. The formatted payload is returned in the __e['__payloadOut'] variable. Caution: This function is evaluated in an unprotected context, allowing you to execute almost any JavaScript code."""
601
628
 
629
+ pq_strict_ordering: Annotated[
630
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
631
+ ] = True
632
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
633
+
634
+ pq_rate_per_sec: Annotated[
635
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
636
+ ] = 0
637
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
638
+
639
+ pq_mode: Annotated[
640
+ Annotated[
641
+ Optional[OutputWebhookMode], PlainValidator(validate_open_enum(False))
642
+ ],
643
+ pydantic.Field(alias="pqMode"),
644
+ ] = OutputWebhookMode.ERROR
645
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
646
+
647
+ pq_max_buffer_size: Annotated[
648
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
649
+ ] = 42
650
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
651
+
652
+ pq_max_backpressure_sec: Annotated[
653
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
654
+ ] = 30
655
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
656
+
602
657
  pq_max_file_size: Annotated[
603
658
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
604
659
  ] = "1 MB"
@@ -630,14 +685,6 @@ class OutputWebhook(BaseModel):
630
685
  ] = OutputWebhookQueueFullBehavior.BLOCK
631
686
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
632
687
 
633
- pq_mode: Annotated[
634
- Annotated[
635
- Optional[OutputWebhookMode], PlainValidator(validate_open_enum(False))
636
- ],
637
- pydantic.Field(alias="pqMode"),
638
- ] = OutputWebhookMode.ERROR
639
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
640
-
641
688
  pq_controls: Annotated[
642
689
  Optional[OutputWebhookPqControls], pydantic.Field(alias="pqControls")
643
690
  ] = None
@@ -710,3 +757,75 @@ class OutputWebhook(BaseModel):
710
757
  Optional[float], pydantic.Field(alias="loadBalanceStatsPeriodSec")
711
758
  ] = 300
712
759
  r"""How far back in time to keep traffic stats for load balancing purposes"""
760
+
761
+ @field_serializer("method")
762
+ def serialize_method(self, value):
763
+ if isinstance(value, str):
764
+ try:
765
+ return models.OutputWebhookMethod(value)
766
+ except ValueError:
767
+ return value
768
+ return value
769
+
770
+ @field_serializer("format_")
771
+ def serialize_format_(self, value):
772
+ if isinstance(value, str):
773
+ try:
774
+ return models.OutputWebhookFormat(value)
775
+ except ValueError:
776
+ return value
777
+ return value
778
+
779
+ @field_serializer("failed_request_logging_mode")
780
+ def serialize_failed_request_logging_mode(self, value):
781
+ if isinstance(value, str):
782
+ try:
783
+ return models.OutputWebhookFailedRequestLoggingMode(value)
784
+ except ValueError:
785
+ return value
786
+ return value
787
+
788
+ @field_serializer("on_backpressure")
789
+ def serialize_on_backpressure(self, value):
790
+ if isinstance(value, str):
791
+ try:
792
+ return models.OutputWebhookBackpressureBehavior(value)
793
+ except ValueError:
794
+ return value
795
+ return value
796
+
797
+ @field_serializer("auth_type")
798
+ def serialize_auth_type(self, value):
799
+ if isinstance(value, str):
800
+ try:
801
+ return models.OutputWebhookAuthenticationType(value)
802
+ except ValueError:
803
+ return value
804
+ return value
805
+
806
+ @field_serializer("pq_mode")
807
+ def serialize_pq_mode(self, value):
808
+ if isinstance(value, str):
809
+ try:
810
+ return models.OutputWebhookMode(value)
811
+ except ValueError:
812
+ return value
813
+ return value
814
+
815
+ @field_serializer("pq_compress")
816
+ def serialize_pq_compress(self, value):
817
+ if isinstance(value, str):
818
+ try:
819
+ return models.OutputWebhookCompression(value)
820
+ except ValueError:
821
+ return value
822
+ return value
823
+
824
+ @field_serializer("pq_on_backpressure")
825
+ def serialize_pq_on_backpressure(self, value):
826
+ if isinstance(value, str):
827
+ try:
828
+ return models.OutputWebhookQueueFullBehavior(value)
829
+ except ValueError:
830
+ return value
831
+ return value