cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic. Click here for more details.

Files changed (158)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -118,6 +119,17 @@ class OutputChronicleCustomLabel(BaseModel):
118
119
  value: str
119
120
 
120
121
 
122
+ class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
123
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
124
+
125
+ # Error
126
+ ERROR = "error"
127
+ # Always On
128
+ ALWAYS = "always"
129
+ # Backpressure
130
+ BACKPRESSURE = "backpressure"
131
+
132
+
121
133
  class OutputChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
122
134
  r"""Codec to use to compress the persisted data"""
123
135
 
@@ -136,17 +148,6 @@ class OutputChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
136
148
  DROP = "drop"
137
149
 
138
150
 
139
- class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
140
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
141
-
142
- # Error
143
- ERROR = "error"
144
- # Backpressure
145
- BACKPRESSURE = "backpressure"
146
- # Always On
147
- ALWAYS = "always"
148
-
149
-
150
151
  class OutputChroniclePqControlsTypedDict(TypedDict):
151
152
  pass
152
153
 
@@ -225,6 +226,16 @@ class OutputChronicleTypedDict(TypedDict):
225
226
  r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
226
227
  service_account_credentials_secret: NotRequired[str]
227
228
  r"""Select or create a stored text secret"""
229
+ pq_strict_ordering: NotRequired[bool]
230
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
231
+ pq_rate_per_sec: NotRequired[float]
232
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
233
+ pq_mode: NotRequired[OutputChronicleMode]
234
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
235
+ pq_max_buffer_size: NotRequired[float]
236
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
237
+ pq_max_backpressure_sec: NotRequired[float]
238
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
228
239
  pq_max_file_size: NotRequired[str]
229
240
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
230
241
  pq_max_size: NotRequired[str]
@@ -235,8 +246,6 @@ class OutputChronicleTypedDict(TypedDict):
235
246
  r"""Codec to use to compress the persisted data"""
236
247
  pq_on_backpressure: NotRequired[OutputChronicleQueueFullBehavior]
237
248
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
238
- pq_mode: NotRequired[OutputChronicleMode]
239
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
240
249
  pq_controls: NotRequired[OutputChroniclePqControlsTypedDict]
241
250
 
242
251
 
@@ -400,6 +409,34 @@ class OutputChronicle(BaseModel):
400
409
  ] = None
401
410
  r"""Select or create a stored text secret"""
402
411
 
412
+ pq_strict_ordering: Annotated[
413
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
414
+ ] = True
415
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
416
+
417
+ pq_rate_per_sec: Annotated[
418
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
419
+ ] = 0
420
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
421
+
422
+ pq_mode: Annotated[
423
+ Annotated[
424
+ Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
425
+ ],
426
+ pydantic.Field(alias="pqMode"),
427
+ ] = OutputChronicleMode.ERROR
428
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
429
+
430
+ pq_max_buffer_size: Annotated[
431
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
432
+ ] = 42
433
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
434
+
435
+ pq_max_backpressure_sec: Annotated[
436
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
437
+ ] = 30
438
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
439
+
403
440
  pq_max_file_size: Annotated[
404
441
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
405
442
  ] = "1 MB"
@@ -431,14 +468,60 @@ class OutputChronicle(BaseModel):
431
468
  ] = OutputChronicleQueueFullBehavior.BLOCK
432
469
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
433
470
 
434
- pq_mode: Annotated[
435
- Annotated[
436
- Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
437
- ],
438
- pydantic.Field(alias="pqMode"),
439
- ] = OutputChronicleMode.ERROR
440
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
441
-
442
471
  pq_controls: Annotated[
443
472
  Optional[OutputChroniclePqControls], pydantic.Field(alias="pqControls")
444
473
  ] = None
474
+
475
+ @field_serializer("authentication_method")
476
+ def serialize_authentication_method(self, value):
477
+ if isinstance(value, str):
478
+ try:
479
+ return models.OutputChronicleAuthenticationMethod(value)
480
+ except ValueError:
481
+ return value
482
+ return value
483
+
484
+ @field_serializer("failed_request_logging_mode")
485
+ def serialize_failed_request_logging_mode(self, value):
486
+ if isinstance(value, str):
487
+ try:
488
+ return models.OutputChronicleFailedRequestLoggingMode(value)
489
+ except ValueError:
490
+ return value
491
+ return value
492
+
493
+ @field_serializer("on_backpressure")
494
+ def serialize_on_backpressure(self, value):
495
+ if isinstance(value, str):
496
+ try:
497
+ return models.OutputChronicleBackpressureBehavior(value)
498
+ except ValueError:
499
+ return value
500
+ return value
501
+
502
+ @field_serializer("pq_mode")
503
+ def serialize_pq_mode(self, value):
504
+ if isinstance(value, str):
505
+ try:
506
+ return models.OutputChronicleMode(value)
507
+ except ValueError:
508
+ return value
509
+ return value
510
+
511
+ @field_serializer("pq_compress")
512
+ def serialize_pq_compress(self, value):
513
+ if isinstance(value, str):
514
+ try:
515
+ return models.OutputChronicleCompression(value)
516
+ except ValueError:
517
+ return value
518
+ return value
519
+
520
+ @field_serializer("pq_on_backpressure")
521
+ def serialize_pq_on_backpressure(self, value):
522
+ if isinstance(value, str):
523
+ try:
524
+ return models.OutputChronicleQueueFullBehavior(value)
525
+ except ValueError:
526
+ return value
527
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -114,6 +115,24 @@ class OutputClickHouseTLSSettingsClientSide(BaseModel):
114
115
  pydantic.Field(alias="maxVersion"),
115
116
  ] = None
116
117
 
118
+ @field_serializer("min_version")
119
+ def serialize_min_version(self, value):
120
+ if isinstance(value, str):
121
+ try:
122
+ return models.OutputClickHouseMinimumTLSVersion(value)
123
+ except ValueError:
124
+ return value
125
+ return value
126
+
127
+ @field_serializer("max_version")
128
+ def serialize_max_version(self, value):
129
+ if isinstance(value, str):
130
+ try:
131
+ return models.OutputClickHouseMaximumTLSVersion(value)
132
+ except ValueError:
133
+ return value
134
+ return value
135
+
117
136
 
118
137
  class OutputClickHouseExtraHTTPHeaderTypedDict(TypedDict):
119
138
  value: str
@@ -254,6 +273,17 @@ class ColumnMapping(BaseModel):
254
273
  r"""Type of the column in the ClickHouse database"""
255
274
 
256
275
 
276
+ class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
277
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
278
+
279
+ # Error
280
+ ERROR = "error"
281
+ # Always On
282
+ ALWAYS = "always"
283
+ # Backpressure
284
+ BACKPRESSURE = "backpressure"
285
+
286
+
257
287
  class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
258
288
  r"""Codec to use to compress the persisted data"""
259
289
 
@@ -272,17 +302,6 @@ class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
272
302
  DROP = "drop"
273
303
 
274
304
 
275
- class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
276
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
277
-
278
- # Error
279
- ERROR = "error"
280
- # Backpressure
281
- BACKPRESSURE = "backpressure"
282
- # Always On
283
- ALWAYS = "always"
284
-
285
-
286
305
  class OutputClickHousePqControlsTypedDict(TypedDict):
287
306
  pass
288
307
 
@@ -386,6 +405,16 @@ class OutputClickHouseTypedDict(TypedDict):
386
405
  describe_table: NotRequired[str]
387
406
  r"""Retrieves the table schema from ClickHouse and populates the Column Mapping table"""
388
407
  column_mappings: NotRequired[List[ColumnMappingTypedDict]]
408
+ pq_strict_ordering: NotRequired[bool]
409
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
410
+ pq_rate_per_sec: NotRequired[float]
411
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
412
+ pq_mode: NotRequired[OutputClickHouseMode]
413
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
414
+ pq_max_buffer_size: NotRequired[float]
415
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
416
+ pq_max_backpressure_sec: NotRequired[float]
417
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
389
418
  pq_max_file_size: NotRequired[str]
390
419
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
391
420
  pq_max_size: NotRequired[str]
@@ -396,8 +425,6 @@ class OutputClickHouseTypedDict(TypedDict):
396
425
  r"""Codec to use to compress the persisted data"""
397
426
  pq_on_backpressure: NotRequired[OutputClickHouseQueueFullBehavior]
398
427
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
399
- pq_mode: NotRequired[OutputClickHouseMode]
400
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
401
428
  pq_controls: NotRequired[OutputClickHousePqControlsTypedDict]
402
429
 
403
430
 
@@ -621,6 +648,34 @@ class OutputClickHouse(BaseModel):
621
648
  Optional[List[ColumnMapping]], pydantic.Field(alias="columnMappings")
622
649
  ] = None
623
650
 
651
+ pq_strict_ordering: Annotated[
652
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
653
+ ] = True
654
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
655
+
656
+ pq_rate_per_sec: Annotated[
657
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
658
+ ] = 0
659
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
660
+
661
+ pq_mode: Annotated[
662
+ Annotated[
663
+ Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
664
+ ],
665
+ pydantic.Field(alias="pqMode"),
666
+ ] = OutputClickHouseMode.ERROR
667
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
668
+
669
+ pq_max_buffer_size: Annotated[
670
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
671
+ ] = 42
672
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
673
+
674
+ pq_max_backpressure_sec: Annotated[
675
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
676
+ ] = 30
677
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
678
+
624
679
  pq_max_file_size: Annotated[
625
680
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
626
681
  ] = "1 MB"
@@ -652,14 +707,78 @@ class OutputClickHouse(BaseModel):
652
707
  ] = OutputClickHouseQueueFullBehavior.BLOCK
653
708
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
654
709
 
655
- pq_mode: Annotated[
656
- Annotated[
657
- Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
658
- ],
659
- pydantic.Field(alias="pqMode"),
660
- ] = OutputClickHouseMode.ERROR
661
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
662
-
663
710
  pq_controls: Annotated[
664
711
  Optional[OutputClickHousePqControls], pydantic.Field(alias="pqControls")
665
712
  ] = None
713
+
714
+ @field_serializer("auth_type")
715
+ def serialize_auth_type(self, value):
716
+ if isinstance(value, str):
717
+ try:
718
+ return models.OutputClickHouseAuthenticationType(value)
719
+ except ValueError:
720
+ return value
721
+ return value
722
+
723
+ @field_serializer("format_")
724
+ def serialize_format_(self, value):
725
+ if isinstance(value, str):
726
+ try:
727
+ return models.OutputClickHouseFormat(value)
728
+ except ValueError:
729
+ return value
730
+ return value
731
+
732
+ @field_serializer("mapping_type")
733
+ def serialize_mapping_type(self, value):
734
+ if isinstance(value, str):
735
+ try:
736
+ return models.MappingType(value)
737
+ except ValueError:
738
+ return value
739
+ return value
740
+
741
+ @field_serializer("failed_request_logging_mode")
742
+ def serialize_failed_request_logging_mode(self, value):
743
+ if isinstance(value, str):
744
+ try:
745
+ return models.OutputClickHouseFailedRequestLoggingMode(value)
746
+ except ValueError:
747
+ return value
748
+ return value
749
+
750
+ @field_serializer("on_backpressure")
751
+ def serialize_on_backpressure(self, value):
752
+ if isinstance(value, str):
753
+ try:
754
+ return models.OutputClickHouseBackpressureBehavior(value)
755
+ except ValueError:
756
+ return value
757
+ return value
758
+
759
+ @field_serializer("pq_mode")
760
+ def serialize_pq_mode(self, value):
761
+ if isinstance(value, str):
762
+ try:
763
+ return models.OutputClickHouseMode(value)
764
+ except ValueError:
765
+ return value
766
+ return value
767
+
768
+ @field_serializer("pq_compress")
769
+ def serialize_pq_compress(self, value):
770
+ if isinstance(value, str):
771
+ try:
772
+ return models.OutputClickHouseCompression(value)
773
+ except ValueError:
774
+ return value
775
+ return value
776
+
777
+ @field_serializer("pq_on_backpressure")
778
+ def serialize_pq_on_backpressure(self, value):
779
+ if isinstance(value, str):
780
+ try:
781
+ return models.OutputClickHouseQueueFullBehavior(value)
782
+ except ValueError:
783
+ return value
784
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -37,6 +38,17 @@ class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMe
37
38
  QUEUE = "queue"
38
39
 
39
40
 
41
+ class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
42
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
43
+
44
+ # Error
45
+ ERROR = "error"
46
+ # Always On
47
+ ALWAYS = "always"
48
+ # Backpressure
49
+ BACKPRESSURE = "backpressure"
50
+
51
+
40
52
  class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
41
53
  r"""Codec to use to compress the persisted data"""
42
54
 
@@ -55,17 +67,6 @@ class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
55
67
  DROP = "drop"
56
68
 
57
69
 
58
- class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
59
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
60
-
61
- # Error
62
- ERROR = "error"
63
- # Backpressure
64
- BACKPRESSURE = "backpressure"
65
- # Always On
66
- ALWAYS = "always"
67
-
68
-
69
70
  class OutputCloudwatchPqControlsTypedDict(TypedDict):
70
71
  pass
71
72
 
@@ -121,6 +122,16 @@ class OutputCloudwatchTypedDict(TypedDict):
121
122
  aws_api_key: NotRequired[str]
122
123
  aws_secret: NotRequired[str]
123
124
  r"""Select or create a stored secret that references your access key and secret key"""
125
+ pq_strict_ordering: NotRequired[bool]
126
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
127
+ pq_rate_per_sec: NotRequired[float]
128
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
129
+ pq_mode: NotRequired[OutputCloudwatchMode]
130
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
131
+ pq_max_buffer_size: NotRequired[float]
132
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
133
+ pq_max_backpressure_sec: NotRequired[float]
134
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
124
135
  pq_max_file_size: NotRequired[str]
125
136
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
126
137
  pq_max_size: NotRequired[str]
@@ -131,8 +142,6 @@ class OutputCloudwatchTypedDict(TypedDict):
131
142
  r"""Codec to use to compress the persisted data"""
132
143
  pq_on_backpressure: NotRequired[OutputCloudwatchQueueFullBehavior]
133
144
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
134
- pq_mode: NotRequired[OutputCloudwatchMode]
135
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
136
145
  pq_controls: NotRequired[OutputCloudwatchPqControlsTypedDict]
137
146
 
138
147
 
@@ -240,6 +249,34 @@ class OutputCloudwatch(BaseModel):
240
249
  aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
241
250
  r"""Select or create a stored secret that references your access key and secret key"""
242
251
 
252
+ pq_strict_ordering: Annotated[
253
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
254
+ ] = True
255
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
256
+
257
+ pq_rate_per_sec: Annotated[
258
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
259
+ ] = 0
260
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
261
+
262
+ pq_mode: Annotated[
263
+ Annotated[
264
+ Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
265
+ ],
266
+ pydantic.Field(alias="pqMode"),
267
+ ] = OutputCloudwatchMode.ERROR
268
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
269
+
270
+ pq_max_buffer_size: Annotated[
271
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
272
+ ] = 42
273
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
274
+
275
+ pq_max_backpressure_sec: Annotated[
276
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
277
+ ] = 30
278
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
279
+
243
280
  pq_max_file_size: Annotated[
244
281
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
245
282
  ] = "1 MB"
@@ -271,14 +308,51 @@ class OutputCloudwatch(BaseModel):
271
308
  ] = OutputCloudwatchQueueFullBehavior.BLOCK
272
309
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
273
310
 
274
- pq_mode: Annotated[
275
- Annotated[
276
- Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
277
- ],
278
- pydantic.Field(alias="pqMode"),
279
- ] = OutputCloudwatchMode.ERROR
280
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
281
-
282
311
  pq_controls: Annotated[
283
312
  Optional[OutputCloudwatchPqControls], pydantic.Field(alias="pqControls")
284
313
  ] = None
314
+
315
+ @field_serializer("aws_authentication_method")
316
+ def serialize_aws_authentication_method(self, value):
317
+ if isinstance(value, str):
318
+ try:
319
+ return models.OutputCloudwatchAuthenticationMethod(value)
320
+ except ValueError:
321
+ return value
322
+ return value
323
+
324
+ @field_serializer("on_backpressure")
325
+ def serialize_on_backpressure(self, value):
326
+ if isinstance(value, str):
327
+ try:
328
+ return models.OutputCloudwatchBackpressureBehavior(value)
329
+ except ValueError:
330
+ return value
331
+ return value
332
+
333
+ @field_serializer("pq_mode")
334
+ def serialize_pq_mode(self, value):
335
+ if isinstance(value, str):
336
+ try:
337
+ return models.OutputCloudwatchMode(value)
338
+ except ValueError:
339
+ return value
340
+ return value
341
+
342
+ @field_serializer("pq_compress")
343
+ def serialize_pq_compress(self, value):
344
+ if isinstance(value, str):
345
+ try:
346
+ return models.OutputCloudwatchCompression(value)
347
+ except ValueError:
348
+ return value
349
+ return value
350
+
351
+ @field_serializer("pq_on_backpressure")
352
+ def serialize_pq_on_backpressure(self, value):
353
+ if isinstance(value, str):
354
+ try:
355
+ return models.OutputCloudwatchQueueFullBehavior(value)
356
+ except ValueError:
357
+ return value
358
+ return value