cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

This version of cribl-control-plane has been flagged as potentially problematic.

Files changed (158)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputloki.py

@@ -1,11 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
  
  from __future__ import annotations
- from cribl_control_plane import utils
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -135,6 +136,17 @@ class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      QUEUE = "queue"
  
  
+ class OutputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+ 
+     # Error
+     ERROR = "error"
+     # Backpressure
+     ALWAYS = "always"
+     # Always On
+     BACKPRESSURE = "backpressure"
+ 
+ 
  class OutputLokiCompression(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Codec to use to compress the persisted data"""
  
@@ -153,17 +165,6 @@ class OutputLokiQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      DROP = "drop"
  
  
- class OutputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
- 
-     # Error
-     ERROR = "error"
-     # Backpressure
-     BACKPRESSURE = "backpressure"
-     # Always On
-     ALWAYS = "always"
- 
- 
  class OutputLokiPqControlsTypedDict(TypedDict):
      pass
  
@@ -240,6 +241,16 @@ class OutputLokiTypedDict(TypedDict):
      r"""Password (API key in Grafana Cloud domain) for authentication"""
      credentials_secret: NotRequired[str]
      r"""Select or create a secret that references your credentials"""
+     pq_strict_ordering: NotRequired[bool]
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+     pq_rate_per_sec: NotRequired[float]
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+     pq_mode: NotRequired[OutputLokiMode]
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+     pq_max_buffer_size: NotRequired[float]
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+     pq_max_backpressure_sec: NotRequired[float]
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
      pq_max_file_size: NotRequired[str]
      r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
      pq_max_size: NotRequired[str]
@@ -250,8 +261,6 @@ class OutputLokiTypedDict(TypedDict):
      r"""Codec to use to compress the persisted data"""
      pq_on_backpressure: NotRequired[OutputLokiQueueFullBehavior]
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-     pq_mode: NotRequired[OutputLokiMode]
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
      pq_controls: NotRequired[OutputLokiPqControlsTypedDict]
  
  
@@ -411,6 +420,32 @@ class OutputLoki(BaseModel):
      ] = None
      r"""Select or create a secret that references your credentials"""
  
+     pq_strict_ordering: Annotated[
+         Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+     ] = True
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+ 
+     pq_rate_per_sec: Annotated[
+         Optional[float], pydantic.Field(alias="pqRatePerSec")
+     ] = 0
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+ 
+     pq_mode: Annotated[
+         Annotated[Optional[OutputLokiMode], PlainValidator(validate_open_enum(False))],
+         pydantic.Field(alias="pqMode"),
+     ] = OutputLokiMode.ERROR
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+ 
+     pq_max_buffer_size: Annotated[
+         Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+     ] = 42
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+ 
+     pq_max_backpressure_sec: Annotated[
+         Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+     ] = 30
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+ 
      pq_max_file_size: Annotated[
          Optional[str], pydantic.Field(alias="pqMaxFileSize")
      ] = "1 MB"
@@ -441,12 +476,69 @@
      ] = OutputLokiQueueFullBehavior.BLOCK
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
  
-     pq_mode: Annotated[
-         Annotated[Optional[OutputLokiMode], PlainValidator(validate_open_enum(False))],
-         pydantic.Field(alias="pqMode"),
-     ] = OutputLokiMode.ERROR
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
- 
      pq_controls: Annotated[
          Optional[OutputLokiPqControls], pydantic.Field(alias="pqControls")
      ] = None
+ 
+     @field_serializer("message_format")
+     def serialize_message_format(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputLokiMessageFormat(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("auth_type")
+     def serialize_auth_type(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputLokiAuthenticationType(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("failed_request_logging_mode")
+     def serialize_failed_request_logging_mode(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputLokiFailedRequestLoggingMode(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("on_backpressure")
+     def serialize_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputLokiBackpressureBehavior(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("pq_mode")
+     def serialize_pq_mode(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputLokiMode(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("pq_compress")
+     def serialize_pq_compress(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputLokiCompression(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("pq_on_backpressure")
+     def serialize_pq_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputLokiQueueFullBehavior(value)
+             except ValueError:
+                 return value
+         return value
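
The @field_serializer methods added across these Destination models all follow one pattern: on serialization, a raw string is coerced back to the corresponding enum when it matches a known member, and passed through untouched otherwise, so values outside the generated enum survive a round trip. Below is a standalone sketch of that pattern, not the SDK's own code; PqMode and QueueSettings are illustrative stand-ins.

```python
from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel, field_serializer


class PqMode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class QueueSettings(BaseModel):
    # The Union lets unknown strings through on input, mirroring the SDK's
    # open-enum validators; the serializer coerces known values back to the
    # enum on output and passes unknown values through unchanged.
    pq_mode: Optional[Union[PqMode, str]] = PqMode.ERROR

    @field_serializer("pq_mode")
    def serialize_pq_mode(self, value):
        if isinstance(value, str):
            try:
                return PqMode(value)   # known value -> enum member
            except ValueError:
                return value           # unknown value passes through
        return value


print(QueueSettings(pq_mode="backpressure").model_dump())  # {'pq_mode': <PqMode.BACKPRESSURE: 'backpressure'>}
print(QueueSettings(pq_mode="not-a-mode").model_dump())    # {'pq_mode': 'not-a-mode'}
```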
cribl_control_plane/models/outputminio.py

@@ -1,11 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
  
  from __future__ import annotations
- from cribl_control_plane import utils
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -229,6 +230,8 @@ class OutputMinioTypedDict(TypedDict):
      r"""Compression level to apply before moving files to final destination"""
      automatic_schema: NotRequired[bool]
      r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+     parquet_schema: NotRequired[str]
+     r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
      parquet_version: NotRequired[OutputMinioParquetVersion]
      r"""Determines which data types are supported and how they are represented"""
      parquet_data_page_version: NotRequired[OutputMinioDataPageVersion]
@@ -470,6 +473,11 @@ class OutputMinio(BaseModel):
      ] = False
      r"""Automatically calculate the schema based on the events of each Parquet file generated"""
  
+     parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
+         None
+     )
+     r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
+ 
      parquet_version: Annotated[
          Annotated[
              Optional[OutputMinioParquetVersion],
@@ -536,3 +544,111 @@
  
      max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
      r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+ 
+     @field_serializer("aws_authentication_method")
+     def serialize_aws_authentication_method(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioAuthenticationMethod(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("signature_version")
+     def serialize_signature_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioSignatureVersion(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("object_acl")
+     def serialize_object_acl(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioObjectACL(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("storage_class")
+     def serialize_storage_class(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioStorageClass(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("server_side_encryption")
+     def serialize_server_side_encryption(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.ServerSideEncryption(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("format_")
+     def serialize_format_(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioDataFormat(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("on_backpressure")
+     def serialize_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioBackpressureBehavior(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("on_disk_full_backpressure")
+     def serialize_on_disk_full_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioDiskSpaceProtection(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("compress")
+     def serialize_compress(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioCompression(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("compression_level")
+     def serialize_compression_level(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioCompressionLevel(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("parquet_version")
+     def serialize_parquet_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioParquetVersion(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("parquet_data_page_version")
+     def serialize_parquet_data_page_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMinioDataPageVersion(value)
+             except ValueError:
+                 return value
+         return value
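
Apart from the serializer boilerplate, the only field-level change to OutputMinio is the optional Parquet schema reference. A hypothetical config fragment is shown below; only the wire alias "parquetSchema" and its None default come from the diff above, and the schema name is made up (per the field's docstring, schemas are managed under Processing > Knowledge > Parquet Schemas).

```python
# Hypothetical OutputMinio config fragment; only the alias "parquetSchema"
# is taken from the diff above, the schema name is illustrative.
minio_parquet_update = {
    "parquetSchema": "my_events_schema",  # new in 0.3.0b12; defaults to None when omitted
}
```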
cribl_control_plane/models/outputmsk.py

@@ -1,11 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
  
  from __future__ import annotations
- from cribl_control_plane import utils
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -48,6 +49,8 @@ class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
      SNAPPY = "snappy"
      # LZ4
      LZ4 = "lz4"
+     # ZSTD
+     ZSTD = "zstd"
  
  
  class OutputMskAuthTypedDict(TypedDict):
@@ -155,6 +158,24 @@ class OutputMskKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
          pydantic.Field(alias="maxVersion"),
      ] = None
  
+     @field_serializer("min_version")
+     def serialize_min_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskKafkaSchemaRegistryMinimumTLSVersion(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("max_version")
+     def serialize_max_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskKafkaSchemaRegistryMaximumTLSVersion(value)
+             except ValueError:
+                 return value
+         return value
+ 
  
  class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
      disabled: NotRequired[bool]
@@ -312,6 +333,24 @@ class OutputMskTLSSettingsClientSide(BaseModel):
          pydantic.Field(alias="maxVersion"),
      ] = None
  
+     @field_serializer("min_version")
+     def serialize_min_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskMinimumTLSVersion(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("max_version")
+     def serialize_max_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskMaximumTLSVersion(value)
+             except ValueError:
+                 return value
+         return value
+ 
  
  class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""How to handle events when all receivers are exerting backpressure"""
@@ -324,6 +363,17 @@ class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      QUEUE = "queue"
  
  
+ class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+ 
+     # Error
+     ERROR = "error"
+     # Backpressure
+     ALWAYS = "always"
+     # Always On
+     BACKPRESSURE = "backpressure"
+ 
+ 
  class OutputMskPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Codec to use to compress the persisted data"""
  
@@ -342,17 +392,6 @@ class OutputMskQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      DROP = "drop"
  
  
- class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
- 
-     # Error
-     ERROR = "error"
-     # Backpressure
-     BACKPRESSURE = "backpressure"
-     # Always On
-     ALWAYS = "always"
- 
- 
  class OutputMskPqControlsTypedDict(TypedDict):
      pass
  
@@ -438,6 +477,18 @@ class OutputMskTypedDict(TypedDict):
      r"""Select or create a stored secret that references your access key and secret key"""
      protobuf_library_id: NotRequired[str]
      r"""Select a set of Protobuf definitions for the events you want to send"""
+     protobuf_encoding_id: NotRequired[str]
+     r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+     pq_strict_ordering: NotRequired[bool]
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+     pq_rate_per_sec: NotRequired[float]
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+     pq_mode: NotRequired[OutputMskMode]
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+     pq_max_buffer_size: NotRequired[float]
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+     pq_max_backpressure_sec: NotRequired[float]
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
      pq_max_file_size: NotRequired[str]
      r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
      pq_max_size: NotRequired[str]
@@ -448,8 +499,6 @@ class OutputMskTypedDict(TypedDict):
      r"""Codec to use to compress the persisted data"""
      pq_on_backpressure: NotRequired[OutputMskQueueFullBehavior]
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-     pq_mode: NotRequired[OutputMskMode]
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
      pq_controls: NotRequired[OutputMskPqControlsTypedDict]
  
  
@@ -633,6 +682,37 @@
      ] = None
      r"""Select a set of Protobuf definitions for the events you want to send"""
  
+     protobuf_encoding_id: Annotated[
+         Optional[str], pydantic.Field(alias="protobufEncodingId")
+     ] = None
+     r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+ 
+     pq_strict_ordering: Annotated[
+         Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+     ] = True
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+ 
+     pq_rate_per_sec: Annotated[
+         Optional[float], pydantic.Field(alias="pqRatePerSec")
+     ] = 0
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+ 
+     pq_mode: Annotated[
+         Annotated[Optional[OutputMskMode], PlainValidator(validate_open_enum(False))],
+         pydantic.Field(alias="pqMode"),
+     ] = OutputMskMode.ERROR
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+ 
+     pq_max_buffer_size: Annotated[
+         Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+     ] = 42
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+ 
+     pq_max_backpressure_sec: Annotated[
+         Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+     ] = 30
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+ 
      pq_max_file_size: Annotated[
          Optional[str], pydantic.Field(alias="pqMaxFileSize")
      ] = "1 MB"
@@ -664,12 +744,87 @@
      ] = OutputMskQueueFullBehavior.BLOCK
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
  
-     pq_mode: Annotated[
-         Annotated[Optional[OutputMskMode], PlainValidator(validate_open_enum(False))],
-         pydantic.Field(alias="pqMode"),
-     ] = OutputMskMode.ERROR
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
- 
      pq_controls: Annotated[
          Optional[OutputMskPqControls], pydantic.Field(alias="pqControls")
      ] = None
+ 
+     @field_serializer("ack")
+     def serialize_ack(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskAcknowledgments(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("format_")
+     def serialize_format_(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskRecordDataFormat(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("compression")
+     def serialize_compression(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskCompression(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("aws_authentication_method")
+     def serialize_aws_authentication_method(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskAuthenticationMethod(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("signature_version")
+     def serialize_signature_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskSignatureVersion(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("on_backpressure")
+     def serialize_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskBackpressureBehavior(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("pq_mode")
+     def serialize_pq_mode(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskMode(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("pq_compress")
+     def serialize_pq_compress(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskPqCompressCompression(value)
+             except ValueError:
+                 return value
+         return value
+ 
+     @field_serializer("pq_on_backpressure")
+     def serialize_pq_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputMskQueueFullBehavior(value)
+             except ValueError:
+                 return value
+         return value
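
The persistent-queue fields added to OutputLoki and OutputMsk above (and, judging by the file list, to most other Destination models in this release) map to camelCase wire aliases. A hedged sketch of the new knobs as a plain payload, with the defaults taken from the model declarations above:

```python
# Destination-config fragment limited to the persistent-queue fields added in
# 0.3.0b12; the values shown are the defaults declared in the models above.
pq_defaults = {
    "pqStrictOrdering": True,    # FIFO processing; disable to forward new events before the queue is flushed
    "pqRatePerSec": 0,           # events/sec throttle while draining the PQ; 0 disables throttling
    "pqMode": "error",           # one of "error", "backpressure", "always"
    "pqMaxBufferSize": 42,       # events held in memory before writing to disk
    "pqMaxBackpressureSec": 30,  # seconds to wait for backpressure to resolve before engaging the queue
    "pqMaxFileSize": "1 MB",     # pre-existing setting, shown for context
}
```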