cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic. Click here for more details.

Files changed (158) hide show
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -91,6 +92,17 @@ class OutputGoogleCloudLoggingBackpressureBehavior(
91
92
  QUEUE = "queue"
92
93
 
93
94
 
95
+ class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
96
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
97
+
98
+ # Error
99
+ ERROR = "error"
100
+ # Always On
101
+ ALWAYS = "always"
102
+ # Backpressure
103
+ BACKPRESSURE = "backpressure"
104
+
105
+
94
106
  class OutputGoogleCloudLoggingCompression(str, Enum, metaclass=utils.OpenEnumMeta):
95
107
  r"""Codec to use to compress the persisted data"""
96
108
 
@@ -111,17 +123,6 @@ class OutputGoogleCloudLoggingQueueFullBehavior(
111
123
  DROP = "drop"
112
124
 
113
125
 
114
- class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
115
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
116
-
117
- # Error
118
- ERROR = "error"
119
- # Backpressure
120
- BACKPRESSURE = "backpressure"
121
- # Always On
122
- ALWAYS = "always"
123
-
124
-
125
126
  class OutputGoogleCloudLoggingPqControlsTypedDict(TypedDict):
126
127
  pass
127
128
 
@@ -243,6 +244,16 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
243
244
  description: NotRequired[str]
244
245
  payload_expression: NotRequired[str]
245
246
  r"""JavaScript expression to compute the value of the payload. Must evaluate to a JavaScript object value. If an invalid value is encountered it will result in the default value instead. Defaults to the entire event."""
247
+ pq_strict_ordering: NotRequired[bool]
248
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
249
+ pq_rate_per_sec: NotRequired[float]
250
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
251
+ pq_mode: NotRequired[OutputGoogleCloudLoggingMode]
252
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
253
+ pq_max_buffer_size: NotRequired[float]
254
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
255
+ pq_max_backpressure_sec: NotRequired[float]
256
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
246
257
  pq_max_file_size: NotRequired[str]
247
258
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
248
259
  pq_max_size: NotRequired[str]
@@ -253,8 +264,6 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
253
264
  r"""Codec to use to compress the persisted data"""
254
265
  pq_on_backpressure: NotRequired[OutputGoogleCloudLoggingQueueFullBehavior]
255
266
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
256
- pq_mode: NotRequired[OutputGoogleCloudLoggingMode]
257
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
258
267
  pq_controls: NotRequired[OutputGoogleCloudLoggingPqControlsTypedDict]
259
268
 
260
269
 
@@ -533,6 +542,35 @@ class OutputGoogleCloudLogging(BaseModel):
533
542
  ] = None
534
543
  r"""JavaScript expression to compute the value of the payload. Must evaluate to a JavaScript object value. If an invalid value is encountered it will result in the default value instead. Defaults to the entire event."""
535
544
 
545
+ pq_strict_ordering: Annotated[
546
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
547
+ ] = True
548
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
549
+
550
+ pq_rate_per_sec: Annotated[
551
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
552
+ ] = 0
553
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
554
+
555
+ pq_mode: Annotated[
556
+ Annotated[
557
+ Optional[OutputGoogleCloudLoggingMode],
558
+ PlainValidator(validate_open_enum(False)),
559
+ ],
560
+ pydantic.Field(alias="pqMode"),
561
+ ] = OutputGoogleCloudLoggingMode.ERROR
562
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
563
+
564
+ pq_max_buffer_size: Annotated[
565
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
566
+ ] = 42
567
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
568
+
569
+ pq_max_backpressure_sec: Annotated[
570
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
571
+ ] = 30
572
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
573
+
536
574
  pq_max_file_size: Annotated[
537
575
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
538
576
  ] = "1 MB"
@@ -564,15 +602,69 @@ class OutputGoogleCloudLogging(BaseModel):
564
602
  ] = OutputGoogleCloudLoggingQueueFullBehavior.BLOCK
565
603
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
566
604
 
567
- pq_mode: Annotated[
568
- Annotated[
569
- Optional[OutputGoogleCloudLoggingMode],
570
- PlainValidator(validate_open_enum(False)),
571
- ],
572
- pydantic.Field(alias="pqMode"),
573
- ] = OutputGoogleCloudLoggingMode.ERROR
574
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
575
-
576
605
  pq_controls: Annotated[
577
606
  Optional[OutputGoogleCloudLoggingPqControls], pydantic.Field(alias="pqControls")
578
607
  ] = None
608
+
609
+ @field_serializer("log_location_type")
610
+ def serialize_log_location_type(self, value):
611
+ if isinstance(value, str):
612
+ try:
613
+ return models.LogLocationType(value)
614
+ except ValueError:
615
+ return value
616
+ return value
617
+
618
+ @field_serializer("payload_format")
619
+ def serialize_payload_format(self, value):
620
+ if isinstance(value, str):
621
+ try:
622
+ return models.PayloadFormat(value)
623
+ except ValueError:
624
+ return value
625
+ return value
626
+
627
+ @field_serializer("google_auth_method")
628
+ def serialize_google_auth_method(self, value):
629
+ if isinstance(value, str):
630
+ try:
631
+ return models.OutputGoogleCloudLoggingGoogleAuthenticationMethod(value)
632
+ except ValueError:
633
+ return value
634
+ return value
635
+
636
+ @field_serializer("on_backpressure")
637
+ def serialize_on_backpressure(self, value):
638
+ if isinstance(value, str):
639
+ try:
640
+ return models.OutputGoogleCloudLoggingBackpressureBehavior(value)
641
+ except ValueError:
642
+ return value
643
+ return value
644
+
645
+ @field_serializer("pq_mode")
646
+ def serialize_pq_mode(self, value):
647
+ if isinstance(value, str):
648
+ try:
649
+ return models.OutputGoogleCloudLoggingMode(value)
650
+ except ValueError:
651
+ return value
652
+ return value
653
+
654
+ @field_serializer("pq_compress")
655
+ def serialize_pq_compress(self, value):
656
+ if isinstance(value, str):
657
+ try:
658
+ return models.OutputGoogleCloudLoggingCompression(value)
659
+ except ValueError:
660
+ return value
661
+ return value
662
+
663
+ @field_serializer("pq_on_backpressure")
664
+ def serialize_pq_on_backpressure(self, value):
665
+ if isinstance(value, str):
666
+ try:
667
+ return models.OutputGoogleCloudLoggingQueueFullBehavior(value)
668
+ except ValueError:
669
+ return value
670
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -217,6 +218,8 @@ class OutputGoogleCloudStorageTypedDict(TypedDict):
217
218
  r"""Compression level to apply before moving files to final destination"""
218
219
  automatic_schema: NotRequired[bool]
219
220
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
221
+ parquet_schema: NotRequired[str]
222
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
220
223
  parquet_version: NotRequired[OutputGoogleCloudStorageParquetVersion]
221
224
  r"""Determines which data types are supported and how they are represented"""
222
225
  parquet_data_page_version: NotRequired[OutputGoogleCloudStorageDataPageVersion]
@@ -445,6 +448,11 @@ class OutputGoogleCloudStorage(BaseModel):
445
448
  ] = False
446
449
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
447
450
 
451
+ parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
452
+ None
453
+ )
454
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
455
+
448
456
  parquet_version: Annotated[
449
457
  Annotated[
450
458
  Optional[OutputGoogleCloudStorageParquetVersion],
@@ -522,3 +530,102 @@ class OutputGoogleCloudStorage(BaseModel):
522
530
 
523
531
  aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
524
532
  r"""Select or create a stored secret that references your access key and secret key"""
533
+
534
+ @field_serializer("signature_version")
535
+ def serialize_signature_version(self, value):
536
+ if isinstance(value, str):
537
+ try:
538
+ return models.OutputGoogleCloudStorageSignatureVersion(value)
539
+ except ValueError:
540
+ return value
541
+ return value
542
+
543
+ @field_serializer("aws_authentication_method")
544
+ def serialize_aws_authentication_method(self, value):
545
+ if isinstance(value, str):
546
+ try:
547
+ return models.OutputGoogleCloudStorageAuthenticationMethod(value)
548
+ except ValueError:
549
+ return value
550
+ return value
551
+
552
+ @field_serializer("object_acl")
553
+ def serialize_object_acl(self, value):
554
+ if isinstance(value, str):
555
+ try:
556
+ return models.OutputGoogleCloudStorageObjectACL(value)
557
+ except ValueError:
558
+ return value
559
+ return value
560
+
561
+ @field_serializer("storage_class")
562
+ def serialize_storage_class(self, value):
563
+ if isinstance(value, str):
564
+ try:
565
+ return models.OutputGoogleCloudStorageStorageClass(value)
566
+ except ValueError:
567
+ return value
568
+ return value
569
+
570
+ @field_serializer("format_")
571
+ def serialize_format_(self, value):
572
+ if isinstance(value, str):
573
+ try:
574
+ return models.OutputGoogleCloudStorageDataFormat(value)
575
+ except ValueError:
576
+ return value
577
+ return value
578
+
579
+ @field_serializer("on_backpressure")
580
+ def serialize_on_backpressure(self, value):
581
+ if isinstance(value, str):
582
+ try:
583
+ return models.OutputGoogleCloudStorageBackpressureBehavior(value)
584
+ except ValueError:
585
+ return value
586
+ return value
587
+
588
+ @field_serializer("on_disk_full_backpressure")
589
+ def serialize_on_disk_full_backpressure(self, value):
590
+ if isinstance(value, str):
591
+ try:
592
+ return models.OutputGoogleCloudStorageDiskSpaceProtection(value)
593
+ except ValueError:
594
+ return value
595
+ return value
596
+
597
+ @field_serializer("compress")
598
+ def serialize_compress(self, value):
599
+ if isinstance(value, str):
600
+ try:
601
+ return models.OutputGoogleCloudStorageCompression(value)
602
+ except ValueError:
603
+ return value
604
+ return value
605
+
606
+ @field_serializer("compression_level")
607
+ def serialize_compression_level(self, value):
608
+ if isinstance(value, str):
609
+ try:
610
+ return models.OutputGoogleCloudStorageCompressionLevel(value)
611
+ except ValueError:
612
+ return value
613
+ return value
614
+
615
+ @field_serializer("parquet_version")
616
+ def serialize_parquet_version(self, value):
617
+ if isinstance(value, str):
618
+ try:
619
+ return models.OutputGoogleCloudStorageParquetVersion(value)
620
+ except ValueError:
621
+ return value
622
+ return value
623
+
624
+ @field_serializer("parquet_data_page_version")
625
+ def serialize_parquet_data_page_version(self, value):
626
+ if isinstance(value, str):
627
+ try:
628
+ return models.OutputGoogleCloudStorageDataPageVersion(value)
629
+ except ValueError:
630
+ return value
631
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -39,6 +40,17 @@ class OutputGooglePubsubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnum
39
40
  QUEUE = "queue"
40
41
 
41
42
 
43
+ class OutputGooglePubsubMode(str, Enum, metaclass=utils.OpenEnumMeta):
44
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
45
+
46
+ # Error
47
+ ERROR = "error"
48
+ # Always On
49
+ ALWAYS = "always"
50
+ # Backpressure
51
+ BACKPRESSURE = "backpressure"
52
+
53
+
42
54
  class OutputGooglePubsubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
43
55
  r"""Codec to use to compress the persisted data"""
44
56
 
@@ -57,17 +69,6 @@ class OutputGooglePubsubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMet
57
69
  DROP = "drop"
58
70
 
59
71
 
60
- class OutputGooglePubsubMode(str, Enum, metaclass=utils.OpenEnumMeta):
61
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
62
-
63
- # Error
64
- ERROR = "error"
65
- # Backpressure
66
- BACKPRESSURE = "backpressure"
67
- # Always On
68
- ALWAYS = "always"
69
-
70
-
71
72
  class OutputGooglePubsubPqControlsTypedDict(TypedDict):
72
73
  pass
73
74
 
@@ -117,6 +118,16 @@ class OutputGooglePubsubTypedDict(TypedDict):
117
118
  on_backpressure: NotRequired[OutputGooglePubsubBackpressureBehavior]
118
119
  r"""How to handle events when all receivers are exerting backpressure"""
119
120
  description: NotRequired[str]
121
+ pq_strict_ordering: NotRequired[bool]
122
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
123
+ pq_rate_per_sec: NotRequired[float]
124
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
125
+ pq_mode: NotRequired[OutputGooglePubsubMode]
126
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
127
+ pq_max_buffer_size: NotRequired[float]
128
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
129
+ pq_max_backpressure_sec: NotRequired[float]
130
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
120
131
  pq_max_file_size: NotRequired[str]
121
132
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
122
133
  pq_max_size: NotRequired[str]
@@ -127,8 +138,6 @@ class OutputGooglePubsubTypedDict(TypedDict):
127
138
  r"""Codec to use to compress the persisted data"""
128
139
  pq_on_backpressure: NotRequired[OutputGooglePubsubQueueFullBehavior]
129
140
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
130
- pq_mode: NotRequired[OutputGooglePubsubMode]
131
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
132
141
  pq_controls: NotRequired[OutputGooglePubsubPqControlsTypedDict]
133
142
 
134
143
 
@@ -220,6 +229,34 @@ class OutputGooglePubsub(BaseModel):
220
229
 
221
230
  description: Optional[str] = None
222
231
 
232
+ pq_strict_ordering: Annotated[
233
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
234
+ ] = True
235
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
236
+
237
+ pq_rate_per_sec: Annotated[
238
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
239
+ ] = 0
240
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
241
+
242
+ pq_mode: Annotated[
243
+ Annotated[
244
+ Optional[OutputGooglePubsubMode], PlainValidator(validate_open_enum(False))
245
+ ],
246
+ pydantic.Field(alias="pqMode"),
247
+ ] = OutputGooglePubsubMode.ERROR
248
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
249
+
250
+ pq_max_buffer_size: Annotated[
251
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
252
+ ] = 42
253
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
254
+
255
+ pq_max_backpressure_sec: Annotated[
256
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
257
+ ] = 30
258
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
259
+
223
260
  pq_max_file_size: Annotated[
224
261
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
225
262
  ] = "1 MB"
@@ -251,14 +288,51 @@ class OutputGooglePubsub(BaseModel):
251
288
  ] = OutputGooglePubsubQueueFullBehavior.BLOCK
252
289
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
253
290
 
254
- pq_mode: Annotated[
255
- Annotated[
256
- Optional[OutputGooglePubsubMode], PlainValidator(validate_open_enum(False))
257
- ],
258
- pydantic.Field(alias="pqMode"),
259
- ] = OutputGooglePubsubMode.ERROR
260
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
261
-
262
291
  pq_controls: Annotated[
263
292
  Optional[OutputGooglePubsubPqControls], pydantic.Field(alias="pqControls")
264
293
  ] = None
294
+
295
+ @field_serializer("google_auth_method")
296
+ def serialize_google_auth_method(self, value):
297
+ if isinstance(value, str):
298
+ try:
299
+ return models.OutputGooglePubsubGoogleAuthenticationMethod(value)
300
+ except ValueError:
301
+ return value
302
+ return value
303
+
304
+ @field_serializer("on_backpressure")
305
+ def serialize_on_backpressure(self, value):
306
+ if isinstance(value, str):
307
+ try:
308
+ return models.OutputGooglePubsubBackpressureBehavior(value)
309
+ except ValueError:
310
+ return value
311
+ return value
312
+
313
+ @field_serializer("pq_mode")
314
+ def serialize_pq_mode(self, value):
315
+ if isinstance(value, str):
316
+ try:
317
+ return models.OutputGooglePubsubMode(value)
318
+ except ValueError:
319
+ return value
320
+ return value
321
+
322
+ @field_serializer("pq_compress")
323
+ def serialize_pq_compress(self, value):
324
+ if isinstance(value, str):
325
+ try:
326
+ return models.OutputGooglePubsubCompression(value)
327
+ except ValueError:
328
+ return value
329
+ return value
330
+
331
+ @field_serializer("pq_on_backpressure")
332
+ def serialize_pq_on_backpressure(self, value):
333
+ if isinstance(value, str):
334
+ try:
335
+ return models.OutputGooglePubsubQueueFullBehavior(value)
336
+ except ValueError:
337
+ return value
338
+ return value