cribl-control-plane 0.2.1rc6-py3-none-any.whl → 0.2.1rc7-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (154)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/models/__init__.py +114 -4
  4. cribl_control_plane/models/cacheconnection.py +20 -0
  5. cribl_control_plane/models/configgroup.py +20 -1
  6. cribl_control_plane/models/configgroupcloud.py +11 -1
  7. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  8. cribl_control_plane/models/cribllakedataset.py +11 -1
  9. cribl_control_plane/models/cribllakedatasetupdate.py +11 -1
  10. cribl_control_plane/models/datasetmetadata.py +11 -1
  11. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  12. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  13. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  15. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getsummaryop.py +11 -0
  17. cribl_control_plane/models/groupcreaterequest.py +20 -1
  18. cribl_control_plane/models/hbcriblinfo.py +11 -1
  19. cribl_control_plane/models/healthserverstatus.py +20 -1
  20. cribl_control_plane/models/input.py +15 -15
  21. cribl_control_plane/models/inputappscope.py +76 -17
  22. cribl_control_plane/models/inputazureblob.py +29 -1
  23. cribl_control_plane/models/inputcollection.py +20 -1
  24. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  25. cribl_control_plane/models/inputcribl.py +20 -1
  26. cribl_control_plane/models/inputcriblhttp.py +58 -17
  27. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  28. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  29. cribl_control_plane/models/inputcribltcp.py +58 -17
  30. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  31. cribl_control_plane/models/inputdatadogagent.py +58 -17
  32. cribl_control_plane/models/inputdatagen.py +20 -1
  33. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  34. cribl_control_plane/models/inputelastic.py +108 -27
  35. cribl_control_plane/models/inputeventhub.py +176 -1
  36. cribl_control_plane/models/inputexec.py +29 -1
  37. cribl_control_plane/models/inputfile.py +36 -3
  38. cribl_control_plane/models/inputfirehose.py +58 -17
  39. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  40. cribl_control_plane/models/inputgrafana.py +149 -32
  41. cribl_control_plane/models/inputhttp.py +58 -17
  42. cribl_control_plane/models/inputhttpraw.py +58 -17
  43. cribl_control_plane/models/inputjournalfiles.py +20 -1
  44. cribl_control_plane/models/inputkafka.py +182 -1
  45. cribl_control_plane/models/inputkinesis.py +65 -1
  46. cribl_control_plane/models/inputkubeevents.py +20 -1
  47. cribl_control_plane/models/inputkubelogs.py +29 -1
  48. cribl_control_plane/models/inputkubemetrics.py +29 -1
  49. cribl_control_plane/models/inputloki.py +67 -17
  50. cribl_control_plane/models/inputmetrics.py +58 -17
  51. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  52. cribl_control_plane/models/inputmsk.py +74 -1
  53. cribl_control_plane/models/inputnetflow.py +20 -1
  54. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  55. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  56. cribl_control_plane/models/inputoffice365service.py +56 -1
  57. cribl_control_plane/models/inputopentelemetry.py +84 -16
  58. cribl_control_plane/models/inputprometheus.py +131 -37
  59. cribl_control_plane/models/inputprometheusrw.py +67 -17
  60. cribl_control_plane/models/inputrawudp.py +20 -1
  61. cribl_control_plane/models/inputs3.py +38 -1
  62. cribl_control_plane/models/inputs3inventory.py +47 -1
  63. cribl_control_plane/models/inputsecuritylake.py +47 -1
  64. cribl_control_plane/models/inputsnmp.py +29 -1
  65. cribl_control_plane/models/inputsplunk.py +76 -17
  66. cribl_control_plane/models/inputsplunkhec.py +66 -16
  67. cribl_control_plane/models/inputsplunksearch.py +56 -1
  68. cribl_control_plane/models/inputsqs.py +47 -1
  69. cribl_control_plane/models/inputsyslog.py +113 -32
  70. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  71. cribl_control_plane/models/inputsystemstate.py +29 -1
  72. cribl_control_plane/models/inputtcp.py +77 -17
  73. cribl_control_plane/models/inputtcpjson.py +67 -17
  74. cribl_control_plane/models/inputwef.py +65 -1
  75. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  76. cribl_control_plane/models/inputwineventlogs.py +52 -1
  77. cribl_control_plane/models/inputwiz.py +38 -1
  78. cribl_control_plane/models/inputwizwebhook.py +58 -17
  79. cribl_control_plane/models/inputzscalerhec.py +66 -16
  80. cribl_control_plane/models/jobstatus.py +34 -3
  81. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  82. cribl_control_plane/models/masterworkerentry.py +11 -1
  83. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  84. cribl_control_plane/models/output.py +21 -21
  85. cribl_control_plane/models/outputazureblob.py +90 -1
  86. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  87. cribl_control_plane/models/outputazureeventhub.py +267 -22
  88. cribl_control_plane/models/outputazurelogs.py +105 -22
  89. cribl_control_plane/models/outputchronicle.py +105 -22
  90. cribl_control_plane/models/outputclickhouse.py +141 -22
  91. cribl_control_plane/models/outputcloudwatch.py +96 -22
  92. cribl_control_plane/models/outputconfluentcloud.py +290 -23
  93. cribl_control_plane/models/outputcriblhttp.py +123 -22
  94. cribl_control_plane/models/outputcribllake.py +76 -1
  95. cribl_control_plane/models/outputcribltcp.py +123 -22
  96. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  97. cribl_control_plane/models/outputdatabricks.py +72 -1
  98. cribl_control_plane/models/outputdatadog.py +132 -22
  99. cribl_control_plane/models/outputdataset.py +123 -22
  100. cribl_control_plane/models/outputdiskspool.py +11 -1
  101. cribl_control_plane/models/outputdls3.py +117 -1
  102. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  103. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  104. cribl_control_plane/models/outputelastic.py +148 -22
  105. cribl_control_plane/models/outputelasticcloud.py +130 -22
  106. cribl_control_plane/models/outputexabeam.py +47 -1
  107. cribl_control_plane/models/outputfilesystem.py +72 -1
  108. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  109. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  110. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  111. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  112. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  113. cribl_control_plane/models/outputgraphite.py +96 -22
  114. cribl_control_plane/models/outputhoneycomb.py +105 -22
  115. cribl_control_plane/models/outputhumiohec.py +114 -22
  116. cribl_control_plane/models/outputinfluxdb.py +114 -22
  117. cribl_control_plane/models/outputkafka.py +281 -20
  118. cribl_control_plane/models/outputkinesis.py +121 -22
  119. cribl_control_plane/models/outputloki.py +112 -20
  120. cribl_control_plane/models/outputminio.py +117 -1
  121. cribl_control_plane/models/outputmsk.py +173 -20
  122. cribl_control_plane/models/outputnewrelic.py +123 -22
  123. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  124. cribl_control_plane/models/outputopentelemetry.py +159 -22
  125. cribl_control_plane/models/outputprometheus.py +105 -22
  126. cribl_control_plane/models/outputring.py +29 -1
  127. cribl_control_plane/models/outputs3.py +117 -1
  128. cribl_control_plane/models/outputsecuritylake.py +85 -1
  129. cribl_control_plane/models/outputsentinel.py +123 -22
  130. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  131. cribl_control_plane/models/outputservicenow.py +150 -22
  132. cribl_control_plane/models/outputsignalfx.py +105 -22
  133. cribl_control_plane/models/outputsns.py +103 -20
  134. cribl_control_plane/models/outputsplunk.py +141 -22
  135. cribl_control_plane/models/outputsplunkhec.py +198 -22
  136. cribl_control_plane/models/outputsplunklb.py +170 -22
  137. cribl_control_plane/models/outputsqs.py +112 -20
  138. cribl_control_plane/models/outputstatsd.py +96 -22
  139. cribl_control_plane/models/outputstatsdext.py +96 -22
  140. cribl_control_plane/models/outputsumologic.py +105 -22
  141. cribl_control_plane/models/outputsyslog.py +238 -99
  142. cribl_control_plane/models/outputtcpjson.py +132 -22
  143. cribl_control_plane/models/outputwavefront.py +105 -22
  144. cribl_control_plane/models/outputwebhook.py +141 -22
  145. cribl_control_plane/models/outputxsiam.py +103 -20
  146. cribl_control_plane/models/resourcepolicy.py +11 -0
  147. cribl_control_plane/models/runnablejobcollection.py +68 -9
  148. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  149. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  150. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  151. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  152. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/METADATA +1 -1
  153. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/RECORD +154 -154
  154. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputkafka.py
@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -155,6 +156,24 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None

+    @field_serializer("min_version")
+    def serialize_min_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("max_version")
+    def serialize_max_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(value)
+            except ValueError:
+                return value
+        return value
+

 class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
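This field_serializer pattern repeats across nearly every model in this release: on dump, a plain string matching a known enum value is coerced back to the enum member, and anything unrecognized passes through untouched. A minimal standalone sketch of the idea (hypothetical model and enum names, not the generated classes):

from enum import Enum
from typing import Optional

from pydantic import BaseModel, field_serializer


class MinimumTLSVersion(str, Enum):
    TL_SV1 = "TLSv1"
    TL_SV1_2 = "TLSv1.2"


class TLSSettings(BaseModel):
    min_version: Optional[str] = None

    @field_serializer("min_version")
    def serialize_min_version(self, value):
        # Known values round-trip to the enum member; unknown strings pass
        # through unchanged, keeping dumps forward-compatible with values
        # the server may add later.
        if isinstance(value, str):
            try:
                return MinimumTLSVersion(value)
            except ValueError:
                return value
        return value


print(TLSSettings(min_version="TLSv1.2").model_dump())  # coerced to the enum member
print(TLSSettings(min_version="TLSv1.9").model_dump())  # plain string, no error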
@@ -212,6 +231,13 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


+class OutputKafkaAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Enter credentials directly, or select a stored secret"""
+
+    MANUAL = "manual"
+    SECRET = "secret"
+
+
 class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     # PLAIN
     PLAIN = "plain"
@@ -223,13 +249,58 @@ class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     KERBEROS = "kerberos"


+class OutputKafkaOauthParamTypedDict(TypedDict):
+    name: str
+    value: str
+
+
+class OutputKafkaOauthParam(BaseModel):
+    name: str
+
+    value: str
+
+
+class OutputKafkaSaslExtensionTypedDict(TypedDict):
+    name: str
+    value: str
+
+
+class OutputKafkaSaslExtension(BaseModel):
+    name: str
+
+    value: str
+
+
 class OutputKafkaAuthenticationTypedDict(TypedDict):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

     disabled: NotRequired[bool]
+    username: NotRequired[str]
+    password: NotRequired[str]
+    auth_type: NotRequired[OutputKafkaAuthenticationMethod]
+    r"""Enter credentials directly, or select a stored secret"""
+    credentials_secret: NotRequired[str]
+    r"""Select or create a secret that references your credentials"""
     mechanism: NotRequired[OutputKafkaSASLMechanism]
+    keytab_location: NotRequired[str]
+    r"""Location of keytab file for authentication principal"""
+    principal: NotRequired[str]
+    r"""Authentication principal, such as `kafka_user@example.com`"""
+    broker_service_class: NotRequired[str]
+    r"""Kerberos service class for Kafka brokers, such as `kafka`"""
     oauth_enabled: NotRequired[bool]
     r"""Enable OAuth authentication"""
+    token_url: NotRequired[str]
+    r"""URL of the token endpoint to use for OAuth authentication"""
+    client_id: NotRequired[str]
+    r"""Client ID to use for OAuth authentication"""
+    oauth_secret_type: NotRequired[str]
+    client_text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    oauth_params: NotRequired[List[OutputKafkaOauthParamTypedDict]]
+    r"""Additional fields to send to the token endpoint, such as scope or audience"""
+    sasl_extensions: NotRequired[List[OutputKafkaSaslExtensionTypedDict]]
+    r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""


 class OutputKafkaAuthentication(BaseModel):
@@ -237,15 +308,89 @@ class OutputKafkaAuthentication(BaseModel):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

     disabled: Optional[bool] = True

+    username: Optional[str] = None
+
+    password: Optional[str] = None
+
+    auth_type: Annotated[
+        Annotated[
+            Optional[OutputKafkaAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
+    ] = OutputKafkaAuthenticationMethod.MANUAL
+    r"""Enter credentials directly, or select a stored secret"""
+
+    credentials_secret: Annotated[
+        Optional[str], pydantic.Field(alias="credentialsSecret")
+    ] = None
+    r"""Select or create a secret that references your credentials"""
+
     mechanism: Annotated[
         Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
     ] = OutputKafkaSASLMechanism.PLAIN

+    keytab_location: Annotated[
+        Optional[str], pydantic.Field(alias="keytabLocation")
+    ] = None
+    r"""Location of keytab file for authentication principal"""
+
+    principal: Optional[str] = None
+    r"""Authentication principal, such as `kafka_user@example.com`"""
+
+    broker_service_class: Annotated[
+        Optional[str], pydantic.Field(alias="brokerServiceClass")
+    ] = None
+    r"""Kerberos service class for Kafka brokers, such as `kafka`"""
+
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
     )
     r"""Enable OAuth authentication"""

+    token_url: Annotated[Optional[str], pydantic.Field(alias="tokenUrl")] = None
+    r"""URL of the token endpoint to use for OAuth authentication"""
+
+    client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
+    r"""Client ID to use for OAuth authentication"""
+
+    oauth_secret_type: Annotated[
+        Optional[str], pydantic.Field(alias="oauthSecretType")
+    ] = "secret"
+
+    client_text_secret: Annotated[
+        Optional[str], pydantic.Field(alias="clientTextSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
+
+    oauth_params: Annotated[
+        Optional[List[OutputKafkaOauthParam]], pydantic.Field(alias="oauthParams")
+    ] = None
+    r"""Additional fields to send to the token endpoint, such as scope or audience"""
+
+    sasl_extensions: Annotated[
+        Optional[List[OutputKafkaSaslExtension]], pydantic.Field(alias="saslExtensions")
+    ] = None
+    r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
+
+    @field_serializer("auth_type")
+    def serialize_auth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("mechanism")
+    def serialize_mechanism(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaSASLMechanism(value)
+            except ValueError:
+                return value
+        return value
+

 class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
@@ -329,6 +474,24 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None

+    @field_serializer("min_version")
+    def serialize_min_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaMinimumTLSVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("max_version")
+    def serialize_max_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaMaximumTLSVersion(value)
+            except ValueError:
+                return value
+        return value
+

 class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -341,6 +504,17 @@ class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


+class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -359,17 +533,6 @@ class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


-class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputKafkaPqControlsTypedDict(TypedDict):
     pass

@@ -433,6 +596,18 @@ class OutputKafkaTypedDict(TypedDict):
     description: NotRequired[str]
     protobuf_library_id: NotRequired[str]
     r"""Select a set of Protobuf definitions for the events you want to send"""
+    protobuf_encoding_id: NotRequired[str]
+    r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputKafkaMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -443,8 +618,6 @@ class OutputKafkaTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputKafkaQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputKafkaMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputKafkaPqControlsTypedDict]

@@ -568,6 +741,37 @@ class OutputKafka(BaseModel):
     ] = None
     r"""Select a set of Protobuf definitions for the events you want to send"""

+    protobuf_encoding_id: Annotated[
+        Optional[str], pydantic.Field(alias="protobufEncodingId")
+    ] = None
+    r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKafkaMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -599,12 +803,69 @@ class OutputKafka(BaseModel):
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKafkaMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputKafkaPqControls], pydantic.Field(alias="pqControls")
     ] = None
+
+    @field_serializer("ack")
+    def serialize_ack(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaAcknowledgments(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("format_")
+    def serialize_format_(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaRecordDataFormat(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("compression")
+    def serialize_compression(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaPqCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKafkaQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
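Beyond the serializers, outputkafka.py gains protobuf_encoding_id and a set of persistent-queue (PQ) tuning fields; the same PQ fields appear on outputkinesis.py below with identical defaults. One way to inspect them, assuming models.OutputKafka is re-exported as the models/__init__.py changes suggest:

from cribl_control_plane import models

# The new PQ knobs are optional, with the defaults visible in the diff above:
# pqStrictOrdering=True, pqRatePerSec=0 (throttling off), pqMode="error",
# pqMaxBufferSize=42, pqMaxBackpressureSec=30.
for name in (
    "pq_strict_ordering",
    "pq_rate_per_sec",
    "pq_mode",
    "pq_max_buffer_size",
    "pq_max_backpressure_sec",
):
    print(name, "=", models.OutputKafka.model_fields[name].default)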
cribl_control_plane/models/outputkinesis.py
@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -53,6 +54,17 @@ class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


+class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

@@ -71,17 +83,6 @@ class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"


-class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputKinesisPqControlsTypedDict(TypedDict):
     pass

@@ -143,6 +144,18 @@ class OutputKinesisTypedDict(TypedDict):
     aws_api_key: NotRequired[str]
     aws_secret: NotRequired[str]
     r"""Select or create a stored secret that references your access key and secret key"""
+    max_events_per_flush: NotRequired[float]
+    r"""Maximum number of records to send in a single request"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputKinesisMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -153,8 +166,6 @@ class OutputKinesisTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputKinesisQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputKinesisMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputKinesisPqControlsTypedDict]

@@ -281,6 +292,39 @@ class OutputKinesis(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""

+    max_events_per_flush: Annotated[
+        Optional[float], pydantic.Field(alias="maxEventsPerFlush")
+    ] = 500
+    r"""Maximum number of records to send in a single request"""
+
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKinesisMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -312,14 +356,69 @@ class OutputKinesis(BaseModel):
     ] = OutputKinesisQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKinesisMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputKinesisPqControls], pydantic.Field(alias="pqControls")
     ] = None
+
+    @field_serializer("aws_authentication_method")
+    def serialize_aws_authentication_method(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKinesisAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("signature_version")
+    def serialize_signature_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKinesisSignatureVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("compression")
+    def serialize_compression(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKinesisCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKinesisBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKinesisMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKinesisPqCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputKinesisQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
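Taken together, the open-enum validators and the new serializers give these fields a lossless round trip. A sketch using the TLS settings model from the outputkafka.py hunks above, assuming it is re-exported from cribl_control_plane.models and that open-enum validation accepts unrecognized strings (which the pass-through serializers imply); "TLSv1.9" is a hypothetical future value:

from cribl_control_plane import models

tls = models.OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide.model_validate(
    {"minVersion": "TLSv1.2", "maxVersion": "TLSv1.9"}
)

# The known value serializes through its enum; the unknown one survives
# as the plain string "TLSv1.9" instead of raising.
print(tls.model_dump(by_alias=True, exclude_none=True))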