cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.
Files changed (158)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0

cribl_control_plane/models/outputkafka.py
@@ -1,11 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import utils
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -48,6 +49,8 @@ class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
  SNAPPY = "snappy"
  # LZ4
  LZ4 = "lz4"
+ # ZSTD
+ ZSTD = "zstd"


  class OutputKafkaAuthTypedDict(TypedDict):
@@ -155,6 +158,24 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
  pydantic.Field(alias="maxVersion"),
  ] = None

+ @field_serializer("min_version")
+ def serialize_min_version(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("max_version")
+ def serialize_max_version(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(value)
+ except ValueError:
+ return value
+ return value
+

  class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
  disabled: NotRequired[bool]
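
The serializer hooks added above (and repeated for most enum-typed fields throughout this diff) coerce plain strings back into their open-enum members on dump, while passing unrecognized values through unchanged. A minimal standalone sketch of that pattern, using a hypothetical Compression enum and KafkaOutput model rather than the generated classes (assumes pydantic v2):

from enum import Enum
from typing import Optional

from pydantic import BaseModel, field_serializer


class Compression(str, Enum):
    # Hypothetical stand-in for the generated open enums (e.g. OutputKafkaCompression).
    GZIP = "gzip"
    ZSTD = "zstd"


class KafkaOutput(BaseModel):
    # The generated models accept plain strings for "open" enums, so unknown
    # values are kept instead of raising a validation error.
    compression: Optional[str] = None

    @field_serializer("compression")
    def serialize_compression(self, value):
        # Known strings become enum members; unknown values pass through unchanged.
        if isinstance(value, str):
            try:
                return Compression(value)
            except ValueError:
                return value
        return value


print(KafkaOutput(compression="zstd").model_dump())    # known value -> Compression.ZSTD
print(KafkaOutput(compression="brotli").model_dump())  # unknown value kept as "brotli"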
@@ -212,6 +233,13 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
  r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


+ class OutputKafkaAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""Enter credentials directly, or select a stored secret"""
+
+ MANUAL = "manual"
+ SECRET = "secret"
+
+
  class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
  # PLAIN
  PLAIN = "plain"
@@ -223,13 +251,58 @@ class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
  KERBEROS = "kerberos"


+ class OutputKafkaOauthParamTypedDict(TypedDict):
+ name: str
+ value: str
+
+
+ class OutputKafkaOauthParam(BaseModel):
+ name: str
+
+ value: str
+
+
+ class OutputKafkaSaslExtensionTypedDict(TypedDict):
+ name: str
+ value: str
+
+
+ class OutputKafkaSaslExtension(BaseModel):
+ name: str
+
+ value: str
+
+
  class OutputKafkaAuthenticationTypedDict(TypedDict):
  r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

  disabled: NotRequired[bool]
+ username: NotRequired[str]
+ password: NotRequired[str]
+ auth_type: NotRequired[OutputKafkaAuthenticationMethod]
+ r"""Enter credentials directly, or select a stored secret"""
+ credentials_secret: NotRequired[str]
+ r"""Select or create a secret that references your credentials"""
  mechanism: NotRequired[OutputKafkaSASLMechanism]
+ keytab_location: NotRequired[str]
+ r"""Location of keytab file for authentication principal"""
+ principal: NotRequired[str]
+ r"""Authentication principal, such as `kafka_user@example.com`"""
+ broker_service_class: NotRequired[str]
+ r"""Kerberos service class for Kafka brokers, such as `kafka`"""
  oauth_enabled: NotRequired[bool]
  r"""Enable OAuth authentication"""
+ token_url: NotRequired[str]
+ r"""URL of the token endpoint to use for OAuth authentication"""
+ client_id: NotRequired[str]
+ r"""Client ID to use for OAuth authentication"""
+ oauth_secret_type: NotRequired[str]
+ client_text_secret: NotRequired[str]
+ r"""Select or create a stored text secret"""
+ oauth_params: NotRequired[List[OutputKafkaOauthParamTypedDict]]
+ r"""Additional fields to send to the token endpoint, such as scope or audience"""
+ sasl_extensions: NotRequired[List[OutputKafkaSaslExtensionTypedDict]]
+ r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""


  class OutputKafkaAuthentication(BaseModel):
@@ -237,15 +310,89 @@ class OutputKafkaAuthentication(BaseModel):

  disabled: Optional[bool] = True

+ username: Optional[str] = None
+
+ password: Optional[str] = None
+
+ auth_type: Annotated[
+ Annotated[
+ Optional[OutputKafkaAuthenticationMethod],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="authType"),
+ ] = OutputKafkaAuthenticationMethod.MANUAL
+ r"""Enter credentials directly, or select a stored secret"""
+
+ credentials_secret: Annotated[
+ Optional[str], pydantic.Field(alias="credentialsSecret")
+ ] = None
+ r"""Select or create a secret that references your credentials"""
+
  mechanism: Annotated[
  Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
  ] = OutputKafkaSASLMechanism.PLAIN

+ keytab_location: Annotated[
+ Optional[str], pydantic.Field(alias="keytabLocation")
+ ] = None
+ r"""Location of keytab file for authentication principal"""
+
+ principal: Optional[str] = None
+ r"""Authentication principal, such as `kafka_user@example.com`"""
+
+ broker_service_class: Annotated[
+ Optional[str], pydantic.Field(alias="brokerServiceClass")
+ ] = None
+ r"""Kerberos service class for Kafka brokers, such as `kafka`"""
+
  oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
  False
  )
  r"""Enable OAuth authentication"""

+ token_url: Annotated[Optional[str], pydantic.Field(alias="tokenUrl")] = None
+ r"""URL of the token endpoint to use for OAuth authentication"""
+
+ client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
+ r"""Client ID to use for OAuth authentication"""
+
+ oauth_secret_type: Annotated[
+ Optional[str], pydantic.Field(alias="oauthSecretType")
+ ] = "secret"
+
+ client_text_secret: Annotated[
+ Optional[str], pydantic.Field(alias="clientTextSecret")
+ ] = None
+ r"""Select or create a stored text secret"""
+
+ oauth_params: Annotated[
+ Optional[List[OutputKafkaOauthParam]], pydantic.Field(alias="oauthParams")
+ ] = None
+ r"""Additional fields to send to the token endpoint, such as scope or audience"""
+
+ sasl_extensions: Annotated[
+ Optional[List[OutputKafkaSaslExtension]], pydantic.Field(alias="saslExtensions")
+ ] = None
+ r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
+
+ @field_serializer("auth_type")
+ def serialize_auth_type(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaAuthenticationMethod(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("mechanism")
+ def serialize_mechanism(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaSASLMechanism(value)
+ except ValueError:
+ return value
+ return value
+

  class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
  TL_SV1 = "TLSv1"
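
The expanded OutputKafkaAuthentication model above adds direct-credential, stored-secret, Kerberos, and OAuth fields alongside the existing SASL mechanism. A hedged sketch of how it might be populated, assuming these classes are re-exported from cribl_control_plane.models (as the serializer hooks suggest) and that the SDK's BaseModel allows population by the snake_case field names; the field combination is illustrative only, not a validated broker configuration:

from cribl_control_plane import models

# Field names, aliases, and enum members are taken from the diff above;
# the credential values are placeholders.
auth = models.OutputKafkaAuthentication(
    disabled=False,
    auth_type=models.OutputKafkaAuthenticationMethod.MANUAL,
    mechanism=models.OutputKafkaSASLMechanism.PLAIN,
    username="svc-cribl",
    password="example-password",
    oauth_params=[models.OutputKafkaOauthParam(name="scope", value="kafka.write")],
    sasl_extensions=[
        models.OutputKafkaSaslExtension(name="logicalCluster", value="lkc-123")
    ],
)

# by_alias=True emits the wire-format camelCase keys (authType, saslExtensions, ...).
print(auth.model_dump(by_alias=True, exclude_none=True))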
@@ -329,6 +476,24 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
  pydantic.Field(alias="maxVersion"),
  ] = None

+ @field_serializer("min_version")
+ def serialize_min_version(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaMinimumTLSVersion(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("max_version")
+ def serialize_max_version(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaMaximumTLSVersion(value)
+ except ValueError:
+ return value
+ return value
+

  class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -341,6 +506,17 @@ class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  QUEUE = "queue"


+ class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+ # Error
+ ERROR = "error"
+ # Backpressure
+ BACKPRESSURE = "backpressure"
+ # Always On
+ ALWAYS = "always"
+
+
  class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Codec to use to compress the persisted data"""

@@ -359,17 +535,6 @@ class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  DROP = "drop"


- class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
- # Error
- ERROR = "error"
- # Backpressure
- BACKPRESSURE = "backpressure"
- # Always On
- ALWAYS = "always"
-
-
  class OutputKafkaPqControlsTypedDict(TypedDict):
  pass

@@ -433,6 +598,18 @@ class OutputKafkaTypedDict(TypedDict):
  description: NotRequired[str]
  protobuf_library_id: NotRequired[str]
  r"""Select a set of Protobuf definitions for the events you want to send"""
+ protobuf_encoding_id: NotRequired[str]
+ r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+ pq_strict_ordering: NotRequired[bool]
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+ pq_rate_per_sec: NotRequired[float]
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+ pq_mode: NotRequired[OutputKafkaMode]
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+ pq_max_buffer_size: NotRequired[float]
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
+ pq_max_backpressure_sec: NotRequired[float]
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
  pq_max_file_size: NotRequired[str]
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
  pq_max_size: NotRequired[str]
@@ -443,8 +620,6 @@ class OutputKafkaTypedDict(TypedDict):
  r"""Codec to use to compress the persisted data"""
  pq_on_backpressure: NotRequired[OutputKafkaQueueFullBehavior]
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
- pq_mode: NotRequired[OutputKafkaMode]
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
  pq_controls: NotRequired[OutputKafkaPqControlsTypedDict]


@@ -568,6 +743,37 @@ class OutputKafka(BaseModel):
  ] = None
  r"""Select a set of Protobuf definitions for the events you want to send"""

+ protobuf_encoding_id: Annotated[
+ Optional[str], pydantic.Field(alias="protobufEncodingId")
+ ] = None
+ r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+
+ pq_strict_ordering: Annotated[
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+ ] = True
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+ pq_rate_per_sec: Annotated[
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
+ ] = 0
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+ pq_mode: Annotated[
+ Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
+ pydantic.Field(alias="pqMode"),
+ ] = OutputKafkaMode.ERROR
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+ pq_max_buffer_size: Annotated[
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+ ] = 42
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+ pq_max_backpressure_sec: Annotated[
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+ ] = 30
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
  pq_max_file_size: Annotated[
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
  ] = "1 MB"
@@ -599,12 +805,69 @@ class OutputKafka(BaseModel):
  ] = OutputKafkaQueueFullBehavior.BLOCK
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

- pq_mode: Annotated[
- Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
- pydantic.Field(alias="pqMode"),
- ] = OutputKafkaMode.ERROR
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
  pq_controls: Annotated[
  Optional[OutputKafkaPqControls], pydantic.Field(alias="pqControls")
  ] = None
+
+ @field_serializer("ack")
+ def serialize_ack(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaAcknowledgments(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("format_")
+ def serialize_format_(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaRecordDataFormat(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("compression")
+ def serialize_compression(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaCompression(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("on_backpressure")
+ def serialize_on_backpressure(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaBackpressureBehavior(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_mode")
+ def serialize_pq_mode(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaMode(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_compress")
+ def serialize_pq_compress(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaPqCompressCompression(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_on_backpressure")
+ def serialize_pq_on_backpressure(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKafkaQueueFullBehavior(value)
+ except ValueError:
+ return value
+ return value

cribl_control_plane/models/outputkinesis.py
@@ -1,11 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import utils
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -53,6 +54,17 @@ class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
  QUEUE = "queue"


+ class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+ # Error
+ ERROR = "error"
+ # Backpressure
+ BACKPRESSURE = "backpressure"
+ # Always On
+ ALWAYS = "always"
+
+
  class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Codec to use to compress the persisted data"""

@@ -71,17 +83,6 @@ class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  DROP = "drop"


- class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
- # Error
- ERROR = "error"
- # Backpressure
- BACKPRESSURE = "backpressure"
- # Always On
- ALWAYS = "always"
-
-
  class OutputKinesisPqControlsTypedDict(TypedDict):
  pass

@@ -143,6 +144,18 @@ class OutputKinesisTypedDict(TypedDict):
  aws_api_key: NotRequired[str]
  aws_secret: NotRequired[str]
  r"""Select or create a stored secret that references your access key and secret key"""
+ max_events_per_flush: NotRequired[float]
+ r"""Maximum number of records to send in a single request"""
+ pq_strict_ordering: NotRequired[bool]
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+ pq_rate_per_sec: NotRequired[float]
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+ pq_mode: NotRequired[OutputKinesisMode]
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+ pq_max_buffer_size: NotRequired[float]
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
+ pq_max_backpressure_sec: NotRequired[float]
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
  pq_max_file_size: NotRequired[str]
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
  pq_max_size: NotRequired[str]
@@ -153,8 +166,6 @@ class OutputKinesisTypedDict(TypedDict):
  r"""Codec to use to compress the persisted data"""
  pq_on_backpressure: NotRequired[OutputKinesisQueueFullBehavior]
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
- pq_mode: NotRequired[OutputKinesisMode]
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
  pq_controls: NotRequired[OutputKinesisPqControlsTypedDict]


@@ -281,6 +292,39 @@ class OutputKinesis(BaseModel):
  aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
  r"""Select or create a stored secret that references your access key and secret key"""

+ max_events_per_flush: Annotated[
+ Optional[float], pydantic.Field(alias="maxEventsPerFlush")
+ ] = 500
+ r"""Maximum number of records to send in a single request"""
+
+ pq_strict_ordering: Annotated[
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+ ] = True
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+ pq_rate_per_sec: Annotated[
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
+ ] = 0
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+ pq_mode: Annotated[
+ Annotated[
+ Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
+ ],
+ pydantic.Field(alias="pqMode"),
+ ] = OutputKinesisMode.ERROR
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+ pq_max_buffer_size: Annotated[
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+ ] = 42
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+ pq_max_backpressure_sec: Annotated[
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+ ] = 30
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
  pq_max_file_size: Annotated[
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
  ] = "1 MB"
@@ -312,14 +356,69 @@ class OutputKinesis(BaseModel):
  ] = OutputKinesisQueueFullBehavior.BLOCK
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

- pq_mode: Annotated[
- Annotated[
- Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
- ],
- pydantic.Field(alias="pqMode"),
- ] = OutputKinesisMode.ERROR
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
  pq_controls: Annotated[
  Optional[OutputKinesisPqControls], pydantic.Field(alias="pqControls")
  ] = None
+
+ @field_serializer("aws_authentication_method")
+ def serialize_aws_authentication_method(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKinesisAuthenticationMethod(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("signature_version")
+ def serialize_signature_version(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKinesisSignatureVersion(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("compression")
+ def serialize_compression(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKinesisCompression(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("on_backpressure")
+ def serialize_on_backpressure(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKinesisBackpressureBehavior(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_mode")
+ def serialize_pq_mode(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKinesisMode(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_compress")
+ def serialize_pq_compress(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKinesisPqCompressCompression(value)
+ except ValueError:
+ return value
+ return value
+
+ @field_serializer("pq_on_backpressure")
+ def serialize_pq_on_backpressure(self, value):
+ if isinstance(value, str):
+ try:
+ return models.OutputKinesisQueueFullBehavior(value)
+ except ValueError:
+ return value
+ return value
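
Both destinations gain the same persistent-queue tuning fields (pqStrictOrdering, pqRatePerSec, pqMode, pqMaxBufferSize, pqMaxBackpressureSec), and the pq_mode field moves earlier in each model. A sketch of the corresponding wire-format fragment, using the camelCase aliases and the generated defaults shown in the diff; the pqOnBackpressure value assumes the enum member described as 'Block' maps to "block":

# Persistent-queue settings fragment for an output payload; keys are the
# pydantic aliases from the diff, values are the defaults above.
pq_settings = {
    "pqMode": "error",           # open enum: "error", "backpressure", or "always"
    "pqStrictOrdering": True,    # FIFO processing; disable to forward new events before flush
    "pqRatePerSec": 0,           # 0 disables throttling
    "pqMaxBufferSize": 42,       # events held in memory before spilling to disk
    "pqMaxBackpressureSec": 30,  # seconds to wait before engaging the queue
    "pqMaxFileSize": "1 MB",
    "pqOnBackpressure": "block", # assumed string for the 'Block' default; "drop" is the alternative
}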