cribl-control-plane 0.3.0b3-py3-none-any.whl → 0.3.0b12-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of cribl-control-plane has been flagged as potentially problematic.

Files changed (158)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputconfluentcloud.py (+292 -23)

@@ -1,11 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import utils
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -97,6 +98,24 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
          pydantic.Field(alias="maxVersion"),
      ] = None

+     @field_serializer("min_version")
+     def serialize_min_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudMinimumTLSVersion(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("max_version")
+     def serialize_max_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudMaximumTLSVersion(value)
+             except ValueError:
+                 return value
+         return value
+

  class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
      r"""Control the number of required acknowledgments."""
@@ -131,6 +150,8 @@ class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
      SNAPPY = "snappy"
      # LZ4
      LZ4 = "lz4"
+     # ZSTD
+     ZSTD = "zstd"


  class OutputConfluentCloudAuthTypedDict(TypedDict):
@@ -238,6 +259,28 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
          pydantic.Field(alias="maxVersion"),
      ] = None

+     @field_serializer("min_version")
+     def serialize_min_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
+                     value
+                 )
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("max_version")
+     def serialize_max_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
+                     value
+                 )
+             except ValueError:
+                 return value
+         return value
+

  class OutputConfluentCloudKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
      disabled: NotRequired[bool]
@@ -297,6 +340,13 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
      r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


+ class OutputConfluentCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Enter credentials directly, or select a stored secret"""
+
+     MANUAL = "manual"
+     SECRET = "secret"
+
+
  class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
      # PLAIN
      PLAIN = "plain"
@@ -308,13 +358,58 @@ class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta)
      KERBEROS = "kerberos"


+ class OutputConfluentCloudOauthParamTypedDict(TypedDict):
+     name: str
+     value: str
+
+
+ class OutputConfluentCloudOauthParam(BaseModel):
+     name: str
+
+     value: str
+
+
+ class OutputConfluentCloudSaslExtensionTypedDict(TypedDict):
+     name: str
+     value: str
+
+
+ class OutputConfluentCloudSaslExtension(BaseModel):
+     name: str
+
+     value: str
+
+
  class OutputConfluentCloudAuthenticationTypedDict(TypedDict):
      r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

      disabled: NotRequired[bool]
+     username: NotRequired[str]
+     password: NotRequired[str]
+     auth_type: NotRequired[OutputConfluentCloudAuthenticationMethod]
+     r"""Enter credentials directly, or select a stored secret"""
+     credentials_secret: NotRequired[str]
+     r"""Select or create a secret that references your credentials"""
      mechanism: NotRequired[OutputConfluentCloudSASLMechanism]
+     keytab_location: NotRequired[str]
+     r"""Location of keytab file for authentication principal"""
+     principal: NotRequired[str]
+     r"""Authentication principal, such as `kafka_user@example.com`"""
+     broker_service_class: NotRequired[str]
+     r"""Kerberos service class for Kafka brokers, such as `kafka`"""
      oauth_enabled: NotRequired[bool]
      r"""Enable OAuth authentication"""
+     token_url: NotRequired[str]
+     r"""URL of the token endpoint to use for OAuth authentication"""
+     client_id: NotRequired[str]
+     r"""Client ID to use for OAuth authentication"""
+     oauth_secret_type: NotRequired[str]
+     client_text_secret: NotRequired[str]
+     r"""Select or create a stored text secret"""
+     oauth_params: NotRequired[List[OutputConfluentCloudOauthParamTypedDict]]
+     r"""Additional fields to send to the token endpoint, such as scope or audience"""
+     sasl_extensions: NotRequired[List[OutputConfluentCloudSaslExtensionTypedDict]]
+     r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""


  class OutputConfluentCloudAuthentication(BaseModel):
@@ -322,16 +417,92 @@ class OutputConfluentCloudAuthentication(BaseModel):

      disabled: Optional[bool] = True

+     username: Optional[str] = None
+
+     password: Optional[str] = None
+
+     auth_type: Annotated[
+         Annotated[
+             Optional[OutputConfluentCloudAuthenticationMethod],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="authType"),
+     ] = OutputConfluentCloudAuthenticationMethod.MANUAL
+     r"""Enter credentials directly, or select a stored secret"""
+
+     credentials_secret: Annotated[
+         Optional[str], pydantic.Field(alias="credentialsSecret")
+     ] = None
+     r"""Select or create a secret that references your credentials"""
+
      mechanism: Annotated[
          Optional[OutputConfluentCloudSASLMechanism],
          PlainValidator(validate_open_enum(False)),
      ] = OutputConfluentCloudSASLMechanism.PLAIN

+     keytab_location: Annotated[
+         Optional[str], pydantic.Field(alias="keytabLocation")
+     ] = None
+     r"""Location of keytab file for authentication principal"""
+
+     principal: Optional[str] = None
+     r"""Authentication principal, such as `kafka_user@example.com`"""
+
+     broker_service_class: Annotated[
+         Optional[str], pydantic.Field(alias="brokerServiceClass")
+     ] = None
+     r"""Kerberos service class for Kafka brokers, such as `kafka`"""
+
      oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
          False
      )
      r"""Enable OAuth authentication"""

+     token_url: Annotated[Optional[str], pydantic.Field(alias="tokenUrl")] = None
+     r"""URL of the token endpoint to use for OAuth authentication"""
+
+     client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
+     r"""Client ID to use for OAuth authentication"""
+
+     oauth_secret_type: Annotated[
+         Optional[str], pydantic.Field(alias="oauthSecretType")
+     ] = "secret"
+
+     client_text_secret: Annotated[
+         Optional[str], pydantic.Field(alias="clientTextSecret")
+     ] = None
+     r"""Select or create a stored text secret"""
+
+     oauth_params: Annotated[
+         Optional[List[OutputConfluentCloudOauthParam]],
+         pydantic.Field(alias="oauthParams"),
+     ] = None
+     r"""Additional fields to send to the token endpoint, such as scope or audience"""
+
+     sasl_extensions: Annotated[
+         Optional[List[OutputConfluentCloudSaslExtension]],
+         pydantic.Field(alias="saslExtensions"),
+     ] = None
+     r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
+
+     @field_serializer("auth_type")
+     def serialize_auth_type(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudAuthenticationMethod(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("mechanism")
+     def serialize_mechanism(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudSASLMechanism(value)
+             except ValueError:
+                 return value
+         return value
+

  class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""How to handle events when all receivers are exerting backpressure"""
@@ -344,6 +515,17 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEn
      QUEUE = "queue"


+ class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+     # Error
+     ERROR = "error"
+     # Always On
+     ALWAYS = "always"
+     # Backpressure
+     BACKPRESSURE = "backpressure"
+
+
  class OutputConfluentCloudPqCompressCompression(
      str, Enum, metaclass=utils.OpenEnumMeta
  ):
@@ -364,17 +546,6 @@ class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumM
      DROP = "drop"


- class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-     # Error
-     ERROR = "error"
-     # Backpressure
-     BACKPRESSURE = "backpressure"
-     # Always On
-     ALWAYS = "always"
-
-
  class OutputConfluentCloudPqControlsTypedDict(TypedDict):
      pass

@@ -438,6 +609,18 @@ class OutputConfluentCloudTypedDict(TypedDict):
      description: NotRequired[str]
      protobuf_library_id: NotRequired[str]
      r"""Select a set of Protobuf definitions for the events you want to send"""
+     protobuf_encoding_id: NotRequired[str]
+     r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+     pq_strict_ordering: NotRequired[bool]
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+     pq_rate_per_sec: NotRequired[float]
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+     pq_mode: NotRequired[OutputConfluentCloudMode]
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+     pq_max_buffer_size: NotRequired[float]
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+     pq_max_backpressure_sec: NotRequired[float]
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
      pq_max_file_size: NotRequired[str]
      r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
      pq_max_size: NotRequired[str]
@@ -448,8 +631,6 @@ class OutputConfluentCloudTypedDict(TypedDict):
      r"""Codec to use to compress the persisted data"""
      pq_on_backpressure: NotRequired[OutputConfluentCloudQueueFullBehavior]
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-     pq_mode: NotRequired[OutputConfluentCloudMode]
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
      pq_controls: NotRequired[OutputConfluentCloudPqControlsTypedDict]

@@ -575,6 +756,40 @@ class OutputConfluentCloud(BaseModel):
      ] = None
      r"""Select a set of Protobuf definitions for the events you want to send"""

+     protobuf_encoding_id: Annotated[
+         Optional[str], pydantic.Field(alias="protobufEncodingId")
+     ] = None
+     r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+
+     pq_strict_ordering: Annotated[
+         Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+     ] = True
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+     pq_rate_per_sec: Annotated[
+         Optional[float], pydantic.Field(alias="pqRatePerSec")
+     ] = 0
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+     pq_mode: Annotated[
+         Annotated[
+             Optional[OutputConfluentCloudMode],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="pqMode"),
+     ] = OutputConfluentCloudMode.ERROR
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+     pq_max_buffer_size: Annotated[
+         Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+     ] = 42
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+     pq_max_backpressure_sec: Annotated[
+         Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+     ] = 30
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
      pq_max_file_size: Annotated[
          Optional[str], pydantic.Field(alias="pqMaxFileSize")
      ] = "1 MB"
@@ -606,15 +821,69 @@
      ] = OutputConfluentCloudQueueFullBehavior.BLOCK
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-     pq_mode: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudMode],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="pqMode"),
-     ] = OutputConfluentCloudMode.ERROR
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
      pq_controls: Annotated[
          Optional[OutputConfluentCloudPqControls], pydantic.Field(alias="pqControls")
      ] = None
+
+     @field_serializer("ack")
+     def serialize_ack(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudAcknowledgments(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("format_")
+     def serialize_format_(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudRecordDataFormat(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("compression")
+     def serialize_compression(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudCompression(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("on_backpressure")
+     def serialize_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudBackpressureBehavior(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("pq_mode")
+     def serialize_pq_mode(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudMode(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("pq_compress")
+     def serialize_pq_compress(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudPqCompressCompression(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("pq_on_backpressure")
+     def serialize_pq_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputConfluentCloudQueueFullBehavior(value)
+             except ValueError:
+                 return value
+         return value
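The recurring @field_serializer hooks above are the headline change in this file: every open enum now round-trips. On dump, a string matching a known enum member is re-coerced to the enum, while an unrecognized value (for example, one added server-side after this SDK build) passes through untouched instead of raising. A hedged usage sketch follows; it assumes the Speakeasy-generated BaseModel behaves like a standard Pydantic v2 model and accepts snake_case field names alongside the camelCase aliases, and the token URL and client ID are placeholders:

from cribl_control_plane import models

auth = models.OutputConfluentCloudAuthentication(
    mechanism="plain",  # known value: coerced to the SASL mechanism enum
    oauth_enabled=True,
    token_url="https://example.com/oauth/token",  # placeholder endpoint
    client_id="my-client",  # placeholder client ID
)

# Dump with camelCase aliases; an unknown enum string would survive this
# round trip unchanged rather than raising a ValueError.
print(auth.model_dump(by_alias=True))

Note also that pq_mode did not disappear in these diffs: it moved earlier in the field order, alongside the new pq_strict_ordering, pq_rate_per_sec, pq_max_buffer_size, and pq_max_backpressure_sec knobs.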
cribl_control_plane/models/outputcriblhttp.py (+123 -22)

@@ -1,11 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import utils
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
  from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
  from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -97,6 +98,24 @@ class OutputCriblHTTPTLSSettingsClientSide(BaseModel):
          pydantic.Field(alias="maxVersion"),
      ] = None

+     @field_serializer("min_version")
+     def serialize_min_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputCriblHTTPMinimumTLSVersion(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("max_version")
+     def serialize_max_version(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputCriblHTTPMaximumTLSVersion(value)
+             except ValueError:
+                 return value
+         return value
+

  class OutputCriblHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Codec to use to compress the data before sending"""
@@ -209,6 +228,17 @@ class OutputCriblHTTPURL(BaseModel):
      r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


+ class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+     # Error
+     ERROR = "error"
+     # Always On
+     ALWAYS = "always"
+     # Backpressure
+     BACKPRESSURE = "backpressure"
+
+
  class OutputCriblHTTPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Codec to use to compress the persisted data"""

@@ -227,17 +257,6 @@ class OutputCriblHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      DROP = "drop"


- class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-     # Error
-     ERROR = "error"
-     # Backpressure
-     BACKPRESSURE = "backpressure"
-     # Always On
-     ALWAYS = "always"
-
-
  class OutputCriblHTTPPqControlsTypedDict(TypedDict):
      pass

@@ -309,6 +328,16 @@ class OutputCriblHTTPTypedDict(TypedDict):
      r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
      load_balance_stats_period_sec: NotRequired[float]
      r"""How far back in time to keep traffic stats for load balancing purposes"""
+     pq_strict_ordering: NotRequired[bool]
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+     pq_rate_per_sec: NotRequired[float]
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+     pq_mode: NotRequired[OutputCriblHTTPMode]
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+     pq_max_buffer_size: NotRequired[float]
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+     pq_max_backpressure_sec: NotRequired[float]
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
      pq_max_file_size: NotRequired[str]
      r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
      pq_max_size: NotRequired[str]
@@ -319,8 +348,6 @@ class OutputCriblHTTPTypedDict(TypedDict):
      r"""Codec to use to compress the persisted data"""
      pq_on_backpressure: NotRequired[OutputCriblHTTPQueueFullBehavior]
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-     pq_mode: NotRequired[OutputCriblHTTPMode]
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
      pq_controls: NotRequired[OutputCriblHTTPPqControlsTypedDict]

@@ -465,6 +492,34 @@ class OutputCriblHTTP(BaseModel):
      ] = 300
      r"""How far back in time to keep traffic stats for load balancing purposes"""

+     pq_strict_ordering: Annotated[
+         Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+     ] = True
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+     pq_rate_per_sec: Annotated[
+         Optional[float], pydantic.Field(alias="pqRatePerSec")
+     ] = 0
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+     pq_mode: Annotated[
+         Annotated[
+             Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
+         ],
+         pydantic.Field(alias="pqMode"),
+     ] = OutputCriblHTTPMode.ERROR
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+     pq_max_buffer_size: Annotated[
+         Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+     ] = 42
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+     pq_max_backpressure_sec: Annotated[
+         Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+     ] = 30
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
      pq_max_file_size: Annotated[
          Optional[str], pydantic.Field(alias="pqMaxFileSize")
      ] = "1 MB"
@@ -496,14 +551,60 @@
      ] = OutputCriblHTTPQueueFullBehavior.BLOCK
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-     pq_mode: Annotated[
-         Annotated[
-             Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
-         ],
-         pydantic.Field(alias="pqMode"),
-     ] = OutputCriblHTTPMode.ERROR
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
      pq_controls: Annotated[
          Optional[OutputCriblHTTPPqControls], pydantic.Field(alias="pqControls")
      ] = None
+
+     @field_serializer("compression")
+     def serialize_compression(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputCriblHTTPCompression(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("failed_request_logging_mode")
+     def serialize_failed_request_logging_mode(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputCriblHTTPFailedRequestLoggingMode(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("on_backpressure")
+     def serialize_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputCriblHTTPBackpressureBehavior(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("pq_mode")
+     def serialize_pq_mode(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputCriblHTTPMode(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("pq_compress")
+     def serialize_pq_compress(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputCriblHTTPPqCompressCompression(value)
+             except ValueError:
+                 return value
+         return value
+
+     @field_serializer("pq_on_backpressure")
+     def serialize_pq_on_backpressure(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.OutputCriblHTTPQueueFullBehavior(value)
+             except ValueError:
+                 return value
+         return value
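Both files repeat one mechanical recipe: each enum-typed field is validated with PlainValidator(validate_open_enum(False)) on the way in and re-coerced by a matching @field_serializer on the way out. Below is a minimal, self-contained sketch of that pattern, assuming validate_open_enum behaves as shown; the names Compression, open_enum, and Settings are illustrative and are not part of cribl-control-plane:

from enum import Enum
from typing import Optional

from pydantic import BaseModel, field_serializer
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


def open_enum(enum_cls):
    # Accept any string: coerce to the enum when possible, otherwise keep it.
    def validate(value):
        try:
            return enum_cls(value)
        except ValueError:
            return value

    return PlainValidator(validate)


class Settings(BaseModel):
    compression: Annotated[Optional[Compression], open_enum(Compression)] = Compression.GZIP

    @field_serializer("compression")
    def serialize_compression(self, value):
        # Mirror of the generated serializers: re-coerce known strings to the
        # enum on output, and let unknown values pass through instead of raising.
        if isinstance(value, str):
            try:
                return Compression(value)
            except ValueError:
                return value
        return value


print(Settings(compression="zstd").model_dump_json())  # {"compression":"zstd"}
print(Settings(compression="gzip").model_dump_json())  # {"compression":"gzip"}

The payoff is forward compatibility: a value the server starts emitting after this SDK build (like "zstd" here) survives a parse-and-redump cycle instead of failing validation or serialization.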