cribl-control-plane: cribl_control_plane-0.2.1rc6-py3-none-any.whl → cribl_control_plane-0.2.1rc7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic; see the release advisory for more details.

Files changed (154):
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/models/__init__.py +114 -4
  4. cribl_control_plane/models/cacheconnection.py +20 -0
  5. cribl_control_plane/models/configgroup.py +20 -1
  6. cribl_control_plane/models/configgroupcloud.py +11 -1
  7. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  8. cribl_control_plane/models/cribllakedataset.py +11 -1
  9. cribl_control_plane/models/cribllakedatasetupdate.py +11 -1
  10. cribl_control_plane/models/datasetmetadata.py +11 -1
  11. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  12. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  13. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  15. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getsummaryop.py +11 -0
  17. cribl_control_plane/models/groupcreaterequest.py +20 -1
  18. cribl_control_plane/models/hbcriblinfo.py +11 -1
  19. cribl_control_plane/models/healthserverstatus.py +20 -1
  20. cribl_control_plane/models/input.py +15 -15
  21. cribl_control_plane/models/inputappscope.py +76 -17
  22. cribl_control_plane/models/inputazureblob.py +29 -1
  23. cribl_control_plane/models/inputcollection.py +20 -1
  24. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  25. cribl_control_plane/models/inputcribl.py +20 -1
  26. cribl_control_plane/models/inputcriblhttp.py +58 -17
  27. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  28. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  29. cribl_control_plane/models/inputcribltcp.py +58 -17
  30. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  31. cribl_control_plane/models/inputdatadogagent.py +58 -17
  32. cribl_control_plane/models/inputdatagen.py +20 -1
  33. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  34. cribl_control_plane/models/inputelastic.py +108 -27
  35. cribl_control_plane/models/inputeventhub.py +176 -1
  36. cribl_control_plane/models/inputexec.py +29 -1
  37. cribl_control_plane/models/inputfile.py +36 -3
  38. cribl_control_plane/models/inputfirehose.py +58 -17
  39. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  40. cribl_control_plane/models/inputgrafana.py +149 -32
  41. cribl_control_plane/models/inputhttp.py +58 -17
  42. cribl_control_plane/models/inputhttpraw.py +58 -17
  43. cribl_control_plane/models/inputjournalfiles.py +20 -1
  44. cribl_control_plane/models/inputkafka.py +182 -1
  45. cribl_control_plane/models/inputkinesis.py +65 -1
  46. cribl_control_plane/models/inputkubeevents.py +20 -1
  47. cribl_control_plane/models/inputkubelogs.py +29 -1
  48. cribl_control_plane/models/inputkubemetrics.py +29 -1
  49. cribl_control_plane/models/inputloki.py +67 -17
  50. cribl_control_plane/models/inputmetrics.py +58 -17
  51. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  52. cribl_control_plane/models/inputmsk.py +74 -1
  53. cribl_control_plane/models/inputnetflow.py +20 -1
  54. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  55. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  56. cribl_control_plane/models/inputoffice365service.py +56 -1
  57. cribl_control_plane/models/inputopentelemetry.py +84 -16
  58. cribl_control_plane/models/inputprometheus.py +131 -37
  59. cribl_control_plane/models/inputprometheusrw.py +67 -17
  60. cribl_control_plane/models/inputrawudp.py +20 -1
  61. cribl_control_plane/models/inputs3.py +38 -1
  62. cribl_control_plane/models/inputs3inventory.py +47 -1
  63. cribl_control_plane/models/inputsecuritylake.py +47 -1
  64. cribl_control_plane/models/inputsnmp.py +29 -1
  65. cribl_control_plane/models/inputsplunk.py +76 -17
  66. cribl_control_plane/models/inputsplunkhec.py +66 -16
  67. cribl_control_plane/models/inputsplunksearch.py +56 -1
  68. cribl_control_plane/models/inputsqs.py +47 -1
  69. cribl_control_plane/models/inputsyslog.py +113 -32
  70. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  71. cribl_control_plane/models/inputsystemstate.py +29 -1
  72. cribl_control_plane/models/inputtcp.py +77 -17
  73. cribl_control_plane/models/inputtcpjson.py +67 -17
  74. cribl_control_plane/models/inputwef.py +65 -1
  75. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  76. cribl_control_plane/models/inputwineventlogs.py +52 -1
  77. cribl_control_plane/models/inputwiz.py +38 -1
  78. cribl_control_plane/models/inputwizwebhook.py +58 -17
  79. cribl_control_plane/models/inputzscalerhec.py +66 -16
  80. cribl_control_plane/models/jobstatus.py +34 -3
  81. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  82. cribl_control_plane/models/masterworkerentry.py +11 -1
  83. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  84. cribl_control_plane/models/output.py +21 -21
  85. cribl_control_plane/models/outputazureblob.py +90 -1
  86. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  87. cribl_control_plane/models/outputazureeventhub.py +267 -22
  88. cribl_control_plane/models/outputazurelogs.py +105 -22
  89. cribl_control_plane/models/outputchronicle.py +105 -22
  90. cribl_control_plane/models/outputclickhouse.py +141 -22
  91. cribl_control_plane/models/outputcloudwatch.py +96 -22
  92. cribl_control_plane/models/outputconfluentcloud.py +290 -23
  93. cribl_control_plane/models/outputcriblhttp.py +123 -22
  94. cribl_control_plane/models/outputcribllake.py +76 -1
  95. cribl_control_plane/models/outputcribltcp.py +123 -22
  96. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  97. cribl_control_plane/models/outputdatabricks.py +72 -1
  98. cribl_control_plane/models/outputdatadog.py +132 -22
  99. cribl_control_plane/models/outputdataset.py +123 -22
  100. cribl_control_plane/models/outputdiskspool.py +11 -1
  101. cribl_control_plane/models/outputdls3.py +117 -1
  102. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  103. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  104. cribl_control_plane/models/outputelastic.py +148 -22
  105. cribl_control_plane/models/outputelasticcloud.py +130 -22
  106. cribl_control_plane/models/outputexabeam.py +47 -1
  107. cribl_control_plane/models/outputfilesystem.py +72 -1
  108. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  109. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  110. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  111. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  112. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  113. cribl_control_plane/models/outputgraphite.py +96 -22
  114. cribl_control_plane/models/outputhoneycomb.py +105 -22
  115. cribl_control_plane/models/outputhumiohec.py +114 -22
  116. cribl_control_plane/models/outputinfluxdb.py +114 -22
  117. cribl_control_plane/models/outputkafka.py +281 -20
  118. cribl_control_plane/models/outputkinesis.py +121 -22
  119. cribl_control_plane/models/outputloki.py +112 -20
  120. cribl_control_plane/models/outputminio.py +117 -1
  121. cribl_control_plane/models/outputmsk.py +173 -20
  122. cribl_control_plane/models/outputnewrelic.py +123 -22
  123. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  124. cribl_control_plane/models/outputopentelemetry.py +159 -22
  125. cribl_control_plane/models/outputprometheus.py +105 -22
  126. cribl_control_plane/models/outputring.py +29 -1
  127. cribl_control_plane/models/outputs3.py +117 -1
  128. cribl_control_plane/models/outputsecuritylake.py +85 -1
  129. cribl_control_plane/models/outputsentinel.py +123 -22
  130. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  131. cribl_control_plane/models/outputservicenow.py +150 -22
  132. cribl_control_plane/models/outputsignalfx.py +105 -22
  133. cribl_control_plane/models/outputsns.py +103 -20
  134. cribl_control_plane/models/outputsplunk.py +141 -22
  135. cribl_control_plane/models/outputsplunkhec.py +198 -22
  136. cribl_control_plane/models/outputsplunklb.py +170 -22
  137. cribl_control_plane/models/outputsqs.py +112 -20
  138. cribl_control_plane/models/outputstatsd.py +96 -22
  139. cribl_control_plane/models/outputstatsdext.py +96 -22
  140. cribl_control_plane/models/outputsumologic.py +105 -22
  141. cribl_control_plane/models/outputsyslog.py +238 -99
  142. cribl_control_plane/models/outputtcpjson.py +132 -22
  143. cribl_control_plane/models/outputwavefront.py +105 -22
  144. cribl_control_plane/models/outputwebhook.py +141 -22
  145. cribl_control_plane/models/outputxsiam.py +103 -20
  146. cribl_control_plane/models/resourcepolicy.py +11 -0
  147. cribl_control_plane/models/runnablejobcollection.py +68 -9
  148. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  149. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  150. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  151. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  152. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/METADATA +1 -1
  153. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/RECORD +154 -154
  154. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc7.dist-info}/WHEEL +0 -0
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -97,6 +98,24 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
97
98
  pydantic.Field(alias="maxVersion"),
98
99
  ] = None
99
100
 
101
+ @field_serializer("min_version")
102
+ def serialize_min_version(self, value):
103
+ if isinstance(value, str):
104
+ try:
105
+ return models.OutputConfluentCloudMinimumTLSVersion(value)
106
+ except ValueError:
107
+ return value
108
+ return value
109
+
110
+ @field_serializer("max_version")
111
+ def serialize_max_version(self, value):
112
+ if isinstance(value, str):
113
+ try:
114
+ return models.OutputConfluentCloudMaximumTLSVersion(value)
115
+ except ValueError:
116
+ return value
117
+ return value
118
+
100
119
 
101
120
  class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
102
121
  r"""Control the number of required acknowledgments."""
@@ -238,6 +257,28 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
238
257
  pydantic.Field(alias="maxVersion"),
239
258
  ] = None
240
259
 
260
+ @field_serializer("min_version")
261
+ def serialize_min_version(self, value):
262
+ if isinstance(value, str):
263
+ try:
264
+ return models.OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
265
+ value
266
+ )
267
+ except ValueError:
268
+ return value
269
+ return value
270
+
271
+ @field_serializer("max_version")
272
+ def serialize_max_version(self, value):
273
+ if isinstance(value, str):
274
+ try:
275
+ return models.OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
276
+ value
277
+ )
278
+ except ValueError:
279
+ return value
280
+ return value
281
+
241
282
 
242
283
  class OutputConfluentCloudKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
243
284
  disabled: NotRequired[bool]
@@ -297,6 +338,13 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
297
338
  r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
298
339
 
299
340
 
341
+ class OutputConfluentCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
342
+ r"""Enter credentials directly, or select a stored secret"""
343
+
344
+ MANUAL = "manual"
345
+ SECRET = "secret"
346
+
347
+
300
348
  class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
301
349
  # PLAIN
302
350
  PLAIN = "plain"
@@ -308,13 +356,58 @@ class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta)
308
356
  KERBEROS = "kerberos"
309
357
 
310
358
 
359
+ class OutputConfluentCloudOauthParamTypedDict(TypedDict):
360
+ name: str
361
+ value: str
362
+
363
+
364
+ class OutputConfluentCloudOauthParam(BaseModel):
365
+ name: str
366
+
367
+ value: str
368
+
369
+
370
+ class OutputConfluentCloudSaslExtensionTypedDict(TypedDict):
371
+ name: str
372
+ value: str
373
+
374
+
375
+ class OutputConfluentCloudSaslExtension(BaseModel):
376
+ name: str
377
+
378
+ value: str
379
+
380
+
311
381
  class OutputConfluentCloudAuthenticationTypedDict(TypedDict):
312
382
  r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
313
383
 
314
384
  disabled: NotRequired[bool]
385
+ username: NotRequired[str]
386
+ password: NotRequired[str]
387
+ auth_type: NotRequired[OutputConfluentCloudAuthenticationMethod]
388
+ r"""Enter credentials directly, or select a stored secret"""
389
+ credentials_secret: NotRequired[str]
390
+ r"""Select or create a secret that references your credentials"""
315
391
  mechanism: NotRequired[OutputConfluentCloudSASLMechanism]
392
+ keytab_location: NotRequired[str]
393
+ r"""Location of keytab file for authentication principal"""
394
+ principal: NotRequired[str]
395
+ r"""Authentication principal, such as `kafka_user@example.com`"""
396
+ broker_service_class: NotRequired[str]
397
+ r"""Kerberos service class for Kafka brokers, such as `kafka`"""
316
398
  oauth_enabled: NotRequired[bool]
317
399
  r"""Enable OAuth authentication"""
400
+ token_url: NotRequired[str]
401
+ r"""URL of the token endpoint to use for OAuth authentication"""
402
+ client_id: NotRequired[str]
403
+ r"""Client ID to use for OAuth authentication"""
404
+ oauth_secret_type: NotRequired[str]
405
+ client_text_secret: NotRequired[str]
406
+ r"""Select or create a stored text secret"""
407
+ oauth_params: NotRequired[List[OutputConfluentCloudOauthParamTypedDict]]
408
+ r"""Additional fields to send to the token endpoint, such as scope or audience"""
409
+ sasl_extensions: NotRequired[List[OutputConfluentCloudSaslExtensionTypedDict]]
410
+ r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
318
411
 
319
412
 
320
413
  class OutputConfluentCloudAuthentication(BaseModel):
@@ -322,16 +415,92 @@ class OutputConfluentCloudAuthentication(BaseModel):
322
415
 
323
416
  disabled: Optional[bool] = True
324
417
 
418
+ username: Optional[str] = None
419
+
420
+ password: Optional[str] = None
421
+
422
+ auth_type: Annotated[
423
+ Annotated[
424
+ Optional[OutputConfluentCloudAuthenticationMethod],
425
+ PlainValidator(validate_open_enum(False)),
426
+ ],
427
+ pydantic.Field(alias="authType"),
428
+ ] = OutputConfluentCloudAuthenticationMethod.MANUAL
429
+ r"""Enter credentials directly, or select a stored secret"""
430
+
431
+ credentials_secret: Annotated[
432
+ Optional[str], pydantic.Field(alias="credentialsSecret")
433
+ ] = None
434
+ r"""Select or create a secret that references your credentials"""
435
+
325
436
  mechanism: Annotated[
326
437
  Optional[OutputConfluentCloudSASLMechanism],
327
438
  PlainValidator(validate_open_enum(False)),
328
439
  ] = OutputConfluentCloudSASLMechanism.PLAIN
329
440
 
441
+ keytab_location: Annotated[
442
+ Optional[str], pydantic.Field(alias="keytabLocation")
443
+ ] = None
444
+ r"""Location of keytab file for authentication principal"""
445
+
446
+ principal: Optional[str] = None
447
+ r"""Authentication principal, such as `kafka_user@example.com`"""
448
+
449
+ broker_service_class: Annotated[
450
+ Optional[str], pydantic.Field(alias="brokerServiceClass")
451
+ ] = None
452
+ r"""Kerberos service class for Kafka brokers, such as `kafka`"""
453
+
330
454
  oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
331
455
  False
332
456
  )
333
457
  r"""Enable OAuth authentication"""
334
458
 
459
+ token_url: Annotated[Optional[str], pydantic.Field(alias="tokenUrl")] = None
460
+ r"""URL of the token endpoint to use for OAuth authentication"""
461
+
462
+ client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
463
+ r"""Client ID to use for OAuth authentication"""
464
+
465
+ oauth_secret_type: Annotated[
466
+ Optional[str], pydantic.Field(alias="oauthSecretType")
467
+ ] = "secret"
468
+
469
+ client_text_secret: Annotated[
470
+ Optional[str], pydantic.Field(alias="clientTextSecret")
471
+ ] = None
472
+ r"""Select or create a stored text secret"""
473
+
474
+ oauth_params: Annotated[
475
+ Optional[List[OutputConfluentCloudOauthParam]],
476
+ pydantic.Field(alias="oauthParams"),
477
+ ] = None
478
+ r"""Additional fields to send to the token endpoint, such as scope or audience"""
479
+
480
+ sasl_extensions: Annotated[
481
+ Optional[List[OutputConfluentCloudSaslExtension]],
482
+ pydantic.Field(alias="saslExtensions"),
483
+ ] = None
484
+ r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
485
+
486
+ @field_serializer("auth_type")
487
+ def serialize_auth_type(self, value):
488
+ if isinstance(value, str):
489
+ try:
490
+ return models.OutputConfluentCloudAuthenticationMethod(value)
491
+ except ValueError:
492
+ return value
493
+ return value
494
+
495
+ @field_serializer("mechanism")
496
+ def serialize_mechanism(self, value):
497
+ if isinstance(value, str):
498
+ try:
499
+ return models.OutputConfluentCloudSASLMechanism(value)
500
+ except ValueError:
501
+ return value
502
+ return value
503
+
335
504
 
336
505
  class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
337
506
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -344,6 +513,17 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEn
344
513
  QUEUE = "queue"
345
514
 
346
515
 
516
+ class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
517
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
518
+
519
+ # Error
520
+ ERROR = "error"
521
+ # Always On
522
+ ALWAYS = "always"
523
+ # Backpressure
524
+ BACKPRESSURE = "backpressure"
525
+
526
+
347
527
  class OutputConfluentCloudPqCompressCompression(
348
528
  str, Enum, metaclass=utils.OpenEnumMeta
349
529
  ):
@@ -364,17 +544,6 @@ class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumM
364
544
  DROP = "drop"
365
545
 
366
546
 
367
- class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
368
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
369
-
370
- # Error
371
- ERROR = "error"
372
- # Backpressure
373
- BACKPRESSURE = "backpressure"
374
- # Always On
375
- ALWAYS = "always"
376
-
377
-
378
547
  class OutputConfluentCloudPqControlsTypedDict(TypedDict):
379
548
  pass
380
549
 
@@ -438,6 +607,18 @@ class OutputConfluentCloudTypedDict(TypedDict):
438
607
  description: NotRequired[str]
439
608
  protobuf_library_id: NotRequired[str]
440
609
  r"""Select a set of Protobuf definitions for the events you want to send"""
610
+ protobuf_encoding_id: NotRequired[str]
611
+ r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
612
+ pq_strict_ordering: NotRequired[bool]
613
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
614
+ pq_rate_per_sec: NotRequired[float]
615
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
616
+ pq_mode: NotRequired[OutputConfluentCloudMode]
617
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
618
+ pq_max_buffer_size: NotRequired[float]
619
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
620
+ pq_max_backpressure_sec: NotRequired[float]
621
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
441
622
  pq_max_file_size: NotRequired[str]
442
623
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
443
624
  pq_max_size: NotRequired[str]
@@ -448,8 +629,6 @@ class OutputConfluentCloudTypedDict(TypedDict):
448
629
  r"""Codec to use to compress the persisted data"""
449
630
  pq_on_backpressure: NotRequired[OutputConfluentCloudQueueFullBehavior]
450
631
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
451
- pq_mode: NotRequired[OutputConfluentCloudMode]
452
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
453
632
  pq_controls: NotRequired[OutputConfluentCloudPqControlsTypedDict]
454
633
 
455
634
 
@@ -575,6 +754,40 @@ class OutputConfluentCloud(BaseModel):
575
754
  ] = None
576
755
  r"""Select a set of Protobuf definitions for the events you want to send"""
577
756
 
757
+ protobuf_encoding_id: Annotated[
758
+ Optional[str], pydantic.Field(alias="protobufEncodingId")
759
+ ] = None
760
+ r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
761
+
762
+ pq_strict_ordering: Annotated[
763
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
764
+ ] = True
765
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
766
+
767
+ pq_rate_per_sec: Annotated[
768
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
769
+ ] = 0
770
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
771
+
772
+ pq_mode: Annotated[
773
+ Annotated[
774
+ Optional[OutputConfluentCloudMode],
775
+ PlainValidator(validate_open_enum(False)),
776
+ ],
777
+ pydantic.Field(alias="pqMode"),
778
+ ] = OutputConfluentCloudMode.ERROR
779
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
780
+
781
+ pq_max_buffer_size: Annotated[
782
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
783
+ ] = 42
784
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
785
+
786
+ pq_max_backpressure_sec: Annotated[
787
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
788
+ ] = 30
789
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
790
+
578
791
  pq_max_file_size: Annotated[
579
792
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
580
793
  ] = "1 MB"
@@ -606,15 +819,69 @@ class OutputConfluentCloud(BaseModel):
606
819
  ] = OutputConfluentCloudQueueFullBehavior.BLOCK
607
820
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
608
821
 
609
- pq_mode: Annotated[
610
- Annotated[
611
- Optional[OutputConfluentCloudMode],
612
- PlainValidator(validate_open_enum(False)),
613
- ],
614
- pydantic.Field(alias="pqMode"),
615
- ] = OutputConfluentCloudMode.ERROR
616
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
617
-
618
822
  pq_controls: Annotated[
619
823
  Optional[OutputConfluentCloudPqControls], pydantic.Field(alias="pqControls")
620
824
  ] = None
825
+
826
+ @field_serializer("ack")
827
+ def serialize_ack(self, value):
828
+ if isinstance(value, str):
829
+ try:
830
+ return models.OutputConfluentCloudAcknowledgments(value)
831
+ except ValueError:
832
+ return value
833
+ return value
834
+
835
+ @field_serializer("format_")
836
+ def serialize_format_(self, value):
837
+ if isinstance(value, str):
838
+ try:
839
+ return models.OutputConfluentCloudRecordDataFormat(value)
840
+ except ValueError:
841
+ return value
842
+ return value
843
+
844
+ @field_serializer("compression")
845
+ def serialize_compression(self, value):
846
+ if isinstance(value, str):
847
+ try:
848
+ return models.OutputConfluentCloudCompression(value)
849
+ except ValueError:
850
+ return value
851
+ return value
852
+
853
+ @field_serializer("on_backpressure")
854
+ def serialize_on_backpressure(self, value):
855
+ if isinstance(value, str):
856
+ try:
857
+ return models.OutputConfluentCloudBackpressureBehavior(value)
858
+ except ValueError:
859
+ return value
860
+ return value
861
+
862
+ @field_serializer("pq_mode")
863
+ def serialize_pq_mode(self, value):
864
+ if isinstance(value, str):
865
+ try:
866
+ return models.OutputConfluentCloudMode(value)
867
+ except ValueError:
868
+ return value
869
+ return value
870
+
871
+ @field_serializer("pq_compress")
872
+ def serialize_pq_compress(self, value):
873
+ if isinstance(value, str):
874
+ try:
875
+ return models.OutputConfluentCloudPqCompressCompression(value)
876
+ except ValueError:
877
+ return value
878
+ return value
879
+
880
+ @field_serializer("pq_on_backpressure")
881
+ def serialize_pq_on_backpressure(self, value):
882
+ if isinstance(value, str):
883
+ try:
884
+ return models.OutputConfluentCloudQueueFullBehavior(value)
885
+ except ValueError:
886
+ return value
887
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -97,6 +98,24 @@ class OutputCriblHTTPTLSSettingsClientSide(BaseModel):
97
98
  pydantic.Field(alias="maxVersion"),
98
99
  ] = None
99
100
 
101
+ @field_serializer("min_version")
102
+ def serialize_min_version(self, value):
103
+ if isinstance(value, str):
104
+ try:
105
+ return models.OutputCriblHTTPMinimumTLSVersion(value)
106
+ except ValueError:
107
+ return value
108
+ return value
109
+
110
+ @field_serializer("max_version")
111
+ def serialize_max_version(self, value):
112
+ if isinstance(value, str):
113
+ try:
114
+ return models.OutputCriblHTTPMaximumTLSVersion(value)
115
+ except ValueError:
116
+ return value
117
+ return value
118
+
100
119
 
101
120
  class OutputCriblHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
102
121
  r"""Codec to use to compress the data before sending"""
@@ -209,6 +228,17 @@ class OutputCriblHTTPURL(BaseModel):
209
228
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
210
229
 
211
230
 
231
+ class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
232
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
233
+
234
+ # Error
235
+ ERROR = "error"
236
+ # Always On
237
+ ALWAYS = "always"
238
+ # Backpressure
239
+ BACKPRESSURE = "backpressure"
240
+
241
+
212
242
  class OutputCriblHTTPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
213
243
  r"""Codec to use to compress the persisted data"""
214
244
 
@@ -227,17 +257,6 @@ class OutputCriblHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
227
257
  DROP = "drop"
228
258
 
229
259
 
230
- class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
231
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
232
-
233
- # Error
234
- ERROR = "error"
235
- # Backpressure
236
- BACKPRESSURE = "backpressure"
237
- # Always On
238
- ALWAYS = "always"
239
-
240
-
241
260
  class OutputCriblHTTPPqControlsTypedDict(TypedDict):
242
261
  pass
243
262
 
@@ -309,6 +328,16 @@ class OutputCriblHTTPTypedDict(TypedDict):
309
328
  r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
310
329
  load_balance_stats_period_sec: NotRequired[float]
311
330
  r"""How far back in time to keep traffic stats for load balancing purposes"""
331
+ pq_strict_ordering: NotRequired[bool]
332
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
333
+ pq_rate_per_sec: NotRequired[float]
334
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
335
+ pq_mode: NotRequired[OutputCriblHTTPMode]
336
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
337
+ pq_max_buffer_size: NotRequired[float]
338
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
339
+ pq_max_backpressure_sec: NotRequired[float]
340
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
312
341
  pq_max_file_size: NotRequired[str]
313
342
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
314
343
  pq_max_size: NotRequired[str]
@@ -319,8 +348,6 @@ class OutputCriblHTTPTypedDict(TypedDict):
319
348
  r"""Codec to use to compress the persisted data"""
320
349
  pq_on_backpressure: NotRequired[OutputCriblHTTPQueueFullBehavior]
321
350
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
322
- pq_mode: NotRequired[OutputCriblHTTPMode]
323
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
324
351
  pq_controls: NotRequired[OutputCriblHTTPPqControlsTypedDict]
325
352
 
326
353
 
@@ -465,6 +492,34 @@ class OutputCriblHTTP(BaseModel):
465
492
  ] = 300
466
493
  r"""How far back in time to keep traffic stats for load balancing purposes"""
467
494
 
495
+ pq_strict_ordering: Annotated[
496
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
497
+ ] = True
498
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
499
+
500
+ pq_rate_per_sec: Annotated[
501
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
502
+ ] = 0
503
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
504
+
505
+ pq_mode: Annotated[
506
+ Annotated[
507
+ Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
508
+ ],
509
+ pydantic.Field(alias="pqMode"),
510
+ ] = OutputCriblHTTPMode.ERROR
511
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
512
+
513
+ pq_max_buffer_size: Annotated[
514
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
515
+ ] = 42
516
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
517
+
518
+ pq_max_backpressure_sec: Annotated[
519
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
520
+ ] = 30
521
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
522
+
468
523
  pq_max_file_size: Annotated[
469
524
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
470
525
  ] = "1 MB"
@@ -496,14 +551,60 @@ class OutputCriblHTTP(BaseModel):
496
551
  ] = OutputCriblHTTPQueueFullBehavior.BLOCK
497
552
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
498
553
 
499
- pq_mode: Annotated[
500
- Annotated[
501
- Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
502
- ],
503
- pydantic.Field(alias="pqMode"),
504
- ] = OutputCriblHTTPMode.ERROR
505
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
506
-
507
554
  pq_controls: Annotated[
508
555
  Optional[OutputCriblHTTPPqControls], pydantic.Field(alias="pqControls")
509
556
  ] = None
557
+
558
+ @field_serializer("compression")
559
+ def serialize_compression(self, value):
560
+ if isinstance(value, str):
561
+ try:
562
+ return models.OutputCriblHTTPCompression(value)
563
+ except ValueError:
564
+ return value
565
+ return value
566
+
567
+ @field_serializer("failed_request_logging_mode")
568
+ def serialize_failed_request_logging_mode(self, value):
569
+ if isinstance(value, str):
570
+ try:
571
+ return models.OutputCriblHTTPFailedRequestLoggingMode(value)
572
+ except ValueError:
573
+ return value
574
+ return value
575
+
576
+ @field_serializer("on_backpressure")
577
+ def serialize_on_backpressure(self, value):
578
+ if isinstance(value, str):
579
+ try:
580
+ return models.OutputCriblHTTPBackpressureBehavior(value)
581
+ except ValueError:
582
+ return value
583
+ return value
584
+
585
+ @field_serializer("pq_mode")
586
+ def serialize_pq_mode(self, value):
587
+ if isinstance(value, str):
588
+ try:
589
+ return models.OutputCriblHTTPMode(value)
590
+ except ValueError:
591
+ return value
592
+ return value
593
+
594
+ @field_serializer("pq_compress")
595
+ def serialize_pq_compress(self, value):
596
+ if isinstance(value, str):
597
+ try:
598
+ return models.OutputCriblHTTPPqCompressCompression(value)
599
+ except ValueError:
600
+ return value
601
+ return value
602
+
603
+ @field_serializer("pq_on_backpressure")
604
+ def serialize_pq_on_backpressure(self, value):
605
+ if isinstance(value, str):
606
+ try:
607
+ return models.OutputCriblHTTPQueueFullBehavior(value)
608
+ except ValueError:
609
+ return value
610
+ return value