cribl-control-plane 0.2.1rc3__py3-none-any.whl → 0.2.1rc4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic; see the registry's advisory page for more details.

Files changed (102)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +0 -3
  3. cribl_control_plane/models/__init__.py +105 -28
  4. cribl_control_plane/models/input.py +14 -14
  5. cribl_control_plane/models/inputappscope.py +20 -16
  6. cribl_control_plane/models/inputconfluentcloud.py +110 -0
  7. cribl_control_plane/models/inputcriblhttp.py +20 -16
  8. cribl_control_plane/models/inputcribllakehttp.py +20 -16
  9. cribl_control_plane/models/inputcribltcp.py +20 -16
  10. cribl_control_plane/models/inputdatadogagent.py +20 -16
  11. cribl_control_plane/models/inputedgeprometheus.py +44 -36
  12. cribl_control_plane/models/inputelastic.py +44 -27
  13. cribl_control_plane/models/inputeventhub.py +118 -0
  14. cribl_control_plane/models/inputfirehose.py +20 -16
  15. cribl_control_plane/models/inputgrafana.py +39 -31
  16. cribl_control_plane/models/inputhttp.py +20 -16
  17. cribl_control_plane/models/inputhttpraw.py +20 -16
  18. cribl_control_plane/models/inputkafka.py +108 -0
  19. cribl_control_plane/models/inputloki.py +20 -16
  20. cribl_control_plane/models/inputmetrics.py +20 -16
  21. cribl_control_plane/models/inputmodeldriventelemetry.py +20 -16
  22. cribl_control_plane/models/inputopentelemetry.py +19 -15
  23. cribl_control_plane/models/inputprometheus.py +44 -36
  24. cribl_control_plane/models/inputprometheusrw.py +20 -16
  25. cribl_control_plane/models/inputsplunk.py +20 -16
  26. cribl_control_plane/models/inputsplunkhec.py +19 -15
  27. cribl_control_plane/models/inputsyslog.py +39 -31
  28. cribl_control_plane/models/inputsystemmetrics.py +20 -10
  29. cribl_control_plane/models/inputtcp.py +30 -16
  30. cribl_control_plane/models/inputtcpjson.py +20 -16
  31. cribl_control_plane/models/inputwindowsmetrics.py +20 -10
  32. cribl_control_plane/models/inputwineventlogs.py +14 -0
  33. cribl_control_plane/models/inputwizwebhook.py +20 -16
  34. cribl_control_plane/models/inputzscalerhec.py +19 -15
  35. cribl_control_plane/models/output.py +22 -22
  36. cribl_control_plane/models/outputazureblob.py +7 -0
  37. cribl_control_plane/models/outputazuredataexplorer.py +283 -93
  38. cribl_control_plane/models/outputazureeventhub.py +169 -21
  39. cribl_control_plane/models/outputazurelogs.py +49 -21
  40. cribl_control_plane/models/outputchronicle.py +49 -21
  41. cribl_control_plane/models/outputclickhouse.py +49 -21
  42. cribl_control_plane/models/outputcloudwatch.py +49 -21
  43. cribl_control_plane/models/outputconfluentcloud.py +167 -22
  44. cribl_control_plane/models/outputcriblhttp.py +49 -21
  45. cribl_control_plane/models/outputcribltcp.py +49 -21
  46. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -22
  47. cribl_control_plane/models/outputdatabricks.py +7 -0
  48. cribl_control_plane/models/outputdatadog.py +49 -21
  49. cribl_control_plane/models/outputdataset.py +49 -21
  50. cribl_control_plane/models/outputdls3.py +7 -0
  51. cribl_control_plane/models/outputdynatracehttp.py +49 -21
  52. cribl_control_plane/models/outputdynatraceotlp.py +49 -21
  53. cribl_control_plane/models/outputelastic.py +74 -21
  54. cribl_control_plane/models/outputelasticcloud.py +74 -21
  55. cribl_control_plane/models/outputfilesystem.py +7 -0
  56. cribl_control_plane/models/outputgooglechronicle.py +65 -22
  57. cribl_control_plane/models/outputgooglecloudlogging.py +50 -22
  58. cribl_control_plane/models/outputgooglecloudstorage.py +7 -0
  59. cribl_control_plane/models/outputgooglepubsub.py +49 -21
  60. cribl_control_plane/models/outputgrafanacloud.py +98 -42
  61. cribl_control_plane/models/outputgraphite.py +49 -21
  62. cribl_control_plane/models/outputhoneycomb.py +49 -21
  63. cribl_control_plane/models/outputhumiohec.py +49 -21
  64. cribl_control_plane/models/outputinfluxdb.py +49 -21
  65. cribl_control_plane/models/outputkafka.py +162 -19
  66. cribl_control_plane/models/outputkinesis.py +56 -21
  67. cribl_control_plane/models/outputloki.py +47 -19
  68. cribl_control_plane/models/outputminio.py +7 -0
  69. cribl_control_plane/models/outputmsk.py +54 -19
  70. cribl_control_plane/models/outputnewrelic.py +49 -21
  71. cribl_control_plane/models/outputnewrelicevents.py +50 -22
  72. cribl_control_plane/models/outputopentelemetry.py +49 -21
  73. cribl_control_plane/models/outputprometheus.py +49 -21
  74. cribl_control_plane/models/outputs3.py +7 -0
  75. cribl_control_plane/models/outputsentinel.py +49 -21
  76. cribl_control_plane/models/outputsentineloneaisiem.py +50 -22
  77. cribl_control_plane/models/outputservicenow.py +49 -21
  78. cribl_control_plane/models/outputsignalfx.py +49 -21
  79. cribl_control_plane/models/outputsns.py +47 -19
  80. cribl_control_plane/models/outputsplunk.py +49 -21
  81. cribl_control_plane/models/outputsplunkhec.py +49 -21
  82. cribl_control_plane/models/outputsplunklb.py +49 -21
  83. cribl_control_plane/models/outputsqs.py +47 -19
  84. cribl_control_plane/models/outputstatsd.py +49 -21
  85. cribl_control_plane/models/outputstatsdext.py +49 -21
  86. cribl_control_plane/models/outputsumologic.py +49 -21
  87. cribl_control_plane/models/outputsyslog.py +129 -99
  88. cribl_control_plane/models/outputtcpjson.py +49 -21
  89. cribl_control_plane/models/outputwavefront.py +49 -21
  90. cribl_control_plane/models/outputwebhook.py +49 -21
  91. cribl_control_plane/models/outputxsiam.py +47 -19
  92. cribl_control_plane/models/pipeline.py +4 -4
  93. cribl_control_plane/models/runnablejobcollection.py +12 -8
  94. cribl_control_plane/models/runnablejobexecutor.py +12 -8
  95. cribl_control_plane/models/runnablejobscheduledsearch.py +12 -8
  96. cribl_control_plane/pipelines.py +8 -8
  97. {cribl_control_plane-0.2.1rc3.dist-info → cribl_control_plane-0.2.1rc4.dist-info}/METADATA +2 -6
  98. {cribl_control_plane-0.2.1rc3.dist-info → cribl_control_plane-0.2.1rc4.dist-info}/RECORD +99 -102
  99. cribl_control_plane/mappings.py +0 -205
  100. cribl_control_plane/models/createadminproductsmappingsactivatebyproductop.py +0 -52
  101. cribl_control_plane/models/rulesetid.py +0 -13
  102. {cribl_control_plane-0.2.1rc3.dist-info → cribl_control_plane-0.2.1rc4.dist-info}/WHEEL +0 -0
@@ -37,6 +37,17 @@ class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMe
37
37
  QUEUE = "queue"
38
38
 
39
39
 
40
+ class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
41
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
42
+
43
+ # Error
44
+ ERROR = "error"
45
+ # Backpressure
46
+ ALWAYS = "always"
47
+ # Always On
48
+ BACKPRESSURE = "backpressure"
49
+
50
+
40
51
  class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
41
52
  r"""Codec to use to compress the persisted data"""
42
53
 
@@ -55,17 +66,6 @@ class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
55
66
  DROP = "drop"
56
67
 
57
68
 
58
- class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
59
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
60
-
61
- # Error
62
- ERROR = "error"
63
- # Backpressure
64
- BACKPRESSURE = "backpressure"
65
- # Always On
66
- ALWAYS = "always"
67
-
68
-
69
69
  class OutputCloudwatchPqControlsTypedDict(TypedDict):
70
70
  pass
71
71
 
@@ -121,6 +121,16 @@ class OutputCloudwatchTypedDict(TypedDict):
121
121
  aws_api_key: NotRequired[str]
122
122
  aws_secret: NotRequired[str]
123
123
  r"""Select or create a stored secret that references your access key and secret key"""
124
+ pq_strict_ordering: NotRequired[bool]
125
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
126
+ pq_rate_per_sec: NotRequired[float]
127
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
128
+ pq_mode: NotRequired[OutputCloudwatchMode]
129
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
130
+ pq_max_buffer_size: NotRequired[float]
131
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
132
+ pq_max_backpressure_sec: NotRequired[float]
133
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
124
134
  pq_max_file_size: NotRequired[str]
125
135
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
126
136
  pq_max_size: NotRequired[str]
@@ -131,8 +141,6 @@ class OutputCloudwatchTypedDict(TypedDict):
131
141
  r"""Codec to use to compress the persisted data"""
132
142
  pq_on_backpressure: NotRequired[OutputCloudwatchQueueFullBehavior]
133
143
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
134
- pq_mode: NotRequired[OutputCloudwatchMode]
135
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
136
144
  pq_controls: NotRequired[OutputCloudwatchPqControlsTypedDict]
137
145
 
138
146
 
@@ -240,6 +248,34 @@ class OutputCloudwatch(BaseModel):
240
248
  aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
241
249
  r"""Select or create a stored secret that references your access key and secret key"""
242
250
 
251
+ pq_strict_ordering: Annotated[
252
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
253
+ ] = True
254
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
255
+
256
+ pq_rate_per_sec: Annotated[
257
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
258
+ ] = 0
259
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
260
+
261
+ pq_mode: Annotated[
262
+ Annotated[
263
+ Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
264
+ ],
265
+ pydantic.Field(alias="pqMode"),
266
+ ] = OutputCloudwatchMode.ERROR
267
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
268
+
269
+ pq_max_buffer_size: Annotated[
270
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
271
+ ] = 42
272
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
273
+
274
+ pq_max_backpressure_sec: Annotated[
275
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
276
+ ] = 30
277
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
278
+
243
279
  pq_max_file_size: Annotated[
244
280
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
245
281
  ] = "1 MB"
@@ -271,14 +307,6 @@ class OutputCloudwatch(BaseModel):
271
307
  ] = OutputCloudwatchQueueFullBehavior.BLOCK
272
308
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
273
309
 
274
- pq_mode: Annotated[
275
- Annotated[
276
- Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
277
- ],
278
- pydantic.Field(alias="pqMode"),
279
- ] = OutputCloudwatchMode.ERROR
280
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
281
-
282
310
  pq_controls: Annotated[
283
311
  Optional[OutputCloudwatchPqControls], pydantic.Field(alias="pqControls")
284
312
  ] = None
@@ -297,6 +297,13 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
297
297
  r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
298
298
 
299
299
 
300
+ class OutputConfluentCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
301
+ r"""Enter credentials directly, or select a stored secret"""
302
+
303
+ MANUAL = "manual"
304
+ SECRET = "secret"
305
+
306
+
300
307
  class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
301
308
  # PLAIN
302
309
  PLAIN = "plain"
@@ -308,13 +315,58 @@ class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta)
308
315
  KERBEROS = "kerberos"
309
316
 
310
317
 
318
+ class OutputConfluentCloudOauthParamTypedDict(TypedDict):
319
+ name: str
320
+ value: str
321
+
322
+
323
+ class OutputConfluentCloudOauthParam(BaseModel):
324
+ name: str
325
+
326
+ value: str
327
+
328
+
329
+ class OutputConfluentCloudSaslExtensionTypedDict(TypedDict):
330
+ name: str
331
+ value: str
332
+
333
+
334
+ class OutputConfluentCloudSaslExtension(BaseModel):
335
+ name: str
336
+
337
+ value: str
338
+
339
+
311
340
  class OutputConfluentCloudAuthenticationTypedDict(TypedDict):
312
341
  r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
313
342
 
314
343
  disabled: NotRequired[bool]
344
+ username: NotRequired[str]
345
+ password: NotRequired[str]
346
+ auth_type: NotRequired[OutputConfluentCloudAuthenticationMethod]
347
+ r"""Enter credentials directly, or select a stored secret"""
348
+ credentials_secret: NotRequired[str]
349
+ r"""Select or create a secret that references your credentials"""
315
350
  mechanism: NotRequired[OutputConfluentCloudSASLMechanism]
351
+ keytab_location: NotRequired[str]
352
+ r"""Location of keytab file for authentication principal"""
353
+ principal: NotRequired[str]
354
+ r"""Authentication principal, such as `kafka_user@example.com`"""
355
+ broker_service_class: NotRequired[str]
356
+ r"""Kerberos service class for Kafka brokers, such as `kafka`"""
316
357
  oauth_enabled: NotRequired[bool]
317
358
  r"""Enable OAuth authentication"""
359
+ token_url: NotRequired[str]
360
+ r"""URL of the token endpoint to use for OAuth authentication"""
361
+ client_id: NotRequired[str]
362
+ r"""Client ID to use for OAuth authentication"""
363
+ oauth_secret_type: NotRequired[str]
364
+ client_text_secret: NotRequired[str]
365
+ r"""Select or create a stored text secret"""
366
+ oauth_params: NotRequired[List[OutputConfluentCloudOauthParamTypedDict]]
367
+ r"""Additional fields to send to the token endpoint, such as scope or audience"""
368
+ sasl_extensions: NotRequired[List[OutputConfluentCloudSaslExtensionTypedDict]]
369
+ r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
318
370
 
319
371
 
320
372
  class OutputConfluentCloudAuthentication(BaseModel):
@@ -322,16 +374,74 @@ class OutputConfluentCloudAuthentication(BaseModel):
322
374
 
323
375
  disabled: Optional[bool] = True
324
376
 
377
+ username: Optional[str] = None
378
+
379
+ password: Optional[str] = None
380
+
381
+ auth_type: Annotated[
382
+ Annotated[
383
+ Optional[OutputConfluentCloudAuthenticationMethod],
384
+ PlainValidator(validate_open_enum(False)),
385
+ ],
386
+ pydantic.Field(alias="authType"),
387
+ ] = OutputConfluentCloudAuthenticationMethod.MANUAL
388
+ r"""Enter credentials directly, or select a stored secret"""
389
+
390
+ credentials_secret: Annotated[
391
+ Optional[str], pydantic.Field(alias="credentialsSecret")
392
+ ] = None
393
+ r"""Select or create a secret that references your credentials"""
394
+
325
395
  mechanism: Annotated[
326
396
  Optional[OutputConfluentCloudSASLMechanism],
327
397
  PlainValidator(validate_open_enum(False)),
328
398
  ] = OutputConfluentCloudSASLMechanism.PLAIN
329
399
 
400
+ keytab_location: Annotated[
401
+ Optional[str], pydantic.Field(alias="keytabLocation")
402
+ ] = None
403
+ r"""Location of keytab file for authentication principal"""
404
+
405
+ principal: Optional[str] = None
406
+ r"""Authentication principal, such as `kafka_user@example.com`"""
407
+
408
+ broker_service_class: Annotated[
409
+ Optional[str], pydantic.Field(alias="brokerServiceClass")
410
+ ] = None
411
+ r"""Kerberos service class for Kafka brokers, such as `kafka`"""
412
+
330
413
  oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
331
414
  False
332
415
  )
333
416
  r"""Enable OAuth authentication"""
334
417
 
418
+ token_url: Annotated[Optional[str], pydantic.Field(alias="tokenUrl")] = None
419
+ r"""URL of the token endpoint to use for OAuth authentication"""
420
+
421
+ client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
422
+ r"""Client ID to use for OAuth authentication"""
423
+
424
+ oauth_secret_type: Annotated[
425
+ Optional[str], pydantic.Field(alias="oauthSecretType")
426
+ ] = "secret"
427
+
428
+ client_text_secret: Annotated[
429
+ Optional[str], pydantic.Field(alias="clientTextSecret")
430
+ ] = None
431
+ r"""Select or create a stored text secret"""
432
+
433
+ oauth_params: Annotated[
434
+ Optional[List[OutputConfluentCloudOauthParam]],
435
+ pydantic.Field(alias="oauthParams"),
436
+ ] = None
437
+ r"""Additional fields to send to the token endpoint, such as scope or audience"""
438
+
439
+ sasl_extensions: Annotated[
440
+ Optional[List[OutputConfluentCloudSaslExtension]],
441
+ pydantic.Field(alias="saslExtensions"),
442
+ ] = None
443
+ r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
444
+
335
445
 
336
446
  class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
337
447
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -344,6 +454,17 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEn
344
454
  QUEUE = "queue"
345
455
 
346
456
 
457
+ class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
458
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
459
+
460
+ # Error
461
+ ERROR = "error"
462
+ # Backpressure
463
+ ALWAYS = "always"
464
+ # Always On
465
+ BACKPRESSURE = "backpressure"
466
+
467
+
347
468
  class OutputConfluentCloudPqCompressCompression(
348
469
  str, Enum, metaclass=utils.OpenEnumMeta
349
470
  ):
@@ -364,17 +485,6 @@ class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumM
364
485
  DROP = "drop"
365
486
 
366
487
 
367
- class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
368
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
369
-
370
- # Error
371
- ERROR = "error"
372
- # Backpressure
373
- BACKPRESSURE = "backpressure"
374
- # Always On
375
- ALWAYS = "always"
376
-
377
-
378
488
  class OutputConfluentCloudPqControlsTypedDict(TypedDict):
379
489
  pass
380
490
 
@@ -438,6 +548,18 @@ class OutputConfluentCloudTypedDict(TypedDict):
438
548
  description: NotRequired[str]
439
549
  protobuf_library_id: NotRequired[str]
440
550
  r"""Select a set of Protobuf definitions for the events you want to send"""
551
+ protobuf_encoding_id: NotRequired[str]
552
+ r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
553
+ pq_strict_ordering: NotRequired[bool]
554
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
555
+ pq_rate_per_sec: NotRequired[float]
556
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
557
+ pq_mode: NotRequired[OutputConfluentCloudMode]
558
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
559
+ pq_max_buffer_size: NotRequired[float]
560
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
561
+ pq_max_backpressure_sec: NotRequired[float]
562
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
441
563
  pq_max_file_size: NotRequired[str]
442
564
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
443
565
  pq_max_size: NotRequired[str]
@@ -448,8 +570,6 @@ class OutputConfluentCloudTypedDict(TypedDict):
448
570
  r"""Codec to use to compress the persisted data"""
449
571
  pq_on_backpressure: NotRequired[OutputConfluentCloudQueueFullBehavior]
450
572
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
451
- pq_mode: NotRequired[OutputConfluentCloudMode]
452
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
453
573
  pq_controls: NotRequired[OutputConfluentCloudPqControlsTypedDict]
454
574
 
455
575
 
@@ -575,6 +695,40 @@ class OutputConfluentCloud(BaseModel):
575
695
  ] = None
576
696
  r"""Select a set of Protobuf definitions for the events you want to send"""
577
697
 
698
+ protobuf_encoding_id: Annotated[
699
+ Optional[str], pydantic.Field(alias="protobufEncodingId")
700
+ ] = None
701
+ r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
702
+
703
+ pq_strict_ordering: Annotated[
704
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
705
+ ] = True
706
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
707
+
708
+ pq_rate_per_sec: Annotated[
709
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
710
+ ] = 0
711
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
712
+
713
+ pq_mode: Annotated[
714
+ Annotated[
715
+ Optional[OutputConfluentCloudMode],
716
+ PlainValidator(validate_open_enum(False)),
717
+ ],
718
+ pydantic.Field(alias="pqMode"),
719
+ ] = OutputConfluentCloudMode.ERROR
720
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
721
+
722
+ pq_max_buffer_size: Annotated[
723
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
724
+ ] = 42
725
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
726
+
727
+ pq_max_backpressure_sec: Annotated[
728
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
729
+ ] = 30
730
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
731
+
578
732
  pq_max_file_size: Annotated[
579
733
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
580
734
  ] = "1 MB"
@@ -606,15 +760,6 @@ class OutputConfluentCloud(BaseModel):
606
760
  ] = OutputConfluentCloudQueueFullBehavior.BLOCK
607
761
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
608
762
 
609
- pq_mode: Annotated[
610
- Annotated[
611
- Optional[OutputConfluentCloudMode],
612
- PlainValidator(validate_open_enum(False)),
613
- ],
614
- pydantic.Field(alias="pqMode"),
615
- ] = OutputConfluentCloudMode.ERROR
616
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
617
-
618
763
  pq_controls: Annotated[
619
764
  Optional[OutputConfluentCloudPqControls], pydantic.Field(alias="pqControls")
620
765
  ] = None
@@ -209,6 +209,17 @@ class OutputCriblHTTPURL(BaseModel):
209
209
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
210
210
 
211
211
 
212
+ class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
213
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
214
+
215
+ # Error
216
+ ERROR = "error"
217
+ # Backpressure
218
+ ALWAYS = "always"
219
+ # Always On
220
+ BACKPRESSURE = "backpressure"
221
+
222
+
212
223
  class OutputCriblHTTPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
213
224
  r"""Codec to use to compress the persisted data"""
214
225
 
@@ -227,17 +238,6 @@ class OutputCriblHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
227
238
  DROP = "drop"
228
239
 
229
240
 
230
- class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
231
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
232
-
233
- # Error
234
- ERROR = "error"
235
- # Backpressure
236
- BACKPRESSURE = "backpressure"
237
- # Always On
238
- ALWAYS = "always"
239
-
240
-
241
241
  class OutputCriblHTTPPqControlsTypedDict(TypedDict):
242
242
  pass
243
243
 
@@ -309,6 +309,16 @@ class OutputCriblHTTPTypedDict(TypedDict):
309
309
  r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
310
310
  load_balance_stats_period_sec: NotRequired[float]
311
311
  r"""How far back in time to keep traffic stats for load balancing purposes"""
312
+ pq_strict_ordering: NotRequired[bool]
313
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
314
+ pq_rate_per_sec: NotRequired[float]
315
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
316
+ pq_mode: NotRequired[OutputCriblHTTPMode]
317
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
318
+ pq_max_buffer_size: NotRequired[float]
319
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
320
+ pq_max_backpressure_sec: NotRequired[float]
321
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
312
322
  pq_max_file_size: NotRequired[str]
313
323
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
314
324
  pq_max_size: NotRequired[str]
@@ -319,8 +329,6 @@ class OutputCriblHTTPTypedDict(TypedDict):
319
329
  r"""Codec to use to compress the persisted data"""
320
330
  pq_on_backpressure: NotRequired[OutputCriblHTTPQueueFullBehavior]
321
331
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
322
- pq_mode: NotRequired[OutputCriblHTTPMode]
323
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
324
332
  pq_controls: NotRequired[OutputCriblHTTPPqControlsTypedDict]
325
333
 
326
334
 
@@ -465,6 +473,34 @@ class OutputCriblHTTP(BaseModel):
465
473
  ] = 300
466
474
  r"""How far back in time to keep traffic stats for load balancing purposes"""
467
475
 
476
+ pq_strict_ordering: Annotated[
477
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
478
+ ] = True
479
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
480
+
481
+ pq_rate_per_sec: Annotated[
482
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
483
+ ] = 0
484
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
485
+
486
+ pq_mode: Annotated[
487
+ Annotated[
488
+ Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
489
+ ],
490
+ pydantic.Field(alias="pqMode"),
491
+ ] = OutputCriblHTTPMode.ERROR
492
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
493
+
494
+ pq_max_buffer_size: Annotated[
495
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
496
+ ] = 42
497
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
498
+
499
+ pq_max_backpressure_sec: Annotated[
500
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
501
+ ] = 30
502
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
503
+
468
504
  pq_max_file_size: Annotated[
469
505
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
470
506
  ] = "1 MB"
@@ -496,14 +532,6 @@ class OutputCriblHTTP(BaseModel):
496
532
  ] = OutputCriblHTTPQueueFullBehavior.BLOCK
497
533
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
498
534
 
499
- pq_mode: Annotated[
500
- Annotated[
501
- Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
502
- ],
503
- pydantic.Field(alias="pqMode"),
504
- ] = OutputCriblHTTPMode.ERROR
505
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
506
-
507
535
  pq_controls: Annotated[
508
536
  Optional[OutputCriblHTTPPqControls], pydantic.Field(alias="pqControls")
509
537
  ] = None
@@ -157,6 +157,17 @@ class OutputCriblTCPHost(BaseModel):
157
157
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
158
158
 
159
159
 
160
+ class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
161
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
162
+
163
+ # Error
164
+ ERROR = "error"
165
+ # Always On
166
+ ALWAYS = "always"
167
+ # Backpressure
168
+ BACKPRESSURE = "backpressure"
169
+
170
+
160
171
  class OutputCriblTCPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
161
172
  r"""Codec to use to compress the persisted data"""
162
173
 
@@ -175,17 +186,6 @@ class OutputCriblTCPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
175
186
  DROP = "drop"
176
187
 
177
188
 
178
- class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
179
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
180
-
181
- # Error
182
- ERROR = "error"
183
- # Backpressure
184
- BACKPRESSURE = "backpressure"
185
- # Always On
186
- ALWAYS = "always"
187
-
188
-
189
189
  class OutputCriblTCPPqControlsTypedDict(TypedDict):
190
190
  pass
191
191
 
@@ -240,6 +240,16 @@ class OutputCriblTCPTypedDict(TypedDict):
240
240
  r"""How far back in time to keep traffic stats for load balancing purposes"""
241
241
  max_concurrent_senders: NotRequired[float]
242
242
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
243
+ pq_strict_ordering: NotRequired[bool]
244
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
245
+ pq_rate_per_sec: NotRequired[float]
246
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
247
+ pq_mode: NotRequired[OutputCriblTCPMode]
248
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
249
+ pq_max_buffer_size: NotRequired[float]
250
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
251
+ pq_max_backpressure_sec: NotRequired[float]
252
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
243
253
  pq_max_file_size: NotRequired[str]
244
254
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
245
255
  pq_max_size: NotRequired[str]
@@ -250,8 +260,6 @@ class OutputCriblTCPTypedDict(TypedDict):
250
260
  r"""Codec to use to compress the persisted data"""
251
261
  pq_on_backpressure: NotRequired[OutputCriblTCPQueueFullBehavior]
252
262
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
253
- pq_mode: NotRequired[OutputCriblTCPMode]
254
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
255
263
  pq_controls: NotRequired[OutputCriblTCPPqControlsTypedDict]
256
264
 
257
265
 
@@ -355,6 +363,34 @@ class OutputCriblTCP(BaseModel):
355
363
  ] = 0
356
364
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
357
365
 
366
+ pq_strict_ordering: Annotated[
367
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
368
+ ] = True
369
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
370
+
371
+ pq_rate_per_sec: Annotated[
372
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
373
+ ] = 0
374
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
375
+
376
+ pq_mode: Annotated[
377
+ Annotated[
378
+ Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
379
+ ],
380
+ pydantic.Field(alias="pqMode"),
381
+ ] = OutputCriblTCPMode.ERROR
382
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
383
+
384
+ pq_max_buffer_size: Annotated[
385
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
386
+ ] = 42
387
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
388
+
389
+ pq_max_backpressure_sec: Annotated[
390
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
391
+ ] = 30
392
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
393
+
358
394
  pq_max_file_size: Annotated[
359
395
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
360
396
  ] = "1 MB"
@@ -386,14 +422,6 @@ class OutputCriblTCP(BaseModel):
386
422
  ] = OutputCriblTCPQueueFullBehavior.BLOCK
387
423
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
388
424
 
389
- pq_mode: Annotated[
390
- Annotated[
391
- Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
392
- ],
393
- pydantic.Field(alias="pqMode"),
394
- ] = OutputCriblTCPMode.ERROR
395
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
396
-
397
425
  pq_controls: Annotated[
398
426
  Optional[OutputCriblTCPPqControls], pydantic.Field(alias="pqControls")
399
427
  ] = None