cribl-control-plane 0.2.1rc6__py3-none-any.whl → 0.2.1rc8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic; consult the package registry's advisory page for more details.

Files changed (103)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/lakedatasets.py +28 -0
  3. cribl_control_plane/models/__init__.py +124 -5
  4. cribl_control_plane/models/cribllakedataset.py +4 -0
  5. cribl_control_plane/models/cribllakedatasetupdate.py +4 -0
  6. cribl_control_plane/models/input.py +15 -15
  7. cribl_control_plane/models/inputappscope.py +20 -16
  8. cribl_control_plane/models/inputconfluentcloud.py +110 -0
  9. cribl_control_plane/models/inputcriblhttp.py +20 -16
  10. cribl_control_plane/models/inputcribllakehttp.py +20 -16
  11. cribl_control_plane/models/inputcribltcp.py +20 -16
  12. cribl_control_plane/models/inputdatadogagent.py +20 -16
  13. cribl_control_plane/models/inputedgeprometheus.py +44 -36
  14. cribl_control_plane/models/inputelastic.py +44 -27
  15. cribl_control_plane/models/inputeventhub.py +118 -0
  16. cribl_control_plane/models/inputfile.py +10 -5
  17. cribl_control_plane/models/inputfirehose.py +20 -16
  18. cribl_control_plane/models/inputgrafana.py +39 -31
  19. cribl_control_plane/models/inputhttp.py +20 -16
  20. cribl_control_plane/models/inputhttpraw.py +20 -16
  21. cribl_control_plane/models/inputkafka.py +108 -0
  22. cribl_control_plane/models/inputloki.py +20 -16
  23. cribl_control_plane/models/inputmetrics.py +20 -16
  24. cribl_control_plane/models/inputmodeldriventelemetry.py +20 -16
  25. cribl_control_plane/models/inputopentelemetry.py +19 -15
  26. cribl_control_plane/models/inputprometheus.py +44 -36
  27. cribl_control_plane/models/inputprometheusrw.py +20 -16
  28. cribl_control_plane/models/inputsplunk.py +20 -16
  29. cribl_control_plane/models/inputsplunkhec.py +19 -15
  30. cribl_control_plane/models/inputsyslog.py +39 -31
  31. cribl_control_plane/models/inputsystemmetrics.py +20 -10
  32. cribl_control_plane/models/inputtcp.py +30 -16
  33. cribl_control_plane/models/inputtcpjson.py +20 -16
  34. cribl_control_plane/models/inputwindowsmetrics.py +20 -10
  35. cribl_control_plane/models/inputwineventlogs.py +14 -0
  36. cribl_control_plane/models/inputwizwebhook.py +20 -16
  37. cribl_control_plane/models/inputzscalerhec.py +19 -15
  38. cribl_control_plane/models/jobinfo.py +10 -4
  39. cribl_control_plane/models/jobstatus.py +24 -3
  40. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  41. cribl_control_plane/models/output.py +21 -21
  42. cribl_control_plane/models/outputazureblob.py +7 -0
  43. cribl_control_plane/models/outputazuredataexplorer.py +283 -93
  44. cribl_control_plane/models/outputazureeventhub.py +169 -21
  45. cribl_control_plane/models/outputazurelogs.py +49 -21
  46. cribl_control_plane/models/outputchronicle.py +49 -21
  47. cribl_control_plane/models/outputclickhouse.py +49 -21
  48. cribl_control_plane/models/outputcloudwatch.py +49 -21
  49. cribl_control_plane/models/outputconfluentcloud.py +169 -22
  50. cribl_control_plane/models/outputcriblhttp.py +49 -21
  51. cribl_control_plane/models/outputcribltcp.py +49 -21
  52. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -22
  53. cribl_control_plane/models/outputdatabricks.py +7 -0
  54. cribl_control_plane/models/outputdatadog.py +49 -21
  55. cribl_control_plane/models/outputdataset.py +49 -21
  56. cribl_control_plane/models/outputdls3.py +7 -0
  57. cribl_control_plane/models/outputdynatracehttp.py +49 -21
  58. cribl_control_plane/models/outputdynatraceotlp.py +49 -21
  59. cribl_control_plane/models/outputelastic.py +74 -21
  60. cribl_control_plane/models/outputelasticcloud.py +74 -21
  61. cribl_control_plane/models/outputfilesystem.py +7 -0
  62. cribl_control_plane/models/outputgooglechronicle.py +65 -22
  63. cribl_control_plane/models/outputgooglecloudlogging.py +50 -22
  64. cribl_control_plane/models/outputgooglecloudstorage.py +7 -0
  65. cribl_control_plane/models/outputgooglepubsub.py +49 -21
  66. cribl_control_plane/models/outputgrafanacloud.py +98 -42
  67. cribl_control_plane/models/outputgraphite.py +49 -21
  68. cribl_control_plane/models/outputhoneycomb.py +49 -21
  69. cribl_control_plane/models/outputhumiohec.py +49 -21
  70. cribl_control_plane/models/outputinfluxdb.py +49 -21
  71. cribl_control_plane/models/outputkafka.py +164 -19
  72. cribl_control_plane/models/outputkinesis.py +56 -21
  73. cribl_control_plane/models/outputloki.py +47 -19
  74. cribl_control_plane/models/outputminio.py +7 -0
  75. cribl_control_plane/models/outputmsk.py +56 -19
  76. cribl_control_plane/models/outputnewrelic.py +49 -21
  77. cribl_control_plane/models/outputnewrelicevents.py +50 -22
  78. cribl_control_plane/models/outputopentelemetry.py +49 -21
  79. cribl_control_plane/models/outputprometheus.py +49 -21
  80. cribl_control_plane/models/outputs3.py +7 -0
  81. cribl_control_plane/models/outputsentinel.py +49 -21
  82. cribl_control_plane/models/outputsentineloneaisiem.py +50 -22
  83. cribl_control_plane/models/outputservicenow.py +49 -21
  84. cribl_control_plane/models/outputsignalfx.py +49 -21
  85. cribl_control_plane/models/outputsns.py +47 -19
  86. cribl_control_plane/models/outputsplunk.py +49 -21
  87. cribl_control_plane/models/outputsplunkhec.py +124 -21
  88. cribl_control_plane/models/outputsplunklb.py +49 -21
  89. cribl_control_plane/models/outputsqs.py +47 -19
  90. cribl_control_plane/models/outputstatsd.py +49 -21
  91. cribl_control_plane/models/outputstatsdext.py +49 -21
  92. cribl_control_plane/models/outputsumologic.py +49 -21
  93. cribl_control_plane/models/outputsyslog.py +129 -99
  94. cribl_control_plane/models/outputtcpjson.py +49 -21
  95. cribl_control_plane/models/outputwavefront.py +49 -21
  96. cribl_control_plane/models/outputwebhook.py +49 -21
  97. cribl_control_plane/models/outputxsiam.py +47 -19
  98. cribl_control_plane/models/runnablejobcollection.py +12 -8
  99. cribl_control_plane/models/runnablejobexecutor.py +12 -8
  100. cribl_control_plane/models/runnablejobscheduledsearch.py +12 -8
  101. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/METADATA +25 -7
  102. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/RECORD +103 -102
  103. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/WHEEL +0 -0
@@ -37,6 +37,17 @@ class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMe
37
37
  QUEUE = "queue"
38
38
 
39
39
 
40
+ class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
41
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
42
+
43
+ # Error
44
+ ERROR = "error"
45
+ # Backpressure
46
+ ALWAYS = "always"
47
+ # Always On
48
+ BACKPRESSURE = "backpressure"
49
+
50
+
40
51
  class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
41
52
  r"""Codec to use to compress the persisted data"""
42
53
 
@@ -55,17 +66,6 @@ class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
55
66
  DROP = "drop"
56
67
 
57
68
 
58
- class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
59
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
60
-
61
- # Error
62
- ERROR = "error"
63
- # Backpressure
64
- BACKPRESSURE = "backpressure"
65
- # Always On
66
- ALWAYS = "always"
67
-
68
-
69
69
  class OutputCloudwatchPqControlsTypedDict(TypedDict):
70
70
  pass
71
71
 
@@ -121,6 +121,16 @@ class OutputCloudwatchTypedDict(TypedDict):
121
121
  aws_api_key: NotRequired[str]
122
122
  aws_secret: NotRequired[str]
123
123
  r"""Select or create a stored secret that references your access key and secret key"""
124
+ pq_strict_ordering: NotRequired[bool]
125
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
126
+ pq_rate_per_sec: NotRequired[float]
127
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
128
+ pq_mode: NotRequired[OutputCloudwatchMode]
129
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
130
+ pq_max_buffer_size: NotRequired[float]
131
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
132
+ pq_max_backpressure_sec: NotRequired[float]
133
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
124
134
  pq_max_file_size: NotRequired[str]
125
135
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
126
136
  pq_max_size: NotRequired[str]
@@ -131,8 +141,6 @@ class OutputCloudwatchTypedDict(TypedDict):
131
141
  r"""Codec to use to compress the persisted data"""
132
142
  pq_on_backpressure: NotRequired[OutputCloudwatchQueueFullBehavior]
133
143
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
134
- pq_mode: NotRequired[OutputCloudwatchMode]
135
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
136
144
  pq_controls: NotRequired[OutputCloudwatchPqControlsTypedDict]
137
145
 
138
146
 
@@ -240,6 +248,34 @@ class OutputCloudwatch(BaseModel):
240
248
  aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
241
249
  r"""Select or create a stored secret that references your access key and secret key"""
242
250
 
251
+ pq_strict_ordering: Annotated[
252
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
253
+ ] = True
254
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
255
+
256
+ pq_rate_per_sec: Annotated[
257
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
258
+ ] = 0
259
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
260
+
261
+ pq_mode: Annotated[
262
+ Annotated[
263
+ Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
264
+ ],
265
+ pydantic.Field(alias="pqMode"),
266
+ ] = OutputCloudwatchMode.ERROR
267
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
268
+
269
+ pq_max_buffer_size: Annotated[
270
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
271
+ ] = 42
272
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
273
+
274
+ pq_max_backpressure_sec: Annotated[
275
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
276
+ ] = 30
277
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
278
+
243
279
  pq_max_file_size: Annotated[
244
280
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
245
281
  ] = "1 MB"
@@ -271,14 +307,6 @@ class OutputCloudwatch(BaseModel):
271
307
  ] = OutputCloudwatchQueueFullBehavior.BLOCK
272
308
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
273
309
 
274
- pq_mode: Annotated[
275
- Annotated[
276
- Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
277
- ],
278
- pydantic.Field(alias="pqMode"),
279
- ] = OutputCloudwatchMode.ERROR
280
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
281
-
282
310
  pq_controls: Annotated[
283
311
  Optional[OutputCloudwatchPqControls], pydantic.Field(alias="pqControls")
284
312
  ] = None
@@ -131,6 +131,8 @@ class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
131
131
  SNAPPY = "snappy"
132
132
  # LZ4
133
133
  LZ4 = "lz4"
134
+ # ZSTD
135
+ ZSTD = "zstd"
134
136
 
135
137
 
136
138
  class OutputConfluentCloudAuthTypedDict(TypedDict):
@@ -297,6 +299,13 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
297
299
  r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
298
300
 
299
301
 
302
+ class OutputConfluentCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
303
+ r"""Enter credentials directly, or select a stored secret"""
304
+
305
+ MANUAL = "manual"
306
+ SECRET = "secret"
307
+
308
+
300
309
  class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
301
310
  # PLAIN
302
311
  PLAIN = "plain"
@@ -308,13 +317,58 @@ class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta)
308
317
  KERBEROS = "kerberos"
309
318
 
310
319
 
320
+ class OutputConfluentCloudOauthParamTypedDict(TypedDict):
321
+ name: str
322
+ value: str
323
+
324
+
325
+ class OutputConfluentCloudOauthParam(BaseModel):
326
+ name: str
327
+
328
+ value: str
329
+
330
+
331
+ class OutputConfluentCloudSaslExtensionTypedDict(TypedDict):
332
+ name: str
333
+ value: str
334
+
335
+
336
+ class OutputConfluentCloudSaslExtension(BaseModel):
337
+ name: str
338
+
339
+ value: str
340
+
341
+
311
342
  class OutputConfluentCloudAuthenticationTypedDict(TypedDict):
312
343
  r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
313
344
 
314
345
  disabled: NotRequired[bool]
346
+ username: NotRequired[str]
347
+ password: NotRequired[str]
348
+ auth_type: NotRequired[OutputConfluentCloudAuthenticationMethod]
349
+ r"""Enter credentials directly, or select a stored secret"""
350
+ credentials_secret: NotRequired[str]
351
+ r"""Select or create a secret that references your credentials"""
315
352
  mechanism: NotRequired[OutputConfluentCloudSASLMechanism]
353
+ keytab_location: NotRequired[str]
354
+ r"""Location of keytab file for authentication principal"""
355
+ principal: NotRequired[str]
356
+ r"""Authentication principal, such as `kafka_user@example.com`"""
357
+ broker_service_class: NotRequired[str]
358
+ r"""Kerberos service class for Kafka brokers, such as `kafka`"""
316
359
  oauth_enabled: NotRequired[bool]
317
360
  r"""Enable OAuth authentication"""
361
+ token_url: NotRequired[str]
362
+ r"""URL of the token endpoint to use for OAuth authentication"""
363
+ client_id: NotRequired[str]
364
+ r"""Client ID to use for OAuth authentication"""
365
+ oauth_secret_type: NotRequired[str]
366
+ client_text_secret: NotRequired[str]
367
+ r"""Select or create a stored text secret"""
368
+ oauth_params: NotRequired[List[OutputConfluentCloudOauthParamTypedDict]]
369
+ r"""Additional fields to send to the token endpoint, such as scope or audience"""
370
+ sasl_extensions: NotRequired[List[OutputConfluentCloudSaslExtensionTypedDict]]
371
+ r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
318
372
 
319
373
 
320
374
  class OutputConfluentCloudAuthentication(BaseModel):
@@ -322,16 +376,74 @@ class OutputConfluentCloudAuthentication(BaseModel):
322
376
 
323
377
  disabled: Optional[bool] = True
324
378
 
379
+ username: Optional[str] = None
380
+
381
+ password: Optional[str] = None
382
+
383
+ auth_type: Annotated[
384
+ Annotated[
385
+ Optional[OutputConfluentCloudAuthenticationMethod],
386
+ PlainValidator(validate_open_enum(False)),
387
+ ],
388
+ pydantic.Field(alias="authType"),
389
+ ] = OutputConfluentCloudAuthenticationMethod.MANUAL
390
+ r"""Enter credentials directly, or select a stored secret"""
391
+
392
+ credentials_secret: Annotated[
393
+ Optional[str], pydantic.Field(alias="credentialsSecret")
394
+ ] = None
395
+ r"""Select or create a secret that references your credentials"""
396
+
325
397
  mechanism: Annotated[
326
398
  Optional[OutputConfluentCloudSASLMechanism],
327
399
  PlainValidator(validate_open_enum(False)),
328
400
  ] = OutputConfluentCloudSASLMechanism.PLAIN
329
401
 
402
+ keytab_location: Annotated[
403
+ Optional[str], pydantic.Field(alias="keytabLocation")
404
+ ] = None
405
+ r"""Location of keytab file for authentication principal"""
406
+
407
+ principal: Optional[str] = None
408
+ r"""Authentication principal, such as `kafka_user@example.com`"""
409
+
410
+ broker_service_class: Annotated[
411
+ Optional[str], pydantic.Field(alias="brokerServiceClass")
412
+ ] = None
413
+ r"""Kerberos service class for Kafka brokers, such as `kafka`"""
414
+
330
415
  oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
331
416
  False
332
417
  )
333
418
  r"""Enable OAuth authentication"""
334
419
 
420
+ token_url: Annotated[Optional[str], pydantic.Field(alias="tokenUrl")] = None
421
+ r"""URL of the token endpoint to use for OAuth authentication"""
422
+
423
+ client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
424
+ r"""Client ID to use for OAuth authentication"""
425
+
426
+ oauth_secret_type: Annotated[
427
+ Optional[str], pydantic.Field(alias="oauthSecretType")
428
+ ] = "secret"
429
+
430
+ client_text_secret: Annotated[
431
+ Optional[str], pydantic.Field(alias="clientTextSecret")
432
+ ] = None
433
+ r"""Select or create a stored text secret"""
434
+
435
+ oauth_params: Annotated[
436
+ Optional[List[OutputConfluentCloudOauthParam]],
437
+ pydantic.Field(alias="oauthParams"),
438
+ ] = None
439
+ r"""Additional fields to send to the token endpoint, such as scope or audience"""
440
+
441
+ sasl_extensions: Annotated[
442
+ Optional[List[OutputConfluentCloudSaslExtension]],
443
+ pydantic.Field(alias="saslExtensions"),
444
+ ] = None
445
+ r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
446
+
335
447
 
336
448
  class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
337
449
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -344,6 +456,17 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEn
344
456
  QUEUE = "queue"
345
457
 
346
458
 
459
+ class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
460
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
461
+
462
+ # Error
463
+ ERROR = "error"
464
+ # Backpressure
465
+ ALWAYS = "always"
466
+ # Always On
467
+ BACKPRESSURE = "backpressure"
468
+
469
+
347
470
  class OutputConfluentCloudPqCompressCompression(
348
471
  str, Enum, metaclass=utils.OpenEnumMeta
349
472
  ):
@@ -364,17 +487,6 @@ class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumM
364
487
  DROP = "drop"
365
488
 
366
489
 
367
- class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
368
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
369
-
370
- # Error
371
- ERROR = "error"
372
- # Backpressure
373
- BACKPRESSURE = "backpressure"
374
- # Always On
375
- ALWAYS = "always"
376
-
377
-
378
490
  class OutputConfluentCloudPqControlsTypedDict(TypedDict):
379
491
  pass
380
492
 
@@ -438,6 +550,18 @@ class OutputConfluentCloudTypedDict(TypedDict):
438
550
  description: NotRequired[str]
439
551
  protobuf_library_id: NotRequired[str]
440
552
  r"""Select a set of Protobuf definitions for the events you want to send"""
553
+ protobuf_encoding_id: NotRequired[str]
554
+ r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
555
+ pq_strict_ordering: NotRequired[bool]
556
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
557
+ pq_rate_per_sec: NotRequired[float]
558
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
559
+ pq_mode: NotRequired[OutputConfluentCloudMode]
560
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
561
+ pq_max_buffer_size: NotRequired[float]
562
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
563
+ pq_max_backpressure_sec: NotRequired[float]
564
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
441
565
  pq_max_file_size: NotRequired[str]
442
566
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
443
567
  pq_max_size: NotRequired[str]
@@ -448,8 +572,6 @@ class OutputConfluentCloudTypedDict(TypedDict):
448
572
  r"""Codec to use to compress the persisted data"""
449
573
  pq_on_backpressure: NotRequired[OutputConfluentCloudQueueFullBehavior]
450
574
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
451
- pq_mode: NotRequired[OutputConfluentCloudMode]
452
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
453
575
  pq_controls: NotRequired[OutputConfluentCloudPqControlsTypedDict]
454
576
 
455
577
 
@@ -575,6 +697,40 @@ class OutputConfluentCloud(BaseModel):
575
697
  ] = None
576
698
  r"""Select a set of Protobuf definitions for the events you want to send"""
577
699
 
700
+ protobuf_encoding_id: Annotated[
701
+ Optional[str], pydantic.Field(alias="protobufEncodingId")
702
+ ] = None
703
+ r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
704
+
705
+ pq_strict_ordering: Annotated[
706
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
707
+ ] = True
708
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
709
+
710
+ pq_rate_per_sec: Annotated[
711
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
712
+ ] = 0
713
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
714
+
715
+ pq_mode: Annotated[
716
+ Annotated[
717
+ Optional[OutputConfluentCloudMode],
718
+ PlainValidator(validate_open_enum(False)),
719
+ ],
720
+ pydantic.Field(alias="pqMode"),
721
+ ] = OutputConfluentCloudMode.ERROR
722
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
723
+
724
+ pq_max_buffer_size: Annotated[
725
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
726
+ ] = 42
727
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
728
+
729
+ pq_max_backpressure_sec: Annotated[
730
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
731
+ ] = 30
732
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
733
+
578
734
  pq_max_file_size: Annotated[
579
735
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
580
736
  ] = "1 MB"
@@ -606,15 +762,6 @@ class OutputConfluentCloud(BaseModel):
606
762
  ] = OutputConfluentCloudQueueFullBehavior.BLOCK
607
763
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
608
764
 
609
- pq_mode: Annotated[
610
- Annotated[
611
- Optional[OutputConfluentCloudMode],
612
- PlainValidator(validate_open_enum(False)),
613
- ],
614
- pydantic.Field(alias="pqMode"),
615
- ] = OutputConfluentCloudMode.ERROR
616
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
617
-
618
765
  pq_controls: Annotated[
619
766
  Optional[OutputConfluentCloudPqControls], pydantic.Field(alias="pqControls")
620
767
  ] = None
@@ -209,6 +209,17 @@ class OutputCriblHTTPURL(BaseModel):
209
209
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
210
210
 
211
211
 
212
+ class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
213
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
214
+
215
+ # Error
216
+ ERROR = "error"
217
+ # Backpressure
218
+ ALWAYS = "always"
219
+ # Always On
220
+ BACKPRESSURE = "backpressure"
221
+
222
+
212
223
  class OutputCriblHTTPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
213
224
  r"""Codec to use to compress the persisted data"""
214
225
 
@@ -227,17 +238,6 @@ class OutputCriblHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
227
238
  DROP = "drop"
228
239
 
229
240
 
230
- class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
231
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
232
-
233
- # Error
234
- ERROR = "error"
235
- # Backpressure
236
- BACKPRESSURE = "backpressure"
237
- # Always On
238
- ALWAYS = "always"
239
-
240
-
241
241
  class OutputCriblHTTPPqControlsTypedDict(TypedDict):
242
242
  pass
243
243
 
@@ -309,6 +309,16 @@ class OutputCriblHTTPTypedDict(TypedDict):
309
309
  r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
310
310
  load_balance_stats_period_sec: NotRequired[float]
311
311
  r"""How far back in time to keep traffic stats for load balancing purposes"""
312
+ pq_strict_ordering: NotRequired[bool]
313
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
314
+ pq_rate_per_sec: NotRequired[float]
315
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
316
+ pq_mode: NotRequired[OutputCriblHTTPMode]
317
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
318
+ pq_max_buffer_size: NotRequired[float]
319
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
320
+ pq_max_backpressure_sec: NotRequired[float]
321
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
312
322
  pq_max_file_size: NotRequired[str]
313
323
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
314
324
  pq_max_size: NotRequired[str]
@@ -319,8 +329,6 @@ class OutputCriblHTTPTypedDict(TypedDict):
319
329
  r"""Codec to use to compress the persisted data"""
320
330
  pq_on_backpressure: NotRequired[OutputCriblHTTPQueueFullBehavior]
321
331
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
322
- pq_mode: NotRequired[OutputCriblHTTPMode]
323
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
324
332
  pq_controls: NotRequired[OutputCriblHTTPPqControlsTypedDict]
325
333
 
326
334
 
@@ -465,6 +473,34 @@ class OutputCriblHTTP(BaseModel):
465
473
  ] = 300
466
474
  r"""How far back in time to keep traffic stats for load balancing purposes"""
467
475
 
476
+ pq_strict_ordering: Annotated[
477
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
478
+ ] = True
479
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
480
+
481
+ pq_rate_per_sec: Annotated[
482
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
483
+ ] = 0
484
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
485
+
486
+ pq_mode: Annotated[
487
+ Annotated[
488
+ Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
489
+ ],
490
+ pydantic.Field(alias="pqMode"),
491
+ ] = OutputCriblHTTPMode.ERROR
492
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
493
+
494
+ pq_max_buffer_size: Annotated[
495
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
496
+ ] = 42
497
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
498
+
499
+ pq_max_backpressure_sec: Annotated[
500
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
501
+ ] = 30
502
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
503
+
468
504
  pq_max_file_size: Annotated[
469
505
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
470
506
  ] = "1 MB"
@@ -496,14 +532,6 @@ class OutputCriblHTTP(BaseModel):
496
532
  ] = OutputCriblHTTPQueueFullBehavior.BLOCK
497
533
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
498
534
 
499
- pq_mode: Annotated[
500
- Annotated[
501
- Optional[OutputCriblHTTPMode], PlainValidator(validate_open_enum(False))
502
- ],
503
- pydantic.Field(alias="pqMode"),
504
- ] = OutputCriblHTTPMode.ERROR
505
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
506
-
507
535
  pq_controls: Annotated[
508
536
  Optional[OutputCriblHTTPPqControls], pydantic.Field(alias="pqControls")
509
537
  ] = None
@@ -157,6 +157,17 @@ class OutputCriblTCPHost(BaseModel):
157
157
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
158
158
 
159
159
 
160
+ class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
161
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
162
+
163
+ # Error
164
+ ERROR = "error"
165
+ # Always On
166
+ ALWAYS = "always"
167
+ # Backpressure
168
+ BACKPRESSURE = "backpressure"
169
+
170
+
160
171
  class OutputCriblTCPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
161
172
  r"""Codec to use to compress the persisted data"""
162
173
 
@@ -175,17 +186,6 @@ class OutputCriblTCPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
175
186
  DROP = "drop"
176
187
 
177
188
 
178
- class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
179
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
180
-
181
- # Error
182
- ERROR = "error"
183
- # Backpressure
184
- BACKPRESSURE = "backpressure"
185
- # Always On
186
- ALWAYS = "always"
187
-
188
-
189
189
  class OutputCriblTCPPqControlsTypedDict(TypedDict):
190
190
  pass
191
191
 
@@ -240,6 +240,16 @@ class OutputCriblTCPTypedDict(TypedDict):
240
240
  r"""How far back in time to keep traffic stats for load balancing purposes"""
241
241
  max_concurrent_senders: NotRequired[float]
242
242
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
243
+ pq_strict_ordering: NotRequired[bool]
244
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
245
+ pq_rate_per_sec: NotRequired[float]
246
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
247
+ pq_mode: NotRequired[OutputCriblTCPMode]
248
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
249
+ pq_max_buffer_size: NotRequired[float]
250
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
251
+ pq_max_backpressure_sec: NotRequired[float]
252
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
243
253
  pq_max_file_size: NotRequired[str]
244
254
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
245
255
  pq_max_size: NotRequired[str]
@@ -250,8 +260,6 @@ class OutputCriblTCPTypedDict(TypedDict):
250
260
  r"""Codec to use to compress the persisted data"""
251
261
  pq_on_backpressure: NotRequired[OutputCriblTCPQueueFullBehavior]
252
262
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
253
- pq_mode: NotRequired[OutputCriblTCPMode]
254
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
255
263
  pq_controls: NotRequired[OutputCriblTCPPqControlsTypedDict]
256
264
 
257
265
 
@@ -355,6 +363,34 @@ class OutputCriblTCP(BaseModel):
355
363
  ] = 0
356
364
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""
357
365
 
366
+ pq_strict_ordering: Annotated[
367
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
368
+ ] = True
369
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
370
+
371
+ pq_rate_per_sec: Annotated[
372
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
373
+ ] = 0
374
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
375
+
376
+ pq_mode: Annotated[
377
+ Annotated[
378
+ Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
379
+ ],
380
+ pydantic.Field(alias="pqMode"),
381
+ ] = OutputCriblTCPMode.ERROR
382
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
383
+
384
+ pq_max_buffer_size: Annotated[
385
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
386
+ ] = 42
387
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
388
+
389
+ pq_max_backpressure_sec: Annotated[
390
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
391
+ ] = 30
392
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
393
+
358
394
  pq_max_file_size: Annotated[
359
395
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
360
396
  ] = "1 MB"
@@ -386,14 +422,6 @@ class OutputCriblTCP(BaseModel):
386
422
  ] = OutputCriblTCPQueueFullBehavior.BLOCK
387
423
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
388
424
 
389
- pq_mode: Annotated[
390
- Annotated[
391
- Optional[OutputCriblTCPMode], PlainValidator(validate_open_enum(False))
392
- ],
393
- pydantic.Field(alias="pqMode"),
394
- ] = OutputCriblTCPMode.ERROR
395
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
396
-
397
425
  pq_controls: Annotated[
398
426
  Optional[OutputCriblTCPPqControls], pydantic.Field(alias="pqControls")
399
427
  ] = None