cribl-control-plane 0.2.1rc6__py3-none-any.whl → 0.2.1rc8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (103)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/lakedatasets.py +28 -0
  3. cribl_control_plane/models/__init__.py +124 -5
  4. cribl_control_plane/models/cribllakedataset.py +4 -0
  5. cribl_control_plane/models/cribllakedatasetupdate.py +4 -0
  6. cribl_control_plane/models/input.py +15 -15
  7. cribl_control_plane/models/inputappscope.py +20 -16
  8. cribl_control_plane/models/inputconfluentcloud.py +110 -0
  9. cribl_control_plane/models/inputcriblhttp.py +20 -16
  10. cribl_control_plane/models/inputcribllakehttp.py +20 -16
  11. cribl_control_plane/models/inputcribltcp.py +20 -16
  12. cribl_control_plane/models/inputdatadogagent.py +20 -16
  13. cribl_control_plane/models/inputedgeprometheus.py +44 -36
  14. cribl_control_plane/models/inputelastic.py +44 -27
  15. cribl_control_plane/models/inputeventhub.py +118 -0
  16. cribl_control_plane/models/inputfile.py +10 -5
  17. cribl_control_plane/models/inputfirehose.py +20 -16
  18. cribl_control_plane/models/inputgrafana.py +39 -31
  19. cribl_control_plane/models/inputhttp.py +20 -16
  20. cribl_control_plane/models/inputhttpraw.py +20 -16
  21. cribl_control_plane/models/inputkafka.py +108 -0
  22. cribl_control_plane/models/inputloki.py +20 -16
  23. cribl_control_plane/models/inputmetrics.py +20 -16
  24. cribl_control_plane/models/inputmodeldriventelemetry.py +20 -16
  25. cribl_control_plane/models/inputopentelemetry.py +19 -15
  26. cribl_control_plane/models/inputprometheus.py +44 -36
  27. cribl_control_plane/models/inputprometheusrw.py +20 -16
  28. cribl_control_plane/models/inputsplunk.py +20 -16
  29. cribl_control_plane/models/inputsplunkhec.py +19 -15
  30. cribl_control_plane/models/inputsyslog.py +39 -31
  31. cribl_control_plane/models/inputsystemmetrics.py +20 -10
  32. cribl_control_plane/models/inputtcp.py +30 -16
  33. cribl_control_plane/models/inputtcpjson.py +20 -16
  34. cribl_control_plane/models/inputwindowsmetrics.py +20 -10
  35. cribl_control_plane/models/inputwineventlogs.py +14 -0
  36. cribl_control_plane/models/inputwizwebhook.py +20 -16
  37. cribl_control_plane/models/inputzscalerhec.py +19 -15
  38. cribl_control_plane/models/jobinfo.py +10 -4
  39. cribl_control_plane/models/jobstatus.py +24 -3
  40. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  41. cribl_control_plane/models/output.py +21 -21
  42. cribl_control_plane/models/outputazureblob.py +7 -0
  43. cribl_control_plane/models/outputazuredataexplorer.py +283 -93
  44. cribl_control_plane/models/outputazureeventhub.py +169 -21
  45. cribl_control_plane/models/outputazurelogs.py +49 -21
  46. cribl_control_plane/models/outputchronicle.py +49 -21
  47. cribl_control_plane/models/outputclickhouse.py +49 -21
  48. cribl_control_plane/models/outputcloudwatch.py +49 -21
  49. cribl_control_plane/models/outputconfluentcloud.py +169 -22
  50. cribl_control_plane/models/outputcriblhttp.py +49 -21
  51. cribl_control_plane/models/outputcribltcp.py +49 -21
  52. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -22
  53. cribl_control_plane/models/outputdatabricks.py +7 -0
  54. cribl_control_plane/models/outputdatadog.py +49 -21
  55. cribl_control_plane/models/outputdataset.py +49 -21
  56. cribl_control_plane/models/outputdls3.py +7 -0
  57. cribl_control_plane/models/outputdynatracehttp.py +49 -21
  58. cribl_control_plane/models/outputdynatraceotlp.py +49 -21
  59. cribl_control_plane/models/outputelastic.py +74 -21
  60. cribl_control_plane/models/outputelasticcloud.py +74 -21
  61. cribl_control_plane/models/outputfilesystem.py +7 -0
  62. cribl_control_plane/models/outputgooglechronicle.py +65 -22
  63. cribl_control_plane/models/outputgooglecloudlogging.py +50 -22
  64. cribl_control_plane/models/outputgooglecloudstorage.py +7 -0
  65. cribl_control_plane/models/outputgooglepubsub.py +49 -21
  66. cribl_control_plane/models/outputgrafanacloud.py +98 -42
  67. cribl_control_plane/models/outputgraphite.py +49 -21
  68. cribl_control_plane/models/outputhoneycomb.py +49 -21
  69. cribl_control_plane/models/outputhumiohec.py +49 -21
  70. cribl_control_plane/models/outputinfluxdb.py +49 -21
  71. cribl_control_plane/models/outputkafka.py +164 -19
  72. cribl_control_plane/models/outputkinesis.py +56 -21
  73. cribl_control_plane/models/outputloki.py +47 -19
  74. cribl_control_plane/models/outputminio.py +7 -0
  75. cribl_control_plane/models/outputmsk.py +56 -19
  76. cribl_control_plane/models/outputnewrelic.py +49 -21
  77. cribl_control_plane/models/outputnewrelicevents.py +50 -22
  78. cribl_control_plane/models/outputopentelemetry.py +49 -21
  79. cribl_control_plane/models/outputprometheus.py +49 -21
  80. cribl_control_plane/models/outputs3.py +7 -0
  81. cribl_control_plane/models/outputsentinel.py +49 -21
  82. cribl_control_plane/models/outputsentineloneaisiem.py +50 -22
  83. cribl_control_plane/models/outputservicenow.py +49 -21
  84. cribl_control_plane/models/outputsignalfx.py +49 -21
  85. cribl_control_plane/models/outputsns.py +47 -19
  86. cribl_control_plane/models/outputsplunk.py +49 -21
  87. cribl_control_plane/models/outputsplunkhec.py +124 -21
  88. cribl_control_plane/models/outputsplunklb.py +49 -21
  89. cribl_control_plane/models/outputsqs.py +47 -19
  90. cribl_control_plane/models/outputstatsd.py +49 -21
  91. cribl_control_plane/models/outputstatsdext.py +49 -21
  92. cribl_control_plane/models/outputsumologic.py +49 -21
  93. cribl_control_plane/models/outputsyslog.py +129 -99
  94. cribl_control_plane/models/outputtcpjson.py +49 -21
  95. cribl_control_plane/models/outputwavefront.py +49 -21
  96. cribl_control_plane/models/outputwebhook.py +49 -21
  97. cribl_control_plane/models/outputxsiam.py +47 -19
  98. cribl_control_plane/models/runnablejobcollection.py +12 -8
  99. cribl_control_plane/models/runnablejobexecutor.py +12 -8
  100. cribl_control_plane/models/runnablejobscheduledsearch.py +12 -8
  101. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/METADATA +25 -7
  102. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/RECORD +103 -102
  103. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.2.1rc8.dist-info}/WHEEL +0 -0
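Before digging into the hunks, a quick way to confirm which of the two releases is installed locally (standard library only):

from importlib.metadata import version

# Distribution name as published on the registry; expect "0.2.1rc8" after upgrading.
print(version("cribl-control-plane"))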
cribl_control_plane/models/outputhumiohec.py

@@ -118,6 +118,17 @@ class OutputHumioHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta
     QUEUE = "queue"
 
 
+class OutputHumioHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Always On
+    ALWAYS = "always"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+
+
 class OutputHumioHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -136,17 +147,6 @@ class OutputHumioHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
-class OutputHumioHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputHumioHecPqControlsTypedDict(TypedDict):
     pass
 
@@ -212,6 +212,16 @@ class OutputHumioHecTypedDict(TypedDict):
     r"""CrowdStrike Falcon LogScale authentication token"""
     text_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputHumioHecMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -222,8 +232,6 @@ class OutputHumioHecTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputHumioHecQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputHumioHecMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputHumioHecPqControlsTypedDict]
 
 
@@ -358,6 +366,34 @@ class OutputHumioHec(BaseModel):
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputHumioHecMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputHumioHecMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -389,14 +425,6 @@
     ] = OutputHumioHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputHumioHecMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputHumioHecMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputHumioHecPqControls], pydantic.Field(alias="pqControls")
     ] = None
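All five pq_* additions above follow the same shape: a TypedDict key plus an aliased pydantic field with a default. A minimal sketch of the new knobs using the TypedDict form; only the added keys are shown (a real config also carries the Destination's required fields), and the re-export from cribl_control_plane.models is assumed from earlier releases:

from cribl_control_plane.models import OutputHumioHecMode, OutputHumioHecTypedDict

pq_settings: OutputHumioHecTypedDict = {
    "pq_strict_ordering": True,                  # FIFO drain (the default)
    "pq_rate_per_sec": 0,                        # 0 disables drain throttling (default)
    "pq_mode": OutputHumioHecMode.BACKPRESSURE,  # queue only when backpressure is detected
    "pq_max_buffer_size": 42,                    # in-memory events before spilling to disk (default)
    "pq_max_backpressure_sec": 30,               # seconds to wait before engaging the queue (default)
}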
cribl_control_plane/models/outputinfluxdb.py

@@ -130,6 +130,17 @@ class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     OAUTH = "oauth"
 
 
+class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Always On
+    ALWAYS = "always"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+
+
 class OutputInfluxdbCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -148,17 +159,6 @@ class OutputInfluxdbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
-class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputInfluxdbPqControlsTypedDict(TypedDict):
     pass
 
@@ -262,6 +262,16 @@ class OutputInfluxdbTypedDict(TypedDict):
     r"""Bucket to write to."""
     org: NotRequired[str]
     r"""Organization ID for this bucket."""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputInfluxdbMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -272,8 +282,6 @@ class OutputInfluxdbTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputInfluxdbQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputInfluxdbMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputInfluxdbPqControlsTypedDict]
     username: NotRequired[str]
     password: NotRequired[str]
@@ -447,6 +455,34 @@
     org: Optional[str] = None
     r"""Organization ID for this bucket."""
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputInfluxdbMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputInfluxdbMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -478,14 +514,6 @@
     ] = OutputInfluxdbQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputInfluxdbMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
    pq_controls: Annotated[
        Optional[OutputInfluxdbPqControls], pydantic.Field(alias="pqControls")
    ] = None
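The InfluxDB hunks repeat the HumioHec pattern verbatim. One detail that carries across all of these files: each Mode enum is declared with utils.OpenEnumMeta and validated through validate_open_enum(False), which by this generator's usual open-enum convention (an assumption here, not confirmed by the diff) lets unrecognized wire values pass validation instead of raising. Declared members still behave like ordinary Enum members:

from cribl_control_plane.models import OutputInfluxdbMode

assert OutputInfluxdbMode.ERROR.value == "error"  # the pqMode default
assert OutputInfluxdbMode("backpressure") is OutputInfluxdbMode.BACKPRESSURE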
cribl_control_plane/models/outputkafka.py

@@ -48,6 +48,8 @@ class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     SNAPPY = "snappy"
     # LZ4
     LZ4 = "lz4"
+    # ZSTD
+    ZSTD = "zstd"
 
 
 class OutputKafkaAuthTypedDict(TypedDict):
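The compression enum gains a Zstandard member; once on rc8, selecting it is a one-liner (import path assumed from the package's top-level re-exports):

from cribl_control_plane.models import OutputKafkaCompression

compression = OutputKafkaCompression.ZSTD  # new in rc8; serializes as "zstd"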
@@ -212,6 +214,13 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
+class OutputKafkaAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Enter credentials directly, or select a stored secret"""
+
+    MANUAL = "manual"
+    SECRET = "secret"
+
+
 class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     # PLAIN
     PLAIN = "plain"
@@ -223,13 +232,58 @@ class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     KERBEROS = "kerberos"
 
 
+class OutputKafkaOauthParamTypedDict(TypedDict):
+    name: str
+    value: str
+
+
+class OutputKafkaOauthParam(BaseModel):
+    name: str
+
+    value: str
+
+
+class OutputKafkaSaslExtensionTypedDict(TypedDict):
+    name: str
+    value: str
+
+
+class OutputKafkaSaslExtension(BaseModel):
+    name: str
+
+    value: str
+
+
 class OutputKafkaAuthenticationTypedDict(TypedDict):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
 
     disabled: NotRequired[bool]
+    username: NotRequired[str]
+    password: NotRequired[str]
+    auth_type: NotRequired[OutputKafkaAuthenticationMethod]
+    r"""Enter credentials directly, or select a stored secret"""
+    credentials_secret: NotRequired[str]
+    r"""Select or create a secret that references your credentials"""
     mechanism: NotRequired[OutputKafkaSASLMechanism]
+    keytab_location: NotRequired[str]
+    r"""Location of keytab file for authentication principal"""
+    principal: NotRequired[str]
+    r"""Authentication principal, such as `kafka_user@example.com`"""
+    broker_service_class: NotRequired[str]
+    r"""Kerberos service class for Kafka brokers, such as `kafka`"""
     oauth_enabled: NotRequired[bool]
     r"""Enable OAuth authentication"""
+    token_url: NotRequired[str]
+    r"""URL of the token endpoint to use for OAuth authentication"""
+    client_id: NotRequired[str]
+    r"""Client ID to use for OAuth authentication"""
+    oauth_secret_type: NotRequired[str]
+    client_text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    oauth_params: NotRequired[List[OutputKafkaOauthParamTypedDict]]
+    r"""Additional fields to send to the token endpoint, such as scope or audience"""
+    sasl_extensions: NotRequired[List[OutputKafkaSaslExtensionTypedDict]]
+    r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
 
 
 class OutputKafkaAuthentication(BaseModel):
@@ -237,15 +291,71 @@ class OutputKafkaAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
+    username: Optional[str] = None
+
+    password: Optional[str] = None
+
+    auth_type: Annotated[
+        Annotated[
+            Optional[OutputKafkaAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
+    ] = OutputKafkaAuthenticationMethod.MANUAL
+    r"""Enter credentials directly, or select a stored secret"""
+
+    credentials_secret: Annotated[
+        Optional[str], pydantic.Field(alias="credentialsSecret")
+    ] = None
+    r"""Select or create a secret that references your credentials"""
+
     mechanism: Annotated[
         Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
     ] = OutputKafkaSASLMechanism.PLAIN
 
+    keytab_location: Annotated[
+        Optional[str], pydantic.Field(alias="keytabLocation")
+    ] = None
+    r"""Location of keytab file for authentication principal"""
+
+    principal: Optional[str] = None
+    r"""Authentication principal, such as `kafka_user@example.com`"""
+
+    broker_service_class: Annotated[
+        Optional[str], pydantic.Field(alias="brokerServiceClass")
+    ] = None
+    r"""Kerberos service class for Kafka brokers, such as `kafka`"""
+
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
     )
     r"""Enable OAuth authentication"""
 
+    token_url: Annotated[Optional[str], pydantic.Field(alias="tokenUrl")] = None
+    r"""URL of the token endpoint to use for OAuth authentication"""
+
+    client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
+    r"""Client ID to use for OAuth authentication"""
+
+    oauth_secret_type: Annotated[
+        Optional[str], pydantic.Field(alias="oauthSecretType")
+    ] = "secret"
+
+    client_text_secret: Annotated[
+        Optional[str], pydantic.Field(alias="clientTextSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
+
+    oauth_params: Annotated[
+        Optional[List[OutputKafkaOauthParam]], pydantic.Field(alias="oauthParams")
+    ] = None
+    r"""Additional fields to send to the token endpoint, such as scope or audience"""
+
+    sasl_extensions: Annotated[
+        Optional[List[OutputKafkaSaslExtension]], pydantic.Field(alias="saslExtensions")
+    ] = None
+    r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
+
 
 class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
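Taken together, the three hunks above grow OutputKafkaAuthentication from a bare SASL-mechanism switch into a full credentials block: manual or stored-secret credentials, Kerberos keytab/principal settings, and OAuth with token-endpoint parameters and SASL extensions. A sketch of an OAuth-style configuration; the endpoint, client, and secret values are placeholders, and the top-level re-exports from cribl_control_plane.models are assumed:

from cribl_control_plane.models import (
    OutputKafkaAuthentication,
    OutputKafkaOauthParam,
    OutputKafkaSaslExtension,
)

auth = OutputKafkaAuthentication(
    disabled=False,
    oauth_enabled=True,
    token_url="https://idp.example.com/oauth2/token",  # placeholder IdP endpoint
    client_id="my-client",                             # placeholder client ID
    client_text_secret="my-stored-secret",             # placeholder stored-secret name
    oauth_params=[OutputKafkaOauthParam(name="scope", value="kafka")],
    sasl_extensions=[
        OutputKafkaSaslExtension(name="logicalCluster", value="lkc-123")  # placeholder
    ],
)
# Wire form uses the camelCase aliases (tokenUrl, clientId, saslExtensions, ...).
print(auth.model_dump(by_alias=True, exclude_none=True))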
@@ -341,6 +451,17 @@ class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
+class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Always On
+    ALWAYS = "always"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+
+
 class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -359,17 +480,6 @@ class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
-class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputKafkaPqControlsTypedDict(TypedDict):
     pass
 
@@ -433,6 +543,18 @@ class OutputKafkaTypedDict(TypedDict):
     description: NotRequired[str]
     protobuf_library_id: NotRequired[str]
     r"""Select a set of Protobuf definitions for the events you want to send"""
+    protobuf_encoding_id: NotRequired[str]
+    r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputKafkaMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -443,8 +565,6 @@ class OutputKafkaTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputKafkaQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputKafkaMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputKafkaPqControlsTypedDict]
 
 
@@ -568,6 +688,37 @@
     ] = None
     r"""Select a set of Protobuf definitions for the events you want to send"""
 
+    protobuf_encoding_id: Annotated[
+        Optional[str], pydantic.Field(alias="protobufEncodingId")
+    ] = None
+    r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
+
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKafkaMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -599,12 +750,6 @@
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKafkaMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputKafkaPqControls], pydantic.Field(alias="pqControls")
     ] = None
cribl_control_plane/models/outputkinesis.py

@@ -53,6 +53,17 @@ class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
     QUEUE = "queue"
 
 
+class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Always On
+    ALWAYS = "always"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+
+
 class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -71,17 +82,6 @@ class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
-class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputKinesisPqControlsTypedDict(TypedDict):
     pass
 
@@ -143,6 +143,18 @@ class OutputKinesisTypedDict(TypedDict):
     aws_api_key: NotRequired[str]
     aws_secret: NotRequired[str]
     r"""Select or create a stored secret that references your access key and secret key"""
+    max_events_per_flush: NotRequired[float]
+    r"""Maximum number of records to send in a single request"""
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputKinesisMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -153,8 +165,6 @@ class OutputKinesisTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputKinesisQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputKinesisMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputKinesisPqControlsTypedDict]
 
 
@@ -281,6 +291,39 @@
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""
 
+    max_events_per_flush: Annotated[
+        Optional[float], pydantic.Field(alias="maxEventsPerFlush")
+    ] = 500
+    r"""Maximum number of records to send in a single request"""
+
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKinesisMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -312,14 +355,6 @@
     ] = OutputKinesisQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKinesisMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputKinesisPqControls], pydantic.Field(alias="pqControls")
     ] = None
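Beyond the shared persistent-queue fields, the Kinesis Destination also gains max_events_per_flush (default 500). A sketch of tuning it together with the queue mode; the required Destination fields used here (id, type, stream name, region) are placeholders that may not match the real model exactly, and the re-exports from cribl_control_plane.models are assumed:

from cribl_control_plane.models import OutputKinesis, OutputKinesisMode

dest = OutputKinesis(
    id="kinesis-out",                  # placeholder Destination ID
    type="kinesis",                    # placeholder type discriminator
    stream_name="my-stream",           # placeholder Kinesis stream
    region="us-east-1",                # placeholder AWS region
    max_events_per_flush=250,          # halve the 500-record default per request
    pq_mode=OutputKinesisMode.ALWAYS,  # always write events through the queue
)
print(dest.model_dump(by_alias=True, exclude_none=True))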