cribl-control-plane 0.2.1rc4__py3-none-any.whl → 0.2.1rc5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of cribl-control-plane has been flagged as potentially problematic.
Files changed (110)
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/groups_sdk.py +3 -0
  3. cribl_control_plane/mappings.py +1185 -0
  4. cribl_control_plane/models/__init__.py +149 -105
  5. cribl_control_plane/models/createadminproductsmappingsactivatebyproductop.py +52 -0
  6. cribl_control_plane/models/createadminproductsmappingsbyproductop.py +53 -0
  7. cribl_control_plane/models/deleteadminproductsmappingsbyproductandidop.py +51 -0
  8. cribl_control_plane/models/getadminproductsmappingsbyproductandidop.py +51 -0
  9. cribl_control_plane/models/getadminproductsmappingsbyproductop.py +44 -0
  10. cribl_control_plane/models/input.py +14 -14
  11. cribl_control_plane/models/inputappscope.py +16 -20
  12. cribl_control_plane/models/inputconfluentcloud.py +0 -110
  13. cribl_control_plane/models/inputcriblhttp.py +16 -20
  14. cribl_control_plane/models/inputcribllakehttp.py +16 -20
  15. cribl_control_plane/models/inputcribltcp.py +16 -20
  16. cribl_control_plane/models/inputdatadogagent.py +16 -20
  17. cribl_control_plane/models/inputedgeprometheus.py +36 -44
  18. cribl_control_plane/models/inputelastic.py +27 -44
  19. cribl_control_plane/models/inputeventhub.py +0 -118
  20. cribl_control_plane/models/inputfirehose.py +16 -20
  21. cribl_control_plane/models/inputgrafana.py +31 -39
  22. cribl_control_plane/models/inputhttp.py +16 -20
  23. cribl_control_plane/models/inputhttpraw.py +16 -20
  24. cribl_control_plane/models/inputkafka.py +0 -108
  25. cribl_control_plane/models/inputloki.py +16 -20
  26. cribl_control_plane/models/inputmetrics.py +16 -20
  27. cribl_control_plane/models/inputmodeldriventelemetry.py +16 -20
  28. cribl_control_plane/models/inputopentelemetry.py +15 -19
  29. cribl_control_plane/models/inputprometheus.py +36 -44
  30. cribl_control_plane/models/inputprometheusrw.py +16 -20
  31. cribl_control_plane/models/inputsplunk.py +16 -20
  32. cribl_control_plane/models/inputsplunkhec.py +15 -19
  33. cribl_control_plane/models/inputsyslog.py +31 -39
  34. cribl_control_plane/models/inputsystemmetrics.py +10 -20
  35. cribl_control_plane/models/inputtcp.py +16 -30
  36. cribl_control_plane/models/inputtcpjson.py +16 -20
  37. cribl_control_plane/models/inputwindowsmetrics.py +10 -20
  38. cribl_control_plane/models/inputwineventlogs.py +0 -14
  39. cribl_control_plane/models/inputwizwebhook.py +16 -20
  40. cribl_control_plane/models/inputzscalerhec.py +15 -19
  41. cribl_control_plane/models/mappingruleset.py +53 -0
  42. cribl_control_plane/models/mappingrulesetevalmappingfunction.py +71 -0
  43. cribl_control_plane/models/mappingrulesetgenericmappingfunction.py +29 -0
  44. cribl_control_plane/models/output.py +22 -22
  45. cribl_control_plane/models/outputazureblob.py +0 -7
  46. cribl_control_plane/models/outputazuredataexplorer.py +93 -283
  47. cribl_control_plane/models/outputazureeventhub.py +21 -169
  48. cribl_control_plane/models/outputazurelogs.py +21 -49
  49. cribl_control_plane/models/outputchronicle.py +21 -49
  50. cribl_control_plane/models/outputclickhouse.py +21 -49
  51. cribl_control_plane/models/outputcloudwatch.py +21 -49
  52. cribl_control_plane/models/outputconfluentcloud.py +22 -167
  53. cribl_control_plane/models/outputcriblhttp.py +21 -49
  54. cribl_control_plane/models/outputcribltcp.py +21 -49
  55. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +22 -50
  56. cribl_control_plane/models/outputdatabricks.py +0 -7
  57. cribl_control_plane/models/outputdatadog.py +21 -49
  58. cribl_control_plane/models/outputdataset.py +21 -49
  59. cribl_control_plane/models/outputdls3.py +0 -7
  60. cribl_control_plane/models/outputdynatracehttp.py +21 -49
  61. cribl_control_plane/models/outputdynatraceotlp.py +21 -49
  62. cribl_control_plane/models/outputelastic.py +21 -74
  63. cribl_control_plane/models/outputelasticcloud.py +21 -74
  64. cribl_control_plane/models/outputfilesystem.py +0 -7
  65. cribl_control_plane/models/outputgooglechronicle.py +22 -65
  66. cribl_control_plane/models/outputgooglecloudlogging.py +22 -50
  67. cribl_control_plane/models/outputgooglecloudstorage.py +0 -7
  68. cribl_control_plane/models/outputgooglepubsub.py +21 -49
  69. cribl_control_plane/models/outputgrafanacloud.py +42 -98
  70. cribl_control_plane/models/outputgraphite.py +21 -49
  71. cribl_control_plane/models/outputhoneycomb.py +21 -49
  72. cribl_control_plane/models/outputhumiohec.py +21 -49
  73. cribl_control_plane/models/outputinfluxdb.py +21 -49
  74. cribl_control_plane/models/outputkafka.py +19 -162
  75. cribl_control_plane/models/outputkinesis.py +21 -56
  76. cribl_control_plane/models/outputloki.py +19 -47
  77. cribl_control_plane/models/outputminio.py +0 -7
  78. cribl_control_plane/models/outputmsk.py +19 -54
  79. cribl_control_plane/models/outputnewrelic.py +21 -49
  80. cribl_control_plane/models/outputnewrelicevents.py +22 -50
  81. cribl_control_plane/models/outputopentelemetry.py +21 -49
  82. cribl_control_plane/models/outputprometheus.py +21 -49
  83. cribl_control_plane/models/outputs3.py +0 -7
  84. cribl_control_plane/models/outputsentinel.py +21 -49
  85. cribl_control_plane/models/outputsentineloneaisiem.py +22 -50
  86. cribl_control_plane/models/outputservicenow.py +21 -49
  87. cribl_control_plane/models/outputsignalfx.py +21 -49
  88. cribl_control_plane/models/outputsns.py +19 -47
  89. cribl_control_plane/models/outputsplunk.py +21 -49
  90. cribl_control_plane/models/outputsplunkhec.py +21 -49
  91. cribl_control_plane/models/outputsplunklb.py +21 -49
  92. cribl_control_plane/models/outputsqs.py +19 -47
  93. cribl_control_plane/models/outputstatsd.py +21 -49
  94. cribl_control_plane/models/outputstatsdext.py +21 -49
  95. cribl_control_plane/models/outputsumologic.py +21 -49
  96. cribl_control_plane/models/outputsyslog.py +99 -129
  97. cribl_control_plane/models/outputtcpjson.py +21 -49
  98. cribl_control_plane/models/outputwavefront.py +21 -49
  99. cribl_control_plane/models/outputwebhook.py +21 -49
  100. cribl_control_plane/models/outputxsiam.py +19 -47
  101. cribl_control_plane/models/pipeline.py +4 -4
  102. cribl_control_plane/models/rulesetid.py +13 -0
  103. cribl_control_plane/models/runnablejobcollection.py +8 -12
  104. cribl_control_plane/models/runnablejobexecutor.py +8 -12
  105. cribl_control_plane/models/runnablejobscheduledsearch.py +8 -12
  106. cribl_control_plane/models/updateadminproductsmappingsbyproductandidop.py +63 -0
  107. cribl_control_plane/pipelines.py +8 -8
  108. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/METADATA +11 -2
  109. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/RECORD +110 -99
  110. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputhumiohec.py

@@ -118,17 +118,6 @@ class OutputHumioHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta
     QUEUE = "queue"
 
 
-class OutputHumioHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputHumioHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -147,6 +136,17 @@ class OutputHumioHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
+class OutputHumioHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputHumioHecPqControlsTypedDict(TypedDict):
     pass
 
@@ -212,16 +212,6 @@ class OutputHumioHecTypedDict(TypedDict):
     r"""CrowdStrike Falcon LogScale authentication token"""
     text_secret: NotRequired[str]
     r"""Select or create a stored text secret"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputHumioHecMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -232,6 +222,8 @@ class OutputHumioHecTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputHumioHecQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputHumioHecMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputHumioHecPqControlsTypedDict]
 
 
@@ -366,34 +358,6 @@ class OutputHumioHec(BaseModel):
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputHumioHecMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputHumioHecMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
    ] = "1 MB"
@@ -425,6 +389,14 @@ class OutputHumioHec(BaseModel):
     ] = OutputHumioHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputHumioHecMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputHumioHecMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputHumioHecPqControls], pydantic.Field(alias="pqControls")
     ] = None
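
The pqMode docstring above names three queueing policies: Error, Backpressure, and Always On. Note that the removed enum had its # Backpressure and # Always On comments attached to the wrong members; the re-added enum corrects the pairing. A minimal sketch of the documented semantics follows; the function and flag names are illustrative, not the SDK's internals:

def should_engage_pq(mode: str, destination_unavailable: bool,
                     backpressure_detected: bool) -> bool:
    """Decide whether an event is written to the persistent queue (PQ)."""
    if mode == "always":
        # Always On: PQ always writes events to the filesystem.
        return True
    if mode == "error":
        # Error: queue only while the Destination is unavailable.
        return destination_unavailable
    if mode == "backpressure":
        # Backpressure: queue while backpressure is detected from the Destination.
        return backpressure_detected
    raise ValueError(f"unknown pqMode: {mode!r}")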
cribl_control_plane/models/outputinfluxdb.py

@@ -130,17 +130,6 @@ class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     OAUTH = "oauth"
 
 
-class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputInfluxdbCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -159,6 +148,17 @@ class OutputInfluxdbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
+class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputInfluxdbPqControlsTypedDict(TypedDict):
     pass
 
@@ -262,16 +262,6 @@ class OutputInfluxdbTypedDict(TypedDict):
     r"""Bucket to write to."""
     org: NotRequired[str]
     r"""Organization ID for this bucket."""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputInfluxdbMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -282,6 +272,8 @@ class OutputInfluxdbTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputInfluxdbQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputInfluxdbMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputInfluxdbPqControlsTypedDict]
     username: NotRequired[str]
     password: NotRequired[str]
@@ -455,34 +447,6 @@ class OutputInfluxdb(BaseModel):
     org: Optional[str] = None
     r"""Organization ID for this bucket."""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputInfluxdbMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -514,6 +478,14 @@ class OutputInfluxdb(BaseModel):
     ] = OutputInfluxdbQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputInfluxdbMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputInfluxdbMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputInfluxdbPqControls], pydantic.Field(alias="pqControls")
     ] = None
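
Every enum field in these models is wrapped in PlainValidator(validate_open_enum(False)). validate_open_enum lives in the SDK's utils and is not shown in this diff; the following is a self-contained sketch of the pattern, assuming the usual open-enum behavior in which unknown strings pass through instead of failing validation:

from enum import Enum
from typing import Union

from pydantic import BaseModel, PlainValidator
from typing_extensions import Annotated


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


def open_enum(value: object) -> Union[Compression, str]:
    # Known values become enum members; unknown strings pass through,
    # so a newer server can introduce values without breaking older clients.
    try:
        return Compression(value)
    except ValueError:
        return str(value)


class PqSettings(BaseModel):
    pq_compress: Annotated[
        Union[Compression, str], PlainValidator(open_enum)
    ] = Compression.NONE


assert PqSettings(pq_compress="gzip").pq_compress is Compression.GZIP
assert PqSettings(pq_compress="zstd").pq_compress == "zstd"  # unknown value preserved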
cribl_control_plane/models/outputkafka.py

@@ -212,13 +212,6 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputKafkaAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Enter credentials directly, or select a stored secret"""
-
-    MANUAL = "manual"
-    SECRET = "secret"
-
-
 class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     # PLAIN
     PLAIN = "plain"
@@ -230,58 +223,13 @@ class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     KERBEROS = "kerberos"
 
 
-class OutputKafkaOauthParamTypedDict(TypedDict):
-    name: str
-    value: str
-
-
-class OutputKafkaOauthParam(BaseModel):
-    name: str
-
-    value: str
-
-
-class OutputKafkaSaslExtensionTypedDict(TypedDict):
-    name: str
-    value: str
-
-
-class OutputKafkaSaslExtension(BaseModel):
-    name: str
-
-    value: str
-
-
 class OutputKafkaAuthenticationTypedDict(TypedDict):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
 
     disabled: NotRequired[bool]
-    username: NotRequired[str]
-    password: NotRequired[str]
-    auth_type: NotRequired[OutputKafkaAuthenticationMethod]
-    r"""Enter credentials directly, or select a stored secret"""
-    credentials_secret: NotRequired[str]
-    r"""Select or create a secret that references your credentials"""
     mechanism: NotRequired[OutputKafkaSASLMechanism]
-    keytab_location: NotRequired[str]
-    r"""Location of keytab file for authentication principal"""
-    principal: NotRequired[str]
-    r"""Authentication principal, such as `kafka_user@example.com`"""
-    broker_service_class: NotRequired[str]
-    r"""Kerberos service class for Kafka brokers, such as `kafka`"""
     oauth_enabled: NotRequired[bool]
     r"""Enable OAuth authentication"""
-    token_url: NotRequired[str]
-    r"""URL of the token endpoint to use for OAuth authentication"""
-    client_id: NotRequired[str]
-    r"""Client ID to use for OAuth authentication"""
-    oauth_secret_type: NotRequired[str]
-    client_text_secret: NotRequired[str]
-    r"""Select or create a stored text secret"""
-    oauth_params: NotRequired[List[OutputKafkaOauthParamTypedDict]]
-    r"""Additional fields to send to the token endpoint, such as scope or audience"""
-    sasl_extensions: NotRequired[List[OutputKafkaSaslExtensionTypedDict]]
-    r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
 
 
 class OutputKafkaAuthentication(BaseModel):
@@ -289,71 +237,15 @@ class OutputKafkaAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    username: Optional[str] = None
-
-    password: Optional[str] = None
-
-    auth_type: Annotated[
-        Annotated[
-            Optional[OutputKafkaAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
-    ] = OutputKafkaAuthenticationMethod.MANUAL
-    r"""Enter credentials directly, or select a stored secret"""
-
-    credentials_secret: Annotated[
-        Optional[str], pydantic.Field(alias="credentialsSecret")
-    ] = None
-    r"""Select or create a secret that references your credentials"""
-
     mechanism: Annotated[
         Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
     ] = OutputKafkaSASLMechanism.PLAIN
 
-    keytab_location: Annotated[
-        Optional[str], pydantic.Field(alias="keytabLocation")
-    ] = None
-    r"""Location of keytab file for authentication principal"""
-
-    principal: Optional[str] = None
-    r"""Authentication principal, such as `kafka_user@example.com`"""
-
-    broker_service_class: Annotated[
-        Optional[str], pydantic.Field(alias="brokerServiceClass")
-    ] = None
-    r"""Kerberos service class for Kafka brokers, such as `kafka`"""
-
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
     )
     r"""Enable OAuth authentication"""
 
-    token_url: Annotated[Optional[str], pydantic.Field(alias="tokenUrl")] = None
-    r"""URL of the token endpoint to use for OAuth authentication"""
-
-    client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
-    r"""Client ID to use for OAuth authentication"""
-
-    oauth_secret_type: Annotated[
-        Optional[str], pydantic.Field(alias="oauthSecretType")
-    ] = "secret"
-
-    client_text_secret: Annotated[
-        Optional[str], pydantic.Field(alias="clientTextSecret")
-    ] = None
-    r"""Select or create a stored text secret"""
-
-    oauth_params: Annotated[
-        Optional[List[OutputKafkaOauthParam]], pydantic.Field(alias="oauthParams")
-    ] = None
-    r"""Additional fields to send to the token endpoint, such as scope or audience"""
-
-    sasl_extensions: Annotated[
-        Optional[List[OutputKafkaSaslExtension]], pydantic.Field(alias="saslExtensions")
-    ] = None
-    r"""Additional SASL extension fields, such as Confluent's logicalCluster or identityPoolId"""
-
 
 class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
@@ -449,17 +341,6 @@ class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -478,6 +359,17 @@ class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
+class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputKafkaPqControlsTypedDict(TypedDict):
     pass
 
@@ -541,18 +433,6 @@ class OutputKafkaTypedDict(TypedDict):
     description: NotRequired[str]
     protobuf_library_id: NotRequired[str]
     r"""Select a set of Protobuf definitions for the events you want to send"""
-    protobuf_encoding_id: NotRequired[str]
-    r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputKafkaMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -563,6 +443,8 @@ class OutputKafkaTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputKafkaQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputKafkaMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputKafkaPqControlsTypedDict]
 
 
@@ -686,37 +568,6 @@ class OutputKafka(BaseModel):
     ] = None
     r"""Select a set of Protobuf definitions for the events you want to send"""
 
-    protobuf_encoding_id: Annotated[
-        Optional[str], pydantic.Field(alias="protobufEncodingId")
-    ] = None
-    r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
-
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKafkaMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -748,6 +599,12 @@ class OutputKafka(BaseModel):
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKafkaMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputKafkaPqControls], pydantic.Field(alias="pqControls")
     ] = None
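
After this change, OutputKafkaAuthentication keeps only disabled, mechanism, and oauth_enabled; the inline credential, Kerberos, and OAuth detail fields are gone. A usage sketch against the new shape (import path inferred from the file list above; validating with the camelCase aliases shown in the diff avoids depending on the model's populate-by-name configuration):

from cribl_control_plane.models.outputkafka import (
    OutputKafkaAuthentication,
    OutputKafkaSASLMechanism,
)

# Field and alias names are exactly those remaining in the diff above.
auth = OutputKafkaAuthentication.model_validate(
    {"disabled": False, "mechanism": "plain", "oauthEnabled": True}
)
assert auth.mechanism == OutputKafkaSASLMechanism.PLAIN
assert auth.oauth_enabled is True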
cribl_control_plane/models/outputkinesis.py

@@ -53,17 +53,6 @@ class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
     QUEUE = "queue"
 
 
-class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -82,6 +71,17 @@ class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
+class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
 class OutputKinesisPqControlsTypedDict(TypedDict):
     pass
 
@@ -143,18 +143,6 @@ class OutputKinesisTypedDict(TypedDict):
     aws_api_key: NotRequired[str]
     aws_secret: NotRequired[str]
     r"""Select or create a stored secret that references your access key and secret key"""
-    max_events_per_flush: NotRequired[float]
-    r"""Maximum number of records to send in a single request"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputKinesisMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -165,6 +153,8 @@ class OutputKinesisTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputKinesisQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputKinesisMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputKinesisPqControlsTypedDict]
 
 
@@ -291,39 +281,6 @@ class OutputKinesis(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""
 
-    max_events_per_flush: Annotated[
-        Optional[float], pydantic.Field(alias="maxEventsPerFlush")
-    ] = 500
-    r"""Maximum number of records to send in a single request"""
-
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKinesisMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -355,6 +312,14 @@ class OutputKinesis(BaseModel):
     ] = OutputKinesisQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKinesisMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputKinesisPqControls], pydantic.Field(alias="pqControls")
     ] = None
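
The pqOnBackpressure docstring repeated above distinguishes 'Block' from 'Drop new data' when the queue itself reaches capacity or disk runs low. A small sketch of that contract, again derived from the docstring rather than the SDK's queue implementation:

from collections import deque


def offer(queue: deque, event: object, behavior: str, capacity: int) -> bool:
    """Return True if the event entered the queue."""
    if len(queue) < capacity:
        queue.append(event)
        return True
    if behavior == "drop":
        # Drop new data: discard the incoming event; the PQ's contents are unchanged.
        return False
    if behavior == "block":
        # Block: same behavior as non-PQ blocking; stop accepting from upstream.
        raise RuntimeError("queue full: apply backpressure upstream")
    raise ValueError(f"unknown pqOnBackpressure: {behavior!r}")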