cribl-control-plane 0.2.1rc11 → 0.4.0a6 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (128)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/acl.py +4 -4
  3. cribl_control_plane/branches.py +4 -4
  4. cribl_control_plane/commits.py +28 -28
  5. cribl_control_plane/commits_files.py +8 -8
  6. cribl_control_plane/configs_versions.py +8 -4
  7. cribl_control_plane/destinations.py +20 -20
  8. cribl_control_plane/destinations_pq.py +8 -8
  9. cribl_control_plane/groups_sdk.py +48 -24
  10. cribl_control_plane/hectokens.py +16 -8
  11. cribl_control_plane/lakedatasets.py +40 -20
  12. cribl_control_plane/models/__init__.py +568 -214
  13. cribl_control_plane/models/createconfiggroupbyproductop.py +20 -1
  14. cribl_control_plane/models/createcribllakedatasetbylakeidop.py +19 -1
  15. cribl_control_plane/models/createinputhectokenbyidop.py +20 -1
  16. cribl_control_plane/models/{countedlistgitdiffresult.py → createinputop.py} +9 -5
  17. cribl_control_plane/models/{countedlistgitshowresult.py → createoutputop.py} +9 -5
  18. cribl_control_plane/models/createoutputtestbyidop.py +20 -1
  19. cribl_control_plane/models/{countedlistpackinstallinfo.py → createpacksop.py} +6 -2
  20. cribl_control_plane/models/createpipelineop.py +24 -0
  21. cribl_control_plane/models/createroutesappendbyidop.py +20 -2
  22. cribl_control_plane/models/createversioncommitop.py +19 -1
  23. cribl_control_plane/models/{countedliststring.py → createversionpushop.py} +6 -2
  24. cribl_control_plane/models/createversionrevertop.py +19 -1
  25. cribl_control_plane/models/createversionundoop.py +18 -1
  26. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +20 -1
  27. cribl_control_plane/models/deletecribllakedatasetbylakeidandidop.py +20 -1
  28. cribl_control_plane/models/deleteinputbyidop.py +20 -1
  29. cribl_control_plane/models/deleteoutputbyidop.py +20 -1
  30. cribl_control_plane/models/deleteoutputpqbyidop.py +19 -1
  31. cribl_control_plane/models/deletepacksbyidop.py +20 -1
  32. cribl_control_plane/models/deletepipelinebyidop.py +20 -1
  33. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +19 -1
  34. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +19 -1
  35. cribl_control_plane/models/getconfiggroupbyproductandidop.py +19 -1
  36. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +19 -1
  37. cribl_control_plane/models/getcribllakedatasetbylakeidandidop.py +20 -1
  38. cribl_control_plane/models/getcribllakedatasetbylakeidop.py +20 -1
  39. cribl_control_plane/models/getinputbyidop.py +20 -1
  40. cribl_control_plane/models/getmasterworkerentryop.py +18 -1
  41. cribl_control_plane/models/getoutputbyidop.py +20 -1
  42. cribl_control_plane/models/getoutputpqbyidop.py +20 -1
  43. cribl_control_plane/models/getoutputsamplesbyidop.py +20 -1
  44. cribl_control_plane/models/getpacksbyidop.py +20 -1
  45. cribl_control_plane/models/getpacksop.py +19 -1
  46. cribl_control_plane/models/getpipelinebyidop.py +20 -1
  47. cribl_control_plane/models/getroutesbyidop.py +20 -1
  48. cribl_control_plane/models/getsummaryop.py +19 -1
  49. cribl_control_plane/models/{countedlistbranchinfo.py → getversionbranchop.py} +6 -2
  50. cribl_control_plane/models/getversioncountop.py +19 -1
  51. cribl_control_plane/models/getversiondiffop.py +19 -1
  52. cribl_control_plane/models/getversionfilesop.py +19 -1
  53. cribl_control_plane/models/{countedlistgitinfo.py → getversioninfoop.py} +6 -2
  54. cribl_control_plane/models/getversionop.py +19 -1
  55. cribl_control_plane/models/getversionshowop.py +19 -1
  56. cribl_control_plane/models/getversionstatusop.py +19 -1
  57. cribl_control_plane/models/input.py +18 -15
  58. cribl_control_plane/models/inputcloudflarehec.py +513 -0
  59. cribl_control_plane/models/inputfile.py +7 -0
  60. cribl_control_plane/models/listconfiggroupbyproductop.py +19 -1
  61. cribl_control_plane/models/{countedlistinput.py → listinputop.py} +6 -2
  62. cribl_control_plane/models/listmasterworkerentryop.py +19 -1
  63. cribl_control_plane/models/{countedlistoutput.py → listoutputop.py} +6 -2
  64. cribl_control_plane/models/{countedlistpipeline.py → listpipelineop.py} +6 -2
  65. cribl_control_plane/models/{countedlistroutes.py → listroutesop.py} +6 -2
  66. cribl_control_plane/models/output.py +23 -17
  67. cribl_control_plane/models/outputazureblob.py +14 -0
  68. cribl_control_plane/models/outputazuredataexplorer.py +7 -0
  69. cribl_control_plane/models/outputchronicle.py +5 -0
  70. cribl_control_plane/models/outputcloudflarer2.py +632 -0
  71. cribl_control_plane/models/outputcribllake.py +14 -0
  72. cribl_control_plane/models/outputdatabricks.py +19 -0
  73. cribl_control_plane/models/outputdls3.py +14 -0
  74. cribl_control_plane/models/outputexabeam.py +7 -0
  75. cribl_control_plane/models/outputfilesystem.py +14 -0
  76. cribl_control_plane/models/outputgooglecloudstorage.py +14 -0
  77. cribl_control_plane/models/outputmicrosoftfabric.py +540 -0
  78. cribl_control_plane/models/outputminio.py +19 -4
  79. cribl_control_plane/models/outputs3.py +14 -0
  80. cribl_control_plane/models/outputsecuritylake.py +14 -0
  81. cribl_control_plane/models/outputsyslog.py +7 -0
  82. cribl_control_plane/models/runnablejobcollection.py +0 -8
  83. cribl_control_plane/models/runnablejobexecutor.py +0 -4
  84. cribl_control_plane/models/runnablejobscheduledsearch.py +0 -4
  85. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +19 -1
  86. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +20 -1
  87. cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +20 -1
  88. cribl_control_plane/models/updateinputbyidop.py +19 -1
  89. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +20 -1
  90. cribl_control_plane/models/updateoutputbyidop.py +19 -1
  91. cribl_control_plane/models/updatepacksbyidop.py +20 -1
  92. cribl_control_plane/models/updatepipelinebyidop.py +19 -1
  93. cribl_control_plane/models/updateroutesbyidop.py +19 -1
  94. cribl_control_plane/nodes.py +12 -8
  95. cribl_control_plane/packs.py +20 -20
  96. cribl_control_plane/pipelines.py +20 -20
  97. cribl_control_plane/routes_sdk.py +20 -16
  98. cribl_control_plane/samples.py +8 -8
  99. cribl_control_plane/sources.py +20 -20
  100. cribl_control_plane/statuses.py +4 -4
  101. cribl_control_plane/summaries.py +4 -8
  102. cribl_control_plane/teams.py +4 -4
  103. cribl_control_plane/utils/retries.py +69 -5
  104. cribl_control_plane/utils/unmarshal_json_response.py +15 -1
  105. cribl_control_plane/versions_configs.py +4 -4
  106. {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a6.dist-info}/METADATA +6 -14
  107. {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a6.dist-info}/RECORD +109 -123
  108. cribl_control_plane-0.4.0a6.dist-info/licenses/LICENSE +201 -0
  109. cribl_control_plane/models/countedlistconfiggroup.py +0 -20
  110. cribl_control_plane/models/countedlistcribllakedataset.py +0 -20
  111. cribl_control_plane/models/countedlistdistributedsummary.py +0 -20
  112. cribl_control_plane/models/countedlistgitcommitsummary.py +0 -20
  113. cribl_control_plane/models/countedlistgitcountresult.py +0 -20
  114. cribl_control_plane/models/countedlistgitfilesresponse.py +0 -20
  115. cribl_control_plane/models/countedlistgitlogresult.py +0 -20
  116. cribl_control_plane/models/countedlistgitrevertresult.py +0 -20
  117. cribl_control_plane/models/countedlistgitstatusresult.py +0 -20
  118. cribl_control_plane/models/countedlistinputsplunkhec.py +0 -20
  119. cribl_control_plane/models/countedlistjobinfo.py +0 -20
  120. cribl_control_plane/models/countedlistmasterworkerentry.py +0 -20
  121. cribl_control_plane/models/countedlistnumber.py +0 -19
  122. cribl_control_plane/models/countedlistobject.py +0 -19
  123. cribl_control_plane/models/countedlistoutputsamplesresponse.py +0 -20
  124. cribl_control_plane/models/countedlistoutputtestresponse.py +0 -20
  125. cribl_control_plane/models/countedlistpackinfo.py +0 -20
  126. cribl_control_plane/models/countedlistteamaccesscontrollist.py +0 -20
  127. cribl_control_plane/models/countedlistuseraccesscontrollist.py +0 -20
  128. {cribl_control_plane-0.2.1rc11.dist-info → cribl_control_plane-0.4.0a6.dist-info}/WHEEL +0 -0

cribl_control_plane/models/outputmicrosoftfabric.py
@@ -0,0 +1,540 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import models, utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic import field_serializer
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputMicrosoftFabricType(str, Enum):
+    MICROSOFT_FABRIC = "microsoft_fabric"
+
+
+class OutputMicrosoftFabricAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Control the number of required acknowledgments"""
+
+    # Leader
+    ONE = 1
+    # None
+    ZERO = 0
+    # All
+    MINUS_1 = -1
+
+
+class OutputMicrosoftFabricRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
+
+    # JSON
+    JSON = "json"
+    # Field _raw
+    RAW = "raw"
+
+
+class OutputMicrosoftFabricSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+    # PLAIN
+    PLAIN = "plain"
+    # OAUTHBEARER
+    OAUTHBEARER = "oauthbearer"
+
+
+class OutputMicrosoftFabricAuthenticationMethod(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
+    SECRET = "secret"
+    CERTIFICATE = "certificate"
+
+
+class OutputMicrosoftFabricMicrosoftEntraIDAuthenticationEndpoint(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
+    r"""Endpoint used to acquire authentication tokens from Azure"""
+
+    HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
+    HTTPS_LOGIN_MICROSOFTONLINE_US = "https://login.microsoftonline.us"
+    HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"
+
+
+class OutputMicrosoftFabricAuthenticationTypedDict(TypedDict):
+    r"""Authentication parameters to use when connecting to bootstrap server. Using TLS is highly recommended."""
+
+    disabled: NotRequired[bool]
+    mechanism: NotRequired[OutputMicrosoftFabricSASLMechanism]
+    username: NotRequired[str]
+    r"""The username for authentication. This should always be $ConnectionString."""
+    text_secret: NotRequired[str]
+    r"""Select or create a stored text secret corresponding to the SASL JASS Password Primary or Password Secondary"""
+    client_secret_auth_type: NotRequired[OutputMicrosoftFabricAuthenticationMethod]
+    client_text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    certificate_name: NotRequired[str]
+    r"""Select or create a stored certificate"""
+    cert_path: NotRequired[str]
+    priv_key_path: NotRequired[str]
+    passphrase: NotRequired[str]
+    oauth_endpoint: NotRequired[
+        OutputMicrosoftFabricMicrosoftEntraIDAuthenticationEndpoint
+    ]
+    r"""Endpoint used to acquire authentication tokens from Azure"""
+    client_id: NotRequired[str]
+    r"""client_id to pass in the OAuth request parameter"""
+    tenant_id: NotRequired[str]
+    r"""Directory ID (tenant identifier) in Azure Active Directory"""
+    scope: NotRequired[str]
+    r"""Scope to pass in the OAuth request parameter"""
+
+
+class OutputMicrosoftFabricAuthentication(BaseModel):
+    r"""Authentication parameters to use when connecting to bootstrap server. Using TLS is highly recommended."""
+
+    disabled: Optional[bool] = False
+
+    mechanism: Annotated[
+        Optional[OutputMicrosoftFabricSASLMechanism],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputMicrosoftFabricSASLMechanism.PLAIN
+
+    username: Optional[str] = "$ConnectionString"
+    r"""The username for authentication. This should always be $ConnectionString."""
+
+    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
+    r"""Select or create a stored text secret corresponding to the SASL JASS Password Primary or Password Secondary"""
+
+    client_secret_auth_type: Annotated[
+        Annotated[
+            Optional[OutputMicrosoftFabricAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="clientSecretAuthType"),
+    ] = OutputMicrosoftFabricAuthenticationMethod.SECRET
+
+    client_text_secret: Annotated[
+        Optional[str], pydantic.Field(alias="clientTextSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
+
+    certificate_name: Annotated[
+        Optional[str], pydantic.Field(alias="certificateName")
+    ] = None
+    r"""Select or create a stored certificate"""
+
+    cert_path: Annotated[Optional[str], pydantic.Field(alias="certPath")] = None
+
+    priv_key_path: Annotated[Optional[str], pydantic.Field(alias="privKeyPath")] = None
+
+    passphrase: Optional[str] = None
+
+    oauth_endpoint: Annotated[
+        Annotated[
+            Optional[OutputMicrosoftFabricMicrosoftEntraIDAuthenticationEndpoint],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="oauthEndpoint"),
+    ] = OutputMicrosoftFabricMicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
+    r"""Endpoint used to acquire authentication tokens from Azure"""
+
+    client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
+    r"""client_id to pass in the OAuth request parameter"""
+
+    tenant_id: Annotated[Optional[str], pydantic.Field(alias="tenantId")] = None
+    r"""Directory ID (tenant identifier) in Azure Active Directory"""
+
+    scope: Optional[str] = None
+    r"""Scope to pass in the OAuth request parameter"""
+
+    @field_serializer("mechanism")
+    def serialize_mechanism(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputMicrosoftFabricSASLMechanism(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("client_secret_auth_type")
+    def serialize_client_secret_auth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputMicrosoftFabricAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("oauth_endpoint")
+    def serialize_oauth_endpoint(self, value):
+        if isinstance(value, str):
+            try:
+                return (
+                    models.OutputMicrosoftFabricMicrosoftEntraIDAuthenticationEndpoint(
+                        value
+                    )
+                )
+            except ValueError:
+                return value
+        return value
+
+
+class OutputMicrosoftFabricTLSSettingsClientSideTypedDict(TypedDict):
+    disabled: NotRequired[bool]
+    reject_unauthorized: NotRequired[bool]
+    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""
+
+
+class OutputMicrosoftFabricTLSSettingsClientSide(BaseModel):
+    disabled: Optional[bool] = False
+
+    reject_unauthorized: Annotated[
+        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+    ] = True
+    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""
+
+
+class OutputMicrosoftFabricBackpressureBehavior(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    # Block
+    BLOCK = "block"
+    # Drop
+    DROP = "drop"
+    # Persistent Queue
+    QUEUE = "queue"
+
+
+class OutputMicrosoftFabricMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
+class OutputMicrosoftFabricCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    # None
+    NONE = "none"
+    # Gzip
+    GZIP = "gzip"
+
+
+class OutputMicrosoftFabricQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    # Block
+    BLOCK = "block"
+    # Drop new data
+    DROP = "drop"
+
+
+class OutputMicrosoftFabricPqControlsTypedDict(TypedDict):
+    pass
+
+
+class OutputMicrosoftFabricPqControls(BaseModel):
+    pass
+
+
+class OutputMicrosoftFabricTypedDict(TypedDict):
+    type: OutputMicrosoftFabricType
+    topic: str
+    r"""Topic name from Fabric Eventstream's endpoint"""
+    bootstrap_server: str
+    r"""Bootstrap server from Fabric Eventstream's endpoint"""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    ack: NotRequired[OutputMicrosoftFabricAcknowledgments]
+    r"""Control the number of required acknowledgments"""
+    format_: NotRequired[OutputMicrosoftFabricRecordDataFormat]
+    r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
+    max_record_size_kb: NotRequired[float]
+    r"""Maximum size of each record batch before compression. Setting should be < message.max.bytes settings in Event Hubs brokers."""
+    flush_event_count: NotRequired[float]
+    r"""Maximum number of events in a batch before forcing a flush"""
+    flush_period_sec: NotRequired[float]
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""
+    connection_timeout: NotRequired[float]
+    r"""Maximum time to wait for a connection to complete successfully"""
+    request_timeout: NotRequired[float]
+    r"""Maximum time to wait for Kafka to respond to a request"""
+    max_retries: NotRequired[float]
+    r"""If messages are failing, you can set the maximum number of retries as high as 100 to prevent loss of data"""
+    max_back_off: NotRequired[float]
+    r"""The maximum wait time for a retry, in milliseconds. Default (and minimum) is 30,000 ms (30 seconds); maximum is 180,000 ms (180 seconds)."""
+    initial_backoff: NotRequired[float]
+    r"""Initial value used to calculate the retry, in milliseconds. Maximum is 600,000 ms (10 minutes)."""
+    backoff_rate: NotRequired[float]
+    r"""Set the backoff multiplier (2-20) to control the retry frequency for failed messages. For faster retries, use a lower multiplier. For slower retries with more delay between attempts, use a higher multiplier. The multiplier is used in an exponential backoff formula; see the Kafka [documentation](https://kafka.js.org/docs/retry-detailed) for details."""
+    authentication_timeout: NotRequired[float]
+    r"""Maximum time to wait for Kafka to respond to an authentication request"""
+    reauthentication_threshold: NotRequired[float]
+    r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""
+    sasl: NotRequired[OutputMicrosoftFabricAuthenticationTypedDict]
+    r"""Authentication parameters to use when connecting to bootstrap server. Using TLS is highly recommended."""
+    tls: NotRequired[OutputMicrosoftFabricTLSSettingsClientSideTypedDict]
+    on_backpressure: NotRequired[OutputMicrosoftFabricBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    description: NotRequired[str]
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputMicrosoftFabricMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+    pq_max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+    pq_max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    pq_path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+    pq_compress: NotRequired[OutputMicrosoftFabricCompression]
+    r"""Codec to use to compress the persisted data"""
+    pq_on_backpressure: NotRequired[OutputMicrosoftFabricQueueFullBehavior]
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_controls: NotRequired[OutputMicrosoftFabricPqControlsTypedDict]
+
+
+class OutputMicrosoftFabric(BaseModel):
+    type: OutputMicrosoftFabricType
+
+    topic: str
+    r"""Topic name from Fabric Eventstream's endpoint"""
+
+    bootstrap_server: str
+    r"""Bootstrap server from Fabric Eventstream's endpoint"""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    ack: Annotated[
+        Optional[OutputMicrosoftFabricAcknowledgments],
+        PlainValidator(validate_open_enum(True)),
+    ] = OutputMicrosoftFabricAcknowledgments.ONE
+    r"""Control the number of required acknowledgments"""
+
+    format_: Annotated[
+        Annotated[
+            Optional[OutputMicrosoftFabricRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
+    ] = OutputMicrosoftFabricRecordDataFormat.JSON
+    r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
+
+    max_record_size_kb: Annotated[
+        Optional[float], pydantic.Field(alias="maxRecordSizeKB")
+    ] = 768
+    r"""Maximum size of each record batch before compression. Setting should be < message.max.bytes settings in Event Hubs brokers."""
+
+    flush_event_count: Annotated[
+        Optional[float], pydantic.Field(alias="flushEventCount")
+    ] = 1000
+    r"""Maximum number of events in a batch before forcing a flush"""
+
+    flush_period_sec: Annotated[
+        Optional[float], pydantic.Field(alias="flushPeriodSec")
+    ] = 1
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""
+
+    connection_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="connectionTimeout")
+    ] = 10000
+    r"""Maximum time to wait for a connection to complete successfully"""
+
+    request_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="requestTimeout")
+    ] = 60000
+    r"""Maximum time to wait for Kafka to respond to a request"""
+
+    max_retries: Annotated[Optional[float], pydantic.Field(alias="maxRetries")] = 5
+    r"""If messages are failing, you can set the maximum number of retries as high as 100 to prevent loss of data"""
+
+    max_back_off: Annotated[Optional[float], pydantic.Field(alias="maxBackOff")] = 30000
+    r"""The maximum wait time for a retry, in milliseconds. Default (and minimum) is 30,000 ms (30 seconds); maximum is 180,000 ms (180 seconds)."""
+
+    initial_backoff: Annotated[
+        Optional[float], pydantic.Field(alias="initialBackoff")
+    ] = 300
+    r"""Initial value used to calculate the retry, in milliseconds. Maximum is 600,000 ms (10 minutes)."""
+
+    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+    r"""Set the backoff multiplier (2-20) to control the retry frequency for failed messages. For faster retries, use a lower multiplier. For slower retries with more delay between attempts, use a higher multiplier. The multiplier is used in an exponential backoff formula; see the Kafka [documentation](https://kafka.js.org/docs/retry-detailed) for details."""
+
+    authentication_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="authenticationTimeout")
+    ] = 10000
+    r"""Maximum time to wait for Kafka to respond to an authentication request"""
+
+    reauthentication_threshold: Annotated[
+        Optional[float], pydantic.Field(alias="reauthenticationThreshold")
+    ] = 10000
+    r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""
+
+    sasl: Optional[OutputMicrosoftFabricAuthentication] = None
+    r"""Authentication parameters to use when connecting to bootstrap server. Using TLS is highly recommended."""
+
+    tls: Optional[OutputMicrosoftFabricTLSSettingsClientSide] = None
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputMicrosoftFabricBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputMicrosoftFabricBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    description: Optional[str] = None
+
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputMicrosoftFabricMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputMicrosoftFabricMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
+    pq_max_file_size: Annotated[
+        Optional[str], pydantic.Field(alias="pqMaxFileSize")
+    ] = "1 MB"
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+
+    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
+        "$CRIBL_HOME/state/queues"
+    )
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+
+    pq_compress: Annotated[
+        Annotated[
+            Optional[OutputMicrosoftFabricCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
+    ] = OutputMicrosoftFabricCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+    pq_on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputMicrosoftFabricQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
+    ] = OutputMicrosoftFabricQueueFullBehavior.BLOCK
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    pq_controls: Annotated[
+        Optional[OutputMicrosoftFabricPqControls], pydantic.Field(alias="pqControls")
+    ] = None
+
+    @field_serializer("ack")
+    def serialize_ack(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputMicrosoftFabricAcknowledgments(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("format_")
+    def serialize_format_(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputMicrosoftFabricRecordDataFormat(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputMicrosoftFabricBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputMicrosoftFabricMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputMicrosoftFabricCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputMicrosoftFabricQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
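
The hunk above adds the complete Microsoft Fabric destination model. Below is a minimal usage sketch that is not part of the diff: it assumes the module path shown in the file list, passes only the three required fields, uses placeholder endpoint values, and assumes the SDK's BaseModel behaves like a standard Pydantic v2 model (so model_dump is available).

# Minimal sketch (not part of the package diff). Class names, field names, and
# the module path come from the hunk and file list above; endpoint values are
# placeholders.
from cribl_control_plane.models.outputmicrosoftfabric import (
    OutputMicrosoftFabric,
    OutputMicrosoftFabricType,
)

fabric_out = OutputMicrosoftFabric(
    type=OutputMicrosoftFabricType.MICROSOFT_FABRIC,
    topic="es_example-eventstream",  # topic name from the Fabric Eventstream endpoint
    bootstrap_server="example.servicebus.windows.net:9093",  # placeholder bootstrap server
)

# Unset optional fields keep the generated defaults (ack=1, format="json",
# onBackpressure="block", pqMode="error", and so on); dumping with by_alias
# uses the camelCase aliases defined via pydantic.Field(alias=...).
print(fabric_out.model_dump(by_alias=True, exclude_none=True))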

cribl_control_plane/models/outputminio.py
@@ -62,7 +62,7 @@ class OutputMinioStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
 
 
-class ServerSideEncryption(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMinioServerSideEncryption(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Server-side encryption for uploaded objects"""
 
     # Amazon S3 Managed Key
@@ -181,7 +181,7 @@ class OutputMinioTypedDict(TypedDict):
     r"""Object ACL to assign to uploaded objects"""
     storage_class: NotRequired[OutputMinioStorageClass]
     r"""Storage class to select for uploaded objects"""
-    server_side_encryption: NotRequired[ServerSideEncryption]
+    server_side_encryption: NotRequired[OutputMinioServerSideEncryption]
     r"""Server-side encryption for uploaded objects"""
     reuse_connections: NotRequired[bool]
     r"""Reuse connections between requests, which can improve performance"""
@@ -213,6 +213,8 @@ class OutputMinioTypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputMinioDiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     max_file_open_time_sec: NotRequired[float]
     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
     max_file_idle_time_sec: NotRequired[float]
@@ -252,6 +254,8 @@ class OutputMinioTypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -341,7 +345,8 @@ class OutputMinio(BaseModel):
 
     server_side_encryption: Annotated[
         Annotated[
-            Optional[ServerSideEncryption], PlainValidator(validate_open_enum(False))
+            Optional[OutputMinioServerSideEncryption],
+            PlainValidator(validate_open_enum(False)),
         ],
         pydantic.Field(alias="serverSideEncryption"),
     ] = None
@@ -431,6 +436,11 @@ class OutputMinio(BaseModel):
     ] = OutputMinioDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     max_file_open_time_sec: Annotated[
         Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
     ] = 300
@@ -537,6 +547,11 @@ class OutputMinio(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""
 
+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"
@@ -585,7 +600,7 @@ class OutputMinio(BaseModel):
     def serialize_server_side_encryption(self, value):
         if isinstance(value, str):
            try:
-                return models.ServerSideEncryption(value)
+                return models.OutputMinioServerSideEncryption(value)
             except ValueError:
                 return value
         return value

cribl_control_plane/models/outputs3.py
@@ -234,6 +234,8 @@ class OutputS3TypedDict(TypedDict):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
     on_disk_full_backpressure: NotRequired[OutputS3DiskSpaceProtection]
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    force_close_on_shutdown: NotRequired[bool]
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
     max_file_open_time_sec: NotRequired[float]
     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
     max_file_idle_time_sec: NotRequired[float]
@@ -277,6 +279,8 @@ class OutputS3TypedDict(TypedDict):
     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
     empty_dir_cleanup_sec: NotRequired[float]
     r"""How frequently, in seconds, to clean up empty directories"""
+    directory_batch_size: NotRequired[float]
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
     deadletter_path: NotRequired[str]
     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
     max_retry_num: NotRequired[float]
@@ -474,6 +478,11 @@ class OutputS3(BaseModel):
     ] = OutputS3DiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    force_close_on_shutdown: Annotated[
+        Optional[bool], pydantic.Field(alias="forceCloseOnShutdown")
+    ] = False
+    r"""Force all staged files to close during an orderly Node shutdown. This triggers immediate upload of in-progress data — regardless of idle time, file age, or size thresholds — to minimize data loss."""
+
     max_file_open_time_sec: Annotated[
         Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
     ] = 300
@@ -588,6 +597,11 @@ class OutputS3(BaseModel):
     ] = 300
     r"""How frequently, in seconds, to clean up empty directories"""
 
+    directory_batch_size: Annotated[
+        Optional[float], pydantic.Field(alias="directoryBatchSize")
+    ] = 1000
+    r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+
     deadletter_path: Annotated[
         Optional[str], pydantic.Field(alias="deadletterPath")
     ] = "$CRIBL_HOME/state/outputs/dead-letter"