cribl_control_plane-0.0.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of cribl-control-plane might be problematic.

Files changed (197)
  1. cribl_control_plane/__init__.py +17 -0
  2. cribl_control_plane/_hooks/__init__.py +5 -0
  3. cribl_control_plane/_hooks/clientcredentials.py +211 -0
  4. cribl_control_plane/_hooks/registration.py +13 -0
  5. cribl_control_plane/_hooks/sdkhooks.py +81 -0
  6. cribl_control_plane/_hooks/types.py +112 -0
  7. cribl_control_plane/_version.py +15 -0
  8. cribl_control_plane/auth_sdk.py +184 -0
  9. cribl_control_plane/basesdk.py +358 -0
  10. cribl_control_plane/errors/__init__.py +60 -0
  11. cribl_control_plane/errors/apierror.py +38 -0
  12. cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
  13. cribl_control_plane/errors/error.py +24 -0
  14. cribl_control_plane/errors/healthstatus_error.py +38 -0
  15. cribl_control_plane/errors/no_response_error.py +13 -0
  16. cribl_control_plane/errors/responsevalidationerror.py +25 -0
  17. cribl_control_plane/health.py +166 -0
  18. cribl_control_plane/httpclient.py +126 -0
  19. cribl_control_plane/models/__init__.py +7305 -0
  20. cribl_control_plane/models/addhectokenrequest.py +34 -0
  21. cribl_control_plane/models/authtoken.py +13 -0
  22. cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
  23. cribl_control_plane/models/createinputop.py +24 -0
  24. cribl_control_plane/models/createoutputop.py +24 -0
  25. cribl_control_plane/models/createoutputtestbyidop.py +46 -0
  26. cribl_control_plane/models/criblevent.py +14 -0
  27. cribl_control_plane/models/deleteinputbyidop.py +37 -0
  28. cribl_control_plane/models/deleteoutputbyidop.py +37 -0
  29. cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
  30. cribl_control_plane/models/getinputbyidop.py +37 -0
  31. cribl_control_plane/models/getoutputbyidop.py +37 -0
  32. cribl_control_plane/models/getoutputpqbyidop.py +36 -0
  33. cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
  34. cribl_control_plane/models/healthstatus.py +36 -0
  35. cribl_control_plane/models/input.py +199 -0
  36. cribl_control_plane/models/inputappscope.py +448 -0
  37. cribl_control_plane/models/inputazureblob.py +308 -0
  38. cribl_control_plane/models/inputcollection.py +208 -0
  39. cribl_control_plane/models/inputconfluentcloud.py +585 -0
  40. cribl_control_plane/models/inputcribl.py +165 -0
  41. cribl_control_plane/models/inputcriblhttp.py +341 -0
  42. cribl_control_plane/models/inputcribllakehttp.py +342 -0
  43. cribl_control_plane/models/inputcriblmetrics.py +175 -0
  44. cribl_control_plane/models/inputcribltcp.py +299 -0
  45. cribl_control_plane/models/inputcrowdstrike.py +410 -0
  46. cribl_control_plane/models/inputdatadogagent.py +364 -0
  47. cribl_control_plane/models/inputdatagen.py +180 -0
  48. cribl_control_plane/models/inputedgeprometheus.py +551 -0
  49. cribl_control_plane/models/inputelastic.py +494 -0
  50. cribl_control_plane/models/inputeventhub.py +360 -0
  51. cribl_control_plane/models/inputexec.py +213 -0
  52. cribl_control_plane/models/inputfile.py +259 -0
  53. cribl_control_plane/models/inputfirehose.py +341 -0
  54. cribl_control_plane/models/inputgooglepubsub.py +247 -0
  55. cribl_control_plane/models/inputgrafana_union.py +1247 -0
  56. cribl_control_plane/models/inputhttp.py +403 -0
  57. cribl_control_plane/models/inputhttpraw.py +407 -0
  58. cribl_control_plane/models/inputjournalfiles.py +208 -0
  59. cribl_control_plane/models/inputkafka.py +581 -0
  60. cribl_control_plane/models/inputkinesis.py +363 -0
  61. cribl_control_plane/models/inputkubeevents.py +182 -0
  62. cribl_control_plane/models/inputkubelogs.py +256 -0
  63. cribl_control_plane/models/inputkubemetrics.py +233 -0
  64. cribl_control_plane/models/inputloki.py +468 -0
  65. cribl_control_plane/models/inputmetrics.py +290 -0
  66. cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
  67. cribl_control_plane/models/inputmsk.py +654 -0
  68. cribl_control_plane/models/inputnetflow.py +224 -0
  69. cribl_control_plane/models/inputoffice365mgmt.py +384 -0
  70. cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
  71. cribl_control_plane/models/inputoffice365service.py +377 -0
  72. cribl_control_plane/models/inputopentelemetry.py +516 -0
  73. cribl_control_plane/models/inputprometheus.py +464 -0
  74. cribl_control_plane/models/inputprometheusrw.py +470 -0
  75. cribl_control_plane/models/inputrawudp.py +207 -0
  76. cribl_control_plane/models/inputs3.py +416 -0
  77. cribl_control_plane/models/inputs3inventory.py +440 -0
  78. cribl_control_plane/models/inputsecuritylake.py +425 -0
  79. cribl_control_plane/models/inputsnmp.py +274 -0
  80. cribl_control_plane/models/inputsplunk.py +387 -0
  81. cribl_control_plane/models/inputsplunkhec.py +478 -0
  82. cribl_control_plane/models/inputsplunksearch.py +537 -0
  83. cribl_control_plane/models/inputsqs.py +320 -0
  84. cribl_control_plane/models/inputsyslog_union.py +759 -0
  85. cribl_control_plane/models/inputsystemmetrics.py +533 -0
  86. cribl_control_plane/models/inputsystemstate.py +417 -0
  87. cribl_control_plane/models/inputtcp.py +359 -0
  88. cribl_control_plane/models/inputtcpjson.py +334 -0
  89. cribl_control_plane/models/inputwef.py +498 -0
  90. cribl_control_plane/models/inputwindowsmetrics.py +457 -0
  91. cribl_control_plane/models/inputwineventlogs.py +222 -0
  92. cribl_control_plane/models/inputwiz.py +334 -0
  93. cribl_control_plane/models/inputzscalerhec.py +439 -0
  94. cribl_control_plane/models/listinputop.py +24 -0
  95. cribl_control_plane/models/listoutputop.py +24 -0
  96. cribl_control_plane/models/logininfo.py +16 -0
  97. cribl_control_plane/models/output.py +229 -0
  98. cribl_control_plane/models/outputazureblob.py +471 -0
  99. cribl_control_plane/models/outputazuredataexplorer.py +660 -0
  100. cribl_control_plane/models/outputazureeventhub.py +321 -0
  101. cribl_control_plane/models/outputazurelogs.py +386 -0
  102. cribl_control_plane/models/outputclickhouse.py +650 -0
  103. cribl_control_plane/models/outputcloudwatch.py +273 -0
  104. cribl_control_plane/models/outputconfluentcloud.py +591 -0
  105. cribl_control_plane/models/outputcriblhttp.py +494 -0
  106. cribl_control_plane/models/outputcribllake.py +396 -0
  107. cribl_control_plane/models/outputcribltcp.py +387 -0
  108. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
  109. cribl_control_plane/models/outputdatadog.py +472 -0
  110. cribl_control_plane/models/outputdataset.py +437 -0
  111. cribl_control_plane/models/outputdefault.py +55 -0
  112. cribl_control_plane/models/outputdevnull.py +50 -0
  113. cribl_control_plane/models/outputdiskspool.py +89 -0
  114. cribl_control_plane/models/outputdls3.py +560 -0
  115. cribl_control_plane/models/outputdynatracehttp.py +454 -0
  116. cribl_control_plane/models/outputdynatraceotlp.py +486 -0
  117. cribl_control_plane/models/outputelastic.py +494 -0
  118. cribl_control_plane/models/outputelasticcloud.py +407 -0
  119. cribl_control_plane/models/outputexabeam.py +297 -0
  120. cribl_control_plane/models/outputfilesystem.py +357 -0
  121. cribl_control_plane/models/outputgooglechronicle.py +486 -0
  122. cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
  123. cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
  124. cribl_control_plane/models/outputgooglepubsub.py +274 -0
  125. cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
  126. cribl_control_plane/models/outputgraphite.py +225 -0
  127. cribl_control_plane/models/outputhoneycomb.py +369 -0
  128. cribl_control_plane/models/outputhumiohec.py +389 -0
  129. cribl_control_plane/models/outputinfluxdb.py +523 -0
  130. cribl_control_plane/models/outputkafka.py +581 -0
  131. cribl_control_plane/models/outputkinesis.py +312 -0
  132. cribl_control_plane/models/outputloki.py +425 -0
  133. cribl_control_plane/models/outputminio.py +512 -0
  134. cribl_control_plane/models/outputmsk.py +654 -0
  135. cribl_control_plane/models/outputnetflow.py +80 -0
  136. cribl_control_plane/models/outputnewrelic.py +424 -0
  137. cribl_control_plane/models/outputnewrelicevents.py +401 -0
  138. cribl_control_plane/models/outputopentelemetry.py +669 -0
  139. cribl_control_plane/models/outputprometheus.py +485 -0
  140. cribl_control_plane/models/outputring.py +121 -0
  141. cribl_control_plane/models/outputrouter.py +83 -0
  142. cribl_control_plane/models/outputs3.py +556 -0
  143. cribl_control_plane/models/outputsamplesresponse.py +14 -0
  144. cribl_control_plane/models/outputsecuritylake.py +505 -0
  145. cribl_control_plane/models/outputsentinel.py +488 -0
  146. cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
  147. cribl_control_plane/models/outputservicenow.py +543 -0
  148. cribl_control_plane/models/outputsignalfx.py +369 -0
  149. cribl_control_plane/models/outputsnmp.py +80 -0
  150. cribl_control_plane/models/outputsns.py +274 -0
  151. cribl_control_plane/models/outputsplunk.py +383 -0
  152. cribl_control_plane/models/outputsplunkhec.py +434 -0
  153. cribl_control_plane/models/outputsplunklb.py +558 -0
  154. cribl_control_plane/models/outputsqs.py +328 -0
  155. cribl_control_plane/models/outputstatsd.py +224 -0
  156. cribl_control_plane/models/outputstatsdext.py +225 -0
  157. cribl_control_plane/models/outputsumologic.py +378 -0
  158. cribl_control_plane/models/outputsyslog.py +415 -0
  159. cribl_control_plane/models/outputtcpjson.py +413 -0
  160. cribl_control_plane/models/outputtestrequest.py +15 -0
  161. cribl_control_plane/models/outputtestresponse.py +29 -0
  162. cribl_control_plane/models/outputwavefront.py +369 -0
  163. cribl_control_plane/models/outputwebhook.py +689 -0
  164. cribl_control_plane/models/outputxsiam.py +415 -0
  165. cribl_control_plane/models/schemeclientoauth.py +24 -0
  166. cribl_control_plane/models/security.py +36 -0
  167. cribl_control_plane/models/updatehectokenrequest.py +31 -0
  168. cribl_control_plane/models/updateinputbyidop.py +44 -0
  169. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
  170. cribl_control_plane/models/updateoutputbyidop.py +44 -0
  171. cribl_control_plane/outputs.py +1615 -0
  172. cribl_control_plane/py.typed +1 -0
  173. cribl_control_plane/sdk.py +164 -0
  174. cribl_control_plane/sdkconfiguration.py +36 -0
  175. cribl_control_plane/sources.py +1355 -0
  176. cribl_control_plane/types/__init__.py +21 -0
  177. cribl_control_plane/types/basemodel.py +39 -0
  178. cribl_control_plane/utils/__init__.py +187 -0
  179. cribl_control_plane/utils/annotations.py +55 -0
  180. cribl_control_plane/utils/datetimes.py +23 -0
  181. cribl_control_plane/utils/enums.py +74 -0
  182. cribl_control_plane/utils/eventstreaming.py +238 -0
  183. cribl_control_plane/utils/forms.py +223 -0
  184. cribl_control_plane/utils/headers.py +136 -0
  185. cribl_control_plane/utils/logger.py +27 -0
  186. cribl_control_plane/utils/metadata.py +118 -0
  187. cribl_control_plane/utils/queryparams.py +205 -0
  188. cribl_control_plane/utils/requestbodies.py +66 -0
  189. cribl_control_plane/utils/retries.py +217 -0
  190. cribl_control_plane/utils/security.py +207 -0
  191. cribl_control_plane/utils/serializers.py +249 -0
  192. cribl_control_plane/utils/unmarshal_json_response.py +24 -0
  193. cribl_control_plane/utils/url.py +155 -0
  194. cribl_control_plane/utils/values.py +137 -0
  195. cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
  196. cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
  197. cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/inputeventhub.py
@@ -0,0 +1,360 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from cribl_control_plane import utils
+ from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
+ from enum import Enum
+ import pydantic
+ from pydantic.functional_validators import PlainValidator
+ from typing import List, Optional
+ from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+ class InputEventhubType(str, Enum, metaclass=utils.OpenEnumMeta):
+     EVENTHUB = "eventhub"
+
+
+ class InputEventhubConnectionTypedDict(TypedDict):
+     output: str
+     pipeline: NotRequired[str]
+
+
+ class InputEventhubConnection(BaseModel):
+     output: str
+
+     pipeline: Optional[str] = None
+
+
+ class InputEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+     SMART = "smart"
+     ALWAYS = "always"
+
+
+ class InputEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Codec to use to compress the persisted data"""
+
+     NONE = "none"
+     GZIP = "gzip"
+
+
+ class InputEventhubPqTypedDict(TypedDict):
+     mode: NotRequired[InputEventhubMode]
+     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+     max_buffer_size: NotRequired[float]
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+     commit_frequency: NotRequired[float]
+     r"""The number of events to send downstream before committing that Stream has read them"""
+     max_file_size: NotRequired[str]
+     r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+     max_size: NotRequired[str]
+     r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+     path: NotRequired[str]
+     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+     compress: NotRequired[InputEventhubCompression]
+     r"""Codec to use to compress the persisted data"""
+
+
+ class InputEventhubPq(BaseModel):
+     mode: Annotated[
+         Optional[InputEventhubMode], PlainValidator(validate_open_enum(False))
+     ] = InputEventhubMode.ALWAYS
+     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+     max_buffer_size: Annotated[
+         Optional[float], pydantic.Field(alias="maxBufferSize")
+     ] = 1000
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+     commit_frequency: Annotated[
+         Optional[float], pydantic.Field(alias="commitFrequency")
+     ] = 42
+     r"""The number of events to send downstream before committing that Stream has read them"""
+
+     max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+         "1 MB"
+     )
+     r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+     max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+     r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+     path: Optional[str] = "$CRIBL_HOME/state/queues"
+     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+     compress: Annotated[
+         Optional[InputEventhubCompression], PlainValidator(validate_open_enum(False))
+     ] = InputEventhubCompression.NONE
+     r"""Codec to use to compress the persisted data"""
+
+
+ class InputEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+     PLAIN = "plain"
+     OAUTHBEARER = "oauthbearer"
+
+
+ class InputEventhubAuthenticationTypedDict(TypedDict):
+     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
+
+     disabled: NotRequired[bool]
+     mechanism: NotRequired[InputEventhubSASLMechanism]
+
+
+ class InputEventhubAuthentication(BaseModel):
+     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
+
+     disabled: Optional[bool] = False
+
+     mechanism: Annotated[
+         Optional[InputEventhubSASLMechanism], PlainValidator(validate_open_enum(False))
+     ] = InputEventhubSASLMechanism.PLAIN
+
+
+ class InputEventhubTLSSettingsClientSideTypedDict(TypedDict):
+     disabled: NotRequired[bool]
+     reject_unauthorized: NotRequired[bool]
+     r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""
+
+
+ class InputEventhubTLSSettingsClientSide(BaseModel):
+     disabled: Optional[bool] = False
+
+     reject_unauthorized: Annotated[
+         Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+     ] = True
+     r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""
+
+
+ class InputEventhubMetadatumTypedDict(TypedDict):
+     name: str
+     value: str
+     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+ class InputEventhubMetadatum(BaseModel):
+     name: str
+
+     value: str
+     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+ class InputEventhubTypedDict(TypedDict):
+     type: InputEventhubType
+     brokers: List[str]
+     r"""List of Event Hubs Kafka brokers to connect to (example: yourdomain.servicebus.windows.net:9093). The hostname can be found in the host portion of the primary or secondary connection string in Shared Access Policies."""
+     topics: List[str]
+     r"""The name of the Event Hub (Kafka topic) to subscribe to. Warning: To optimize performance, Cribl suggests subscribing each Event Hubs Source to only a single topic."""
+     id: NotRequired[str]
+     r"""Unique ID for this input"""
+     disabled: NotRequired[bool]
+     pipeline: NotRequired[str]
+     r"""Pipeline to process data from this Source before sending it through the Routes"""
+     send_to_routes: NotRequired[bool]
+     r"""Select whether to send data to Routes, or directly to Destinations."""
+     environment: NotRequired[str]
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+     pq_enabled: NotRequired[bool]
+     r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+     streamtags: NotRequired[List[str]]
+     r"""Tags for filtering and grouping in @{product}"""
+     connections: NotRequired[List[InputEventhubConnectionTypedDict]]
+     r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+     pq: NotRequired[InputEventhubPqTypedDict]
+     group_id: NotRequired[str]
+     r"""The consumer group this instance belongs to. Default is 'Cribl'."""
+     from_beginning: NotRequired[bool]
+     r"""Start reading from earliest available data; relevant only during initial subscription"""
+     connection_timeout: NotRequired[float]
+     r"""Maximum time to wait for a connection to complete successfully"""
+     request_timeout: NotRequired[float]
+     r"""Maximum time to wait for Kafka to respond to a request"""
+     max_retries: NotRequired[float]
+     r"""If messages are failing, you can set the maximum number of retries as high as 100 to prevent loss of data"""
+     max_back_off: NotRequired[float]
+     r"""The maximum wait time for a retry, in milliseconds. Default (and minimum) is 30,000 ms (30 seconds); maximum is 180,000 ms (180 seconds)."""
+     initial_backoff: NotRequired[float]
+     r"""Initial value used to calculate the retry, in milliseconds. Maximum is 600,000 ms (10 minutes)."""
+     backoff_rate: NotRequired[float]
+     r"""Set the backoff multiplier (2-20) to control the retry frequency for failed messages. For faster retries, use a lower multiplier. For slower retries with more delay between attempts, use a higher multiplier. The multiplier is used in an exponential backoff formula; see the Kafka [documentation](https://kafka.js.org/docs/retry-detailed) for details."""
+     authentication_timeout: NotRequired[float]
+     r"""Maximum time to wait for Kafka to respond to an authentication request"""
+     reauthentication_threshold: NotRequired[float]
+     r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""
+     sasl: NotRequired[InputEventhubAuthenticationTypedDict]
+     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
+     tls: NotRequired[InputEventhubTLSSettingsClientSideTypedDict]
+     session_timeout: NotRequired[float]
+     r"""Timeout (session.timeout.ms in Kafka domain) used to detect client failures when using Kafka's group-management facilities.
+     If the client sends no heartbeats to the broker before the timeout expires, the broker will remove the client from the group and initiate a rebalance.
+     Value must be lower than rebalanceTimeout.
+     See details [here](https://github.com/Azure/azure-event-hubs-for-kafka/blob/master/CONFIGURATION.md).
+     """
+     rebalance_timeout: NotRequired[float]
+     r"""Maximum allowed time (rebalance.timeout.ms in Kafka domain) for each worker to join the group after a rebalance begins.
+     If the timeout is exceeded, the coordinator broker will remove the worker from the group.
+     See [Recommended configurations](https://github.com/Azure/azure-event-hubs-for-kafka/blob/master/CONFIGURATION.md).
+     """
+     heartbeat_interval: NotRequired[float]
+     r"""Expected time (heartbeat.interval.ms in Kafka domain) between heartbeats to the consumer coordinator when using Kafka's group-management facilities.
+     Value must be lower than sessionTimeout and typically should not exceed 1/3 of the sessionTimeout value.
+     See [Recommended configurations](https://github.com/Azure/azure-event-hubs-for-kafka/blob/master/CONFIGURATION.md).
+     """
+     auto_commit_interval: NotRequired[float]
+     r"""How often to commit offsets. If both this and Offset commit threshold are set, @{product} commits offsets when either condition is met. If both are empty, @{product} commits offsets after each batch."""
+     auto_commit_threshold: NotRequired[float]
+     r"""How many events are needed to trigger an offset commit. If both this and Offset commit interval are set, @{product} commits offsets when either condition is met. If both are empty, @{product} commits offsets after each batch."""
+     max_bytes_per_partition: NotRequired[float]
+     r"""Maximum amount of data that Kafka will return per partition, per fetch request. Must equal or exceed the maximum message size (maxBytesPerPartition) that Kafka is configured to allow. Otherwise, @{product} can get stuck trying to retrieve messages. Defaults to 1048576 (1 MB)."""
+     max_bytes: NotRequired[float]
+     r"""Maximum number of bytes that Kafka will return per fetch request. Defaults to 10485760 (10 MB)."""
+     max_socket_errors: NotRequired[float]
+     r"""Maximum number of network errors before the consumer re-creates a socket"""
+     minimize_duplicates: NotRequired[bool]
+     r"""Minimize duplicate events by starting only one consumer for each topic partition"""
+     metadata: NotRequired[List[InputEventhubMetadatumTypedDict]]
+     r"""Fields to add to events from this input"""
+     description: NotRequired[str]
+
+
+ class InputEventhub(BaseModel):
+     type: Annotated[InputEventhubType, PlainValidator(validate_open_enum(False))]
+
+     brokers: List[str]
+     r"""List of Event Hubs Kafka brokers to connect to (example: yourdomain.servicebus.windows.net:9093). The hostname can be found in the host portion of the primary or secondary connection string in Shared Access Policies."""
+
+     topics: List[str]
+     r"""The name of the Event Hub (Kafka topic) to subscribe to. Warning: To optimize performance, Cribl suggests subscribing each Event Hubs Source to only a single topic."""
+
+     id: Optional[str] = None
+     r"""Unique ID for this input"""
+
+     disabled: Optional[bool] = False
+
+     pipeline: Optional[str] = None
+     r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+     send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+         True
+     )
+     r"""Select whether to send data to Routes, or directly to Destinations."""
+
+     environment: Optional[str] = None
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+     pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+     r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+     streamtags: Optional[List[str]] = None
+     r"""Tags for filtering and grouping in @{product}"""
+
+     connections: Optional[List[InputEventhubConnection]] = None
+     r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+     pq: Optional[InputEventhubPq] = None
+
+     group_id: Annotated[Optional[str], pydantic.Field(alias="groupId")] = "Cribl"
+     r"""The consumer group this instance belongs to. Default is 'Cribl'."""
+
+     from_beginning: Annotated[Optional[bool], pydantic.Field(alias="fromBeginning")] = (
+         True
+     )
+     r"""Start reading from earliest available data; relevant only during initial subscription"""
+
+     connection_timeout: Annotated[
+         Optional[float], pydantic.Field(alias="connectionTimeout")
+     ] = 10000
+     r"""Maximum time to wait for a connection to complete successfully"""
+
+     request_timeout: Annotated[
+         Optional[float], pydantic.Field(alias="requestTimeout")
+     ] = 60000
+     r"""Maximum time to wait for Kafka to respond to a request"""
+
+     max_retries: Annotated[Optional[float], pydantic.Field(alias="maxRetries")] = 5
+     r"""If messages are failing, you can set the maximum number of retries as high as 100 to prevent loss of data"""
+
+     max_back_off: Annotated[Optional[float], pydantic.Field(alias="maxBackOff")] = 30000
+     r"""The maximum wait time for a retry, in milliseconds. Default (and minimum) is 30,000 ms (30 seconds); maximum is 180,000 ms (180 seconds)."""
+
+     initial_backoff: Annotated[
+         Optional[float], pydantic.Field(alias="initialBackoff")
+     ] = 300
+     r"""Initial value used to calculate the retry, in milliseconds. Maximum is 600,000 ms (10 minutes)."""
+
+     backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+     r"""Set the backoff multiplier (2-20) to control the retry frequency for failed messages. For faster retries, use a lower multiplier. For slower retries with more delay between attempts, use a higher multiplier. The multiplier is used in an exponential backoff formula; see the Kafka [documentation](https://kafka.js.org/docs/retry-detailed) for details."""
+
+     authentication_timeout: Annotated[
+         Optional[float], pydantic.Field(alias="authenticationTimeout")
+     ] = 10000
+     r"""Maximum time to wait for Kafka to respond to an authentication request"""
+
+     reauthentication_threshold: Annotated[
+         Optional[float], pydantic.Field(alias="reauthenticationThreshold")
+     ] = 10000
+     r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""
+
+     sasl: Optional[InputEventhubAuthentication] = None
+     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
+
+     tls: Optional[InputEventhubTLSSettingsClientSide] = None
+
+     session_timeout: Annotated[
+         Optional[float], pydantic.Field(alias="sessionTimeout")
+     ] = 30000
+     r"""Timeout (session.timeout.ms in Kafka domain) used to detect client failures when using Kafka's group-management facilities.
+     If the client sends no heartbeats to the broker before the timeout expires, the broker will remove the client from the group and initiate a rebalance.
+     Value must be lower than rebalanceTimeout.
+     See details [here](https://github.com/Azure/azure-event-hubs-for-kafka/blob/master/CONFIGURATION.md).
+     """
+
+     rebalance_timeout: Annotated[
+         Optional[float], pydantic.Field(alias="rebalanceTimeout")
+     ] = 60000
+     r"""Maximum allowed time (rebalance.timeout.ms in Kafka domain) for each worker to join the group after a rebalance begins.
+     If the timeout is exceeded, the coordinator broker will remove the worker from the group.
+     See [Recommended configurations](https://github.com/Azure/azure-event-hubs-for-kafka/blob/master/CONFIGURATION.md).
+     """
+
+     heartbeat_interval: Annotated[
+         Optional[float], pydantic.Field(alias="heartbeatInterval")
+     ] = 3000
+     r"""Expected time (heartbeat.interval.ms in Kafka domain) between heartbeats to the consumer coordinator when using Kafka's group-management facilities.
+     Value must be lower than sessionTimeout and typically should not exceed 1/3 of the sessionTimeout value.
+     See [Recommended configurations](https://github.com/Azure/azure-event-hubs-for-kafka/blob/master/CONFIGURATION.md).
+     """
+
+     auto_commit_interval: Annotated[
+         Optional[float], pydantic.Field(alias="autoCommitInterval")
+     ] = None
+     r"""How often to commit offsets. If both this and Offset commit threshold are set, @{product} commits offsets when either condition is met. If both are empty, @{product} commits offsets after each batch."""
+
+     auto_commit_threshold: Annotated[
+         Optional[float], pydantic.Field(alias="autoCommitThreshold")
+     ] = None
+     r"""How many events are needed to trigger an offset commit. If both this and Offset commit interval are set, @{product} commits offsets when either condition is met. If both are empty, @{product} commits offsets after each batch."""
+
+     max_bytes_per_partition: Annotated[
+         Optional[float], pydantic.Field(alias="maxBytesPerPartition")
+     ] = 1048576
+     r"""Maximum amount of data that Kafka will return per partition, per fetch request. Must equal or exceed the maximum message size (maxBytesPerPartition) that Kafka is configured to allow. Otherwise, @{product} can get stuck trying to retrieve messages. Defaults to 1048576 (1 MB)."""
+
+     max_bytes: Annotated[Optional[float], pydantic.Field(alias="maxBytes")] = 10485760
+     r"""Maximum number of bytes that Kafka will return per fetch request. Defaults to 10485760 (10 MB)."""
+
+     max_socket_errors: Annotated[
+         Optional[float], pydantic.Field(alias="maxSocketErrors")
+     ] = 0
+     r"""Maximum number of network errors before the consumer re-creates a socket"""
+
+     minimize_duplicates: Annotated[
+         Optional[bool], pydantic.Field(alias="minimizeDuplicates")
+     ] = False
+     r"""Minimize duplicate events by starting only one consumer for each topic partition"""
+
+     metadata: Optional[List[InputEventhubMetadatum]] = None
+     r"""Fields to add to events from this input"""
+
+     description: Optional[str] = None
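The class above is an ordinary Pydantic model, so a configuration can be built from its three required fields (type, brokers, topics) and serialized back to the control plane's camelCase wire format via the field aliases. A minimal sketch, assuming the models package re-exports these classes (the large models/__init__.py in the file list suggests it does) and using placeholder broker and topic values:

from cribl_control_plane.models import InputEventhub, InputEventhubType

# Placeholder namespace and topic; substitute real Event Hubs values.
source = InputEventhub(
    type=InputEventhubType.EVENTHUB,
    brokers=["yourdomain.servicebus.windows.net:9093"],
    topics=["my-event-hub"],
)

# by_alias=True emits the camelCase names declared with pydantic.Field(alias=...),
# assuming the shared BaseModel behaves like a standard pydantic v2 model.
print(source.model_dump(by_alias=True, exclude_none=True))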
cribl_control_plane/models/inputexec.py
@@ -0,0 +1,213 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from cribl_control_plane import utils
+ from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
+ from enum import Enum
+ import pydantic
+ from pydantic.functional_validators import PlainValidator
+ from typing import List, Optional
+ from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+ class InputExecType(str, Enum, metaclass=utils.OpenEnumMeta):
+     EXEC = "exec"
+
+
+ class InputExecConnectionTypedDict(TypedDict):
+     output: str
+     pipeline: NotRequired[str]
+
+
+ class InputExecConnection(BaseModel):
+     output: str
+
+     pipeline: Optional[str] = None
+
+
+ class InputExecMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+     SMART = "smart"
+     ALWAYS = "always"
+
+
+ class InputExecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Codec to use to compress the persisted data"""
+
+     NONE = "none"
+     GZIP = "gzip"
+
+
+ class InputExecPqTypedDict(TypedDict):
+     mode: NotRequired[InputExecMode]
+     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+     max_buffer_size: NotRequired[float]
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+     commit_frequency: NotRequired[float]
+     r"""The number of events to send downstream before committing that Stream has read them"""
+     max_file_size: NotRequired[str]
+     r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+     max_size: NotRequired[str]
+     r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+     path: NotRequired[str]
+     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+     compress: NotRequired[InputExecCompression]
+     r"""Codec to use to compress the persisted data"""
+
+
+ class InputExecPq(BaseModel):
+     mode: Annotated[
+         Optional[InputExecMode], PlainValidator(validate_open_enum(False))
+     ] = InputExecMode.ALWAYS
+     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+     max_buffer_size: Annotated[
+         Optional[float], pydantic.Field(alias="maxBufferSize")
+     ] = 1000
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+     commit_frequency: Annotated[
+         Optional[float], pydantic.Field(alias="commitFrequency")
+     ] = 42
+     r"""The number of events to send downstream before committing that Stream has read them"""
+
+     max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+         "1 MB"
+     )
+     r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+     max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+     r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+     path: Optional[str] = "$CRIBL_HOME/state/queues"
+     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+     compress: Annotated[
+         Optional[InputExecCompression], PlainValidator(validate_open_enum(False))
+     ] = InputExecCompression.NONE
+     r"""Codec to use to compress the persisted data"""
+
+
+ class ScheduleType(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Select a schedule type; either an interval (in seconds) or a cron-style schedule."""
+
+     INTERVAL = "interval"
+     CRON_SCHEDULE = "cronSchedule"
+
+
+ class InputExecMetadatumTypedDict(TypedDict):
+     name: str
+     value: str
+     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+ class InputExecMetadatum(BaseModel):
+     name: str
+
+     value: str
+     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+ class InputExecTypedDict(TypedDict):
+     type: InputExecType
+     command: str
+     r"""Command to execute; supports Bourne shell (or CMD on Windows) syntax"""
+     id: NotRequired[str]
+     r"""Unique ID for this input"""
+     disabled: NotRequired[bool]
+     pipeline: NotRequired[str]
+     r"""Pipeline to process data from this Source before sending it through the Routes"""
+     send_to_routes: NotRequired[bool]
+     r"""Select whether to send data to Routes, or directly to Destinations."""
+     environment: NotRequired[str]
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+     pq_enabled: NotRequired[bool]
+     r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+     streamtags: NotRequired[List[str]]
+     r"""Tags for filtering and grouping in @{product}"""
+     connections: NotRequired[List[InputExecConnectionTypedDict]]
+     r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+     pq: NotRequired[InputExecPqTypedDict]
+     retries: NotRequired[float]
+     r"""Maximum number of retry attempts in the event that the command fails"""
+     schedule_type: NotRequired[ScheduleType]
+     r"""Select a schedule type; either an interval (in seconds) or a cron-style schedule."""
+     breaker_rulesets: NotRequired[List[str]]
+     r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+     stale_channel_flush_ms: NotRequired[float]
+     r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+     metadata: NotRequired[List[InputExecMetadatumTypedDict]]
+     r"""Fields to add to events from this input"""
+     description: NotRequired[str]
+     interval: NotRequired[float]
+     r"""Interval between command executions in seconds."""
+     cron_schedule: NotRequired[str]
+     r"""Cron schedule to execute the command on."""
+
+
+ class InputExec(BaseModel):
+     type: Annotated[InputExecType, PlainValidator(validate_open_enum(False))]
+
+     command: str
+     r"""Command to execute; supports Bourne shell (or CMD on Windows) syntax"""
+
+     id: Optional[str] = None
+     r"""Unique ID for this input"""
+
+     disabled: Optional[bool] = False
+
+     pipeline: Optional[str] = None
+     r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+     send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+         True
+     )
+     r"""Select whether to send data to Routes, or directly to Destinations."""
+
+     environment: Optional[str] = None
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+     pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+     r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+     streamtags: Optional[List[str]] = None
+     r"""Tags for filtering and grouping in @{product}"""
+
+     connections: Optional[List[InputExecConnection]] = None
+     r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+     pq: Optional[InputExecPq] = None
+
+     retries: Optional[float] = 10
+     r"""Maximum number of retry attempts in the event that the command fails"""
+
+     schedule_type: Annotated[
+         Annotated[Optional[ScheduleType], PlainValidator(validate_open_enum(False))],
+         pydantic.Field(alias="scheduleType"),
+     ] = ScheduleType.INTERVAL
+     r"""Select a schedule type; either an interval (in seconds) or a cron-style schedule."""
+
+     breaker_rulesets: Annotated[
+         Optional[List[str]], pydantic.Field(alias="breakerRulesets")
+     ] = None
+     r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+
+     stale_channel_flush_ms: Annotated[
+         Optional[float], pydantic.Field(alias="staleChannelFlushMs")
+     ] = 10000
+     r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+
+     metadata: Optional[List[InputExecMetadatum]] = None
+     r"""Fields to add to events from this input"""
+
+     description: Optional[str] = None
+
+     interval: Optional[float] = 60
+     r"""Interval between command executions in seconds."""
+
+     cron_schedule: Annotated[Optional[str], pydantic.Field(alias="cronSchedule")] = (
+         "* * * * *"
+     )
+     r"""Cron schedule to execute the command on."""
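As with the Event Hubs model, InputExec can be instantiated from its two required fields (type and command) and dumped with aliases applied. Another minimal sketch with illustrative values only, again assuming the models package re-exports these names and that the shared BaseModel serializes like a standard pydantic v2 model:

from cribl_control_plane.models import InputExec, InputExecType

# Illustrative: run `df -h` every 300 seconds instead of the 60-second default.
exec_source = InputExec(
    type=InputExecType.EXEC,
    command="df -h",
    interval=300,
)

# scheduleType defaults to "interval", so only the fields set here are emitted.
print(exec_source.model_dump(by_alias=True, exclude_none=True))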