cribl_control_plane-0.0.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (197)
  1. cribl_control_plane/__init__.py +17 -0
  2. cribl_control_plane/_hooks/__init__.py +5 -0
  3. cribl_control_plane/_hooks/clientcredentials.py +211 -0
  4. cribl_control_plane/_hooks/registration.py +13 -0
  5. cribl_control_plane/_hooks/sdkhooks.py +81 -0
  6. cribl_control_plane/_hooks/types.py +112 -0
  7. cribl_control_plane/_version.py +15 -0
  8. cribl_control_plane/auth_sdk.py +184 -0
  9. cribl_control_plane/basesdk.py +358 -0
  10. cribl_control_plane/errors/__init__.py +60 -0
  11. cribl_control_plane/errors/apierror.py +38 -0
  12. cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
  13. cribl_control_plane/errors/error.py +24 -0
  14. cribl_control_plane/errors/healthstatus_error.py +38 -0
  15. cribl_control_plane/errors/no_response_error.py +13 -0
  16. cribl_control_plane/errors/responsevalidationerror.py +25 -0
  17. cribl_control_plane/health.py +166 -0
  18. cribl_control_plane/httpclient.py +126 -0
  19. cribl_control_plane/models/__init__.py +7305 -0
  20. cribl_control_plane/models/addhectokenrequest.py +34 -0
  21. cribl_control_plane/models/authtoken.py +13 -0
  22. cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
  23. cribl_control_plane/models/createinputop.py +24 -0
  24. cribl_control_plane/models/createoutputop.py +24 -0
  25. cribl_control_plane/models/createoutputtestbyidop.py +46 -0
  26. cribl_control_plane/models/criblevent.py +14 -0
  27. cribl_control_plane/models/deleteinputbyidop.py +37 -0
  28. cribl_control_plane/models/deleteoutputbyidop.py +37 -0
  29. cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
  30. cribl_control_plane/models/getinputbyidop.py +37 -0
  31. cribl_control_plane/models/getoutputbyidop.py +37 -0
  32. cribl_control_plane/models/getoutputpqbyidop.py +36 -0
  33. cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
  34. cribl_control_plane/models/healthstatus.py +36 -0
  35. cribl_control_plane/models/input.py +199 -0
  36. cribl_control_plane/models/inputappscope.py +448 -0
  37. cribl_control_plane/models/inputazureblob.py +308 -0
  38. cribl_control_plane/models/inputcollection.py +208 -0
  39. cribl_control_plane/models/inputconfluentcloud.py +585 -0
  40. cribl_control_plane/models/inputcribl.py +165 -0
  41. cribl_control_plane/models/inputcriblhttp.py +341 -0
  42. cribl_control_plane/models/inputcribllakehttp.py +342 -0
  43. cribl_control_plane/models/inputcriblmetrics.py +175 -0
  44. cribl_control_plane/models/inputcribltcp.py +299 -0
  45. cribl_control_plane/models/inputcrowdstrike.py +410 -0
  46. cribl_control_plane/models/inputdatadogagent.py +364 -0
  47. cribl_control_plane/models/inputdatagen.py +180 -0
  48. cribl_control_plane/models/inputedgeprometheus.py +551 -0
  49. cribl_control_plane/models/inputelastic.py +494 -0
  50. cribl_control_plane/models/inputeventhub.py +360 -0
  51. cribl_control_plane/models/inputexec.py +213 -0
  52. cribl_control_plane/models/inputfile.py +259 -0
  53. cribl_control_plane/models/inputfirehose.py +341 -0
  54. cribl_control_plane/models/inputgooglepubsub.py +247 -0
  55. cribl_control_plane/models/inputgrafana_union.py +1247 -0
  56. cribl_control_plane/models/inputhttp.py +403 -0
  57. cribl_control_plane/models/inputhttpraw.py +407 -0
  58. cribl_control_plane/models/inputjournalfiles.py +208 -0
  59. cribl_control_plane/models/inputkafka.py +581 -0
  60. cribl_control_plane/models/inputkinesis.py +363 -0
  61. cribl_control_plane/models/inputkubeevents.py +182 -0
  62. cribl_control_plane/models/inputkubelogs.py +256 -0
  63. cribl_control_plane/models/inputkubemetrics.py +233 -0
  64. cribl_control_plane/models/inputloki.py +468 -0
  65. cribl_control_plane/models/inputmetrics.py +290 -0
  66. cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
  67. cribl_control_plane/models/inputmsk.py +654 -0
  68. cribl_control_plane/models/inputnetflow.py +224 -0
  69. cribl_control_plane/models/inputoffice365mgmt.py +384 -0
  70. cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
  71. cribl_control_plane/models/inputoffice365service.py +377 -0
  72. cribl_control_plane/models/inputopentelemetry.py +516 -0
  73. cribl_control_plane/models/inputprometheus.py +464 -0
  74. cribl_control_plane/models/inputprometheusrw.py +470 -0
  75. cribl_control_plane/models/inputrawudp.py +207 -0
  76. cribl_control_plane/models/inputs3.py +416 -0
  77. cribl_control_plane/models/inputs3inventory.py +440 -0
  78. cribl_control_plane/models/inputsecuritylake.py +425 -0
  79. cribl_control_plane/models/inputsnmp.py +274 -0
  80. cribl_control_plane/models/inputsplunk.py +387 -0
  81. cribl_control_plane/models/inputsplunkhec.py +478 -0
  82. cribl_control_plane/models/inputsplunksearch.py +537 -0
  83. cribl_control_plane/models/inputsqs.py +320 -0
  84. cribl_control_plane/models/inputsyslog_union.py +759 -0
  85. cribl_control_plane/models/inputsystemmetrics.py +533 -0
  86. cribl_control_plane/models/inputsystemstate.py +417 -0
  87. cribl_control_plane/models/inputtcp.py +359 -0
  88. cribl_control_plane/models/inputtcpjson.py +334 -0
  89. cribl_control_plane/models/inputwef.py +498 -0
  90. cribl_control_plane/models/inputwindowsmetrics.py +457 -0
  91. cribl_control_plane/models/inputwineventlogs.py +222 -0
  92. cribl_control_plane/models/inputwiz.py +334 -0
  93. cribl_control_plane/models/inputzscalerhec.py +439 -0
  94. cribl_control_plane/models/listinputop.py +24 -0
  95. cribl_control_plane/models/listoutputop.py +24 -0
  96. cribl_control_plane/models/logininfo.py +16 -0
  97. cribl_control_plane/models/output.py +229 -0
  98. cribl_control_plane/models/outputazureblob.py +471 -0
  99. cribl_control_plane/models/outputazuredataexplorer.py +660 -0
  100. cribl_control_plane/models/outputazureeventhub.py +321 -0
  101. cribl_control_plane/models/outputazurelogs.py +386 -0
  102. cribl_control_plane/models/outputclickhouse.py +650 -0
  103. cribl_control_plane/models/outputcloudwatch.py +273 -0
  104. cribl_control_plane/models/outputconfluentcloud.py +591 -0
  105. cribl_control_plane/models/outputcriblhttp.py +494 -0
  106. cribl_control_plane/models/outputcribllake.py +396 -0
  107. cribl_control_plane/models/outputcribltcp.py +387 -0
  108. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
  109. cribl_control_plane/models/outputdatadog.py +472 -0
  110. cribl_control_plane/models/outputdataset.py +437 -0
  111. cribl_control_plane/models/outputdefault.py +55 -0
  112. cribl_control_plane/models/outputdevnull.py +50 -0
  113. cribl_control_plane/models/outputdiskspool.py +89 -0
  114. cribl_control_plane/models/outputdls3.py +560 -0
  115. cribl_control_plane/models/outputdynatracehttp.py +454 -0
  116. cribl_control_plane/models/outputdynatraceotlp.py +486 -0
  117. cribl_control_plane/models/outputelastic.py +494 -0
  118. cribl_control_plane/models/outputelasticcloud.py +407 -0
  119. cribl_control_plane/models/outputexabeam.py +297 -0
  120. cribl_control_plane/models/outputfilesystem.py +357 -0
  121. cribl_control_plane/models/outputgooglechronicle.py +486 -0
  122. cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
  123. cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
  124. cribl_control_plane/models/outputgooglepubsub.py +274 -0
  125. cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
  126. cribl_control_plane/models/outputgraphite.py +225 -0
  127. cribl_control_plane/models/outputhoneycomb.py +369 -0
  128. cribl_control_plane/models/outputhumiohec.py +389 -0
  129. cribl_control_plane/models/outputinfluxdb.py +523 -0
  130. cribl_control_plane/models/outputkafka.py +581 -0
  131. cribl_control_plane/models/outputkinesis.py +312 -0
  132. cribl_control_plane/models/outputloki.py +425 -0
  133. cribl_control_plane/models/outputminio.py +512 -0
  134. cribl_control_plane/models/outputmsk.py +654 -0
  135. cribl_control_plane/models/outputnetflow.py +80 -0
  136. cribl_control_plane/models/outputnewrelic.py +424 -0
  137. cribl_control_plane/models/outputnewrelicevents.py +401 -0
  138. cribl_control_plane/models/outputopentelemetry.py +669 -0
  139. cribl_control_plane/models/outputprometheus.py +485 -0
  140. cribl_control_plane/models/outputring.py +121 -0
  141. cribl_control_plane/models/outputrouter.py +83 -0
  142. cribl_control_plane/models/outputs3.py +556 -0
  143. cribl_control_plane/models/outputsamplesresponse.py +14 -0
  144. cribl_control_plane/models/outputsecuritylake.py +505 -0
  145. cribl_control_plane/models/outputsentinel.py +488 -0
  146. cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
  147. cribl_control_plane/models/outputservicenow.py +543 -0
  148. cribl_control_plane/models/outputsignalfx.py +369 -0
  149. cribl_control_plane/models/outputsnmp.py +80 -0
  150. cribl_control_plane/models/outputsns.py +274 -0
  151. cribl_control_plane/models/outputsplunk.py +383 -0
  152. cribl_control_plane/models/outputsplunkhec.py +434 -0
  153. cribl_control_plane/models/outputsplunklb.py +558 -0
  154. cribl_control_plane/models/outputsqs.py +328 -0
  155. cribl_control_plane/models/outputstatsd.py +224 -0
  156. cribl_control_plane/models/outputstatsdext.py +225 -0
  157. cribl_control_plane/models/outputsumologic.py +378 -0
  158. cribl_control_plane/models/outputsyslog.py +415 -0
  159. cribl_control_plane/models/outputtcpjson.py +413 -0
  160. cribl_control_plane/models/outputtestrequest.py +15 -0
  161. cribl_control_plane/models/outputtestresponse.py +29 -0
  162. cribl_control_plane/models/outputwavefront.py +369 -0
  163. cribl_control_plane/models/outputwebhook.py +689 -0
  164. cribl_control_plane/models/outputxsiam.py +415 -0
  165. cribl_control_plane/models/schemeclientoauth.py +24 -0
  166. cribl_control_plane/models/security.py +36 -0
  167. cribl_control_plane/models/updatehectokenrequest.py +31 -0
  168. cribl_control_plane/models/updateinputbyidop.py +44 -0
  169. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
  170. cribl_control_plane/models/updateoutputbyidop.py +44 -0
  171. cribl_control_plane/outputs.py +1615 -0
  172. cribl_control_plane/py.typed +1 -0
  173. cribl_control_plane/sdk.py +164 -0
  174. cribl_control_plane/sdkconfiguration.py +36 -0
  175. cribl_control_plane/sources.py +1355 -0
  176. cribl_control_plane/types/__init__.py +21 -0
  177. cribl_control_plane/types/basemodel.py +39 -0
  178. cribl_control_plane/utils/__init__.py +187 -0
  179. cribl_control_plane/utils/annotations.py +55 -0
  180. cribl_control_plane/utils/datetimes.py +23 -0
  181. cribl_control_plane/utils/enums.py +74 -0
  182. cribl_control_plane/utils/eventstreaming.py +238 -0
  183. cribl_control_plane/utils/forms.py +223 -0
  184. cribl_control_plane/utils/headers.py +136 -0
  185. cribl_control_plane/utils/logger.py +27 -0
  186. cribl_control_plane/utils/metadata.py +118 -0
  187. cribl_control_plane/utils/queryparams.py +205 -0
  188. cribl_control_plane/utils/requestbodies.py +66 -0
  189. cribl_control_plane/utils/retries.py +217 -0
  190. cribl_control_plane/utils/security.py +207 -0
  191. cribl_control_plane/utils/serializers.py +249 -0
  192. cribl_control_plane/utils/unmarshal_json_response.py +24 -0
  193. cribl_control_plane/utils/url.py +155 -0
  194. cribl_control_plane/utils/values.py +137 -0
  195. cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
  196. cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
  197. cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
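
The two hunks below reproduce two of the generated model modules. As an aside, the file listing above can be recreated straight from the wheel with the Python standard library alone, since a .whl file is a plain zip archive. A minimal sketch, assuming the wheel has been downloaded to the current directory under its canonical filename:

import zipfile

# A wheel is a zip archive; list every file it contains, with sizes.
with zipfile.ZipFile("cribl_control_plane-0.0.13-py3-none-any.whl") as whl:
    for info in whl.infolist():
        print(f"{info.filename}\t{info.file_size} bytes")
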
cribl_control_plane/models/inputkubelogs.py
@@ -0,0 +1,256 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputKubeLogsType(str, Enum, metaclass=utils.OpenEnumMeta):
+    KUBE_LOGS = "kube_logs"
+
+
+class InputKubeLogsConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputKubeLogsConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputKubeLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputKubeLogsPqCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputKubeLogsPqTypedDict(TypedDict):
+    mode: NotRequired[InputKubeLogsMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputKubeLogsPqCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputKubeLogsPq(BaseModel):
+    mode: Annotated[
+        Optional[InputKubeLogsMode], PlainValidator(validate_open_enum(False))
+    ] = InputKubeLogsMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputKubeLogsPqCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKubeLogsPqCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputKubeLogsRuleTypedDict(TypedDict):
+    filter_: str
+    r"""JavaScript expression applied to Pod objects. Return 'true' to include it."""
+    description: NotRequired[str]
+    r"""Optional description of this rule's purpose"""
+
+
+class InputKubeLogsRule(BaseModel):
+    filter_: Annotated[str, pydantic.Field(alias="filter")]
+    r"""JavaScript expression applied to Pod objects. Return 'true' to include it."""
+
+    description: Optional[str] = None
+    r"""Optional description of this rule's purpose"""
+
+
+class InputKubeLogsMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputKubeLogsMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputKubeLogsPersistenceCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Data compression format. Default is gzip."""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputKubeLogsDiskSpoolingTypedDict(TypedDict):
+    enable: NotRequired[bool]
+    r"""Spool events on disk for Cribl Edge and Search. Default is disabled."""
+    time_window: NotRequired[str]
+    r"""Time period for grouping spooled events. Default is 10m."""
+    max_data_size: NotRequired[str]
+    r"""Maximum disk space that can be consumed before older buckets are deleted. Examples: 420MB, 4GB. Default is 1GB."""
+    max_data_time: NotRequired[str]
+    r"""Maximum amount of time to retain data before older buckets are deleted. Examples: 2h, 4d. Default is 24h."""
+    compress: NotRequired[InputKubeLogsPersistenceCompression]
+    r"""Data compression format. Default is gzip."""
+
+
+class InputKubeLogsDiskSpooling(BaseModel):
+    enable: Optional[bool] = False
+    r"""Spool events on disk for Cribl Edge and Search. Default is disabled."""
+
+    time_window: Annotated[Optional[str], pydantic.Field(alias="timeWindow")] = "10m"
+    r"""Time period for grouping spooled events. Default is 10m."""
+
+    max_data_size: Annotated[Optional[str], pydantic.Field(alias="maxDataSize")] = "1GB"
+    r"""Maximum disk space that can be consumed before older buckets are deleted. Examples: 420MB, 4GB. Default is 1GB."""
+
+    max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
+    r"""Maximum amount of time to retain data before older buckets are deleted. Examples: 2h, 4d. Default is 24h."""
+
+    compress: Annotated[
+        Optional[InputKubeLogsPersistenceCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = InputKubeLogsPersistenceCompression.GZIP
+    r"""Data compression format. Default is gzip."""
+
+
+class InputKubeLogsTypedDict(TypedDict):
+    id: str
+    r"""Unique ID for this input"""
+    type: InputKubeLogsType
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+    send_to_routes: NotRequired[bool]
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputKubeLogsConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputKubeLogsPqTypedDict]
+    interval: NotRequired[float]
+    r"""Time, in seconds, between checks for new containers. Default is 15 secs."""
+    rules: NotRequired[List[InputKubeLogsRuleTypedDict]]
+    r"""Add rules to decide which Pods to collect logs from. Logs are collected if no rules are given or if all the rules' expressions evaluate to true."""
+    timestamps: NotRequired[bool]
+    r"""For use when containers do not emit a timestamp, prefix each line of output with a timestamp. If you enable this setting, you can use the Kubernetes Logs Event Breaker and the kubernetes_logs Pre-processing Pipeline to remove them from the events after the timestamps are extracted."""
+    metadata: NotRequired[List[InputKubeLogsMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    persistence: NotRequired[InputKubeLogsDiskSpoolingTypedDict]
+    breaker_rulesets: NotRequired[List[str]]
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+    stale_channel_flush_ms: NotRequired[float]
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+    enable_load_balancing: NotRequired[bool]
+    r"""Load balance traffic across all Worker Processes"""
+    description: NotRequired[str]
+
+
+class InputKubeLogs(BaseModel):
+    id: str
+    r"""Unique ID for this input"""
+
+    type: Annotated[InputKubeLogsType, PlainValidator(validate_open_enum(False))]
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputKubeLogsConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputKubeLogsPq] = None
+
+    interval: Optional[float] = 15
+    r"""Time, in seconds, between checks for new containers. Default is 15 secs."""
+
+    rules: Optional[List[InputKubeLogsRule]] = None
+    r"""Add rules to decide which Pods to collect logs from. Logs are collected if no rules are given or if all the rules' expressions evaluate to true."""
+
+    timestamps: Optional[bool] = False
+    r"""For use when containers do not emit a timestamp, prefix each line of output with a timestamp. If you enable this setting, you can use the Kubernetes Logs Event Breaker and the kubernetes_logs Pre-processing Pipeline to remove them from the events after the timestamps are extracted."""
+
+    metadata: Optional[List[InputKubeLogsMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    persistence: Optional[InputKubeLogsDiskSpooling] = None
+
+    breaker_rulesets: Annotated[
+        Optional[List[str]], pydantic.Field(alias="breakerRulesets")
+    ] = None
+    r"""A list of event-breaking rulesets that will be applied, in order, to the input data stream"""
+
+    stale_channel_flush_ms: Annotated[
+        Optional[float], pydantic.Field(alias="staleChannelFlushMs")
+    ] = 10000
+    r"""How long (in milliseconds) the Event Breaker will wait for new data to be sent to a specific channel before flushing the data stream out, as is, to the Pipelines"""
+
+    enable_load_balancing: Annotated[
+        Optional[bool], pydantic.Field(alias="enableLoadBalancing")
+    ] = False
+    r"""Load balance traffic across all Worker Processes"""
+
+    description: Optional[str] = None
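
Since these are ordinary pydantic v2 models, the hunk above is enough to construct and serialize a Kubernetes Logs Source config. A minimal sketch, assuming cribl_control_plane.models re-exports these classes (plausible, given the +7,305-line models/__init__.py in the file list); the field names, camel-case aliases, and defaults all come directly from the diff:

# Hedged sketch: build an InputKubeLogs config and dump it with the
# camel-case aliases the API expects. Assumes the models package
# re-exports the classes defined in the hunk above.
from cribl_control_plane.models import (
    InputKubeLogs,
    InputKubeLogsMode,
    InputKubeLogsPq,
    InputKubeLogsType,
)

src = InputKubeLogs(
    id="kube-logs-main",
    type=InputKubeLogsType.KUBE_LOGS,
    # Alias keyword (maxBufferSize) matches pydantic.Field(alias=...) above.
    pq=InputKubeLogsPq(mode=InputKubeLogsMode.SMART, maxBufferSize=2000),
)

payload = src.model_dump(by_alias=True, exclude_none=True)
assert payload["sendToRoutes"] is True        # model default, dumped by alias
assert payload["pq"]["maxBufferSize"] == 2000

Note the construction uses the camel-case alias keywords; whether the snake_case attribute names are also accepted at init depends on the populate_by_name setting of the shared BaseModel in types/basemodel.py, which this diff does not show.
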
cribl_control_plane/models/inputkubemetrics.py
@@ -0,0 +1,233 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class InputKubeMetricsType(str, Enum, metaclass=utils.OpenEnumMeta):
+    KUBE_METRICS = "kube_metrics"
+
+
+class InputKubeMetricsConnectionTypedDict(TypedDict):
+    output: str
+    pipeline: NotRequired[str]
+
+
+class InputKubeMetricsConnection(BaseModel):
+    output: str
+
+    pipeline: Optional[str] = None
+
+
+class InputKubeMetricsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    SMART = "smart"
+    ALWAYS = "always"
+
+
+class InputKubeMetricsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputKubeMetricsPqTypedDict(TypedDict):
+    mode: NotRequired[InputKubeMetricsMode]
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+    max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    commit_frequency: NotRequired[float]
+    r"""The number of events to send downstream before committing that Stream has read them"""
+    max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+    max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+    compress: NotRequired[InputKubeMetricsCompression]
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputKubeMetricsPq(BaseModel):
+    mode: Annotated[
+        Optional[InputKubeMetricsMode], PlainValidator(validate_open_enum(False))
+    ] = InputKubeMetricsMode.ALWAYS
+    r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
+
+    max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="maxBufferSize")
+    ] = 1000
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    commit_frequency: Annotated[
+        Optional[float], pydantic.Field(alias="commitFrequency")
+    ] = 42
+    r"""The number of events to send downstream before committing that Stream has read them"""
+
+    max_file_size: Annotated[Optional[str], pydantic.Field(alias="maxFileSize")] = (
+        "1 MB"
+    )
+    r"""The maximum size to store in each queue file before closing and optionally compressing. Enter a numeral with units of KB, MB, etc."""
+
+    max_size: Annotated[Optional[str], pydantic.Field(alias="maxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    path: Optional[str] = "$CRIBL_HOME/state/queues"
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/inputs/<input-id>"""
+
+    compress: Annotated[
+        Optional[InputKubeMetricsCompression], PlainValidator(validate_open_enum(False))
+    ] = InputKubeMetricsCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+
+class InputKubeMetricsRuleTypedDict(TypedDict):
+    filter_: str
+    r"""JavaScript expression applied to Kubernetes objects. Return 'true' to include it."""
+    description: NotRequired[str]
+    r"""Optional description of this rule's purpose"""
+
+
+class InputKubeMetricsRule(BaseModel):
+    filter_: Annotated[str, pydantic.Field(alias="filter")]
+    r"""JavaScript expression applied to Kubernetes objects. Return 'true' to include it."""
+
+    description: Optional[str] = None
+    r"""Optional description of this rule's purpose"""
+
+
+class InputKubeMetricsMetadatumTypedDict(TypedDict):
+    name: str
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputKubeMetricsMetadatum(BaseModel):
+    name: str
+
+    value: str
+    r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
+
+
+class InputKubeMetricsDataCompressionFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class InputKubeMetricsPersistenceTypedDict(TypedDict):
+    enable: NotRequired[bool]
+    r"""Spool metrics on disk for Cribl Search"""
+    time_window: NotRequired[str]
+    r"""Time span for each file bucket"""
+    max_data_size: NotRequired[str]
+    r"""Maximum disk space allowed to be consumed (examples: 420MB, 4GB). When limit is reached, older data will be deleted."""
+    max_data_time: NotRequired[str]
+    r"""Maximum amount of time to retain data (examples: 2h, 4d). When limit is reached, older data will be deleted."""
+    compress: NotRequired[InputKubeMetricsDataCompressionFormat]
+    dest_path: NotRequired[str]
+    r"""Path to use to write metrics. Defaults to $CRIBL_HOME/state/<id>"""
+
+
+class InputKubeMetricsPersistence(BaseModel):
+    enable: Optional[bool] = False
+    r"""Spool metrics on disk for Cribl Search"""
+
+    time_window: Annotated[Optional[str], pydantic.Field(alias="timeWindow")] = "10m"
+    r"""Time span for each file bucket"""
+
+    max_data_size: Annotated[Optional[str], pydantic.Field(alias="maxDataSize")] = "1GB"
+    r"""Maximum disk space allowed to be consumed (examples: 420MB, 4GB). When limit is reached, older data will be deleted."""
+
+    max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
+    r"""Maximum amount of time to retain data (examples: 2h, 4d). When limit is reached, older data will be deleted."""
+
+    compress: Annotated[
+        Optional[InputKubeMetricsDataCompressionFormat],
+        PlainValidator(validate_open_enum(False)),
+    ] = InputKubeMetricsDataCompressionFormat.GZIP
+
+    dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = (
+        "$CRIBL_HOME/state/kube_metrics"
+    )
+    r"""Path to use to write metrics. Defaults to $CRIBL_HOME/state/<id>"""
+
+
+class InputKubeMetricsTypedDict(TypedDict):
+    id: str
+    r"""Unique ID for this input"""
+    type: InputKubeMetricsType
+    disabled: NotRequired[bool]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+    send_to_routes: NotRequired[bool]
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    pq_enabled: NotRequired[bool]
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    connections: NotRequired[List[InputKubeMetricsConnectionTypedDict]]
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+    pq: NotRequired[InputKubeMetricsPqTypedDict]
+    interval: NotRequired[float]
+    r"""Time, in seconds, between consecutive metrics collections. Default is 15 secs."""
+    rules: NotRequired[List[InputKubeMetricsRuleTypedDict]]
+    r"""Add rules to decide which Kubernetes objects to generate metrics for. Events are generated if no rules are given or if all the rules' expressions evaluate to true."""
+    metadata: NotRequired[List[InputKubeMetricsMetadatumTypedDict]]
+    r"""Fields to add to events from this input"""
+    persistence: NotRequired[InputKubeMetricsPersistenceTypedDict]
+    description: NotRequired[str]
+
+
+class InputKubeMetrics(BaseModel):
+    id: str
+    r"""Unique ID for this input"""
+
+    type: Annotated[InputKubeMetricsType, PlainValidator(validate_open_enum(False))]
+
+    disabled: Optional[bool] = False
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data from this Source before sending it through the Routes"""
+
+    send_to_routes: Annotated[Optional[bool], pydantic.Field(alias="sendToRoutes")] = (
+        True
+    )
+    r"""Select whether to send data to Routes, or directly to Destinations."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    pq_enabled: Annotated[Optional[bool], pydantic.Field(alias="pqEnabled")] = False
+    r"""Use a disk queue to minimize data loss when connected services block. See [Cribl Docs](https://docs.cribl.io/stream/persistent-queues) for PQ defaults (Cribl-managed Cloud Workers) and configuration options (on-prem and hybrid Workers)."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    connections: Optional[List[InputKubeMetricsConnection]] = None
+    r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
+
+    pq: Optional[InputKubeMetricsPq] = None
+
+    interval: Optional[float] = 15
+    r"""Time, in seconds, between consecutive metrics collections. Default is 15 secs."""
+
+    rules: Optional[List[InputKubeMetricsRule]] = None
+    r"""Add rules to decide which Kubernetes objects to generate metrics for. Events are generated if no rules are given or if all the rules' expressions evaluate to true."""
+
+    metadata: Optional[List[InputKubeMetricsMetadatum]] = None
+    r"""Fields to add to events from this input"""
+
+    persistence: Optional[InputKubeMetricsPersistence] = None
+
+    description: Optional[str] = None
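
The same pattern works in reverse when deserializing an API payload. A minimal sketch, again assuming cribl_control_plane.models re-exports the classes defined above; keys use the camel-case aliases from the hunk:

# Hedged sketch: validate a raw API payload against the generated model.
from cribl_control_plane.models import InputKubeMetrics

raw = {
    "id": "kube-metrics-main",
    "type": "kube_metrics",
    "interval": 30,
    "persistence": {"enable": True, "maxDataSize": "2GB"},
}

m = InputKubeMetrics.model_validate(raw)
assert m.interval == 30
assert m.persistence is not None
assert m.persistence.max_data_size == "2GB"    # alias maxDataSize -> snake_case attr
assert m.persistence.compress.value == "gzip"  # default from the model

One behavioral note, inferred rather than documented here: the OpenEnumMeta metaclass combined with PlainValidator(validate_open_enum(False)) appears to make these enums open, so an unrecognized "type" or "compress" string from a newer server would likely be passed through instead of raising a validation error. Treat that as a possibility to handle, not a guarantee, since the diff does not include the validate_open_enum implementation.
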