cribl-control-plane 0.0.13__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic.

Files changed (197)
  1. cribl_control_plane/__init__.py +17 -0
  2. cribl_control_plane/_hooks/__init__.py +5 -0
  3. cribl_control_plane/_hooks/clientcredentials.py +211 -0
  4. cribl_control_plane/_hooks/registration.py +13 -0
  5. cribl_control_plane/_hooks/sdkhooks.py +81 -0
  6. cribl_control_plane/_hooks/types.py +112 -0
  7. cribl_control_plane/_version.py +15 -0
  8. cribl_control_plane/auth_sdk.py +184 -0
  9. cribl_control_plane/basesdk.py +358 -0
  10. cribl_control_plane/errors/__init__.py +60 -0
  11. cribl_control_plane/errors/apierror.py +38 -0
  12. cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
  13. cribl_control_plane/errors/error.py +24 -0
  14. cribl_control_plane/errors/healthstatus_error.py +38 -0
  15. cribl_control_plane/errors/no_response_error.py +13 -0
  16. cribl_control_plane/errors/responsevalidationerror.py +25 -0
  17. cribl_control_plane/health.py +166 -0
  18. cribl_control_plane/httpclient.py +126 -0
  19. cribl_control_plane/models/__init__.py +7305 -0
  20. cribl_control_plane/models/addhectokenrequest.py +34 -0
  21. cribl_control_plane/models/authtoken.py +13 -0
  22. cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
  23. cribl_control_plane/models/createinputop.py +24 -0
  24. cribl_control_plane/models/createoutputop.py +24 -0
  25. cribl_control_plane/models/createoutputtestbyidop.py +46 -0
  26. cribl_control_plane/models/criblevent.py +14 -0
  27. cribl_control_plane/models/deleteinputbyidop.py +37 -0
  28. cribl_control_plane/models/deleteoutputbyidop.py +37 -0
  29. cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
  30. cribl_control_plane/models/getinputbyidop.py +37 -0
  31. cribl_control_plane/models/getoutputbyidop.py +37 -0
  32. cribl_control_plane/models/getoutputpqbyidop.py +36 -0
  33. cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
  34. cribl_control_plane/models/healthstatus.py +36 -0
  35. cribl_control_plane/models/input.py +199 -0
  36. cribl_control_plane/models/inputappscope.py +448 -0
  37. cribl_control_plane/models/inputazureblob.py +308 -0
  38. cribl_control_plane/models/inputcollection.py +208 -0
  39. cribl_control_plane/models/inputconfluentcloud.py +585 -0
  40. cribl_control_plane/models/inputcribl.py +165 -0
  41. cribl_control_plane/models/inputcriblhttp.py +341 -0
  42. cribl_control_plane/models/inputcribllakehttp.py +342 -0
  43. cribl_control_plane/models/inputcriblmetrics.py +175 -0
  44. cribl_control_plane/models/inputcribltcp.py +299 -0
  45. cribl_control_plane/models/inputcrowdstrike.py +410 -0
  46. cribl_control_plane/models/inputdatadogagent.py +364 -0
  47. cribl_control_plane/models/inputdatagen.py +180 -0
  48. cribl_control_plane/models/inputedgeprometheus.py +551 -0
  49. cribl_control_plane/models/inputelastic.py +494 -0
  50. cribl_control_plane/models/inputeventhub.py +360 -0
  51. cribl_control_plane/models/inputexec.py +213 -0
  52. cribl_control_plane/models/inputfile.py +259 -0
  53. cribl_control_plane/models/inputfirehose.py +341 -0
  54. cribl_control_plane/models/inputgooglepubsub.py +247 -0
  55. cribl_control_plane/models/inputgrafana_union.py +1247 -0
  56. cribl_control_plane/models/inputhttp.py +403 -0
  57. cribl_control_plane/models/inputhttpraw.py +407 -0
  58. cribl_control_plane/models/inputjournalfiles.py +208 -0
  59. cribl_control_plane/models/inputkafka.py +581 -0
  60. cribl_control_plane/models/inputkinesis.py +363 -0
  61. cribl_control_plane/models/inputkubeevents.py +182 -0
  62. cribl_control_plane/models/inputkubelogs.py +256 -0
  63. cribl_control_plane/models/inputkubemetrics.py +233 -0
  64. cribl_control_plane/models/inputloki.py +468 -0
  65. cribl_control_plane/models/inputmetrics.py +290 -0
  66. cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
  67. cribl_control_plane/models/inputmsk.py +654 -0
  68. cribl_control_plane/models/inputnetflow.py +224 -0
  69. cribl_control_plane/models/inputoffice365mgmt.py +384 -0
  70. cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
  71. cribl_control_plane/models/inputoffice365service.py +377 -0
  72. cribl_control_plane/models/inputopentelemetry.py +516 -0
  73. cribl_control_plane/models/inputprometheus.py +464 -0
  74. cribl_control_plane/models/inputprometheusrw.py +470 -0
  75. cribl_control_plane/models/inputrawudp.py +207 -0
  76. cribl_control_plane/models/inputs3.py +416 -0
  77. cribl_control_plane/models/inputs3inventory.py +440 -0
  78. cribl_control_plane/models/inputsecuritylake.py +425 -0
  79. cribl_control_plane/models/inputsnmp.py +274 -0
  80. cribl_control_plane/models/inputsplunk.py +387 -0
  81. cribl_control_plane/models/inputsplunkhec.py +478 -0
  82. cribl_control_plane/models/inputsplunksearch.py +537 -0
  83. cribl_control_plane/models/inputsqs.py +320 -0
  84. cribl_control_plane/models/inputsyslog_union.py +759 -0
  85. cribl_control_plane/models/inputsystemmetrics.py +533 -0
  86. cribl_control_plane/models/inputsystemstate.py +417 -0
  87. cribl_control_plane/models/inputtcp.py +359 -0
  88. cribl_control_plane/models/inputtcpjson.py +334 -0
  89. cribl_control_plane/models/inputwef.py +498 -0
  90. cribl_control_plane/models/inputwindowsmetrics.py +457 -0
  91. cribl_control_plane/models/inputwineventlogs.py +222 -0
  92. cribl_control_plane/models/inputwiz.py +334 -0
  93. cribl_control_plane/models/inputzscalerhec.py +439 -0
  94. cribl_control_plane/models/listinputop.py +24 -0
  95. cribl_control_plane/models/listoutputop.py +24 -0
  96. cribl_control_plane/models/logininfo.py +16 -0
  97. cribl_control_plane/models/output.py +229 -0
  98. cribl_control_plane/models/outputazureblob.py +471 -0
  99. cribl_control_plane/models/outputazuredataexplorer.py +660 -0
  100. cribl_control_plane/models/outputazureeventhub.py +321 -0
  101. cribl_control_plane/models/outputazurelogs.py +386 -0
  102. cribl_control_plane/models/outputclickhouse.py +650 -0
  103. cribl_control_plane/models/outputcloudwatch.py +273 -0
  104. cribl_control_plane/models/outputconfluentcloud.py +591 -0
  105. cribl_control_plane/models/outputcriblhttp.py +494 -0
  106. cribl_control_plane/models/outputcribllake.py +396 -0
  107. cribl_control_plane/models/outputcribltcp.py +387 -0
  108. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
  109. cribl_control_plane/models/outputdatadog.py +472 -0
  110. cribl_control_plane/models/outputdataset.py +437 -0
  111. cribl_control_plane/models/outputdefault.py +55 -0
  112. cribl_control_plane/models/outputdevnull.py +50 -0
  113. cribl_control_plane/models/outputdiskspool.py +89 -0
  114. cribl_control_plane/models/outputdls3.py +560 -0
  115. cribl_control_plane/models/outputdynatracehttp.py +454 -0
  116. cribl_control_plane/models/outputdynatraceotlp.py +486 -0
  117. cribl_control_plane/models/outputelastic.py +494 -0
  118. cribl_control_plane/models/outputelasticcloud.py +407 -0
  119. cribl_control_plane/models/outputexabeam.py +297 -0
  120. cribl_control_plane/models/outputfilesystem.py +357 -0
  121. cribl_control_plane/models/outputgooglechronicle.py +486 -0
  122. cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
  123. cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
  124. cribl_control_plane/models/outputgooglepubsub.py +274 -0
  125. cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
  126. cribl_control_plane/models/outputgraphite.py +225 -0
  127. cribl_control_plane/models/outputhoneycomb.py +369 -0
  128. cribl_control_plane/models/outputhumiohec.py +389 -0
  129. cribl_control_plane/models/outputinfluxdb.py +523 -0
  130. cribl_control_plane/models/outputkafka.py +581 -0
  131. cribl_control_plane/models/outputkinesis.py +312 -0
  132. cribl_control_plane/models/outputloki.py +425 -0
  133. cribl_control_plane/models/outputminio.py +512 -0
  134. cribl_control_plane/models/outputmsk.py +654 -0
  135. cribl_control_plane/models/outputnetflow.py +80 -0
  136. cribl_control_plane/models/outputnewrelic.py +424 -0
  137. cribl_control_plane/models/outputnewrelicevents.py +401 -0
  138. cribl_control_plane/models/outputopentelemetry.py +669 -0
  139. cribl_control_plane/models/outputprometheus.py +485 -0
  140. cribl_control_plane/models/outputring.py +121 -0
  141. cribl_control_plane/models/outputrouter.py +83 -0
  142. cribl_control_plane/models/outputs3.py +556 -0
  143. cribl_control_plane/models/outputsamplesresponse.py +14 -0
  144. cribl_control_plane/models/outputsecuritylake.py +505 -0
  145. cribl_control_plane/models/outputsentinel.py +488 -0
  146. cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
  147. cribl_control_plane/models/outputservicenow.py +543 -0
  148. cribl_control_plane/models/outputsignalfx.py +369 -0
  149. cribl_control_plane/models/outputsnmp.py +80 -0
  150. cribl_control_plane/models/outputsns.py +274 -0
  151. cribl_control_plane/models/outputsplunk.py +383 -0
  152. cribl_control_plane/models/outputsplunkhec.py +434 -0
  153. cribl_control_plane/models/outputsplunklb.py +558 -0
  154. cribl_control_plane/models/outputsqs.py +328 -0
  155. cribl_control_plane/models/outputstatsd.py +224 -0
  156. cribl_control_plane/models/outputstatsdext.py +225 -0
  157. cribl_control_plane/models/outputsumologic.py +378 -0
  158. cribl_control_plane/models/outputsyslog.py +415 -0
  159. cribl_control_plane/models/outputtcpjson.py +413 -0
  160. cribl_control_plane/models/outputtestrequest.py +15 -0
  161. cribl_control_plane/models/outputtestresponse.py +29 -0
  162. cribl_control_plane/models/outputwavefront.py +369 -0
  163. cribl_control_plane/models/outputwebhook.py +689 -0
  164. cribl_control_plane/models/outputxsiam.py +415 -0
  165. cribl_control_plane/models/schemeclientoauth.py +24 -0
  166. cribl_control_plane/models/security.py +36 -0
  167. cribl_control_plane/models/updatehectokenrequest.py +31 -0
  168. cribl_control_plane/models/updateinputbyidop.py +44 -0
  169. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
  170. cribl_control_plane/models/updateoutputbyidop.py +44 -0
  171. cribl_control_plane/outputs.py +1615 -0
  172. cribl_control_plane/py.typed +1 -0
  173. cribl_control_plane/sdk.py +164 -0
  174. cribl_control_plane/sdkconfiguration.py +36 -0
  175. cribl_control_plane/sources.py +1355 -0
  176. cribl_control_plane/types/__init__.py +21 -0
  177. cribl_control_plane/types/basemodel.py +39 -0
  178. cribl_control_plane/utils/__init__.py +187 -0
  179. cribl_control_plane/utils/annotations.py +55 -0
  180. cribl_control_plane/utils/datetimes.py +23 -0
  181. cribl_control_plane/utils/enums.py +74 -0
  182. cribl_control_plane/utils/eventstreaming.py +238 -0
  183. cribl_control_plane/utils/forms.py +223 -0
  184. cribl_control_plane/utils/headers.py +136 -0
  185. cribl_control_plane/utils/logger.py +27 -0
  186. cribl_control_plane/utils/metadata.py +118 -0
  187. cribl_control_plane/utils/queryparams.py +205 -0
  188. cribl_control_plane/utils/requestbodies.py +66 -0
  189. cribl_control_plane/utils/retries.py +217 -0
  190. cribl_control_plane/utils/security.py +207 -0
  191. cribl_control_plane/utils/serializers.py +249 -0
  192. cribl_control_plane/utils/unmarshal_json_response.py +24 -0
  193. cribl_control_plane/utils/url.py +155 -0
  194. cribl_control_plane/utils/values.py +137 -0
  195. cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
  196. cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
  197. cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/outputprometheus.py
@@ -0,0 +1,485 @@

"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from cribl_control_plane import utils
from cribl_control_plane.types import BaseModel
from cribl_control_plane.utils import validate_open_enum
from enum import Enum
import pydantic
from pydantic.functional_validators import PlainValidator
from typing import List, Optional
from typing_extensions import Annotated, NotRequired, TypedDict


class OutputPrometheusType(str, Enum, metaclass=utils.OpenEnumMeta):
    PROMETHEUS = "prometheus"


class OutputPrometheusExtraHTTPHeaderTypedDict(TypedDict):
    value: str
    name: NotRequired[str]


class OutputPrometheusExtraHTTPHeader(BaseModel):
    value: str

    name: Optional[str] = None


class OutputPrometheusFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

    PAYLOAD = "payload"
    PAYLOAD_AND_HEADERS = "payloadAndHeaders"
    NONE = "none"


class OutputPrometheusResponseRetrySettingTypedDict(TypedDict):
    http_status: float
    r"""The HTTP response status code that will trigger retries"""
    initial_backoff: NotRequired[float]
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
    backoff_rate: NotRequired[float]
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
    max_backoff: NotRequired[float]
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputPrometheusResponseRetrySetting(BaseModel):
    http_status: Annotated[float, pydantic.Field(alias="httpStatus")]
    r"""The HTTP response status code that will trigger retries"""

    initial_backoff: Annotated[
        Optional[float], pydantic.Field(alias="initialBackoff")
    ] = 1000
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""

    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""

    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputPrometheusTimeoutRetrySettingsTypedDict(TypedDict):
    timeout_retry: NotRequired[bool]
    initial_backoff: NotRequired[float]
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
    backoff_rate: NotRequired[float]
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
    max_backoff: NotRequired[float]
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputPrometheusTimeoutRetrySettings(BaseModel):
    timeout_retry: Annotated[Optional[bool], pydantic.Field(alias="timeoutRetry")] = (
        False
    )

    initial_backoff: Annotated[
        Optional[float], pydantic.Field(alias="initialBackoff")
    ] = 1000
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""

    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""

    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputPrometheusBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when all receivers are exerting backpressure"""

    BLOCK = "block"
    DROP = "drop"
    QUEUE = "queue"


class OutputPrometheusAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Remote Write authentication type"""

    NONE = "none"
    BASIC = "basic"
    CREDENTIALS_SECRET = "credentialsSecret"
    TOKEN = "token"
    TEXT_SECRET = "textSecret"
    OAUTH = "oauth"


class OutputPrometheusCompression(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Codec to use to compress the persisted data"""

    NONE = "none"
    GZIP = "gzip"


class OutputPrometheusQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    BLOCK = "block"
    DROP = "drop"


class OutputPrometheusMode(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class OutputPrometheusPqControlsTypedDict(TypedDict):
    pass


class OutputPrometheusPqControls(BaseModel):
    pass


class OutputPrometheusOauthParamTypedDict(TypedDict):
    name: str
    r"""OAuth parameter name"""
    value: str
    r"""OAuth parameter value"""


class OutputPrometheusOauthParam(BaseModel):
    name: str
    r"""OAuth parameter name"""

    value: str
    r"""OAuth parameter value"""


class OutputPrometheusOauthHeaderTypedDict(TypedDict):
    name: str
    r"""OAuth header name"""
    value: str
    r"""OAuth header value"""


class OutputPrometheusOauthHeader(BaseModel):
    name: str
    r"""OAuth header name"""

    value: str
    r"""OAuth header value"""


class OutputPrometheusTypedDict(TypedDict):
    type: OutputPrometheusType
    url: str
    r"""The endpoint to send metrics to"""
    id: NotRequired[str]
    r"""Unique ID for this output"""
    pipeline: NotRequired[str]
    r"""Pipeline to process data before sending out to this output"""
    system_fields: NotRequired[List[str]]
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards. These fields are added as dimensions to generated metrics."""
    environment: NotRequired[str]
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
    streamtags: NotRequired[List[str]]
    r"""Tags for filtering and grouping in @{product}"""
    metric_rename_expr: NotRequired[str]
    r"""JavaScript expression that can be used to rename metrics. For example, name.replace(/\./g, '_') will replace all '.' characters in a metric's name with the supported '_' character. Use the 'name' global variable to access the metric's name. You can access event fields' values via __e.<fieldName>."""
    send_metadata: NotRequired[bool]
    r"""Generate and send metadata (`type` and `metricFamilyName`) requests"""
    concurrency: NotRequired[float]
    r"""Maximum number of ongoing requests before blocking"""
    max_payload_size_kb: NotRequired[float]
    r"""Maximum size, in KB, of the request body"""
    max_payload_events: NotRequired[float]
    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
    reject_unauthorized: NotRequired[bool]
    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
    Enabled by default. When this setting is also present in TLS Settings (Client Side),
    that value will take precedence.
    """
    timeout_sec: NotRequired[float]
    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
    flush_period_sec: NotRequired[float]
    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
    extra_http_headers: NotRequired[List[OutputPrometheusExtraHTTPHeaderTypedDict]]
    r"""Headers to add to all events"""
    use_round_robin_dns: NotRequired[bool]
    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
    failed_request_logging_mode: NotRequired[OutputPrometheusFailedRequestLoggingMode]
    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
    safe_headers: NotRequired[List[str]]
    r"""List of headers that are safe to log in plain text"""
    response_retry_settings: NotRequired[
        List[OutputPrometheusResponseRetrySettingTypedDict]
    ]
    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
    timeout_retry_settings: NotRequired[OutputPrometheusTimeoutRetrySettingsTypedDict]
    response_honor_retry_after_header: NotRequired[bool]
    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
    on_backpressure: NotRequired[OutputPrometheusBackpressureBehavior]
    r"""How to handle events when all receivers are exerting backpressure"""
    auth_type: NotRequired[OutputPrometheusAuthenticationType]
    r"""Remote Write authentication type"""
    description: NotRequired[str]
    metrics_flush_period_sec: NotRequired[float]
    r"""How frequently metrics metadata is sent out. Value cannot be smaller than the base Flush period set above."""
    pq_max_file_size: NotRequired[str]
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
    pq_max_size: NotRequired[str]
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
    pq_path: NotRequired[str]
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
    pq_compress: NotRequired[OutputPrometheusCompression]
    r"""Codec to use to compress the persisted data"""
    pq_on_backpressure: NotRequired[OutputPrometheusQueueFullBehavior]
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
    pq_mode: NotRequired[OutputPrometheusMode]
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
    pq_controls: NotRequired[OutputPrometheusPqControlsTypedDict]
    username: NotRequired[str]
    password: NotRequired[str]
    token: NotRequired[str]
    r"""Bearer token to include in the authorization header"""
    credentials_secret: NotRequired[str]
    r"""Select or create a secret that references your credentials"""
    text_secret: NotRequired[str]
    r"""Select or create a stored text secret"""
    login_url: NotRequired[str]
    r"""URL for OAuth"""
    secret_param_name: NotRequired[str]
    r"""Secret parameter name to pass in request body"""
    secret: NotRequired[str]
    r"""Secret parameter value to pass in request body"""
    token_attribute_name: NotRequired[str]
    r"""Name of the auth token attribute in the OAuth response. Can be top-level (e.g., 'token'); or nested, using a period (e.g., 'data.token')."""
    auth_header_expr: NotRequired[str]
    r"""JavaScript expression to compute the Authorization header value to pass in requests. The value `${token}` is used to reference the token obtained from authentication, e.g.: `Bearer ${token}`."""
    token_timeout_secs: NotRequired[float]
    r"""How often the OAuth token should be refreshed."""
    oauth_params: NotRequired[List[OutputPrometheusOauthParamTypedDict]]
    r"""Additional parameters to send in the OAuth login request. @{product} will combine the secret with these parameters, and will send the URL-encoded result in a POST request to the endpoint specified in the 'Login URL'. We'll automatically add the content-type header 'application/x-www-form-urlencoded' when sending this request."""
    oauth_headers: NotRequired[List[OutputPrometheusOauthHeaderTypedDict]]
    r"""Additional headers to send in the OAuth login request. @{product} will automatically add the content-type header 'application/x-www-form-urlencoded' when sending this request."""


class OutputPrometheus(BaseModel):
    type: Annotated[OutputPrometheusType, PlainValidator(validate_open_enum(False))]

    url: str
    r"""The endpoint to send metrics to"""

    id: Optional[str] = None
    r"""Unique ID for this output"""

    pipeline: Optional[str] = None
    r"""Pipeline to process data before sending out to this output"""

    system_fields: Annotated[
        Optional[List[str]], pydantic.Field(alias="systemFields")
    ] = None
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards. These fields are added as dimensions to generated metrics."""

    environment: Optional[str] = None
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""

    streamtags: Optional[List[str]] = None
    r"""Tags for filtering and grouping in @{product}"""

    metric_rename_expr: Annotated[
        Optional[str], pydantic.Field(alias="metricRenameExpr")
    ] = "name.replace(/[^a-zA-Z0-9_]/g, '_')"
    r"""JavaScript expression that can be used to rename metrics. For example, name.replace(/\./g, '_') will replace all '.' characters in a metric's name with the supported '_' character. Use the 'name' global variable to access the metric's name. You can access event fields' values via __e.<fieldName>."""

    send_metadata: Annotated[Optional[bool], pydantic.Field(alias="sendMetadata")] = (
        True
    )
    r"""Generate and send metadata (`type` and `metricFamilyName`) requests"""

    concurrency: Optional[float] = 5
    r"""Maximum number of ongoing requests before blocking"""

    max_payload_size_kb: Annotated[
        Optional[float], pydantic.Field(alias="maxPayloadSizeKB")
    ] = 4096
    r"""Maximum size, in KB, of the request body"""

    max_payload_events: Annotated[
        Optional[float], pydantic.Field(alias="maxPayloadEvents")
    ] = 0
    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = True
    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
    Enabled by default. When this setting is also present in TLS Settings (Client Side),
    that value will take precedence.
    """

    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 30
    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""

    flush_period_sec: Annotated[
        Optional[float], pydantic.Field(alias="flushPeriodSec")
    ] = 1
    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""

    extra_http_headers: Annotated[
        Optional[List[OutputPrometheusExtraHTTPHeader]],
        pydantic.Field(alias="extraHttpHeaders"),
    ] = None
    r"""Headers to add to all events"""

    use_round_robin_dns: Annotated[
        Optional[bool], pydantic.Field(alias="useRoundRobinDns")
    ] = False
    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

    failed_request_logging_mode: Annotated[
        Annotated[
            Optional[OutputPrometheusFailedRequestLoggingMode],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="failedRequestLoggingMode"),
    ] = OutputPrometheusFailedRequestLoggingMode.NONE
    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

    safe_headers: Annotated[
        Optional[List[str]], pydantic.Field(alias="safeHeaders")
    ] = None
    r"""List of headers that are safe to log in plain text"""

    response_retry_settings: Annotated[
        Optional[List[OutputPrometheusResponseRetrySetting]],
        pydantic.Field(alias="responseRetrySettings"),
    ] = None
    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""

    timeout_retry_settings: Annotated[
        Optional[OutputPrometheusTimeoutRetrySettings],
        pydantic.Field(alias="timeoutRetrySettings"),
    ] = None

    response_honor_retry_after_header: Annotated[
        Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
    ] = False
    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

    on_backpressure: Annotated[
        Annotated[
            Optional[OutputPrometheusBackpressureBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="onBackpressure"),
    ] = OutputPrometheusBackpressureBehavior.BLOCK
    r"""How to handle events when all receivers are exerting backpressure"""

    auth_type: Annotated[
        Annotated[
            Optional[OutputPrometheusAuthenticationType],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="authType"),
    ] = OutputPrometheusAuthenticationType.NONE
    r"""Remote Write authentication type"""

    description: Optional[str] = None

    metrics_flush_period_sec: Annotated[
        Optional[float], pydantic.Field(alias="metricsFlushPeriodSec")
    ] = 60
    r"""How frequently metrics metadata is sent out. Value cannot be smaller than the base Flush period set above."""

    pq_max_file_size: Annotated[
        Optional[str], pydantic.Field(alias="pqMaxFileSize")
    ] = "1 MB"
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""

    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""

    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
        "$CRIBL_HOME/state/queues"
    )
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

    pq_compress: Annotated[
        Annotated[
            Optional[OutputPrometheusCompression],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqCompress"),
    ] = OutputPrometheusCompression.NONE
    r"""Codec to use to compress the persisted data"""

    pq_on_backpressure: Annotated[
        Annotated[
            Optional[OutputPrometheusQueueFullBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqOnBackpressure"),
    ] = OutputPrometheusQueueFullBehavior.BLOCK
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    pq_mode: Annotated[
        Annotated[
            Optional[OutputPrometheusMode], PlainValidator(validate_open_enum(False))
        ],
        pydantic.Field(alias="pqMode"),
    ] = OutputPrometheusMode.ERROR
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    pq_controls: Annotated[
        Optional[OutputPrometheusPqControls], pydantic.Field(alias="pqControls")
    ] = None

    username: Optional[str] = None

    password: Optional[str] = None

    token: Optional[str] = None
    r"""Bearer token to include in the authorization header"""

    credentials_secret: Annotated[
        Optional[str], pydantic.Field(alias="credentialsSecret")
    ] = None
    r"""Select or create a secret that references your credentials"""

    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
    r"""Select or create a stored text secret"""

    login_url: Annotated[Optional[str], pydantic.Field(alias="loginUrl")] = None
    r"""URL for OAuth"""

    secret_param_name: Annotated[
        Optional[str], pydantic.Field(alias="secretParamName")
    ] = None
    r"""Secret parameter name to pass in request body"""

    secret: Optional[str] = None
    r"""Secret parameter value to pass in request body"""

    token_attribute_name: Annotated[
        Optional[str], pydantic.Field(alias="tokenAttributeName")
    ] = None
    r"""Name of the auth token attribute in the OAuth response. Can be top-level (e.g., 'token'); or nested, using a period (e.g., 'data.token')."""

    auth_header_expr: Annotated[
        Optional[str], pydantic.Field(alias="authHeaderExpr")
    ] = "`Bearer ${token}`"
    r"""JavaScript expression to compute the Authorization header value to pass in requests. The value `${token}` is used to reference the token obtained from authentication, e.g.: `Bearer ${token}`."""

    token_timeout_secs: Annotated[
        Optional[float], pydantic.Field(alias="tokenTimeoutSecs")
    ] = 3600
    r"""How often the OAuth token should be refreshed."""

    oauth_params: Annotated[
        Optional[List[OutputPrometheusOauthParam]], pydantic.Field(alias="oauthParams")
    ] = None
    r"""Additional parameters to send in the OAuth login request. @{product} will combine the secret with these parameters, and will send the URL-encoded result in a POST request to the endpoint specified in the 'Login URL'. We'll automatically add the content-type header 'application/x-www-form-urlencoded' when sending this request."""

    oauth_headers: Annotated[
        Optional[List[OutputPrometheusOauthHeader]],
        pydantic.Field(alias="oauthHeaders"),
    ] = None
    r"""Additional headers to send in the OAuth login request. @{product} will automatically add the content-type header 'application/x-www-form-urlencoded' when sending this request."""
cribl_control_plane/models/outputring.py
@@ -0,0 +1,121 @@

"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from cribl_control_plane import utils
from cribl_control_plane.types import BaseModel
from cribl_control_plane.utils import validate_open_enum
from enum import Enum
import pydantic
from pydantic.functional_validators import PlainValidator
from typing import List, Optional
from typing_extensions import Annotated, NotRequired, TypedDict


class OutputRingType(str, Enum, metaclass=utils.OpenEnumMeta):
    RING = "ring"


class OutputRingDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Format of the output data."""

    JSON = "json"
    RAW = "raw"


class OutputRingDataCompressionFormat(str, Enum, metaclass=utils.OpenEnumMeta):
    NONE = "none"
    GZIP = "gzip"


class OutputRingBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when all receivers are exerting backpressure"""

    BLOCK = "block"
    DROP = "drop"


class OutputRingTypedDict(TypedDict):
    id: str
    r"""Unique ID for this output"""
    type: OutputRingType
    pipeline: NotRequired[str]
    r"""Pipeline to process data before sending out to this output"""
    system_fields: NotRequired[List[str]]
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
    environment: NotRequired[str]
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
    streamtags: NotRequired[List[str]]
    r"""Tags for filtering and grouping in @{product}"""
    format_: NotRequired[OutputRingDataFormat]
    r"""Format of the output data."""
    partition_expr: NotRequired[str]
    r"""JS expression to define how files are partitioned and organized. If left blank, Cribl Stream will fallback on event.__partition."""
    max_data_size: NotRequired[str]
    r"""Maximum disk space allowed to be consumed (examples: 420MB, 4GB). When limit is reached, older data will be deleted."""
    max_data_time: NotRequired[str]
    r"""Maximum amount of time to retain data (examples: 2h, 4d). When limit is reached, older data will be deleted."""
    compress: NotRequired[OutputRingDataCompressionFormat]
    dest_path: NotRequired[str]
    r"""Path to use to write metrics. Defaults to $CRIBL_HOME/state/<id>"""
    on_backpressure: NotRequired[OutputRingBackpressureBehavior]
    r"""How to handle events when all receivers are exerting backpressure"""
    description: NotRequired[str]


class OutputRing(BaseModel):
    id: str
    r"""Unique ID for this output"""

    type: Annotated[OutputRingType, PlainValidator(validate_open_enum(False))]

    pipeline: Optional[str] = None
    r"""Pipeline to process data before sending out to this output"""

    system_fields: Annotated[
        Optional[List[str]], pydantic.Field(alias="systemFields")
    ] = None
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""

    environment: Optional[str] = None
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""

    streamtags: Optional[List[str]] = None
    r"""Tags for filtering and grouping in @{product}"""

    format_: Annotated[
        Annotated[
            Optional[OutputRingDataFormat], PlainValidator(validate_open_enum(False))
        ],
        pydantic.Field(alias="format"),
    ] = OutputRingDataFormat.JSON
    r"""Format of the output data."""

    partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (
        None
    )
    r"""JS expression to define how files are partitioned and organized. If left blank, Cribl Stream will fallback on event.__partition."""

    max_data_size: Annotated[Optional[str], pydantic.Field(alias="maxDataSize")] = "1GB"
    r"""Maximum disk space allowed to be consumed (examples: 420MB, 4GB). When limit is reached, older data will be deleted."""

    max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
    r"""Maximum amount of time to retain data (examples: 2h, 4d). When limit is reached, older data will be deleted."""

    compress: Annotated[
        Optional[OutputRingDataCompressionFormat],
        PlainValidator(validate_open_enum(False)),
    ] = OutputRingDataCompressionFormat.GZIP

    dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = None
    r"""Path to use to write metrics. Defaults to $CRIBL_HOME/state/<id>"""

    on_backpressure: Annotated[
        Annotated[
            Optional[OutputRingBackpressureBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="onBackpressure"),
    ] = OutputRingBackpressureBehavior.BLOCK
    r"""How to handle events when all receivers are exerting backpressure"""

    description: Optional[str] = None
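For completeness, the same pattern applied to the ring-buffer destination model; a short, hypothetical sketch under the same pydantic v2 assumptions as the OutputPrometheus example above.

# Hypothetical usage sketch; assumes pydantic v2 semantics for the generated BaseModel.
from cribl_control_plane.models.outputring import OutputRing, OutputRingDataFormat

ring = OutputRing.model_validate({"id": "default-ring", "type": "ring"})

# Generated defaults: JSON format, 1GB size cap, 24h retention.
assert ring.format_ == OutputRingDataFormat.JSON
assert ring.max_data_size == "1GB" and ring.max_data_time == "24h"

# format_ is written back out under its alias "format"; likewise maxDataSize, destPath, etc.
print(ring.model_dump(by_alias=True, exclude_none=True))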