cribl_control_plane-0.0.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane has been flagged as potentially problematic.

Files changed (197)
  1. cribl_control_plane/__init__.py +17 -0
  2. cribl_control_plane/_hooks/__init__.py +5 -0
  3. cribl_control_plane/_hooks/clientcredentials.py +211 -0
  4. cribl_control_plane/_hooks/registration.py +13 -0
  5. cribl_control_plane/_hooks/sdkhooks.py +81 -0
  6. cribl_control_plane/_hooks/types.py +112 -0
  7. cribl_control_plane/_version.py +15 -0
  8. cribl_control_plane/auth_sdk.py +184 -0
  9. cribl_control_plane/basesdk.py +358 -0
  10. cribl_control_plane/errors/__init__.py +60 -0
  11. cribl_control_plane/errors/apierror.py +38 -0
  12. cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
  13. cribl_control_plane/errors/error.py +24 -0
  14. cribl_control_plane/errors/healthstatus_error.py +38 -0
  15. cribl_control_plane/errors/no_response_error.py +13 -0
  16. cribl_control_plane/errors/responsevalidationerror.py +25 -0
  17. cribl_control_plane/health.py +166 -0
  18. cribl_control_plane/httpclient.py +126 -0
  19. cribl_control_plane/models/__init__.py +7305 -0
  20. cribl_control_plane/models/addhectokenrequest.py +34 -0
  21. cribl_control_plane/models/authtoken.py +13 -0
  22. cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
  23. cribl_control_plane/models/createinputop.py +24 -0
  24. cribl_control_plane/models/createoutputop.py +24 -0
  25. cribl_control_plane/models/createoutputtestbyidop.py +46 -0
  26. cribl_control_plane/models/criblevent.py +14 -0
  27. cribl_control_plane/models/deleteinputbyidop.py +37 -0
  28. cribl_control_plane/models/deleteoutputbyidop.py +37 -0
  29. cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
  30. cribl_control_plane/models/getinputbyidop.py +37 -0
  31. cribl_control_plane/models/getoutputbyidop.py +37 -0
  32. cribl_control_plane/models/getoutputpqbyidop.py +36 -0
  33. cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
  34. cribl_control_plane/models/healthstatus.py +36 -0
  35. cribl_control_plane/models/input.py +199 -0
  36. cribl_control_plane/models/inputappscope.py +448 -0
  37. cribl_control_plane/models/inputazureblob.py +308 -0
  38. cribl_control_plane/models/inputcollection.py +208 -0
  39. cribl_control_plane/models/inputconfluentcloud.py +585 -0
  40. cribl_control_plane/models/inputcribl.py +165 -0
  41. cribl_control_plane/models/inputcriblhttp.py +341 -0
  42. cribl_control_plane/models/inputcribllakehttp.py +342 -0
  43. cribl_control_plane/models/inputcriblmetrics.py +175 -0
  44. cribl_control_plane/models/inputcribltcp.py +299 -0
  45. cribl_control_plane/models/inputcrowdstrike.py +410 -0
  46. cribl_control_plane/models/inputdatadogagent.py +364 -0
  47. cribl_control_plane/models/inputdatagen.py +180 -0
  48. cribl_control_plane/models/inputedgeprometheus.py +551 -0
  49. cribl_control_plane/models/inputelastic.py +494 -0
  50. cribl_control_plane/models/inputeventhub.py +360 -0
  51. cribl_control_plane/models/inputexec.py +213 -0
  52. cribl_control_plane/models/inputfile.py +259 -0
  53. cribl_control_plane/models/inputfirehose.py +341 -0
  54. cribl_control_plane/models/inputgooglepubsub.py +247 -0
  55. cribl_control_plane/models/inputgrafana_union.py +1247 -0
  56. cribl_control_plane/models/inputhttp.py +403 -0
  57. cribl_control_plane/models/inputhttpraw.py +407 -0
  58. cribl_control_plane/models/inputjournalfiles.py +208 -0
  59. cribl_control_plane/models/inputkafka.py +581 -0
  60. cribl_control_plane/models/inputkinesis.py +363 -0
  61. cribl_control_plane/models/inputkubeevents.py +182 -0
  62. cribl_control_plane/models/inputkubelogs.py +256 -0
  63. cribl_control_plane/models/inputkubemetrics.py +233 -0
  64. cribl_control_plane/models/inputloki.py +468 -0
  65. cribl_control_plane/models/inputmetrics.py +290 -0
  66. cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
  67. cribl_control_plane/models/inputmsk.py +654 -0
  68. cribl_control_plane/models/inputnetflow.py +224 -0
  69. cribl_control_plane/models/inputoffice365mgmt.py +384 -0
  70. cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
  71. cribl_control_plane/models/inputoffice365service.py +377 -0
  72. cribl_control_plane/models/inputopentelemetry.py +516 -0
  73. cribl_control_plane/models/inputprometheus.py +464 -0
  74. cribl_control_plane/models/inputprometheusrw.py +470 -0
  75. cribl_control_plane/models/inputrawudp.py +207 -0
  76. cribl_control_plane/models/inputs3.py +416 -0
  77. cribl_control_plane/models/inputs3inventory.py +440 -0
  78. cribl_control_plane/models/inputsecuritylake.py +425 -0
  79. cribl_control_plane/models/inputsnmp.py +274 -0
  80. cribl_control_plane/models/inputsplunk.py +387 -0
  81. cribl_control_plane/models/inputsplunkhec.py +478 -0
  82. cribl_control_plane/models/inputsplunksearch.py +537 -0
  83. cribl_control_plane/models/inputsqs.py +320 -0
  84. cribl_control_plane/models/inputsyslog_union.py +759 -0
  85. cribl_control_plane/models/inputsystemmetrics.py +533 -0
  86. cribl_control_plane/models/inputsystemstate.py +417 -0
  87. cribl_control_plane/models/inputtcp.py +359 -0
  88. cribl_control_plane/models/inputtcpjson.py +334 -0
  89. cribl_control_plane/models/inputwef.py +498 -0
  90. cribl_control_plane/models/inputwindowsmetrics.py +457 -0
  91. cribl_control_plane/models/inputwineventlogs.py +222 -0
  92. cribl_control_plane/models/inputwiz.py +334 -0
  93. cribl_control_plane/models/inputzscalerhec.py +439 -0
  94. cribl_control_plane/models/listinputop.py +24 -0
  95. cribl_control_plane/models/listoutputop.py +24 -0
  96. cribl_control_plane/models/logininfo.py +16 -0
  97. cribl_control_plane/models/output.py +229 -0
  98. cribl_control_plane/models/outputazureblob.py +471 -0
  99. cribl_control_plane/models/outputazuredataexplorer.py +660 -0
  100. cribl_control_plane/models/outputazureeventhub.py +321 -0
  101. cribl_control_plane/models/outputazurelogs.py +386 -0
  102. cribl_control_plane/models/outputclickhouse.py +650 -0
  103. cribl_control_plane/models/outputcloudwatch.py +273 -0
  104. cribl_control_plane/models/outputconfluentcloud.py +591 -0
  105. cribl_control_plane/models/outputcriblhttp.py +494 -0
  106. cribl_control_plane/models/outputcribllake.py +396 -0
  107. cribl_control_plane/models/outputcribltcp.py +387 -0
  108. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
  109. cribl_control_plane/models/outputdatadog.py +472 -0
  110. cribl_control_plane/models/outputdataset.py +437 -0
  111. cribl_control_plane/models/outputdefault.py +55 -0
  112. cribl_control_plane/models/outputdevnull.py +50 -0
  113. cribl_control_plane/models/outputdiskspool.py +89 -0
  114. cribl_control_plane/models/outputdls3.py +560 -0
  115. cribl_control_plane/models/outputdynatracehttp.py +454 -0
  116. cribl_control_plane/models/outputdynatraceotlp.py +486 -0
  117. cribl_control_plane/models/outputelastic.py +494 -0
  118. cribl_control_plane/models/outputelasticcloud.py +407 -0
  119. cribl_control_plane/models/outputexabeam.py +297 -0
  120. cribl_control_plane/models/outputfilesystem.py +357 -0
  121. cribl_control_plane/models/outputgooglechronicle.py +486 -0
  122. cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
  123. cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
  124. cribl_control_plane/models/outputgooglepubsub.py +274 -0
  125. cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
  126. cribl_control_plane/models/outputgraphite.py +225 -0
  127. cribl_control_plane/models/outputhoneycomb.py +369 -0
  128. cribl_control_plane/models/outputhumiohec.py +389 -0
  129. cribl_control_plane/models/outputinfluxdb.py +523 -0
  130. cribl_control_plane/models/outputkafka.py +581 -0
  131. cribl_control_plane/models/outputkinesis.py +312 -0
  132. cribl_control_plane/models/outputloki.py +425 -0
  133. cribl_control_plane/models/outputminio.py +512 -0
  134. cribl_control_plane/models/outputmsk.py +654 -0
  135. cribl_control_plane/models/outputnetflow.py +80 -0
  136. cribl_control_plane/models/outputnewrelic.py +424 -0
  137. cribl_control_plane/models/outputnewrelicevents.py +401 -0
  138. cribl_control_plane/models/outputopentelemetry.py +669 -0
  139. cribl_control_plane/models/outputprometheus.py +485 -0
  140. cribl_control_plane/models/outputring.py +121 -0
  141. cribl_control_plane/models/outputrouter.py +83 -0
  142. cribl_control_plane/models/outputs3.py +556 -0
  143. cribl_control_plane/models/outputsamplesresponse.py +14 -0
  144. cribl_control_plane/models/outputsecuritylake.py +505 -0
  145. cribl_control_plane/models/outputsentinel.py +488 -0
  146. cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
  147. cribl_control_plane/models/outputservicenow.py +543 -0
  148. cribl_control_plane/models/outputsignalfx.py +369 -0
  149. cribl_control_plane/models/outputsnmp.py +80 -0
  150. cribl_control_plane/models/outputsns.py +274 -0
  151. cribl_control_plane/models/outputsplunk.py +383 -0
  152. cribl_control_plane/models/outputsplunkhec.py +434 -0
  153. cribl_control_plane/models/outputsplunklb.py +558 -0
  154. cribl_control_plane/models/outputsqs.py +328 -0
  155. cribl_control_plane/models/outputstatsd.py +224 -0
  156. cribl_control_plane/models/outputstatsdext.py +225 -0
  157. cribl_control_plane/models/outputsumologic.py +378 -0
  158. cribl_control_plane/models/outputsyslog.py +415 -0
  159. cribl_control_plane/models/outputtcpjson.py +413 -0
  160. cribl_control_plane/models/outputtestrequest.py +15 -0
  161. cribl_control_plane/models/outputtestresponse.py +29 -0
  162. cribl_control_plane/models/outputwavefront.py +369 -0
  163. cribl_control_plane/models/outputwebhook.py +689 -0
  164. cribl_control_plane/models/outputxsiam.py +415 -0
  165. cribl_control_plane/models/schemeclientoauth.py +24 -0
  166. cribl_control_plane/models/security.py +36 -0
  167. cribl_control_plane/models/updatehectokenrequest.py +31 -0
  168. cribl_control_plane/models/updateinputbyidop.py +44 -0
  169. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
  170. cribl_control_plane/models/updateoutputbyidop.py +44 -0
  171. cribl_control_plane/outputs.py +1615 -0
  172. cribl_control_plane/py.typed +1 -0
  173. cribl_control_plane/sdk.py +164 -0
  174. cribl_control_plane/sdkconfiguration.py +36 -0
  175. cribl_control_plane/sources.py +1355 -0
  176. cribl_control_plane/types/__init__.py +21 -0
  177. cribl_control_plane/types/basemodel.py +39 -0
  178. cribl_control_plane/utils/__init__.py +187 -0
  179. cribl_control_plane/utils/annotations.py +55 -0
  180. cribl_control_plane/utils/datetimes.py +23 -0
  181. cribl_control_plane/utils/enums.py +74 -0
  182. cribl_control_plane/utils/eventstreaming.py +238 -0
  183. cribl_control_plane/utils/forms.py +223 -0
  184. cribl_control_plane/utils/headers.py +136 -0
  185. cribl_control_plane/utils/logger.py +27 -0
  186. cribl_control_plane/utils/metadata.py +118 -0
  187. cribl_control_plane/utils/queryparams.py +205 -0
  188. cribl_control_plane/utils/requestbodies.py +66 -0
  189. cribl_control_plane/utils/retries.py +217 -0
  190. cribl_control_plane/utils/security.py +207 -0
  191. cribl_control_plane/utils/serializers.py +249 -0
  192. cribl_control_plane/utils/unmarshal_json_response.py +24 -0
  193. cribl_control_plane/utils/url.py +155 -0
  194. cribl_control_plane/utils/values.py +137 -0
  195. cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
  196. cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
  197. cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/outputgooglechronicle.py
@@ -0,0 +1,486 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from cribl_control_plane import utils
+ from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
+ from enum import Enum
+ import pydantic
+ from pydantic.functional_validators import PlainValidator
+ from typing import List, Optional
+ from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+ class OutputGoogleChronicleType(str, Enum, metaclass=utils.OpenEnumMeta):
+     GOOGLE_CHRONICLE = "google_chronicle"
+
+
+ class OutputGoogleChronicleAPIVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+     V1 = "v1"
+     V2 = "v2"
+
+
+ class OutputGoogleChronicleAuthenticationMethod(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
+     MANUAL = "manual"
+     SECRET = "secret"
+     SERVICE_ACCOUNT = "serviceAccount"
+     SERVICE_ACCOUNT_SECRET = "serviceAccountSecret"
+
+
+ class OutputGoogleChronicleResponseRetrySettingTypedDict(TypedDict):
+     http_status: float
+     r"""The HTTP response status code that will trigger retries"""
+     initial_backoff: NotRequired[float]
+     r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+     backoff_rate: NotRequired[float]
+     r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+     max_backoff: NotRequired[float]
+     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+ class OutputGoogleChronicleResponseRetrySetting(BaseModel):
+     http_status: Annotated[float, pydantic.Field(alias="httpStatus")]
+     r"""The HTTP response status code that will trigger retries"""
+
+     initial_backoff: Annotated[
+         Optional[float], pydantic.Field(alias="initialBackoff")
+     ] = 1000
+     r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+
+     backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+     r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+
+     max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
+     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+ class OutputGoogleChronicleTimeoutRetrySettingsTypedDict(TypedDict):
+     timeout_retry: NotRequired[bool]
+     initial_backoff: NotRequired[float]
+     r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+     backoff_rate: NotRequired[float]
+     r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+     max_backoff: NotRequired[float]
+     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+ class OutputGoogleChronicleTimeoutRetrySettings(BaseModel):
+     timeout_retry: Annotated[Optional[bool], pydantic.Field(alias="timeoutRetry")] = (
+         False
+     )
+
+     initial_backoff: Annotated[
+         Optional[float], pydantic.Field(alias="initialBackoff")
+     ] = 1000
+     r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+
+     backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+     r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+
+     max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
+     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+ class SendEventsAs(str, Enum, metaclass=utils.OpenEnumMeta):
+     UNSTRUCTURED = "unstructured"
+     UDM = "udm"
+
+
+ class OutputGoogleChronicleExtraHTTPHeaderTypedDict(TypedDict):
+     value: str
+     name: NotRequired[str]
+
+
+ class OutputGoogleChronicleExtraHTTPHeader(BaseModel):
+     value: str
+
+     name: Optional[str] = None
+
+
+ class OutputGoogleChronicleFailedRequestLoggingMode(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
+     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+
+     PAYLOAD = "payload"
+     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+     NONE = "none"
+
+
+ class OutputGoogleChronicleBackpressureBehavior(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     BLOCK = "block"
+     DROP = "drop"
+     QUEUE = "queue"
+
+
+ class ExtraLogTypeTypedDict(TypedDict):
+     log_type: str
+     description: NotRequired[str]
+
+
+ class ExtraLogType(BaseModel):
+     log_type: Annotated[str, pydantic.Field(alias="logType")]
+
+     description: Optional[str] = None
+
+
+ class CustomLabelTypedDict(TypedDict):
+     key: str
+     value: str
+
+
+ class CustomLabel(BaseModel):
+     key: str
+
+     value: str
+
+
+ class OutputGoogleChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Codec to use to compress the persisted data"""
+
+     NONE = "none"
+     GZIP = "gzip"
+
+
+ class OutputGoogleChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+     BLOCK = "block"
+     DROP = "drop"
+
+
+ class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+     ERROR = "error"
+     BACKPRESSURE = "backpressure"
+     ALWAYS = "always"
+
+
+ class OutputGoogleChroniclePqControlsTypedDict(TypedDict):
+     pass
+
+
+ class OutputGoogleChroniclePqControls(BaseModel):
+     pass
+
+
+ class OutputGoogleChronicleTypedDict(TypedDict):
+     type: OutputGoogleChronicleType
+     id: NotRequired[str]
+     r"""Unique ID for this output"""
+     pipeline: NotRequired[str]
+     r"""Pipeline to process data before sending out to this output"""
+     system_fields: NotRequired[List[str]]
+     r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+     environment: NotRequired[str]
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+     streamtags: NotRequired[List[str]]
+     r"""Tags for filtering and grouping in @{product}"""
+     api_version: NotRequired[OutputGoogleChronicleAPIVersion]
+     authentication_method: NotRequired[OutputGoogleChronicleAuthenticationMethod]
+     response_retry_settings: NotRequired[
+         List[OutputGoogleChronicleResponseRetrySettingTypedDict]
+     ]
+     r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
+     timeout_retry_settings: NotRequired[
+         OutputGoogleChronicleTimeoutRetrySettingsTypedDict
+     ]
+     response_honor_retry_after_header: NotRequired[bool]
+     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+     log_format_type: NotRequired[SendEventsAs]
+     region: NotRequired[str]
+     r"""Regional endpoint to send events to"""
+     concurrency: NotRequired[float]
+     r"""Maximum number of ongoing requests before blocking"""
+     max_payload_size_kb: NotRequired[float]
+     r"""Maximum size, in KB, of the request body"""
+     max_payload_events: NotRequired[float]
+     r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
+     compress: NotRequired[bool]
+     r"""Compress the payload body before sending"""
+     reject_unauthorized: NotRequired[bool]
+     r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
+     Enabled by default. When this setting is also present in TLS Settings (Client Side),
+     that value will take precedence.
+     """
+     timeout_sec: NotRequired[float]
+     r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+     flush_period_sec: NotRequired[float]
+     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
+     extra_http_headers: NotRequired[List[OutputGoogleChronicleExtraHTTPHeaderTypedDict]]
+     r"""Headers to add to all events"""
+     failed_request_logging_mode: NotRequired[
+         OutputGoogleChronicleFailedRequestLoggingMode
+     ]
+     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+     safe_headers: NotRequired[List[str]]
+     r"""List of headers that are safe to log in plain text"""
+     use_round_robin_dns: NotRequired[bool]
+     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
+     on_backpressure: NotRequired[OutputGoogleChronicleBackpressureBehavior]
+     r"""How to handle events when all receivers are exerting backpressure"""
+     total_memory_limit_kb: NotRequired[float]
+     r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
+     description: NotRequired[str]
+     extra_log_types: NotRequired[List[ExtraLogTypeTypedDict]]
+     r"""Custom log types. If the value \"Custom\" is selected in the setting \"Default log type\" above, the first custom log type in this table will be automatically selected as default log type."""
+     log_type: NotRequired[str]
+     r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
+     log_text_field: NotRequired[str]
+     r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
+     customer_id: NotRequired[str]
+     r"""Unique identifier (UUID) corresponding to a particular SecOps instance. Provided by your SecOps representative."""
+     namespace: NotRequired[str]
+     r"""User-configured environment namespace to identify the data domain the logs originated from. Use namespace as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
+     custom_labels: NotRequired[List[CustomLabelTypedDict]]
+     r"""Custom labels to be added to every batch"""
+     api_key: NotRequired[str]
+     r"""Organization's API key in Google SecOps"""
+     api_key_secret: NotRequired[str]
+     r"""Select or create a stored text secret"""
+     service_account_credentials: NotRequired[str]
+     r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
+     service_account_credentials_secret: NotRequired[str]
+     r"""Select or create a stored text secret"""
+     pq_max_file_size: NotRequired[str]
+     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+     pq_max_size: NotRequired[str]
+     r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+     pq_path: NotRequired[str]
+     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+     pq_compress: NotRequired[OutputGoogleChronicleCompression]
+     r"""Codec to use to compress the persisted data"""
+     pq_on_backpressure: NotRequired[OutputGoogleChronicleQueueFullBehavior]
+     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+     pq_mode: NotRequired[OutputGoogleChronicleMode]
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+     pq_controls: NotRequired[OutputGoogleChroniclePqControlsTypedDict]
+
+
+ class OutputGoogleChronicle(BaseModel):
+     type: Annotated[
+         OutputGoogleChronicleType, PlainValidator(validate_open_enum(False))
+     ]
+
+     id: Optional[str] = None
+     r"""Unique ID for this output"""
+
+     pipeline: Optional[str] = None
+     r"""Pipeline to process data before sending out to this output"""
+
+     system_fields: Annotated[
+         Optional[List[str]], pydantic.Field(alias="systemFields")
+     ] = None
+     r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+     environment: Optional[str] = None
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+     streamtags: Optional[List[str]] = None
+     r"""Tags for filtering and grouping in @{product}"""
+
+     api_version: Annotated[
+         Annotated[
+             Optional[OutputGoogleChronicleAPIVersion],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="apiVersion"),
+     ] = OutputGoogleChronicleAPIVersion.V1
+
+     authentication_method: Annotated[
+         Annotated[
+             Optional[OutputGoogleChronicleAuthenticationMethod],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="authenticationMethod"),
+     ] = OutputGoogleChronicleAuthenticationMethod.SERVICE_ACCOUNT
+
+     response_retry_settings: Annotated[
+         Optional[List[OutputGoogleChronicleResponseRetrySetting]],
+         pydantic.Field(alias="responseRetrySettings"),
+     ] = None
+     r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
+
+     timeout_retry_settings: Annotated[
+         Optional[OutputGoogleChronicleTimeoutRetrySettings],
+         pydantic.Field(alias="timeoutRetrySettings"),
+     ] = None
+
+     response_honor_retry_after_header: Annotated[
+         Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
+     ] = False
+     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+
+     log_format_type: Annotated[
+         Annotated[Optional[SendEventsAs], PlainValidator(validate_open_enum(False))],
+         pydantic.Field(alias="logFormatType"),
+     ] = SendEventsAs.UNSTRUCTURED
+
+     region: Optional[str] = None
+     r"""Regional endpoint to send events to"""
+
+     concurrency: Optional[float] = 5
+     r"""Maximum number of ongoing requests before blocking"""
+
+     max_payload_size_kb: Annotated[
+         Optional[float], pydantic.Field(alias="maxPayloadSizeKB")
+     ] = 1024
+     r"""Maximum size, in KB, of the request body"""
+
+     max_payload_events: Annotated[
+         Optional[float], pydantic.Field(alias="maxPayloadEvents")
+     ] = 0
+     r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
+
+     compress: Optional[bool] = True
+     r"""Compress the payload body before sending"""
+
+     reject_unauthorized: Annotated[
+         Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+     ] = True
+     r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
+     Enabled by default. When this setting is also present in TLS Settings (Client Side),
+     that value will take precedence.
+     """
+
+     timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 90
+     r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+
+     flush_period_sec: Annotated[
+         Optional[float], pydantic.Field(alias="flushPeriodSec")
+     ] = 1
+     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
+
+     extra_http_headers: Annotated[
+         Optional[List[OutputGoogleChronicleExtraHTTPHeader]],
+         pydantic.Field(alias="extraHttpHeaders"),
+     ] = None
+     r"""Headers to add to all events"""
+
+     failed_request_logging_mode: Annotated[
+         Annotated[
+             Optional[OutputGoogleChronicleFailedRequestLoggingMode],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="failedRequestLoggingMode"),
+     ] = OutputGoogleChronicleFailedRequestLoggingMode.NONE
+     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+
+     safe_headers: Annotated[
+         Optional[List[str]], pydantic.Field(alias="safeHeaders")
+     ] = None
+     r"""List of headers that are safe to log in plain text"""
+
+     use_round_robin_dns: Annotated[
+         Optional[bool], pydantic.Field(alias="useRoundRobinDns")
+     ] = False
+     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
+
+     on_backpressure: Annotated[
+         Annotated[
+             Optional[OutputGoogleChronicleBackpressureBehavior],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="onBackpressure"),
+     ] = OutputGoogleChronicleBackpressureBehavior.BLOCK
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     total_memory_limit_kb: Annotated[
+         Optional[float], pydantic.Field(alias="totalMemoryLimitKB")
+     ] = None
+     r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
+
+     description: Optional[str] = None
+
+     extra_log_types: Annotated[
+         Optional[List[ExtraLogType]], pydantic.Field(alias="extraLogTypes")
+     ] = None
+     r"""Custom log types. If the value \"Custom\" is selected in the setting \"Default log type\" above, the first custom log type in this table will be automatically selected as default log type."""
+
+     log_type: Annotated[Optional[str], pydantic.Field(alias="logType")] = None
+     r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
+
+     log_text_field: Annotated[Optional[str], pydantic.Field(alias="logTextField")] = (
+         None
+     )
+     r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
+
+     customer_id: Annotated[Optional[str], pydantic.Field(alias="customerId")] = None
+     r"""Unique identifier (UUID) corresponding to a particular SecOps instance. Provided by your SecOps representative."""
+
+     namespace: Optional[str] = None
+     r"""User-configured environment namespace to identify the data domain the logs originated from. Use namespace as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
+
+     custom_labels: Annotated[
+         Optional[List[CustomLabel]], pydantic.Field(alias="customLabels")
+     ] = None
+     r"""Custom labels to be added to every batch"""
+
+     api_key: Annotated[Optional[str], pydantic.Field(alias="apiKey")] = None
+     r"""Organization's API key in Google SecOps"""
+
+     api_key_secret: Annotated[Optional[str], pydantic.Field(alias="apiKeySecret")] = (
+         None
+     )
+     r"""Select or create a stored text secret"""
+
+     service_account_credentials: Annotated[
+         Optional[str], pydantic.Field(alias="serviceAccountCredentials")
+     ] = None
+     r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
+
+     service_account_credentials_secret: Annotated[
+         Optional[str], pydantic.Field(alias="serviceAccountCredentialsSecret")
+     ] = None
+     r"""Select or create a stored text secret"""
+
+     pq_max_file_size: Annotated[
+         Optional[str], pydantic.Field(alias="pqMaxFileSize")
+     ] = "1 MB"
+     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+
+     pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
+     r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+     pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
+         "$CRIBL_HOME/state/queues"
+     )
+     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+
+     pq_compress: Annotated[
+         Annotated[
+             Optional[OutputGoogleChronicleCompression],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="pqCompress"),
+     ] = OutputGoogleChronicleCompression.NONE
+     r"""Codec to use to compress the persisted data"""
+
+     pq_on_backpressure: Annotated[
+         Annotated[
+             Optional[OutputGoogleChronicleQueueFullBehavior],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="pqOnBackpressure"),
+     ] = OutputGoogleChronicleQueueFullBehavior.BLOCK
+     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+     pq_mode: Annotated[
+         Annotated[
+             Optional[OutputGoogleChronicleMode],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="pqMode"),
+     ] = OutputGoogleChronicleMode.ERROR
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+     pq_controls: Annotated[
+         Optional[OutputGoogleChroniclePqControls], pydantic.Field(alias="pqControls")
+     ] = None
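For orientation, here is a minimal usage sketch (not part of the package) showing how the generated model above might be constructed and serialized. It assumes the Speakeasy-generated BaseModel behaves like a standard Pydantic v2 model that accepts the snake_case field names shown in the diff and serializes with the camelCase aliases; every value is a placeholder.

# Hypothetical usage sketch for the OutputGoogleChronicle model added above.
# Assumes the generated BaseModel is a Pydantic v2 model that accepts
# snake_case field names (populate-by-name) and can dump using its aliases.
from cribl_control_plane.models.outputgooglechronicle import (
    OutputGoogleChronicle,
    OutputGoogleChronicleAuthenticationMethod,
    OutputGoogleChronicleResponseRetrySetting,
    OutputGoogleChronicleType,
)

destination = OutputGoogleChronicle(
    type=OutputGoogleChronicleType.GOOGLE_CHRONICLE,
    id="chronicle-out",  # placeholder ID
    authentication_method=OutputGoogleChronicleAuthenticationMethod.SERVICE_ACCOUNT,
    service_account_credentials="<contents of the JSON key file>",  # placeholder
    customer_id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
    region="us",
    # Retry 429 responses; per the defaults above, initial_backoff is 1,000 ms,
    # backoff_rate is 2, and the interval is capped at max_backoff (10,000 ms).
    response_retry_settings=[
        OutputGoogleChronicleResponseRetrySetting(http_status=429)
    ],
)

# Emit the camelCase shape used on the wire (authenticationMethod,
# serviceAccountCredentials, responseRetrySettings, ...).
print(destination.model_dump(by_alias=True, exclude_none=True))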