cribl-control-plane 0.0.13 (cribl_control_plane-0.0.13-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic.

Files changed (197)
  1. cribl_control_plane/__init__.py +17 -0
  2. cribl_control_plane/_hooks/__init__.py +5 -0
  3. cribl_control_plane/_hooks/clientcredentials.py +211 -0
  4. cribl_control_plane/_hooks/registration.py +13 -0
  5. cribl_control_plane/_hooks/sdkhooks.py +81 -0
  6. cribl_control_plane/_hooks/types.py +112 -0
  7. cribl_control_plane/_version.py +15 -0
  8. cribl_control_plane/auth_sdk.py +184 -0
  9. cribl_control_plane/basesdk.py +358 -0
  10. cribl_control_plane/errors/__init__.py +60 -0
  11. cribl_control_plane/errors/apierror.py +38 -0
  12. cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
  13. cribl_control_plane/errors/error.py +24 -0
  14. cribl_control_plane/errors/healthstatus_error.py +38 -0
  15. cribl_control_plane/errors/no_response_error.py +13 -0
  16. cribl_control_plane/errors/responsevalidationerror.py +25 -0
  17. cribl_control_plane/health.py +166 -0
  18. cribl_control_plane/httpclient.py +126 -0
  19. cribl_control_plane/models/__init__.py +7305 -0
  20. cribl_control_plane/models/addhectokenrequest.py +34 -0
  21. cribl_control_plane/models/authtoken.py +13 -0
  22. cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
  23. cribl_control_plane/models/createinputop.py +24 -0
  24. cribl_control_plane/models/createoutputop.py +24 -0
  25. cribl_control_plane/models/createoutputtestbyidop.py +46 -0
  26. cribl_control_plane/models/criblevent.py +14 -0
  27. cribl_control_plane/models/deleteinputbyidop.py +37 -0
  28. cribl_control_plane/models/deleteoutputbyidop.py +37 -0
  29. cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
  30. cribl_control_plane/models/getinputbyidop.py +37 -0
  31. cribl_control_plane/models/getoutputbyidop.py +37 -0
  32. cribl_control_plane/models/getoutputpqbyidop.py +36 -0
  33. cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
  34. cribl_control_plane/models/healthstatus.py +36 -0
  35. cribl_control_plane/models/input.py +199 -0
  36. cribl_control_plane/models/inputappscope.py +448 -0
  37. cribl_control_plane/models/inputazureblob.py +308 -0
  38. cribl_control_plane/models/inputcollection.py +208 -0
  39. cribl_control_plane/models/inputconfluentcloud.py +585 -0
  40. cribl_control_plane/models/inputcribl.py +165 -0
  41. cribl_control_plane/models/inputcriblhttp.py +341 -0
  42. cribl_control_plane/models/inputcribllakehttp.py +342 -0
  43. cribl_control_plane/models/inputcriblmetrics.py +175 -0
  44. cribl_control_plane/models/inputcribltcp.py +299 -0
  45. cribl_control_plane/models/inputcrowdstrike.py +410 -0
  46. cribl_control_plane/models/inputdatadogagent.py +364 -0
  47. cribl_control_plane/models/inputdatagen.py +180 -0
  48. cribl_control_plane/models/inputedgeprometheus.py +551 -0
  49. cribl_control_plane/models/inputelastic.py +494 -0
  50. cribl_control_plane/models/inputeventhub.py +360 -0
  51. cribl_control_plane/models/inputexec.py +213 -0
  52. cribl_control_plane/models/inputfile.py +259 -0
  53. cribl_control_plane/models/inputfirehose.py +341 -0
  54. cribl_control_plane/models/inputgooglepubsub.py +247 -0
  55. cribl_control_plane/models/inputgrafana_union.py +1247 -0
  56. cribl_control_plane/models/inputhttp.py +403 -0
  57. cribl_control_plane/models/inputhttpraw.py +407 -0
  58. cribl_control_plane/models/inputjournalfiles.py +208 -0
  59. cribl_control_plane/models/inputkafka.py +581 -0
  60. cribl_control_plane/models/inputkinesis.py +363 -0
  61. cribl_control_plane/models/inputkubeevents.py +182 -0
  62. cribl_control_plane/models/inputkubelogs.py +256 -0
  63. cribl_control_plane/models/inputkubemetrics.py +233 -0
  64. cribl_control_plane/models/inputloki.py +468 -0
  65. cribl_control_plane/models/inputmetrics.py +290 -0
  66. cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
  67. cribl_control_plane/models/inputmsk.py +654 -0
  68. cribl_control_plane/models/inputnetflow.py +224 -0
  69. cribl_control_plane/models/inputoffice365mgmt.py +384 -0
  70. cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
  71. cribl_control_plane/models/inputoffice365service.py +377 -0
  72. cribl_control_plane/models/inputopentelemetry.py +516 -0
  73. cribl_control_plane/models/inputprometheus.py +464 -0
  74. cribl_control_plane/models/inputprometheusrw.py +470 -0
  75. cribl_control_plane/models/inputrawudp.py +207 -0
  76. cribl_control_plane/models/inputs3.py +416 -0
  77. cribl_control_plane/models/inputs3inventory.py +440 -0
  78. cribl_control_plane/models/inputsecuritylake.py +425 -0
  79. cribl_control_plane/models/inputsnmp.py +274 -0
  80. cribl_control_plane/models/inputsplunk.py +387 -0
  81. cribl_control_plane/models/inputsplunkhec.py +478 -0
  82. cribl_control_plane/models/inputsplunksearch.py +537 -0
  83. cribl_control_plane/models/inputsqs.py +320 -0
  84. cribl_control_plane/models/inputsyslog_union.py +759 -0
  85. cribl_control_plane/models/inputsystemmetrics.py +533 -0
  86. cribl_control_plane/models/inputsystemstate.py +417 -0
  87. cribl_control_plane/models/inputtcp.py +359 -0
  88. cribl_control_plane/models/inputtcpjson.py +334 -0
  89. cribl_control_plane/models/inputwef.py +498 -0
  90. cribl_control_plane/models/inputwindowsmetrics.py +457 -0
  91. cribl_control_plane/models/inputwineventlogs.py +222 -0
  92. cribl_control_plane/models/inputwiz.py +334 -0
  93. cribl_control_plane/models/inputzscalerhec.py +439 -0
  94. cribl_control_plane/models/listinputop.py +24 -0
  95. cribl_control_plane/models/listoutputop.py +24 -0
  96. cribl_control_plane/models/logininfo.py +16 -0
  97. cribl_control_plane/models/output.py +229 -0
  98. cribl_control_plane/models/outputazureblob.py +471 -0
  99. cribl_control_plane/models/outputazuredataexplorer.py +660 -0
  100. cribl_control_plane/models/outputazureeventhub.py +321 -0
  101. cribl_control_plane/models/outputazurelogs.py +386 -0
  102. cribl_control_plane/models/outputclickhouse.py +650 -0
  103. cribl_control_plane/models/outputcloudwatch.py +273 -0
  104. cribl_control_plane/models/outputconfluentcloud.py +591 -0
  105. cribl_control_plane/models/outputcriblhttp.py +494 -0
  106. cribl_control_plane/models/outputcribllake.py +396 -0
  107. cribl_control_plane/models/outputcribltcp.py +387 -0
  108. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
  109. cribl_control_plane/models/outputdatadog.py +472 -0
  110. cribl_control_plane/models/outputdataset.py +437 -0
  111. cribl_control_plane/models/outputdefault.py +55 -0
  112. cribl_control_plane/models/outputdevnull.py +50 -0
  113. cribl_control_plane/models/outputdiskspool.py +89 -0
  114. cribl_control_plane/models/outputdls3.py +560 -0
  115. cribl_control_plane/models/outputdynatracehttp.py +454 -0
  116. cribl_control_plane/models/outputdynatraceotlp.py +486 -0
  117. cribl_control_plane/models/outputelastic.py +494 -0
  118. cribl_control_plane/models/outputelasticcloud.py +407 -0
  119. cribl_control_plane/models/outputexabeam.py +297 -0
  120. cribl_control_plane/models/outputfilesystem.py +357 -0
  121. cribl_control_plane/models/outputgooglechronicle.py +486 -0
  122. cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
  123. cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
  124. cribl_control_plane/models/outputgooglepubsub.py +274 -0
  125. cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
  126. cribl_control_plane/models/outputgraphite.py +225 -0
  127. cribl_control_plane/models/outputhoneycomb.py +369 -0
  128. cribl_control_plane/models/outputhumiohec.py +389 -0
  129. cribl_control_plane/models/outputinfluxdb.py +523 -0
  130. cribl_control_plane/models/outputkafka.py +581 -0
  131. cribl_control_plane/models/outputkinesis.py +312 -0
  132. cribl_control_plane/models/outputloki.py +425 -0
  133. cribl_control_plane/models/outputminio.py +512 -0
  134. cribl_control_plane/models/outputmsk.py +654 -0
  135. cribl_control_plane/models/outputnetflow.py +80 -0
  136. cribl_control_plane/models/outputnewrelic.py +424 -0
  137. cribl_control_plane/models/outputnewrelicevents.py +401 -0
  138. cribl_control_plane/models/outputopentelemetry.py +669 -0
  139. cribl_control_plane/models/outputprometheus.py +485 -0
  140. cribl_control_plane/models/outputring.py +121 -0
  141. cribl_control_plane/models/outputrouter.py +83 -0
  142. cribl_control_plane/models/outputs3.py +556 -0
  143. cribl_control_plane/models/outputsamplesresponse.py +14 -0
  144. cribl_control_plane/models/outputsecuritylake.py +505 -0
  145. cribl_control_plane/models/outputsentinel.py +488 -0
  146. cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
  147. cribl_control_plane/models/outputservicenow.py +543 -0
  148. cribl_control_plane/models/outputsignalfx.py +369 -0
  149. cribl_control_plane/models/outputsnmp.py +80 -0
  150. cribl_control_plane/models/outputsns.py +274 -0
  151. cribl_control_plane/models/outputsplunk.py +383 -0
  152. cribl_control_plane/models/outputsplunkhec.py +434 -0
  153. cribl_control_plane/models/outputsplunklb.py +558 -0
  154. cribl_control_plane/models/outputsqs.py +328 -0
  155. cribl_control_plane/models/outputstatsd.py +224 -0
  156. cribl_control_plane/models/outputstatsdext.py +225 -0
  157. cribl_control_plane/models/outputsumologic.py +378 -0
  158. cribl_control_plane/models/outputsyslog.py +415 -0
  159. cribl_control_plane/models/outputtcpjson.py +413 -0
  160. cribl_control_plane/models/outputtestrequest.py +15 -0
  161. cribl_control_plane/models/outputtestresponse.py +29 -0
  162. cribl_control_plane/models/outputwavefront.py +369 -0
  163. cribl_control_plane/models/outputwebhook.py +689 -0
  164. cribl_control_plane/models/outputxsiam.py +415 -0
  165. cribl_control_plane/models/schemeclientoauth.py +24 -0
  166. cribl_control_plane/models/security.py +36 -0
  167. cribl_control_plane/models/updatehectokenrequest.py +31 -0
  168. cribl_control_plane/models/updateinputbyidop.py +44 -0
  169. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
  170. cribl_control_plane/models/updateoutputbyidop.py +44 -0
  171. cribl_control_plane/outputs.py +1615 -0
  172. cribl_control_plane/py.typed +1 -0
  173. cribl_control_plane/sdk.py +164 -0
  174. cribl_control_plane/sdkconfiguration.py +36 -0
  175. cribl_control_plane/sources.py +1355 -0
  176. cribl_control_plane/types/__init__.py +21 -0
  177. cribl_control_plane/types/basemodel.py +39 -0
  178. cribl_control_plane/utils/__init__.py +187 -0
  179. cribl_control_plane/utils/annotations.py +55 -0
  180. cribl_control_plane/utils/datetimes.py +23 -0
  181. cribl_control_plane/utils/enums.py +74 -0
  182. cribl_control_plane/utils/eventstreaming.py +238 -0
  183. cribl_control_plane/utils/forms.py +223 -0
  184. cribl_control_plane/utils/headers.py +136 -0
  185. cribl_control_plane/utils/logger.py +27 -0
  186. cribl_control_plane/utils/metadata.py +118 -0
  187. cribl_control_plane/utils/queryparams.py +205 -0
  188. cribl_control_plane/utils/requestbodies.py +66 -0
  189. cribl_control_plane/utils/retries.py +217 -0
  190. cribl_control_plane/utils/security.py +207 -0
  191. cribl_control_plane/utils/serializers.py +249 -0
  192. cribl_control_plane/utils/unmarshal_json_response.py +24 -0
  193. cribl_control_plane/utils/url.py +155 -0
  194. cribl_control_plane/utils/values.py +137 -0
  195. cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
  196. cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
  197. cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/outputexabeam.py
@@ -0,0 +1,297 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from cribl_control_plane import utils
+ from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
+ from enum import Enum
+ import pydantic
+ from pydantic.functional_validators import PlainValidator
+ from typing import List, Optional
+ from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+ class OutputExabeamType(str, Enum, metaclass=utils.OpenEnumMeta):
+     EXABEAM = "exabeam"
+
+
+ class OutputExabeamSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Signature version to use for signing Google Cloud Storage requests"""
+
+     V2 = "v2"
+     V4 = "v4"
+
+
+ class OutputExabeamObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Object ACL to assign to uploaded objects"""
+
+     PRIVATE = "private"
+     BUCKET_OWNER_READ = "bucket-owner-read"
+     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
+     PROJECT_PRIVATE = "project-private"
+     AUTHENTICATED_READ = "authenticated-read"
+     PUBLIC_READ = "public-read"
+
+
+ class OutputExabeamStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Storage class to select for uploaded objects"""
+
+     STANDARD = "STANDARD"
+     NEARLINE = "NEARLINE"
+     COLDLINE = "COLDLINE"
+     ARCHIVE = "ARCHIVE"
+
+
+ class OutputExabeamBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     BLOCK = "block"
+     DROP = "drop"
+
+
+ class OutputExabeamDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+     BLOCK = "block"
+     DROP = "drop"
+
+
+ class OutputExabeamTypedDict(TypedDict):
+     bucket: str
+     r"""Name of the destination bucket. A constant or a JavaScript expression that can only be evaluated at init time. Example of referencing a JavaScript Global Variable: `myBucket-${C.vars.myVar}`."""
+     region: str
+     r"""Region where the bucket is located"""
+     collector_instance_id: str
+     r"""ID of the Exabeam Collector where data should be sent. Example: 11112222-3333-4444-5555-666677778888
+
+     """
+     id: NotRequired[str]
+     r"""Unique ID for this output"""
+     type: NotRequired[OutputExabeamType]
+     pipeline: NotRequired[str]
+     r"""Pipeline to process data before sending out to this output"""
+     system_fields: NotRequired[List[str]]
+     r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+     environment: NotRequired[str]
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+     streamtags: NotRequired[List[str]]
+     r"""Tags for filtering and grouping in @{product}"""
+     stage_path: NotRequired[str]
+     r"""Filesystem location in which to buffer files, before compressing and moving to final destination. Use performant and stable storage."""
+     endpoint: NotRequired[str]
+     r"""Google Cloud Storage service endpoint"""
+     signature_version: NotRequired[OutputExabeamSignatureVersion]
+     r"""Signature version to use for signing Google Cloud Storage requests"""
+     object_acl: NotRequired[OutputExabeamObjectACL]
+     r"""Object ACL to assign to uploaded objects"""
+     storage_class: NotRequired[OutputExabeamStorageClass]
+     r"""Storage class to select for uploaded objects"""
+     reuse_connections: NotRequired[bool]
+     r"""Reuse connections between requests, which can improve performance"""
+     reject_unauthorized: NotRequired[bool]
+     r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
+     add_id_to_stage_path: NotRequired[bool]
+     r"""Add the Output ID value to staging location"""
+     remove_empty_dirs: NotRequired[bool]
+     r"""Remove empty staging directories after moving files"""
+     max_file_open_time_sec: NotRequired[float]
+     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+     max_file_idle_time_sec: NotRequired[float]
+     r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+     max_open_files: NotRequired[float]
+     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+     on_backpressure: NotRequired[OutputExabeamBackpressureBehavior]
+     r"""How to handle events when all receivers are exerting backpressure"""
+     deadletter_enabled: NotRequired[bool]
+     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+     on_disk_full_backpressure: NotRequired[OutputExabeamDiskSpaceProtection]
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+     max_file_size_mb: NotRequired[float]
+     r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+     encoded_configuration: NotRequired[str]
+     r"""Enter an encoded string containing Exabeam configurations"""
+     site_name: NotRequired[str]
+     r"""Constant or JavaScript expression to create an Exabeam site name. Values that aren't successfully evaluated will be treated as string constants."""
+     site_id: NotRequired[str]
+     r"""Exabeam site ID. If left blank, @{product} will use the value of the Exabeam site name."""
+     timezone_offset: NotRequired[str]
+     aws_api_key: NotRequired[str]
+     r"""HMAC access key. Can be a constant or a JavaScript expression, such as `${C.env.GCS_ACCESS_KEY}`."""
+     aws_secret_key: NotRequired[str]
+     r"""HMAC secret. Can be a constant or a JavaScript expression, such as `${C.env.GCS_SECRET}`."""
+     description: NotRequired[str]
+     empty_dir_cleanup_sec: NotRequired[float]
+     r"""How frequently, in seconds, to clean up empty directories"""
+     deadletter_path: NotRequired[str]
+     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+     max_retry_num: NotRequired[float]
+     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+
+
+ class OutputExabeam(BaseModel):
+     bucket: str
+     r"""Name of the destination bucket. A constant or a JavaScript expression that can only be evaluated at init time. Example of referencing a JavaScript Global Variable: `myBucket-${C.vars.myVar}`."""
+
+     region: str
+     r"""Region where the bucket is located"""
+
+     collector_instance_id: Annotated[str, pydantic.Field(alias="collectorInstanceId")]
+     r"""ID of the Exabeam Collector where data should be sent. Example: 11112222-3333-4444-5555-666677778888
+
+     """
+
+     id: Optional[str] = None
+     r"""Unique ID for this output"""
+
+     type: Annotated[
+         Optional[OutputExabeamType], PlainValidator(validate_open_enum(False))
+     ] = None
+
+     pipeline: Optional[str] = None
+     r"""Pipeline to process data before sending out to this output"""
+
+     system_fields: Annotated[
+         Optional[List[str]], pydantic.Field(alias="systemFields")
+     ] = None
+     r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+     environment: Optional[str] = None
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+     streamtags: Optional[List[str]] = None
+     r"""Tags for filtering and grouping in @{product}"""
+
+     stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
+         "$CRIBL_HOME/state/outputs/staging"
+     )
+     r"""Filesystem location in which to buffer files, before compressing and moving to final destination. Use performant and stable storage."""
+
+     endpoint: Optional[str] = "https://storage.googleapis.com"
+     r"""Google Cloud Storage service endpoint"""
+
+     signature_version: Annotated[
+         Annotated[
+             Optional[OutputExabeamSignatureVersion],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="signatureVersion"),
+     ] = OutputExabeamSignatureVersion.V4
+     r"""Signature version to use for signing Google Cloud Storage requests"""
+
+     object_acl: Annotated[
+         Annotated[
+             Optional[OutputExabeamObjectACL], PlainValidator(validate_open_enum(False))
+         ],
+         pydantic.Field(alias="objectACL"),
+     ] = OutputExabeamObjectACL.PRIVATE
+     r"""Object ACL to assign to uploaded objects"""
+
+     storage_class: Annotated[
+         Annotated[
+             Optional[OutputExabeamStorageClass],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="storageClass"),
+     ] = None
+     r"""Storage class to select for uploaded objects"""
+
+     reuse_connections: Annotated[
+         Optional[bool], pydantic.Field(alias="reuseConnections")
+     ] = True
+     r"""Reuse connections between requests, which can improve performance"""
+
+     reject_unauthorized: Annotated[
+         Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+     ] = True
+     r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
+
+     add_id_to_stage_path: Annotated[
+         Optional[bool], pydantic.Field(alias="addIdToStagePath")
+     ] = True
+     r"""Add the Output ID value to staging location"""
+
+     remove_empty_dirs: Annotated[
+         Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+     ] = True
+     r"""Remove empty staging directories after moving files"""
+
+     max_file_open_time_sec: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
+     ] = 300
+     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+
+     max_file_idle_time_sec: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileIdleTimeSec")
+     ] = 30
+     r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+
+     max_open_files: Annotated[Optional[float], pydantic.Field(alias="maxOpenFiles")] = (
+         100
+     )
+     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+
+     on_backpressure: Annotated[
+         Annotated[
+             Optional[OutputExabeamBackpressureBehavior],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="onBackpressure"),
+     ] = OutputExabeamBackpressureBehavior.BLOCK
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     deadletter_enabled: Annotated[
+         Optional[bool], pydantic.Field(alias="deadletterEnabled")
+     ] = False
+     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
+     on_disk_full_backpressure: Annotated[
+         Annotated[
+             Optional[OutputExabeamDiskSpaceProtection],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="onDiskFullBackpressure"),
+     ] = OutputExabeamDiskSpaceProtection.BLOCK
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+     max_file_size_mb: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileSizeMB")
+     ] = 10
+     r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+
+     encoded_configuration: Annotated[
+         Optional[str], pydantic.Field(alias="encodedConfiguration")
+     ] = None
+     r"""Enter an encoded string containing Exabeam configurations"""
+
+     site_name: Annotated[Optional[str], pydantic.Field(alias="siteName")] = None
+     r"""Constant or JavaScript expression to create an Exabeam site name. Values that aren't successfully evaluated will be treated as string constants."""
+
+     site_id: Annotated[Optional[str], pydantic.Field(alias="siteId")] = None
+     r"""Exabeam site ID. If left blank, @{product} will use the value of the Exabeam site name."""
+
+     timezone_offset: Annotated[
+         Optional[str], pydantic.Field(alias="timezoneOffset")
+     ] = None
+
+     aws_api_key: Annotated[Optional[str], pydantic.Field(alias="awsApiKey")] = None
+     r"""HMAC access key. Can be a constant or a JavaScript expression, such as `${C.env.GCS_ACCESS_KEY}`."""
+
+     aws_secret_key: Annotated[Optional[str], pydantic.Field(alias="awsSecretKey")] = (
+         None
+     )
+     r"""HMAC secret. Can be a constant or a JavaScript expression, such as `${C.env.GCS_SECRET}`."""
+
+     description: Optional[str] = None
+
+     empty_dir_cleanup_sec: Annotated[
+         Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
+     ] = 300
+     r"""How frequently, in seconds, to clean up empty directories"""
+
+     deadletter_path: Annotated[
+         Optional[str], pydantic.Field(alias="deadletterPath")
+     ] = "$CRIBL_HOME/state/outputs/dead-letter"
+     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+
+     max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
+     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
cribl_control_plane/models/outputfilesystem.py
@@ -0,0 +1,357 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from cribl_control_plane import utils
+ from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
+ from enum import Enum
+ import pydantic
+ from pydantic.functional_validators import PlainValidator
+ from typing import List, Optional
+ from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+ class OutputFilesystemType(str, Enum, metaclass=utils.OpenEnumMeta):
+     FILESYSTEM = "filesystem"
+
+
+ class OutputFilesystemDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Format of the output data"""
+
+     JSON = "json"
+     RAW = "raw"
+     PARQUET = "parquet"
+
+
+ class OutputFilesystemBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     BLOCK = "block"
+     DROP = "drop"
+
+
+ class OutputFilesystemDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+     BLOCK = "block"
+     DROP = "drop"
+
+
+ class OutputFilesystemCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Data compression format to apply to HTTP content before it is delivered"""
+
+     NONE = "none"
+     GZIP = "gzip"
+
+
+ class OutputFilesystemCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Compression level to apply before moving files to final destination"""
+
+     BEST_SPEED = "best_speed"
+     NORMAL = "normal"
+     BEST_COMPRESSION = "best_compression"
+
+
+ class OutputFilesystemParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Determines which data types are supported and how they are represented"""
+
+     PARQUET_1_0 = "PARQUET_1_0"
+     PARQUET_2_4 = "PARQUET_2_4"
+     PARQUET_2_6 = "PARQUET_2_6"
+
+
+ class OutputFilesystemDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+     DATA_PAGE_V1 = "DATA_PAGE_V1"
+     DATA_PAGE_V2 = "DATA_PAGE_V2"
+
+
+ class OutputFilesystemKeyValueMetadatumTypedDict(TypedDict):
+     value: str
+     key: NotRequired[str]
+
+
+ class OutputFilesystemKeyValueMetadatum(BaseModel):
+     value: str
+
+     key: Optional[str] = ""
+
+
+ class OutputFilesystemTypedDict(TypedDict):
+     type: OutputFilesystemType
+     dest_path: str
+     r"""Final destination for the output files"""
+     id: NotRequired[str]
+     r"""Unique ID for this output"""
+     pipeline: NotRequired[str]
+     r"""Pipeline to process data before sending out to this output"""
+     system_fields: NotRequired[List[str]]
+     r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+     environment: NotRequired[str]
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+     streamtags: NotRequired[List[str]]
+     r"""Tags for filtering and grouping in @{product}"""
+     stage_path: NotRequired[str]
+     r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+     add_id_to_stage_path: NotRequired[bool]
+     r"""Add the Output ID value to staging location"""
+     remove_empty_dirs: NotRequired[bool]
+     r"""Remove empty staging directories after moving files"""
+     partition_expr: NotRequired[str]
+     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+     format_: NotRequired[OutputFilesystemDataFormat]
+     r"""Format of the output data"""
+     base_file_name: NotRequired[str]
+     r"""JavaScript expression to define the output filename prefix (can be constant)"""
+     file_name_suffix: NotRequired[str]
+     r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+     max_file_size_mb: NotRequired[float]
+     r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+     max_file_open_time_sec: NotRequired[float]
+     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+     max_file_idle_time_sec: NotRequired[float]
+     r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+     max_open_files: NotRequired[float]
+     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+     header_line: NotRequired[str]
+     r"""If set, this line will be written to the beginning of each output file"""
+     write_high_water_mark: NotRequired[float]
+     r"""Buffer size used to write to a file"""
+     on_backpressure: NotRequired[OutputFilesystemBackpressureBehavior]
+     r"""How to handle events when all receivers are exerting backpressure"""
+     deadletter_enabled: NotRequired[bool]
+     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+     on_disk_full_backpressure: NotRequired[OutputFilesystemDiskSpaceProtection]
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+     description: NotRequired[str]
+     compress: NotRequired[OutputFilesystemCompression]
+     r"""Data compression format to apply to HTTP content before it is delivered"""
+     compression_level: NotRequired[OutputFilesystemCompressionLevel]
+     r"""Compression level to apply before moving files to final destination"""
+     automatic_schema: NotRequired[bool]
+     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+     parquet_version: NotRequired[OutputFilesystemParquetVersion]
+     r"""Determines which data types are supported and how they are represented"""
+     parquet_data_page_version: NotRequired[OutputFilesystemDataPageVersion]
+     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+     parquet_row_group_length: NotRequired[float]
+     r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+     parquet_page_size: NotRequired[str]
+     r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+     should_log_invalid_rows: NotRequired[bool]
+     r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+     key_value_metadata: NotRequired[List[OutputFilesystemKeyValueMetadatumTypedDict]]
+     r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+     enable_statistics: NotRequired[bool]
+     r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+     enable_write_page_index: NotRequired[bool]
+     r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+     enable_page_checksum: NotRequired[bool]
+     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+     empty_dir_cleanup_sec: NotRequired[float]
+     r"""How frequently, in seconds, to clean up empty directories"""
+     deadletter_path: NotRequired[str]
+     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+     max_retry_num: NotRequired[float]
+     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+
+
+ class OutputFilesystem(BaseModel):
+     type: Annotated[OutputFilesystemType, PlainValidator(validate_open_enum(False))]
+
+     dest_path: Annotated[str, pydantic.Field(alias="destPath")]
+     r"""Final destination for the output files"""
+
+     id: Optional[str] = None
+     r"""Unique ID for this output"""
+
+     pipeline: Optional[str] = None
+     r"""Pipeline to process data before sending out to this output"""
+
+     system_fields: Annotated[
+         Optional[List[str]], pydantic.Field(alias="systemFields")
+     ] = None
+     r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+     environment: Optional[str] = None
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+     streamtags: Optional[List[str]] = None
+     r"""Tags for filtering and grouping in @{product}"""
+
+     stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = None
+     r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+
+     add_id_to_stage_path: Annotated[
+         Optional[bool], pydantic.Field(alias="addIdToStagePath")
+     ] = True
+     r"""Add the Output ID value to staging location"""
+
+     remove_empty_dirs: Annotated[
+         Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+     ] = True
+     r"""Remove empty staging directories after moving files"""
+
+     partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (
+         "C.Time.strftime(_time ? _time : Date.now()/1000, '%Y/%m/%d')"
+     )
+     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+
+     format_: Annotated[
+         Annotated[
+             Optional[OutputFilesystemDataFormat],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="format"),
+     ] = OutputFilesystemDataFormat.JSON
+     r"""Format of the output data"""
+
+     base_file_name: Annotated[Optional[str], pydantic.Field(alias="baseFileName")] = (
+         "`CriblOut`"
+     )
+     r"""JavaScript expression to define the output filename prefix (can be constant)"""
+
+     file_name_suffix: Annotated[
+         Optional[str], pydantic.Field(alias="fileNameSuffix")
+     ] = '`.${C.env["CRIBL_WORKER_ID"]}.${__format}${__compression === "gzip" ? ".gz" : ""}`'
+     r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+
+     max_file_size_mb: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileSizeMB")
+     ] = 32
+     r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+
+     max_file_open_time_sec: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
+     ] = 300
+     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+
+     max_file_idle_time_sec: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileIdleTimeSec")
+     ] = 30
+     r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+
+     max_open_files: Annotated[Optional[float], pydantic.Field(alias="maxOpenFiles")] = (
+         100
+     )
+     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+
+     header_line: Annotated[Optional[str], pydantic.Field(alias="headerLine")] = ""
+     r"""If set, this line will be written to the beginning of each output file"""
+
+     write_high_water_mark: Annotated[
+         Optional[float], pydantic.Field(alias="writeHighWaterMark")
+     ] = 64
+     r"""Buffer size used to write to a file"""
+
+     on_backpressure: Annotated[
+         Annotated[
+             Optional[OutputFilesystemBackpressureBehavior],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="onBackpressure"),
+     ] = OutputFilesystemBackpressureBehavior.BLOCK
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     deadletter_enabled: Annotated[
+         Optional[bool], pydantic.Field(alias="deadletterEnabled")
+     ] = False
+     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
+     on_disk_full_backpressure: Annotated[
+         Annotated[
+             Optional[OutputFilesystemDiskSpaceProtection],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="onDiskFullBackpressure"),
+     ] = OutputFilesystemDiskSpaceProtection.BLOCK
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+     description: Optional[str] = None
+
+     compress: Annotated[
+         Optional[OutputFilesystemCompression], PlainValidator(validate_open_enum(False))
+     ] = OutputFilesystemCompression.GZIP
+     r"""Data compression format to apply to HTTP content before it is delivered"""
+
+     compression_level: Annotated[
+         Annotated[
+             Optional[OutputFilesystemCompressionLevel],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="compressionLevel"),
+     ] = OutputFilesystemCompressionLevel.BEST_SPEED
+     r"""Compression level to apply before moving files to final destination"""
+
+     automatic_schema: Annotated[
+         Optional[bool], pydantic.Field(alias="automaticSchema")
+     ] = False
+     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+
+     parquet_version: Annotated[
+         Annotated[
+             Optional[OutputFilesystemParquetVersion],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="parquetVersion"),
+     ] = OutputFilesystemParquetVersion.PARQUET_2_6
+     r"""Determines which data types are supported and how they are represented"""
+
+     parquet_data_page_version: Annotated[
+         Annotated[
+             Optional[OutputFilesystemDataPageVersion],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="parquetDataPageVersion"),
+     ] = OutputFilesystemDataPageVersion.DATA_PAGE_V2
+     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+     parquet_row_group_length: Annotated[
+         Optional[float], pydantic.Field(alias="parquetRowGroupLength")
+     ] = 10000
+     r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+
+     parquet_page_size: Annotated[
+         Optional[str], pydantic.Field(alias="parquetPageSize")
+     ] = "1MB"
+     r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+
+     should_log_invalid_rows: Annotated[
+         Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
+     ] = None
+     r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+
+     key_value_metadata: Annotated[
+         Optional[List[OutputFilesystemKeyValueMetadatum]],
+         pydantic.Field(alias="keyValueMetadata"),
+     ] = None
+     r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+
+     enable_statistics: Annotated[
+         Optional[bool], pydantic.Field(alias="enableStatistics")
+     ] = True
+     r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+
+     enable_write_page_index: Annotated[
+         Optional[bool], pydantic.Field(alias="enableWritePageIndex")
+     ] = True
+     r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+
+     enable_page_checksum: Annotated[
+         Optional[bool], pydantic.Field(alias="enablePageChecksum")
+     ] = False
+     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+
+     empty_dir_cleanup_sec: Annotated[
+         Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
+     ] = 300
+     r"""How frequently, in seconds, to clean up empty directories"""
+
+     deadletter_path: Annotated[
+         Optional[str], pydantic.Field(alias="deadletterPath")
+     ] = "$CRIBL_HOME/state/outputs/dead-letter"
+     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+
+     max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
+     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""