cribl-control-plane 0.2.1__py3-none-any.whl → 0.2.1rc2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (157)
  1. cribl_control_plane/_version.py +3 -5
  2. cribl_control_plane/errors/__init__.py +8 -5
  3. cribl_control_plane/errors/{healthstatus_error.py → healthserverstatus_error.py} +10 -9
  4. cribl_control_plane/groups_sdk.py +52 -28
  5. cribl_control_plane/health.py +22 -16
  6. cribl_control_plane/models/__init__.py +103 -50
  7. cribl_control_plane/models/authtoken.py +5 -1
  8. cribl_control_plane/models/configgroup.py +35 -6
  9. cribl_control_plane/models/createconfiggroupbyproductop.py +6 -5
  10. cribl_control_plane/models/createroutesappendbyidop.py +2 -2
  11. cribl_control_plane/models/createversionundoop.py +3 -3
  12. cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
  13. cribl_control_plane/models/distributedsummary.py +6 -0
  14. cribl_control_plane/models/groupcreaterequest.py +152 -0
  15. cribl_control_plane/models/hbcriblinfo.py +14 -3
  16. cribl_control_plane/models/{healthstatus.py → healthserverstatus.py} +7 -7
  17. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  18. cribl_control_plane/models/input.py +65 -63
  19. cribl_control_plane/models/inputappscope.py +4 -0
  20. cribl_control_plane/models/inputazureblob.py +4 -0
  21. cribl_control_plane/models/inputcollection.py +4 -0
  22. cribl_control_plane/models/inputconfluentcloud.py +8 -18
  23. cribl_control_plane/models/inputcribl.py +4 -0
  24. cribl_control_plane/models/inputcriblhttp.py +4 -0
  25. cribl_control_plane/models/inputcribllakehttp.py +4 -0
  26. cribl_control_plane/models/inputcriblmetrics.py +4 -0
  27. cribl_control_plane/models/inputcribltcp.py +4 -0
  28. cribl_control_plane/models/inputcrowdstrike.py +7 -0
  29. cribl_control_plane/models/inputdatadogagent.py +4 -0
  30. cribl_control_plane/models/inputdatagen.py +4 -0
  31. cribl_control_plane/models/inputedgeprometheus.py +12 -0
  32. cribl_control_plane/models/inputelastic.py +11 -0
  33. cribl_control_plane/models/inputeventhub.py +6 -0
  34. cribl_control_plane/models/inputexec.py +4 -0
  35. cribl_control_plane/models/inputfile.py +6 -0
  36. cribl_control_plane/models/inputfirehose.py +4 -0
  37. cribl_control_plane/models/inputgooglepubsub.py +7 -0
  38. cribl_control_plane/models/inputgrafana.py +8 -0
  39. cribl_control_plane/models/inputhttp.py +4 -0
  40. cribl_control_plane/models/inputhttpraw.py +4 -0
  41. cribl_control_plane/models/inputjournalfiles.py +4 -0
  42. cribl_control_plane/models/inputkafka.py +8 -17
  43. cribl_control_plane/models/inputkinesis.py +15 -0
  44. cribl_control_plane/models/inputkubeevents.py +4 -0
  45. cribl_control_plane/models/inputkubelogs.py +4 -0
  46. cribl_control_plane/models/inputkubemetrics.py +4 -0
  47. cribl_control_plane/models/inputloki.py +4 -0
  48. cribl_control_plane/models/inputmetrics.py +4 -0
  49. cribl_control_plane/models/inputmodeldriventelemetry.py +4 -0
  50. cribl_control_plane/models/inputmsk.py +7 -17
  51. cribl_control_plane/models/inputnetflow.py +4 -0
  52. cribl_control_plane/models/inputoffice365mgmt.py +11 -0
  53. cribl_control_plane/models/inputoffice365msgtrace.py +11 -0
  54. cribl_control_plane/models/inputoffice365service.py +11 -0
  55. cribl_control_plane/models/inputopentelemetry.py +8 -0
  56. cribl_control_plane/models/inputprometheus.py +10 -0
  57. cribl_control_plane/models/inputprometheusrw.py +4 -0
  58. cribl_control_plane/models/inputrawudp.py +4 -0
  59. cribl_control_plane/models/inputs3.py +7 -0
  60. cribl_control_plane/models/inputs3inventory.py +7 -0
  61. cribl_control_plane/models/inputsecuritylake.py +7 -0
  62. cribl_control_plane/models/inputsnmp.py +11 -0
  63. cribl_control_plane/models/inputsplunk.py +9 -0
  64. cribl_control_plane/models/inputsplunkhec.py +4 -0
  65. cribl_control_plane/models/inputsplunksearch.py +7 -0
  66. cribl_control_plane/models/inputsqs.py +9 -0
  67. cribl_control_plane/models/inputsyslog.py +8 -0
  68. cribl_control_plane/models/inputsystemmetrics.py +32 -0
  69. cribl_control_plane/models/inputsystemstate.py +4 -0
  70. cribl_control_plane/models/inputtcp.py +4 -0
  71. cribl_control_plane/models/inputtcpjson.py +4 -0
  72. cribl_control_plane/models/inputwef.py +6 -0
  73. cribl_control_plane/models/inputwindowsmetrics.py +28 -0
  74. cribl_control_plane/models/inputwineventlogs.py +8 -0
  75. cribl_control_plane/models/inputwiz.py +7 -0
  76. cribl_control_plane/models/inputwizwebhook.py +4 -0
  77. cribl_control_plane/models/inputzscalerhec.py +4 -0
  78. cribl_control_plane/models/jobinfo.py +4 -1
  79. cribl_control_plane/models/logininfo.py +3 -3
  80. cribl_control_plane/models/nodeprovidedinfo.py +11 -1
  81. cribl_control_plane/models/outpostnodeinfo.py +16 -0
  82. cribl_control_plane/models/output.py +77 -72
  83. cribl_control_plane/models/outputazureblob.py +20 -0
  84. cribl_control_plane/models/outputazuredataexplorer.py +28 -0
  85. cribl_control_plane/models/outputazureeventhub.py +17 -0
  86. cribl_control_plane/models/outputazurelogs.py +13 -0
  87. cribl_control_plane/models/outputchronicle.py +13 -0
  88. cribl_control_plane/models/outputclickhouse.py +17 -0
  89. cribl_control_plane/models/outputcloudwatch.py +13 -0
  90. cribl_control_plane/models/outputconfluentcloud.py +24 -18
  91. cribl_control_plane/models/outputcriblhttp.py +15 -0
  92. cribl_control_plane/models/outputcribllake.py +21 -0
  93. cribl_control_plane/models/outputcribltcp.py +12 -0
  94. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +15 -0
  95. cribl_control_plane/models/outputdatabricks.py +411 -0
  96. cribl_control_plane/models/outputdatadog.py +30 -0
  97. cribl_control_plane/models/outputdataset.py +23 -0
  98. cribl_control_plane/models/outputdls3.py +35 -0
  99. cribl_control_plane/models/outputdynatracehttp.py +22 -0
  100. cribl_control_plane/models/outputdynatraceotlp.py +22 -0
  101. cribl_control_plane/models/outputelastic.py +18 -0
  102. cribl_control_plane/models/outputelasticcloud.py +13 -0
  103. cribl_control_plane/models/outputexabeam.py +14 -0
  104. cribl_control_plane/models/outputfilesystem.py +15 -0
  105. cribl_control_plane/models/outputgooglechronicle.py +21 -0
  106. cribl_control_plane/models/outputgooglecloudlogging.py +19 -0
  107. cribl_control_plane/models/outputgooglecloudstorage.py +28 -0
  108. cribl_control_plane/models/outputgooglepubsub.py +13 -0
  109. cribl_control_plane/models/outputgrafanacloud.py +50 -0
  110. cribl_control_plane/models/outputgraphite.py +12 -0
  111. cribl_control_plane/models/outputhoneycomb.py +13 -0
  112. cribl_control_plane/models/outputhumiohec.py +15 -0
  113. cribl_control_plane/models/outputinfluxdb.py +19 -0
  114. cribl_control_plane/models/outputkafka.py +24 -17
  115. cribl_control_plane/models/outputkinesis.py +15 -0
  116. cribl_control_plane/models/outputloki.py +20 -0
  117. cribl_control_plane/models/outputminio.py +28 -0
  118. cribl_control_plane/models/outputmsk.py +23 -17
  119. cribl_control_plane/models/outputnewrelic.py +16 -0
  120. cribl_control_plane/models/outputnewrelicevents.py +16 -0
  121. cribl_control_plane/models/outputopentelemetry.py +22 -0
  122. cribl_control_plane/models/outputprometheus.py +13 -0
  123. cribl_control_plane/models/outputring.py +2 -0
  124. cribl_control_plane/models/outputs3.py +35 -0
  125. cribl_control_plane/models/outputsecuritylake.py +29 -0
  126. cribl_control_plane/models/outputsentinel.py +15 -0
  127. cribl_control_plane/models/outputsentineloneaisiem.py +13 -0
  128. cribl_control_plane/models/outputservicenow.py +21 -0
  129. cribl_control_plane/models/outputsignalfx.py +13 -0
  130. cribl_control_plane/models/outputsns.py +13 -0
  131. cribl_control_plane/models/outputsplunk.py +15 -0
  132. cribl_control_plane/models/outputsplunkhec.py +13 -0
  133. cribl_control_plane/models/outputsplunklb.py +15 -0
  134. cribl_control_plane/models/outputsqs.py +15 -0
  135. cribl_control_plane/models/outputstatsd.py +12 -0
  136. cribl_control_plane/models/outputstatsdext.py +12 -0
  137. cribl_control_plane/models/outputsumologic.py +15 -0
  138. cribl_control_plane/models/outputsyslog.py +24 -0
  139. cribl_control_plane/models/outputtcpjson.py +12 -0
  140. cribl_control_plane/models/outputwavefront.py +13 -0
  141. cribl_control_plane/models/outputwebhook.py +23 -0
  142. cribl_control_plane/models/outputxsiam.py +13 -0
  143. cribl_control_plane/models/packinfo.py +8 -5
  144. cribl_control_plane/models/packinstallinfo.py +8 -5
  145. cribl_control_plane/models/routeconf.py +3 -4
  146. cribl_control_plane/models/runnablejobcollection.py +4 -0
  147. cribl_control_plane/models/updatepacksop.py +25 -0
  148. cribl_control_plane/models/{routecloneconf.py → uploadpackresponse.py} +4 -4
  149. cribl_control_plane/packs.py +202 -7
  150. cribl_control_plane/routes_sdk.py +6 -6
  151. cribl_control_plane/tokens.py +23 -15
  152. {cribl_control_plane-0.2.1.dist-info → cribl_control_plane-0.2.1rc2.dist-info}/METADATA +37 -5
  153. {cribl_control_plane-0.2.1.dist-info → cribl_control_plane-0.2.1rc2.dist-info}/RECORD +154 -153
  154. cribl_control_plane/models/appmode.py +0 -14
  155. cribl_control_plane/models/error.py +0 -16
  156. cribl_control_plane/models/gethealthinfoop.py +0 -17
  157. {cribl_control_plane-0.2.1.dist-info → cribl_control_plane-0.2.1rc2.dist-info}/WHEEL +0 -0
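Among the renames above, items 3 and 16 are import-breaking for downstream code: models/healthstatus.py becomes models/healthserverstatus.py, with a matching rename under errors/. A hedged migration sketch (the class names are inferred from the module names, not confirmed by the diff; verify against the generated sources):

# Before (0.2.1) - inferred from models/healthstatus.py
# from cribl_control_plane.models.healthstatus import HealthStatus

# After (0.2.1rc2) - inferred from models/healthserverstatus.py
from cribl_control_plane.models.healthserverstatus import HealthServerStatus
from cribl_control_plane.errors.healthserverstatus_error import (
    HealthServerStatusError,  # hypothetical class name, inferred from the module rename
)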
cribl_control_plane/models/outputdatabricks.py (new file)
@@ -0,0 +1,411 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputDatabricksType(str, Enum):
+    DATABRICKS = "databricks"
+
+
+class OutputDatabricksDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Format of the output data"""
+
+    # JSON
+    JSON = "json"
+    # Raw
+    RAW = "raw"
+    # Parquet
+    PARQUET = "parquet"
+
+
+class OutputDatabricksBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    # Block
+    BLOCK = "block"
+    # Drop
+    DROP = "drop"
+
+
+class OutputDatabricksDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+    # Block
+    BLOCK = "block"
+    # Drop
+    DROP = "drop"
+
+
+class OutputDatabricksCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Data compression format to apply to HTTP content before it is delivered"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class OutputDatabricksCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Compression level to apply before moving files to final destination"""
+
+    # Best Speed
+    BEST_SPEED = "best_speed"
+    # Normal
+    NORMAL = "normal"
+    # Best Compression
+    BEST_COMPRESSION = "best_compression"
+
+
+class OutputDatabricksParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Determines which data types are supported and how they are represented"""
+
+    # 1.0
+    PARQUET_1_0 = "PARQUET_1_0"
+    # 2.4
+    PARQUET_2_4 = "PARQUET_2_4"
+    # 2.6
+    PARQUET_2_6 = "PARQUET_2_6"
+
+
+class OutputDatabricksDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+    # V1
+    DATA_PAGE_V1 = "DATA_PAGE_V1"
+    # V2
+    DATA_PAGE_V2 = "DATA_PAGE_V2"
+
+
+class OutputDatabricksKeyValueMetadatumTypedDict(TypedDict):
+    value: str
+    key: NotRequired[str]
+
+
+class OutputDatabricksKeyValueMetadatum(BaseModel):
+    value: str
+
+    key: Optional[str] = ""
+
+
+class OutputDatabricksTypedDict(TypedDict):
+    type: OutputDatabricksType
+    workspace_id: str
+    r"""Databricks workspace ID"""
+    client_id: str
+    r"""OAuth client ID for Unity Catalog authentication"""
+    client_secret: str
+    r"""OAuth client secret for Unity Catalog authentication"""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    dest_path: NotRequired[str]
+    r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
+    stage_path: NotRequired[str]
+    r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+    add_id_to_stage_path: NotRequired[bool]
+    r"""Add the Output ID value to staging location"""
+    remove_empty_dirs: NotRequired[bool]
+    r"""Remove empty staging directories after moving files"""
+    partition_expr: NotRequired[str]
+    r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+    format_: NotRequired[OutputDatabricksDataFormat]
+    r"""Format of the output data"""
+    base_file_name: NotRequired[str]
+    r"""JavaScript expression to define the output filename prefix (can be constant)"""
+    file_name_suffix: NotRequired[str]
+    r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+    max_file_size_mb: NotRequired[float]
+    r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+    max_file_open_time_sec: NotRequired[float]
+    r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+    max_file_idle_time_sec: NotRequired[float]
+    r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+    max_open_files: NotRequired[float]
+    r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+    header_line: NotRequired[str]
+    r"""If set, this line will be written to the beginning of each output file"""
+    write_high_water_mark: NotRequired[float]
+    r"""Buffer size used to write to a file"""
+    on_backpressure: NotRequired[OutputDatabricksBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    deadletter_enabled: NotRequired[bool]
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+    on_disk_full_backpressure: NotRequired[OutputDatabricksDiskSpaceProtection]
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    scope: NotRequired[str]
+    r"""OAuth scope for Unity Catalog authentication"""
+    catalog: NotRequired[str]
+    r"""Name of the catalog to use for the output"""
+    schema_: NotRequired[str]
+    r"""Name of the catalog schema to use for the output"""
+    events_volume_name: NotRequired[str]
+    r"""Name of the events volume in Databricks"""
+    description: NotRequired[str]
+    compress: NotRequired[OutputDatabricksCompression]
+    r"""Data compression format to apply to HTTP content before it is delivered"""
+    compression_level: NotRequired[OutputDatabricksCompressionLevel]
+    r"""Compression level to apply before moving files to final destination"""
+    automatic_schema: NotRequired[bool]
+    r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+    parquet_version: NotRequired[OutputDatabricksParquetVersion]
+    r"""Determines which data types are supported and how they are represented"""
+    parquet_data_page_version: NotRequired[OutputDatabricksDataPageVersion]
+    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+    parquet_row_group_length: NotRequired[float]
+    r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+    parquet_page_size: NotRequired[str]
+    r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+    should_log_invalid_rows: NotRequired[bool]
+    r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+    key_value_metadata: NotRequired[List[OutputDatabricksKeyValueMetadatumTypedDict]]
+    r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+    enable_statistics: NotRequired[bool]
+    r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+    enable_write_page_index: NotRequired[bool]
+    r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+    enable_page_checksum: NotRequired[bool]
+    r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+    empty_dir_cleanup_sec: NotRequired[float]
+    r"""How frequently, in seconds, to clean up empty directories"""
+    deadletter_path: NotRequired[str]
+    r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+    max_retry_num: NotRequired[float]
+    r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+
+
+class OutputDatabricks(BaseModel):
+    type: OutputDatabricksType
+
+    workspace_id: Annotated[str, pydantic.Field(alias="workspaceId")]
+    r"""Databricks workspace ID"""
+
+    client_id: Annotated[str, pydantic.Field(alias="clientId")]
+    r"""OAuth client ID for Unity Catalog authentication"""
+
+    client_secret: Annotated[str, pydantic.Field(alias="clientSecret")]
+    r"""OAuth client secret for Unity Catalog authentication"""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = ""
+    r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
+
+    stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
+        "$CRIBL_HOME/state/outputs/staging"
+    )
+    r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+
+    add_id_to_stage_path: Annotated[
+        Optional[bool], pydantic.Field(alias="addIdToStagePath")
+    ] = True
+    r"""Add the Output ID value to staging location"""
+
+    remove_empty_dirs: Annotated[
+        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+    ] = True
+    r"""Remove empty staging directories after moving files"""
+
+    partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (
+        "C.Time.strftime(_time ? _time : Date.now()/1000, '%Y/%m/%d')"
+    )
+    r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+
+    format_: Annotated[
+        Annotated[
+            Optional[OutputDatabricksDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
+    ] = OutputDatabricksDataFormat.JSON
+    r"""Format of the output data"""
+
+    base_file_name: Annotated[Optional[str], pydantic.Field(alias="baseFileName")] = (
+        "`CriblOut`"
+    )
+    r"""JavaScript expression to define the output filename prefix (can be constant)"""
+
+    file_name_suffix: Annotated[
+        Optional[str], pydantic.Field(alias="fileNameSuffix")
+    ] = '`.${C.env["CRIBL_WORKER_ID"]}.${__format}${__compression === "gzip" ? ".gz" : ""}`'
+    r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+
+    max_file_size_mb: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileSizeMB")
+    ] = 32
+    r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+
+    max_file_open_time_sec: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
+    ] = 300
+    r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+
+    max_file_idle_time_sec: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileIdleTimeSec")
+    ] = 30
+    r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+
+    max_open_files: Annotated[Optional[float], pydantic.Field(alias="maxOpenFiles")] = (
+        100
+    )
+    r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+
+    header_line: Annotated[Optional[str], pydantic.Field(alias="headerLine")] = ""
+    r"""If set, this line will be written to the beginning of each output file"""
+
+    write_high_water_mark: Annotated[
+        Optional[float], pydantic.Field(alias="writeHighWaterMark")
+    ] = 64
+    r"""Buffer size used to write to a file"""
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputDatabricksBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputDatabricksBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    deadletter_enabled: Annotated[
+        Optional[bool], pydantic.Field(alias="deadletterEnabled")
+    ] = False
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
+    on_disk_full_backpressure: Annotated[
+        Annotated[
+            Optional[OutputDatabricksDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onDiskFullBackpressure"),
+    ] = OutputDatabricksDiskSpaceProtection.BLOCK
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+    scope: Optional[str] = "all-apis"
+    r"""OAuth scope for Unity Catalog authentication"""
+
+    catalog: Optional[str] = "main"
+    r"""Name of the catalog to use for the output"""
+
+    schema_: Annotated[Optional[str], pydantic.Field(alias="schema")] = "external"
+    r"""Name of the catalog schema to use for the output"""
+
+    events_volume_name: Annotated[
+        Optional[str], pydantic.Field(alias="eventsVolumeName")
+    ] = "events"
+    r"""Name of the events volume in Databricks"""
+
+    description: Optional[str] = None
+
+    compress: Annotated[
+        Optional[OutputDatabricksCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputDatabricksCompression.GZIP
+    r"""Data compression format to apply to HTTP content before it is delivered"""
+
+    compression_level: Annotated[
+        Annotated[
+            Optional[OutputDatabricksCompressionLevel],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="compressionLevel"),
+    ] = OutputDatabricksCompressionLevel.BEST_SPEED
+    r"""Compression level to apply before moving files to final destination"""
+
+    automatic_schema: Annotated[
+        Optional[bool], pydantic.Field(alias="automaticSchema")
+    ] = False
+    r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+
+    parquet_version: Annotated[
+        Annotated[
+            Optional[OutputDatabricksParquetVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="parquetVersion"),
+    ] = OutputDatabricksParquetVersion.PARQUET_2_6
+    r"""Determines which data types are supported and how they are represented"""
+
+    parquet_data_page_version: Annotated[
+        Annotated[
+            Optional[OutputDatabricksDataPageVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="parquetDataPageVersion"),
+    ] = OutputDatabricksDataPageVersion.DATA_PAGE_V2
+    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+    parquet_row_group_length: Annotated[
+        Optional[float], pydantic.Field(alias="parquetRowGroupLength")
+    ] = 10000
+    r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+
+    parquet_page_size: Annotated[
+        Optional[str], pydantic.Field(alias="parquetPageSize")
+    ] = "1MB"
+    r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+
+    should_log_invalid_rows: Annotated[
+        Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
+    ] = None
+    r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+
+    key_value_metadata: Annotated[
+        Optional[List[OutputDatabricksKeyValueMetadatum]],
+        pydantic.Field(alias="keyValueMetadata"),
+    ] = None
+    r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+
+    enable_statistics: Annotated[
+        Optional[bool], pydantic.Field(alias="enableStatistics")
+    ] = True
+    r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+
+    enable_write_page_index: Annotated[
+        Optional[bool], pydantic.Field(alias="enableWritePageIndex")
+    ] = True
+    r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+
+    enable_page_checksum: Annotated[
+        Optional[bool], pydantic.Field(alias="enablePageChecksum")
+    ] = False
+    r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+
+    empty_dir_cleanup_sec: Annotated[
+        Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
+    ] = 300
+    r"""How frequently, in seconds, to clean up empty directories"""
+
+    deadletter_path: Annotated[
+        Optional[str], pydantic.Field(alias="deadletterPath")
+    ] = "$CRIBL_HOME/state/outputs/dead-letter"
+    r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+
+    max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
+    r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
cribl_control_plane/models/outputdatadog.py
@@ -18,32 +18,49 @@ class OutputDatadogType(str, Enum):
 class SendLogsAs(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The content type to use when sending logs"""
 
+    # text/plain
     TEXT = "text"
+    # application/json
     JSON = "json"
 
 
 class OutputDatadogSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""
 
+    # emergency
     EMERGENCY = "emergency"
+    # alert
     ALERT = "alert"
+    # critical
     CRITICAL = "critical"
+    # error
     ERROR = "error"
+    # warning
     WARNING = "warning"
+    # notice
     NOTICE = "notice"
+    # info
     INFO = "info"
+    # debug
     DEBUG = "debug"
 
 
 class DatadogSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Datadog site to which events should be sent"""
 
+    # US
     US = "us"
+    # US3
     US3 = "us3"
+    # US5
     US5 = "us5"
+    # Europe
     EU = "eu"
+    # US1-FED
     FED1 = "fed1"
+    # AP1
     AP1 = "ap1"
+    # Custom
     CUSTOM = "custom"
@@ -61,8 +78,11 @@ class OutputDatadogExtraHTTPHeader(BaseModel):
 class OutputDatadogFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
@@ -123,8 +143,11 @@ class OutputDatadogTimeoutRetrySettings(BaseModel):
 class OutputDatadogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -138,22 +161,29 @@ class OutputDatadogAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)
 class OutputDatadogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputDatadogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputDatadogMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 
cribl_control_plane/models/outputdataset.py
@@ -18,12 +18,19 @@ class OutputDatasetType(str, Enum):
 class OutputDatasetSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""
 
+    # 0 - finest
     FINEST = "finest"
+    # 1 - finer
     FINER = "finer"
+    # 2 - fine
     FINE = "fine"
+    # 3 - info
     INFO = "info"
+    # 4 - warning
     WARNING = "warning"
+    # 5 - error
     ERROR = "error"
+    # 6 - fatal
     FATAL = "fatal"
 
 
@@ -84,8 +91,11 @@ class OutputDatasetTimeoutRetrySettings(BaseModel):
 class DataSetSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""DataSet site to which events should be sent"""
 
+    # US
     US = "us"
+    # Europe
     EU = "eu"
+    # Custom
     CUSTOM = "custom"
 
 
@@ -103,16 +113,22 @@ class OutputDatasetExtraHTTPHeader(BaseModel):
 class OutputDatasetFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
 class OutputDatasetBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -126,22 +142,29 @@ class OutputDatasetAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)
 class OutputDatasetCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputDatasetQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputDatasetMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 
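Every enum in these hunks is declared with metaclass=utils.OpenEnumMeta and consumed through PlainValidator(validate_open_enum(False)), so the comments added in this release are documentation only; the wire values stay plain strings. A self-contained sketch of the open-enum idea using hypothetical names (the SDK's own helpers may differ in detail, but the pattern lets unrecognized server-side values pass through instead of failing validation):

from enum import Enum
from typing import Union

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Severity(str, Enum):
    INFO = "info"
    ERROR = "error"


def validate_open(value: object) -> Union[Severity, str]:
    # Map known wire values to enum members; let unknown strings through so
    # newer server-side values don't break older SDK clients.
    try:
        return Severity(str(value))
    except ValueError:
        return str(value)


class Event(pydantic.BaseModel):
    severity: Annotated[
        Union[Severity, str], PlainValidator(validate_open)
    ] = Severity.INFO


assert Event(severity="error").severity is Severity.ERROR
assert Event(severity="fatal").severity == "fatal"  # unknown value survives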