cribl-control-plane 0.0.49__py3-none-any.whl → 0.1.0b1__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic.

Files changed (173)
  1. cribl_control_plane/_version.py +4 -6
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/health.py +6 -2
  4. cribl_control_plane/models/__init__.py +68 -30
  5. cribl_control_plane/models/cacheconnection.py +10 -2
  6. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  7. cribl_control_plane/models/cloudprovider.py +2 -1
  8. cribl_control_plane/models/configgroup.py +7 -2
  9. cribl_control_plane/models/configgroupcloud.py +6 -2
  10. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  11. cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
  12. cribl_control_plane/models/createversionpushop.py +5 -5
  13. cribl_control_plane/models/cribllakedataset.py +8 -2
  14. cribl_control_plane/models/datasetmetadata.py +8 -2
  15. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  16. cribl_control_plane/models/error.py +16 -0
  17. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  18. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  19. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  20. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  21. cribl_control_plane/models/gethealthinfoop.py +17 -0
  22. cribl_control_plane/models/getsummaryop.py +7 -2
  23. cribl_control_plane/models/getversionshowop.py +6 -5
  24. cribl_control_plane/models/gitshowresult.py +19 -0
  25. cribl_control_plane/models/hbcriblinfo.py +24 -3
  26. cribl_control_plane/models/healthstatus.py +7 -4
  27. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  28. cribl_control_plane/models/inputappscope.py +34 -14
  29. cribl_control_plane/models/inputazureblob.py +17 -6
  30. cribl_control_plane/models/inputcollection.py +11 -4
  31. cribl_control_plane/models/inputconfluentcloud.py +41 -32
  32. cribl_control_plane/models/inputcribl.py +11 -4
  33. cribl_control_plane/models/inputcriblhttp.py +23 -8
  34. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  35. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  36. cribl_control_plane/models/inputcribltcp.py +23 -8
  37. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  38. cribl_control_plane/models/inputdatadogagent.py +24 -8
  39. cribl_control_plane/models/inputdatagen.py +11 -4
  40. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  41. cribl_control_plane/models/inputelastic.py +40 -14
  42. cribl_control_plane/models/inputeventhub.py +15 -6
  43. cribl_control_plane/models/inputexec.py +14 -6
  44. cribl_control_plane/models/inputfile.py +15 -6
  45. cribl_control_plane/models/inputfirehose.py +23 -8
  46. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  47. cribl_control_plane/models/inputgrafana.py +67 -24
  48. cribl_control_plane/models/inputhttp.py +23 -8
  49. cribl_control_plane/models/inputhttpraw.py +23 -8
  50. cribl_control_plane/models/inputjournalfiles.py +12 -4
  51. cribl_control_plane/models/inputkafka.py +41 -28
  52. cribl_control_plane/models/inputkinesis.py +38 -14
  53. cribl_control_plane/models/inputkubeevents.py +11 -4
  54. cribl_control_plane/models/inputkubelogs.py +16 -8
  55. cribl_control_plane/models/inputkubemetrics.py +16 -8
  56. cribl_control_plane/models/inputloki.py +29 -10
  57. cribl_control_plane/models/inputmetrics.py +23 -8
  58. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  59. cribl_control_plane/models/inputmsk.py +48 -30
  60. cribl_control_plane/models/inputnetflow.py +11 -4
  61. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  62. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  63. cribl_control_plane/models/inputoffice365service.py +35 -16
  64. cribl_control_plane/models/inputopentelemetry.py +38 -16
  65. cribl_control_plane/models/inputprometheus.py +50 -18
  66. cribl_control_plane/models/inputprometheusrw.py +30 -10
  67. cribl_control_plane/models/inputrawudp.py +11 -4
  68. cribl_control_plane/models/inputs3.py +21 -8
  69. cribl_control_plane/models/inputs3inventory.py +26 -10
  70. cribl_control_plane/models/inputsecuritylake.py +27 -10
  71. cribl_control_plane/models/inputsnmp.py +16 -6
  72. cribl_control_plane/models/inputsplunk.py +33 -12
  73. cribl_control_plane/models/inputsplunkhec.py +29 -10
  74. cribl_control_plane/models/inputsplunksearch.py +33 -14
  75. cribl_control_plane/models/inputsqs.py +27 -10
  76. cribl_control_plane/models/inputsyslog.py +43 -16
  77. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  78. cribl_control_plane/models/inputsystemstate.py +16 -8
  79. cribl_control_plane/models/inputtcp.py +29 -10
  80. cribl_control_plane/models/inputtcpjson.py +29 -10
  81. cribl_control_plane/models/inputwef.py +37 -14
  82. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  83. cribl_control_plane/models/inputwineventlogs.py +20 -10
  84. cribl_control_plane/models/inputwiz.py +21 -8
  85. cribl_control_plane/models/inputwizwebhook.py +23 -8
  86. cribl_control_plane/models/inputzscalerhec.py +29 -10
  87. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  88. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  89. cribl_control_plane/models/masterworkerentry.py +7 -2
  90. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  91. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  92. cribl_control_plane/models/nodeprovidedinfo.py +3 -0
  93. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  94. cribl_control_plane/models/nodeupgradestate.py +2 -1
  95. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  96. cribl_control_plane/models/output.py +3 -0
  97. cribl_control_plane/models/outputazureblob.py +48 -18
  98. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  99. cribl_control_plane/models/outputazureeventhub.py +40 -18
  100. cribl_control_plane/models/outputazurelogs.py +35 -12
  101. cribl_control_plane/models/outputclickhouse.py +55 -20
  102. cribl_control_plane/models/outputcloudwatch.py +29 -10
  103. cribl_control_plane/models/outputconfluentcloud.py +71 -44
  104. cribl_control_plane/models/outputcriblhttp.py +44 -16
  105. cribl_control_plane/models/outputcribllake.py +46 -16
  106. cribl_control_plane/models/outputcribltcp.py +45 -18
  107. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  108. cribl_control_plane/models/outputdatabricks.py +439 -0
  109. cribl_control_plane/models/outputdatadog.py +48 -20
  110. cribl_control_plane/models/outputdataset.py +46 -18
  111. cribl_control_plane/models/outputdiskspool.py +7 -2
  112. cribl_control_plane/models/outputdls3.py +68 -24
  113. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  114. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  115. cribl_control_plane/models/outputelastic.py +43 -18
  116. cribl_control_plane/models/outputelasticcloud.py +36 -12
  117. cribl_control_plane/models/outputexabeam.py +29 -10
  118. cribl_control_plane/models/outputfilesystem.py +39 -14
  119. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  120. cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
  121. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  122. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  123. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  124. cribl_control_plane/models/outputgraphite.py +31 -14
  125. cribl_control_plane/models/outputhoneycomb.py +35 -12
  126. cribl_control_plane/models/outputhumiohec.py +43 -16
  127. cribl_control_plane/models/outputinfluxdb.py +42 -16
  128. cribl_control_plane/models/outputkafka.py +69 -40
  129. cribl_control_plane/models/outputkinesis.py +40 -16
  130. cribl_control_plane/models/outputloki.py +41 -16
  131. cribl_control_plane/models/outputminio.py +65 -24
  132. cribl_control_plane/models/outputmsk.py +77 -42
  133. cribl_control_plane/models/outputnewrelic.py +43 -18
  134. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  135. cribl_control_plane/models/outputopentelemetry.py +67 -26
  136. cribl_control_plane/models/outputprometheus.py +35 -12
  137. cribl_control_plane/models/outputring.py +19 -8
  138. cribl_control_plane/models/outputs3.py +68 -26
  139. cribl_control_plane/models/outputsecuritylake.py +52 -18
  140. cribl_control_plane/models/outputsentinel.py +45 -18
  141. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  142. cribl_control_plane/models/outputservicenow.py +60 -24
  143. cribl_control_plane/models/outputsignalfx.py +37 -14
  144. cribl_control_plane/models/outputsns.py +36 -14
  145. cribl_control_plane/models/outputsplunk.py +60 -24
  146. cribl_control_plane/models/outputsplunkhec.py +35 -12
  147. cribl_control_plane/models/outputsplunklb.py +77 -30
  148. cribl_control_plane/models/outputsqs.py +41 -16
  149. cribl_control_plane/models/outputstatsd.py +30 -14
  150. cribl_control_plane/models/outputstatsdext.py +29 -12
  151. cribl_control_plane/models/outputsumologic.py +35 -12
  152. cribl_control_plane/models/outputsyslog.py +58 -24
  153. cribl_control_plane/models/outputtcpjson.py +52 -20
  154. cribl_control_plane/models/outputwavefront.py +35 -12
  155. cribl_control_plane/models/outputwebhook.py +58 -22
  156. cribl_control_plane/models/outputxsiam.py +35 -14
  157. cribl_control_plane/models/productscore.py +2 -1
  158. cribl_control_plane/models/rbacresource.py +2 -1
  159. cribl_control_plane/models/resourcepolicy.py +4 -2
  160. cribl_control_plane/models/routeconf.py +3 -4
  161. cribl_control_plane/models/runnablejobcollection.py +30 -13
  162. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  163. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  164. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  165. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  166. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
  167. cribl_control_plane/models/workertypes.py +2 -1
  168. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/METADATA +1 -1
  169. cribl_control_plane-0.1.0b1.dist-info/RECORD +327 -0
  170. cribl_control_plane/models/appmode.py +0 -13
  171. cribl_control_plane/models/routecloneconf.py +0 -13
  172. cribl_control_plane-0.0.49.dist-info/RECORD +0 -325
  173. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputdatabricks.py
@@ -0,0 +1,439 @@
+ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+ from __future__ import annotations
+ from cribl_control_plane import utils
+ from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
+ from enum import Enum
+ import pydantic
+ from pydantic.functional_validators import PlainValidator
+ from typing import List, Optional
+ from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+ class OutputDatabricksType(str, Enum):
+     DATABRICKS = "databricks"
+
+
+ class OutputDatabricksDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Format of the output data"""
+
+     JSON = "json"
+     RAW = "raw"
+     PARQUET = "parquet"
+
+
+ class OutputDatabricksBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     BLOCK = "block"
+     DROP = "drop"
+
+
+ class OutputDatabricksDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+     BLOCK = "block"
+     DROP = "drop"
+
+
+ class OutputDatabricksAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+
+     MANUAL = "manual"
+     SECRET = "secret"
+
+
+ class OutputDatabricksCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Data compression format to apply to HTTP content before it is delivered"""
+
+     NONE = "none"
+     GZIP = "gzip"
+
+
+ class OutputDatabricksCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Compression level to apply before moving files to final destination"""
+
+     BEST_SPEED = "best_speed"
+     NORMAL = "normal"
+     BEST_COMPRESSION = "best_compression"
+
+
+ class OutputDatabricksParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Determines which data types are supported and how they are represented"""
+
+     PARQUET_1_0 = "PARQUET_1_0"
+     PARQUET_2_4 = "PARQUET_2_4"
+     PARQUET_2_6 = "PARQUET_2_6"
+
+
+ class OutputDatabricksDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+     DATA_PAGE_V1 = "DATA_PAGE_V1"
+     DATA_PAGE_V2 = "DATA_PAGE_V2"
+
+
+ class OutputDatabricksKeyValueMetadatumTypedDict(TypedDict):
+     value: str
+     key: NotRequired[str]
+
+
+ class OutputDatabricksKeyValueMetadatum(BaseModel):
+     value: str
+
+     key: Optional[str] = ""
+
+
+ class OutputDatabricksTypedDict(TypedDict):
+     type: OutputDatabricksType
+     id: NotRequired[str]
+     r"""Unique ID for this output"""
+     pipeline: NotRequired[str]
+     r"""Pipeline to process data before sending out to this output"""
+     system_fields: NotRequired[List[str]]
+     r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+     environment: NotRequired[str]
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+     streamtags: NotRequired[List[str]]
+     r"""Tags for filtering and grouping in @{product}"""
+     dest_path: NotRequired[str]
+     r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
+     stage_path: NotRequired[str]
+     r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+     add_id_to_stage_path: NotRequired[bool]
+     r"""Add the Output ID value to staging location"""
+     remove_empty_dirs: NotRequired[bool]
+     r"""Remove empty staging directories after moving files"""
+     partition_expr: NotRequired[str]
+     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+     format_: NotRequired[OutputDatabricksDataFormat]
+     r"""Format of the output data"""
+     base_file_name: NotRequired[str]
+     r"""JavaScript expression to define the output filename prefix (can be constant)"""
+     file_name_suffix: NotRequired[str]
+     r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+     max_file_size_mb: NotRequired[float]
+     r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+     max_file_open_time_sec: NotRequired[float]
+     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+     max_file_idle_time_sec: NotRequired[float]
+     r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+     max_open_files: NotRequired[float]
+     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+     header_line: NotRequired[str]
+     r"""If set, this line will be written to the beginning of each output file"""
+     write_high_water_mark: NotRequired[float]
+     r"""Buffer size used to write to a file"""
+     on_backpressure: NotRequired[OutputDatabricksBackpressureBehavior]
+     r"""How to handle events when all receivers are exerting backpressure"""
+     deadletter_enabled: NotRequired[bool]
+     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+     on_disk_full_backpressure: NotRequired[OutputDatabricksDiskSpaceProtection]
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+     unity_auth_method: NotRequired[OutputDatabricksAuthenticationMethod]
+     r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+     login_url: NotRequired[str]
+     r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
+     client_id: NotRequired[str]
+     r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
+     client_secret: NotRequired[str]
+     r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
+     client_text_secret: NotRequired[str]
+     r"""Select or create a stored secret that references your Client ID and Client Secret"""
+     scope: NotRequired[str]
+     r"""OAuth scope for Unity Catalog authentication"""
+     token_timeout_secs: NotRequired[float]
+     r"""How often the OAuth token should be refreshed"""
+     default_catalog: NotRequired[str]
+     r"""Name of the catalog to use for the output"""
+     default_schema: NotRequired[str]
+     r"""Name of the catalog schema to use for the output"""
+     events_volume_name: NotRequired[str]
+     r"""Name of the events volume in Databricks"""
+     over_write_files: NotRequired[bool]
+     r"""Uploaded files should be overwritten if they already exist. If disabled, upload will fail if a file already exists."""
+     description: NotRequired[str]
+     compress: NotRequired[OutputDatabricksCompression]
+     r"""Data compression format to apply to HTTP content before it is delivered"""
+     compression_level: NotRequired[OutputDatabricksCompressionLevel]
+     r"""Compression level to apply before moving files to final destination"""
+     automatic_schema: NotRequired[bool]
+     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+     parquet_version: NotRequired[OutputDatabricksParquetVersion]
+     r"""Determines which data types are supported and how they are represented"""
+     parquet_data_page_version: NotRequired[OutputDatabricksDataPageVersion]
+     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+     parquet_row_group_length: NotRequired[float]
+     r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+     parquet_page_size: NotRequired[str]
+     r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+     should_log_invalid_rows: NotRequired[bool]
+     r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+     key_value_metadata: NotRequired[List[OutputDatabricksKeyValueMetadatumTypedDict]]
+     r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+     enable_statistics: NotRequired[bool]
+     r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+     enable_write_page_index: NotRequired[bool]
+     r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+     enable_page_checksum: NotRequired[bool]
+     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+     empty_dir_cleanup_sec: NotRequired[float]
+     r"""How frequently, in seconds, to clean up empty directories"""
+     deadletter_path: NotRequired[str]
+     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+     max_retry_num: NotRequired[float]
+     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+
+
+ class OutputDatabricks(BaseModel):
+     type: OutputDatabricksType
+
+     id: Optional[str] = None
+     r"""Unique ID for this output"""
+
+     pipeline: Optional[str] = None
+     r"""Pipeline to process data before sending out to this output"""
+
+     system_fields: Annotated[
+         Optional[List[str]], pydantic.Field(alias="systemFields")
+     ] = None
+     r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+     environment: Optional[str] = None
+     r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+     streamtags: Optional[List[str]] = None
+     r"""Tags for filtering and grouping in @{product}"""
+
+     dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = ""
+     r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
+
+     stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
+         "$CRIBL_HOME/state/outputs/staging"
+     )
+     r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+
+     add_id_to_stage_path: Annotated[
+         Optional[bool], pydantic.Field(alias="addIdToStagePath")
+     ] = True
+     r"""Add the Output ID value to staging location"""
+
+     remove_empty_dirs: Annotated[
+         Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+     ] = True
+     r"""Remove empty staging directories after moving files"""
+
+     partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (
+         "C.Time.strftime(_time ? _time : Date.now()/1000, '%Y/%m/%d')"
+     )
+     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+
+     format_: Annotated[
+         Annotated[
+             Optional[OutputDatabricksDataFormat],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="format"),
+     ] = OutputDatabricksDataFormat.JSON
+     r"""Format of the output data"""
+
+     base_file_name: Annotated[Optional[str], pydantic.Field(alias="baseFileName")] = (
+         "`CriblOut`"
+     )
+     r"""JavaScript expression to define the output filename prefix (can be constant)"""
+
+     file_name_suffix: Annotated[
+         Optional[str], pydantic.Field(alias="fileNameSuffix")
+     ] = '`.${C.env["CRIBL_WORKER_ID"]}.${__format}${__compression === "gzip" ? ".gz" : ""}`'
+     r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+
+     max_file_size_mb: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileSizeMB")
+     ] = 32
+     r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+
+     max_file_open_time_sec: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
+     ] = 300
+     r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+
+     max_file_idle_time_sec: Annotated[
+         Optional[float], pydantic.Field(alias="maxFileIdleTimeSec")
+     ] = 30
+     r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+
+     max_open_files: Annotated[Optional[float], pydantic.Field(alias="maxOpenFiles")] = (
+         100
+     )
+     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+
+     header_line: Annotated[Optional[str], pydantic.Field(alias="headerLine")] = ""
+     r"""If set, this line will be written to the beginning of each output file"""
+
+     write_high_water_mark: Annotated[
+         Optional[float], pydantic.Field(alias="writeHighWaterMark")
+     ] = 64
+     r"""Buffer size used to write to a file"""
+
+     on_backpressure: Annotated[
+         Annotated[
+             Optional[OutputDatabricksBackpressureBehavior],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="onBackpressure"),
+     ] = OutputDatabricksBackpressureBehavior.BLOCK
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     deadletter_enabled: Annotated[
+         Optional[bool], pydantic.Field(alias="deadletterEnabled")
+     ] = False
+     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
+     on_disk_full_backpressure: Annotated[
+         Annotated[
+             Optional[OutputDatabricksDiskSpaceProtection],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="onDiskFullBackpressure"),
+     ] = OutputDatabricksDiskSpaceProtection.BLOCK
+     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+     unity_auth_method: Annotated[
+         Annotated[
+             Optional[OutputDatabricksAuthenticationMethod],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="unityAuthMethod"),
+     ] = OutputDatabricksAuthenticationMethod.MANUAL
+     r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+
+     login_url: Annotated[Optional[str], pydantic.Field(alias="loginUrl")] = None
+     r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
+
+     client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
+     r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
+
+     client_secret: Annotated[Optional[str], pydantic.Field(alias="clientSecret")] = None
+     r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
+
+     client_text_secret: Annotated[
+         Optional[str], pydantic.Field(alias="clientTextSecret")
+     ] = None
+     r"""Select or create a stored secret that references your Client ID and Client Secret"""
+
+     scope: Optional[str] = "all-apis"
+     r"""OAuth scope for Unity Catalog authentication"""
+
+     token_timeout_secs: Annotated[
+         Optional[float], pydantic.Field(alias="tokenTimeoutSecs")
+     ] = 3600
+     r"""How often the OAuth token should be refreshed"""
+
+     default_catalog: Annotated[
+         Optional[str], pydantic.Field(alias="defaultCatalog")
+     ] = "main"
+     r"""Name of the catalog to use for the output"""
+
+     default_schema: Annotated[Optional[str], pydantic.Field(alias="defaultSchema")] = (
+         "external"
+     )
+     r"""Name of the catalog schema to use for the output"""
+
+     events_volume_name: Annotated[
+         Optional[str], pydantic.Field(alias="eventsVolumeName")
+     ] = "events"
+     r"""Name of the events volume in Databricks"""
+
+     over_write_files: Annotated[
+         Optional[bool], pydantic.Field(alias="overWriteFiles")
+     ] = False
+     r"""Uploaded files should be overwritten if they already exist. If disabled, upload will fail if a file already exists."""
+
+     description: Optional[str] = None
+
+     compress: Annotated[
+         Optional[OutputDatabricksCompression], PlainValidator(validate_open_enum(False))
+     ] = OutputDatabricksCompression.GZIP
+     r"""Data compression format to apply to HTTP content before it is delivered"""
+
+     compression_level: Annotated[
+         Annotated[
+             Optional[OutputDatabricksCompressionLevel],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="compressionLevel"),
+     ] = OutputDatabricksCompressionLevel.BEST_SPEED
+     r"""Compression level to apply before moving files to final destination"""
+
+     automatic_schema: Annotated[
+         Optional[bool], pydantic.Field(alias="automaticSchema")
+     ] = False
+     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+
+     parquet_version: Annotated[
+         Annotated[
+             Optional[OutputDatabricksParquetVersion],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="parquetVersion"),
+     ] = OutputDatabricksParquetVersion.PARQUET_2_6
+     r"""Determines which data types are supported and how they are represented"""
+
+     parquet_data_page_version: Annotated[
+         Annotated[
+             Optional[OutputDatabricksDataPageVersion],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="parquetDataPageVersion"),
+     ] = OutputDatabricksDataPageVersion.DATA_PAGE_V2
+     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+     parquet_row_group_length: Annotated[
+         Optional[float], pydantic.Field(alias="parquetRowGroupLength")
+     ] = 10000
+     r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+
+     parquet_page_size: Annotated[
+         Optional[str], pydantic.Field(alias="parquetPageSize")
+     ] = "1MB"
+     r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+
+     should_log_invalid_rows: Annotated[
+         Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
+     ] = None
+     r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+
+     key_value_metadata: Annotated[
+         Optional[List[OutputDatabricksKeyValueMetadatum]],
+         pydantic.Field(alias="keyValueMetadata"),
+     ] = None
+     r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+
+     enable_statistics: Annotated[
+         Optional[bool], pydantic.Field(alias="enableStatistics")
+     ] = True
+     r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+
+     enable_write_page_index: Annotated[
+         Optional[bool], pydantic.Field(alias="enableWritePageIndex")
+     ] = True
+     r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+
+     enable_page_checksum: Annotated[
+         Optional[bool], pydantic.Field(alias="enablePageChecksum")
+     ] = False
+     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+
+     empty_dir_cleanup_sec: Annotated[
+         Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
+     ] = 300
+     r"""How frequently, in seconds, to clean up empty directories"""
+
+     deadletter_path: Annotated[
+         Optional[str], pydantic.Field(alias="deadletterPath")
+     ] = "$CRIBL_HOME/state/outputs/dead-letter"
+     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+
+     max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
+     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
cribl_control_plane/models/outputdatadog.py
@@ -1,9 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
+ from cribl_control_plane import utils
  from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,14 +15,14 @@ class OutputDatadogType(str, Enum):
      DATADOG = "datadog"


- class SendLogsAs(str, Enum):
+ class SendLogsAs(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""The content type to use when sending logs"""

      TEXT = "text"
      JSON = "json"


- class OutputDatadogSeverity(str, Enum):
+ class OutputDatadogSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""

      EMERGENCY = "emergency"
@@ -32,7 +35,7 @@ class OutputDatadogSeverity(str, Enum):
      DEBUG = "debug"


- class DatadogSite(str, Enum):
+ class DatadogSite(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Datadog site to which events should be sent"""

      US = "us"
@@ -55,7 +58,7 @@ class OutputDatadogExtraHTTPHeader(BaseModel):
      name: Optional[str] = None


- class OutputDatadogFailedRequestLoggingMode(str, Enum):
+ class OutputDatadogFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

      PAYLOAD = "payload"
@@ -117,7 +120,7 @@ class OutputDatadogTimeoutRetrySettings(BaseModel):
      r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


- class OutputDatadogBackpressureBehavior(str, Enum):
+ class OutputDatadogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""How to handle events when all receivers are exerting backpressure"""

      BLOCK = "block"
@@ -125,28 +128,28 @@ class OutputDatadogBackpressureBehavior(str, Enum):
      QUEUE = "queue"


- class OutputDatadogAuthenticationMethod(str, Enum):
+ class OutputDatadogAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Enter API key directly, or select a stored secret"""

      MANUAL = "manual"
      SECRET = "secret"


- class OutputDatadogCompression(str, Enum):
+ class OutputDatadogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Codec to use to compress the persisted data"""

      NONE = "none"
      GZIP = "gzip"


- class OutputDatadogQueueFullBehavior(str, Enum):
+ class OutputDatadogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

      BLOCK = "block"
      DROP = "drop"


- class OutputDatadogMode(str, Enum):
+ class OutputDatadogMode(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

      ERROR = "error"
@@ -276,7 +279,8 @@ class OutputDatadog(BaseModel):
      r"""Tags for filtering and grouping in @{product}"""

      content_type: Annotated[
-         Optional[SendLogsAs], pydantic.Field(alias="contentType")
+         Annotated[Optional[SendLogsAs], PlainValidator(validate_open_enum(False))],
+         pydantic.Field(alias="contentType"),
      ] = SendLogsAs.JSON
      r"""The content type to use when sending logs"""

@@ -303,10 +307,14 @@ class OutputDatadog(BaseModel):
      ] = False
      r"""Allow API key to be set from the event's '__agent_api_key' field"""

-     severity: Optional[OutputDatadogSeverity] = None
+     severity: Annotated[
+         Optional[OutputDatadogSeverity], PlainValidator(validate_open_enum(False))
+     ] = None
      r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""

-     site: Optional[DatadogSite] = DatadogSite.US
+     site: Annotated[
+         Optional[DatadogSite], PlainValidator(validate_open_enum(False))
+     ] = DatadogSite.US
      r"""Datadog site to which events should be sent"""

      send_counters_as_count: Annotated[
@@ -358,7 +366,10 @@ class OutputDatadog(BaseModel):
      r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

      failed_request_logging_mode: Annotated[
-         Optional[OutputDatadogFailedRequestLoggingMode],
+         Annotated[
+             Optional[OutputDatadogFailedRequestLoggingMode],
+             PlainValidator(validate_open_enum(False)),
+         ],
          pydantic.Field(alias="failedRequestLoggingMode"),
      ] = OutputDatadogFailedRequestLoggingMode.NONE
      r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -385,13 +396,20 @@ class OutputDatadog(BaseModel):
      r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

      on_backpressure: Annotated[
-         Optional[OutputDatadogBackpressureBehavior],
+         Annotated[
+             Optional[OutputDatadogBackpressureBehavior],
+             PlainValidator(validate_open_enum(False)),
+         ],
          pydantic.Field(alias="onBackpressure"),
      ] = OutputDatadogBackpressureBehavior.BLOCK
      r"""How to handle events when all receivers are exerting backpressure"""

      auth_type: Annotated[
-         Optional[OutputDatadogAuthenticationMethod], pydantic.Field(alias="authType")
+         Annotated[
+             Optional[OutputDatadogAuthenticationMethod],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="authType"),
      ] = OutputDatadogAuthenticationMethod.MANUAL
      r"""Enter API key directly, or select a stored secret"""

@@ -418,19 +436,29 @@ class OutputDatadog(BaseModel):
      r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

      pq_compress: Annotated[
-         Optional[OutputDatadogCompression], pydantic.Field(alias="pqCompress")
+         Annotated[
+             Optional[OutputDatadogCompression],
+             PlainValidator(validate_open_enum(False)),
+         ],
+         pydantic.Field(alias="pqCompress"),
      ] = OutputDatadogCompression.NONE
      r"""Codec to use to compress the persisted data"""

      pq_on_backpressure: Annotated[
-         Optional[OutputDatadogQueueFullBehavior],
+         Annotated[
+             Optional[OutputDatadogQueueFullBehavior],
+             PlainValidator(validate_open_enum(False)),
+         ],
          pydantic.Field(alias="pqOnBackpressure"),
      ] = OutputDatadogQueueFullBehavior.BLOCK
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-     pq_mode: Annotated[Optional[OutputDatadogMode], pydantic.Field(alias="pqMode")] = (
-         OutputDatadogMode.ERROR
-     )
+     pq_mode: Annotated[
+         Annotated[
+             Optional[OutputDatadogMode], PlainValidator(validate_open_enum(False))
+         ],
+         pydantic.Field(alias="pqMode"),
+     ] = OutputDatadogMode.ERROR
      r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

      pq_controls: Annotated[
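
Note: the recurring change in this release, visible here and across most of the input*/output* models listed above, is the move from closed enums to open enums: `metaclass=utils.OpenEnumMeta` on the enum class plus `PlainValidator(validate_open_enum(False))` on the corresponding field. The apparent intent is that enum-typed fields now tolerate values this SDK version does not yet declare, rather than failing validation. A hedged sketch of that difference follows, using OutputDatabricks from the first diff because its full definition shows `type` is the only required field; the `"csv"` value and the expected pass-through behavior are assumptions about validate_open_enum, not verified against the SDK.

    from cribl_control_plane.models.outputdatabricks import OutputDatabricks

    # "csv" is not a declared member of OutputDatabricksDataFormat.
    # With the closed enums used in 0.0.49 this payload would be expected to
    # raise a pydantic ValidationError; with the open-enum pattern above, the
    # assumption is that the unknown value is accepted and preserved.
    payload = {"type": "databricks", "format": "csv"}
    out = OutputDatabricks.model_validate(payload)
    print(out.format_)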