cribl-control-plane 0.1.0b1__py3-none-any.whl → 0.1.1rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release

This version of cribl-control-plane might be problematic.

Files changed (150)
  1. cribl_control_plane/_hooks/clientcredentials.py +91 -41
  2. cribl_control_plane/_version.py +4 -4
  3. cribl_control_plane/errors/apierror.py +1 -1
  4. cribl_control_plane/errors/criblcontrolplaneerror.py +1 -1
  5. cribl_control_plane/errors/error.py +1 -1
  6. cribl_control_plane/errors/healthstatus_error.py +1 -1
  7. cribl_control_plane/errors/no_response_error.py +1 -1
  8. cribl_control_plane/errors/responsevalidationerror.py +1 -1
  9. cribl_control_plane/groups_sdk.py +4 -4
  10. cribl_control_plane/httpclient.py +0 -1
  11. cribl_control_plane/lakedatasets.py +12 -12
  12. cribl_control_plane/models/__init__.py +92 -25
  13. cribl_control_plane/models/configgroup.py +17 -2
  14. cribl_control_plane/models/createversionundoop.py +3 -3
  15. cribl_control_plane/models/cribllakedatasetupdate.py +81 -0
  16. cribl_control_plane/models/distributedsummary.py +6 -0
  17. cribl_control_plane/models/gitinfo.py +14 -3
  18. cribl_control_plane/models/input.py +65 -63
  19. cribl_control_plane/models/inputappscope.py +4 -0
  20. cribl_control_plane/models/inputazureblob.py +4 -0
  21. cribl_control_plane/models/inputcollection.py +4 -0
  22. cribl_control_plane/models/inputconfluentcloud.py +8 -0
  23. cribl_control_plane/models/inputcribl.py +4 -0
  24. cribl_control_plane/models/inputcriblhttp.py +4 -0
  25. cribl_control_plane/models/inputcribllakehttp.py +4 -0
  26. cribl_control_plane/models/inputcriblmetrics.py +4 -0
  27. cribl_control_plane/models/inputcribltcp.py +4 -0
  28. cribl_control_plane/models/inputcrowdstrike.py +7 -0
  29. cribl_control_plane/models/inputdatadogagent.py +4 -0
  30. cribl_control_plane/models/inputdatagen.py +4 -0
  31. cribl_control_plane/models/inputedgeprometheus.py +12 -0
  32. cribl_control_plane/models/inputelastic.py +11 -0
  33. cribl_control_plane/models/inputeventhub.py +6 -0
  34. cribl_control_plane/models/inputexec.py +4 -0
  35. cribl_control_plane/models/inputfile.py +6 -0
  36. cribl_control_plane/models/inputfirehose.py +4 -0
  37. cribl_control_plane/models/inputgooglepubsub.py +7 -0
  38. cribl_control_plane/models/inputgrafana.py +8 -0
  39. cribl_control_plane/models/inputhttp.py +4 -0
  40. cribl_control_plane/models/inputhttpraw.py +4 -0
  41. cribl_control_plane/models/inputjournalfiles.py +4 -0
  42. cribl_control_plane/models/inputkafka.py +8 -0
  43. cribl_control_plane/models/inputkinesis.py +15 -0
  44. cribl_control_plane/models/inputkubeevents.py +4 -0
  45. cribl_control_plane/models/inputkubelogs.py +4 -0
  46. cribl_control_plane/models/inputkubemetrics.py +4 -0
  47. cribl_control_plane/models/inputloki.py +4 -0
  48. cribl_control_plane/models/inputmetrics.py +4 -0
  49. cribl_control_plane/models/inputmodeldriventelemetry.py +4 -0
  50. cribl_control_plane/models/inputmsk.py +7 -0
  51. cribl_control_plane/models/inputnetflow.py +4 -0
  52. cribl_control_plane/models/inputoffice365mgmt.py +11 -0
  53. cribl_control_plane/models/inputoffice365msgtrace.py +11 -0
  54. cribl_control_plane/models/inputoffice365service.py +11 -0
  55. cribl_control_plane/models/inputopentelemetry.py +8 -0
  56. cribl_control_plane/models/inputprometheus.py +10 -0
  57. cribl_control_plane/models/inputprometheusrw.py +4 -0
  58. cribl_control_plane/models/inputrawudp.py +4 -0
  59. cribl_control_plane/models/inputs3.py +7 -0
  60. cribl_control_plane/models/inputs3inventory.py +7 -0
  61. cribl_control_plane/models/inputsecuritylake.py +7 -0
  62. cribl_control_plane/models/inputsnmp.py +11 -0
  63. cribl_control_plane/models/inputsplunk.py +9 -0
  64. cribl_control_plane/models/inputsplunkhec.py +4 -0
  65. cribl_control_plane/models/inputsplunksearch.py +7 -0
  66. cribl_control_plane/models/inputsqs.py +17 -10
  67. cribl_control_plane/models/inputsyslog.py +8 -0
  68. cribl_control_plane/models/inputsystemmetrics.py +32 -0
  69. cribl_control_plane/models/inputsystemstate.py +4 -0
  70. cribl_control_plane/models/inputtcp.py +4 -0
  71. cribl_control_plane/models/inputtcpjson.py +4 -0
  72. cribl_control_plane/models/inputwef.py +6 -0
  73. cribl_control_plane/models/inputwindowsmetrics.py +28 -0
  74. cribl_control_plane/models/inputwineventlogs.py +8 -0
  75. cribl_control_plane/models/inputwiz.py +7 -0
  76. cribl_control_plane/models/inputwizwebhook.py +4 -0
  77. cribl_control_plane/models/inputzscalerhec.py +4 -0
  78. cribl_control_plane/models/jobinfo.py +4 -1
  79. cribl_control_plane/models/nodeprovidedinfo.py +4 -1
  80. cribl_control_plane/models/output.py +86 -81
  81. cribl_control_plane/models/outputazureblob.py +20 -0
  82. cribl_control_plane/models/outputazuredataexplorer.py +28 -0
  83. cribl_control_plane/models/outputazureeventhub.py +17 -0
  84. cribl_control_plane/models/outputazurelogs.py +13 -0
  85. cribl_control_plane/models/outputchronicle.py +444 -0
  86. cribl_control_plane/models/outputclickhouse.py +17 -0
  87. cribl_control_plane/models/outputcloudwatch.py +13 -0
  88. cribl_control_plane/models/outputconfluentcloud.py +24 -0
  89. cribl_control_plane/models/outputcriblhttp.py +15 -0
  90. cribl_control_plane/models/outputcribllake.py +21 -0
  91. cribl_control_plane/models/outputcribltcp.py +12 -0
  92. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +15 -0
  93. cribl_control_plane/models/outputdatabricks.py +28 -176
  94. cribl_control_plane/models/outputdatadog.py +30 -0
  95. cribl_control_plane/models/outputdataset.py +23 -0
  96. cribl_control_plane/models/outputdls3.py +35 -0
  97. cribl_control_plane/models/outputdynatracehttp.py +22 -0
  98. cribl_control_plane/models/outputdynatraceotlp.py +22 -0
  99. cribl_control_plane/models/outputelastic.py +18 -0
  100. cribl_control_plane/models/outputelasticcloud.py +13 -0
  101. cribl_control_plane/models/outputexabeam.py +14 -0
  102. cribl_control_plane/models/outputfilesystem.py +15 -0
  103. cribl_control_plane/models/outputgooglechronicle.py +26 -4
  104. cribl_control_plane/models/outputgooglecloudlogging.py +28 -4
  105. cribl_control_plane/models/outputgooglecloudstorage.py +28 -0
  106. cribl_control_plane/models/outputgooglepubsub.py +13 -0
  107. cribl_control_plane/models/outputgrafanacloud.py +50 -0
  108. cribl_control_plane/models/outputgraphite.py +12 -0
  109. cribl_control_plane/models/outputhoneycomb.py +13 -0
  110. cribl_control_plane/models/outputhumiohec.py +15 -0
  111. cribl_control_plane/models/outputinfluxdb.py +19 -0
  112. cribl_control_plane/models/outputkafka.py +24 -0
  113. cribl_control_plane/models/outputkinesis.py +15 -0
  114. cribl_control_plane/models/outputloki.py +20 -0
  115. cribl_control_plane/models/outputminio.py +28 -0
  116. cribl_control_plane/models/outputmsk.py +23 -0
  117. cribl_control_plane/models/outputnewrelic.py +16 -0
  118. cribl_control_plane/models/outputnewrelicevents.py +16 -0
  119. cribl_control_plane/models/outputopentelemetry.py +22 -0
  120. cribl_control_plane/models/outputprometheus.py +13 -0
  121. cribl_control_plane/models/outputring.py +2 -0
  122. cribl_control_plane/models/outputs3.py +35 -0
  123. cribl_control_plane/models/outputsecuritylake.py +29 -0
  124. cribl_control_plane/models/outputsentinel.py +15 -0
  125. cribl_control_plane/models/outputsentineloneaisiem.py +13 -0
  126. cribl_control_plane/models/outputservicenow.py +21 -0
  127. cribl_control_plane/models/outputsignalfx.py +13 -0
  128. cribl_control_plane/models/outputsns.py +13 -0
  129. cribl_control_plane/models/outputsplunk.py +15 -0
  130. cribl_control_plane/models/outputsplunkhec.py +13 -0
  131. cribl_control_plane/models/outputsplunklb.py +15 -0
  132. cribl_control_plane/models/outputsqs.py +23 -10
  133. cribl_control_plane/models/outputstatsd.py +12 -0
  134. cribl_control_plane/models/outputstatsdext.py +12 -0
  135. cribl_control_plane/models/outputsumologic.py +15 -0
  136. cribl_control_plane/models/outputsyslog.py +24 -0
  137. cribl_control_plane/models/outputtcpjson.py +12 -0
  138. cribl_control_plane/models/outputwavefront.py +13 -0
  139. cribl_control_plane/models/outputwebhook.py +23 -0
  140. cribl_control_plane/models/outputxsiam.py +13 -0
  141. cribl_control_plane/models/packinfo.py +3 -0
  142. cribl_control_plane/models/packinstallinfo.py +3 -0
  143. cribl_control_plane/models/runnablejobcollection.py +4 -0
  144. cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +9 -5
  145. cribl_control_plane/models/updatepacksop.py +27 -0
  146. cribl_control_plane/models/uploadpackresponse.py +13 -0
  147. cribl_control_plane/packs.py +196 -1
  148. {cribl_control_plane-0.1.0b1.dist-info → cribl_control_plane-0.1.1rc1.dist-info}/METADATA +47 -13
  149. {cribl_control_plane-0.1.0b1.dist-info → cribl_control_plane-0.1.1rc1.dist-info}/RECORD +150 -146
  150. {cribl_control_plane-0.1.0b1.dist-info → cribl_control_plane-0.1.1rc1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputcriblhttp.py

@@ -101,7 +101,9 @@ class OutputCriblHTTPTLSSettingsClientSide(BaseModel):
 class OutputCriblHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
@@ -119,8 +121,11 @@ class OutputCriblHTTPExtraHTTPHeader(BaseModel):
 class OutputCriblHTTPFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
@@ -181,8 +186,11 @@ class OutputCriblHTTPTimeoutRetrySettings(BaseModel):
 class OutputCriblHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -204,22 +212,29 @@ class OutputCriblHTTPURL(BaseModel):
 class OutputCriblHTTPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputCriblHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputCriblHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 
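These member comments are documentation only; the wire values are unchanged, and all of these destination enums subclass str. A minimal sketch of that pattern (the Compression class below is a hypothetical stand-in; the real SDK classes also use the SDK-internal utils.OpenEnumMeta metaclass, omitted here):

from enum import Enum

# Hypothetical stand-in for the SDK's str-backed enums shown above.
class Compression(str, Enum):
    # None
    NONE = "none"
    # Gzip
    GZIP = "gzip"

# str-backed members compare equal to their serialized wire values...
assert Compression.GZIP == "gzip"
# ...and can be reconstructed from a raw API payload value.
assert Compression("none") is Compression.NONE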
cribl_control_plane/models/outputcribllake.py

@@ -25,46 +25,67 @@ class OutputCriblLakeSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputCriblLakeObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""
 
+    # Private
     PRIVATE = "private"
+    # Public Read Only
     PUBLIC_READ = "public-read"
+    # Public Read/Write
     PUBLIC_READ_WRITE = "public-read-write"
+    # Authenticated Read Only
     AUTHENTICATED_READ = "authenticated-read"
+    # AWS EC2 AMI Read Only
     AWS_EXEC_READ = "aws-exec-read"
+    # Bucket Owner Read Only
     BUCKET_OWNER_READ = "bucket-owner-read"
+    # Bucket Owner Full Control
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
 class OutputCriblLakeStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""
 
+    # Standard
     STANDARD = "STANDARD"
+    # Reduced Redundancy Storage
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
+    # Standard, Infrequent Access
     STANDARD_IA = "STANDARD_IA"
+    # One Zone, Infrequent Access
     ONEZONE_IA = "ONEZONE_IA"
+    # Intelligent Tiering
     INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
+    # Glacier Flexible Retrieval
     GLACIER = "GLACIER"
+    # Glacier Instant Retrieval
     GLACIER_IR = "GLACIER_IR"
+    # Glacier Deep Archive
     DEEP_ARCHIVE = "DEEP_ARCHIVE"
 
 
 class OutputCriblLakeServerSideEncryptionForUploadedObjects(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
+    # Amazon S3 Managed Key
     AES256 = "AES256"
+    # AWS KMS Managed Key
     AWS_KMS = "aws:kms"
 
 
 class OutputCriblLakeBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
 class OutputCriblLakeDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
cribl_control_plane/models/outputcribltcp.py

@@ -18,7 +18,9 @@ class OutputCriblTCPType(str, Enum):
 class OutputCriblTCPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
@@ -108,8 +110,11 @@ class OutputCriblTCPTLSSettingsClientSide(BaseModel):
 class OutputCriblTCPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -155,22 +160,29 @@ class OutputCriblTCPHost(BaseModel):
 class OutputCriblTCPPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputCriblTCPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputCriblTCPMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 
cribl_control_plane/models/outputcrowdstrikenextgensiem.py

@@ -31,8 +31,11 @@ class OutputCrowdstrikeNextGenSiemFailedRequestLoggingMode(
 ):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
@@ -41,7 +44,9 @@ class OutputCrowdstrikeNextGenSiemRequestFormat(
 ):
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""
 
+    # JSON
     JSON = "JSON"
+    # Raw
     RAW = "raw"
 
 
@@ -113,15 +118,20 @@ class OutputCrowdstrikeNextGenSiemBackpressureBehavior(
 ):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
 class OutputCrowdstrikeNextGenSiemCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
@@ -130,15 +140,20 @@ class OutputCrowdstrikeNextGenSiemQueueFullBehavior(
 ):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputCrowdstrikeNextGenSiemMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 
cribl_control_plane/models/outputdatabricks.py

@@ -18,75 +18,47 @@ class OutputDatabricksType(str, Enum):
 class OutputDatabricksDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""
 
+    # JSON
     JSON = "json"
+    # Raw
     RAW = "raw"
+    # Parquet
     PARQUET = "parquet"
 
 
 class OutputDatabricksBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
 class OutputDatabricksDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
 class OutputDatabricksAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
 
+    # Manual
     MANUAL = "manual"
+    # Secret Key pair
     SECRET = "secret"
 
 
-class OutputDatabricksCompression(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-
-    NONE = "none"
-    GZIP = "gzip"
-
-
-class OutputDatabricksCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Compression level to apply before moving files to final destination"""
-
-    BEST_SPEED = "best_speed"
-    NORMAL = "normal"
-    BEST_COMPRESSION = "best_compression"
-
-
-class OutputDatabricksParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Determines which data types are supported and how they are represented"""
-
-    PARQUET_1_0 = "PARQUET_1_0"
-    PARQUET_2_4 = "PARQUET_2_4"
-    PARQUET_2_6 = "PARQUET_2_6"
-
-
-class OutputDatabricksDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
-
-    DATA_PAGE_V1 = "DATA_PAGE_V1"
-    DATA_PAGE_V2 = "DATA_PAGE_V2"
-
-
-class OutputDatabricksKeyValueMetadatumTypedDict(TypedDict):
-    value: str
-    key: NotRequired[str]
-
-
-class OutputDatabricksKeyValueMetadatum(BaseModel):
-    value: str
-
-    key: Optional[str] = ""
-
-
 class OutputDatabricksTypedDict(TypedDict):
     type: OutputDatabricksType
+    login_url: str
+    r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
+    client_id: str
+    r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
     id: NotRequired[str]
     r"""Unique ID for this output"""
     pipeline: NotRequired[str]
@@ -133,14 +105,6 @@ class OutputDatabricksTypedDict(TypedDict):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
     unity_auth_method: NotRequired[OutputDatabricksAuthenticationMethod]
     r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
-    login_url: NotRequired[str]
-    r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
-    client_id: NotRequired[str]
-    r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
-    client_secret: NotRequired[str]
-    r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
-    client_text_secret: NotRequired[str]
-    r"""Select or create a stored secret that references your Client ID and Client Secret"""
     scope: NotRequired[str]
     r"""OAuth scope for Unity Catalog authentication"""
     token_timeout_secs: NotRequired[float]
@@ -154,41 +118,21 @@ class OutputDatabricksTypedDict(TypedDict):
     over_write_files: NotRequired[bool]
     r"""Uploaded files should be overwritten if they already exist. If disabled, upload will fail if a file already exists."""
     description: NotRequired[str]
-    compress: NotRequired[OutputDatabricksCompression]
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-    compression_level: NotRequired[OutputDatabricksCompressionLevel]
-    r"""Compression level to apply before moving files to final destination"""
-    automatic_schema: NotRequired[bool]
-    r"""Automatically calculate the schema based on the events of each Parquet file generated"""
-    parquet_version: NotRequired[OutputDatabricksParquetVersion]
-    r"""Determines which data types are supported and how they are represented"""
-    parquet_data_page_version: NotRequired[OutputDatabricksDataPageVersion]
-    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
-    parquet_row_group_length: NotRequired[float]
-    r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
-    parquet_page_size: NotRequired[str]
-    r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
-    should_log_invalid_rows: NotRequired[bool]
-    r"""Log up to 3 rows that @{product} skips due to data mismatch"""
-    key_value_metadata: NotRequired[List[OutputDatabricksKeyValueMetadatumTypedDict]]
-    r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
-    enable_statistics: NotRequired[bool]
-    r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
-    enable_write_page_index: NotRequired[bool]
-    r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
-    enable_page_checksum: NotRequired[bool]
-    r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
-    empty_dir_cleanup_sec: NotRequired[float]
-    r"""How frequently, in seconds, to clean up empty directories"""
-    deadletter_path: NotRequired[str]
-    r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
-    max_retry_num: NotRequired[float]
-    r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+    client_secret: NotRequired[str]
+    r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
+    client_text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
 
 
 class OutputDatabricks(BaseModel):
     type: OutputDatabricksType
 
+    login_url: Annotated[str, pydantic.Field(alias="loginUrl")]
+    r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
+
+    client_id: Annotated[str, pydantic.Field(alias="clientId")]
+    r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
+
     id: Optional[str] = None
     r"""Unique ID for this output"""
 
@@ -308,20 +252,6 @@ class OutputDatabricks(BaseModel):
     ] = OutputDatabricksAuthenticationMethod.MANUAL
     r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
 
-    login_url: Annotated[Optional[str], pydantic.Field(alias="loginUrl")] = None
-    r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
-
-    client_id: Annotated[Optional[str], pydantic.Field(alias="clientId")] = None
-    r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
-
-    client_secret: Annotated[Optional[str], pydantic.Field(alias="clientSecret")] = None
-    r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
-
-    client_text_secret: Annotated[
-        Optional[str], pydantic.Field(alias="clientTextSecret")
-    ] = None
-    r"""Select or create a stored secret that references your Client ID and Client Secret"""
-
     scope: Optional[str] = "all-apis"
     r"""OAuth scope for Unity Catalog authentication"""
 
@@ -352,88 +282,10 @@ class OutputDatabricks(BaseModel):
 
     description: Optional[str] = None
 
-    compress: Annotated[
-        Optional[OutputDatabricksCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputDatabricksCompression.GZIP
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-
-    compression_level: Annotated[
-        Annotated[
-            Optional[OutputDatabricksCompressionLevel],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="compressionLevel"),
-    ] = OutputDatabricksCompressionLevel.BEST_SPEED
-    r"""Compression level to apply before moving files to final destination"""
-
-    automatic_schema: Annotated[
-        Optional[bool], pydantic.Field(alias="automaticSchema")
-    ] = False
-    r"""Automatically calculate the schema based on the events of each Parquet file generated"""
-
-    parquet_version: Annotated[
-        Annotated[
-            Optional[OutputDatabricksParquetVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="parquetVersion"),
-    ] = OutputDatabricksParquetVersion.PARQUET_2_6
-    r"""Determines which data types are supported and how they are represented"""
-
-    parquet_data_page_version: Annotated[
-        Annotated[
-            Optional[OutputDatabricksDataPageVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="parquetDataPageVersion"),
-    ] = OutputDatabricksDataPageVersion.DATA_PAGE_V2
-    r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
-
-    parquet_row_group_length: Annotated[
-        Optional[float], pydantic.Field(alias="parquetRowGroupLength")
-    ] = 10000
-    r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
-
-    parquet_page_size: Annotated[
-        Optional[str], pydantic.Field(alias="parquetPageSize")
-    ] = "1MB"
-    r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
-
-    should_log_invalid_rows: Annotated[
-        Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
-    ] = None
-    r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+    client_secret: Annotated[Optional[str], pydantic.Field(alias="clientSecret")] = None
+    r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
 
-    key_value_metadata: Annotated[
-        Optional[List[OutputDatabricksKeyValueMetadatum]],
-        pydantic.Field(alias="keyValueMetadata"),
+    client_text_secret: Annotated[
+        Optional[str], pydantic.Field(alias="clientTextSecret")
     ] = None
-    r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
-
-    enable_statistics: Annotated[
-        Optional[bool], pydantic.Field(alias="enableStatistics")
-    ] = True
-    r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
-
-    enable_write_page_index: Annotated[
-        Optional[bool], pydantic.Field(alias="enableWritePageIndex")
-    ] = True
-    r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
-
-    enable_page_checksum: Annotated[
-        Optional[bool], pydantic.Field(alias="enablePageChecksum")
-    ] = False
-    r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
-
-    empty_dir_cleanup_sec: Annotated[
-        Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
-    ] = 300
-    r"""How frequently, in seconds, to clean up empty directories"""
-
-    deadletter_path: Annotated[
-        Optional[str], pydantic.Field(alias="deadletterPath")
-    ] = "$CRIBL_HOME/state/outputs/dead-letter"
-    r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
-
-    max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
-    r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+    r"""Select or create a stored text secret"""
cribl_control_plane/models/outputdatadog.py

@@ -18,32 +18,49 @@ class OutputDatadogType(str, Enum):
 class SendLogsAs(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The content type to use when sending logs"""
 
+    # text/plain
     TEXT = "text"
+    # application/json
     JSON = "json"
 
 
 class OutputDatadogSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""
 
+    # emergency
     EMERGENCY = "emergency"
+    # alert
     ALERT = "alert"
+    # critical
     CRITICAL = "critical"
+    # error
     ERROR = "error"
+    # warning
     WARNING = "warning"
+    # notice
     NOTICE = "notice"
+    # info
     INFO = "info"
+    # debug
     DEBUG = "debug"
 
 
 class DatadogSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Datadog site to which events should be sent"""
 
+    # US
     US = "us"
+    # US3
     US3 = "us3"
+    # US5
     US5 = "us5"
+    # Europe
     EU = "eu"
+    # US1-FED
     FED1 = "fed1"
+    # AP1
     AP1 = "ap1"
+    # Custom
     CUSTOM = "custom"
 
 
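Most of these enums are declared with metaclass=utils.OpenEnumMeta and validated with PlainValidator(validate_open_enum(False)), as visible in the Databricks model above. Both helpers are SDK-internal and not shown in this diff; the sketch below only illustrates the general "open enum" idea under that assumption: known values map to members, while unknown strings pass through instead of failing validation.

from enum import Enum
from typing import Union

from pydantic import BaseModel, PlainValidator
from typing_extensions import Annotated

class Site(str, Enum):  # hypothetical stand-in for DatadogSite
    US = "us"
    EU = "eu"

def open_enum(value: str) -> Union[Site, str]:
    # Assumed behavior: coerce known wire values to enum members and let
    # unknown values through, so new server-side sites don't break parsing.
    try:
        return Site(value)
    except ValueError:
        return value

class Model(BaseModel):
    site: Annotated[Union[Site, str], PlainValidator(open_enum)] = Site.US

print(Model.model_validate({"site": "us"}).site)   # Site.US
print(Model.model_validate({"site": "ap2"}).site)  # 'ap2' (unknown, preserved)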
@@ -61,8 +78,11 @@ class OutputDatadogExtraHTTPHeader(BaseModel):
 class OutputDatadogFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
@@ -123,8 +143,11 @@ class OutputDatadogTimeoutRetrySettings(BaseModel):
 class OutputDatadogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -138,22 +161,29 @@ class OutputDatadogAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)
 class OutputDatadogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputDatadogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputDatadogMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 