cribl-control-plane 0.3.0b2__py3-none-any.whl → 0.3.0b4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic. Click here for more details.

Files changed (113) hide show
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/errors/__init__.py +8 -5
  3. cribl_control_plane/errors/{healthstatus_error.py → healthserverstatus_error.py} +10 -9
  4. cribl_control_plane/groups_sdk.py +48 -24
  5. cribl_control_plane/health.py +22 -16
  6. cribl_control_plane/models/__init__.py +152 -29
  7. cribl_control_plane/models/authtoken.py +4 -7
  8. cribl_control_plane/models/configgroup.py +8 -7
  9. cribl_control_plane/models/createconfiggroupbyproductop.py +6 -5
  10. cribl_control_plane/models/createroutesappendbyidop.py +2 -2
  11. cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
  12. cribl_control_plane/models/groupcreaterequest.py +152 -0
  13. cribl_control_plane/models/{healthstatus.py → healthserverstatus.py} +7 -7
  14. cribl_control_plane/models/input.py +15 -15
  15. cribl_control_plane/models/inputappscope.py +20 -16
  16. cribl_control_plane/models/inputconfluentcloud.py +110 -0
  17. cribl_control_plane/models/inputcriblhttp.py +20 -16
  18. cribl_control_plane/models/inputcribllakehttp.py +20 -16
  19. cribl_control_plane/models/inputcribltcp.py +20 -16
  20. cribl_control_plane/models/inputdatadogagent.py +20 -16
  21. cribl_control_plane/models/inputedgeprometheus.py +44 -36
  22. cribl_control_plane/models/inputelastic.py +44 -27
  23. cribl_control_plane/models/inputeventhub.py +118 -0
  24. cribl_control_plane/models/inputfile.py +7 -2
  25. cribl_control_plane/models/inputfirehose.py +20 -16
  26. cribl_control_plane/models/inputgrafana.py +39 -31
  27. cribl_control_plane/models/inputhttp.py +20 -16
  28. cribl_control_plane/models/inputhttpraw.py +20 -16
  29. cribl_control_plane/models/inputkafka.py +108 -0
  30. cribl_control_plane/models/inputloki.py +20 -16
  31. cribl_control_plane/models/inputmetrics.py +20 -16
  32. cribl_control_plane/models/inputmodeldriventelemetry.py +20 -16
  33. cribl_control_plane/models/inputopentelemetry.py +19 -15
  34. cribl_control_plane/models/inputprometheus.py +44 -36
  35. cribl_control_plane/models/inputprometheusrw.py +20 -16
  36. cribl_control_plane/models/inputsplunk.py +20 -16
  37. cribl_control_plane/models/inputsplunkhec.py +19 -15
  38. cribl_control_plane/models/inputsyslog.py +39 -31
  39. cribl_control_plane/models/inputsystemmetrics.py +20 -10
  40. cribl_control_plane/models/inputtcp.py +30 -16
  41. cribl_control_plane/models/inputtcpjson.py +20 -16
  42. cribl_control_plane/models/inputwindowsmetrics.py +20 -10
  43. cribl_control_plane/models/inputwineventlogs.py +14 -0
  44. cribl_control_plane/models/inputwizwebhook.py +20 -16
  45. cribl_control_plane/models/inputzscalerhec.py +19 -15
  46. cribl_control_plane/models/logininfo.py +3 -3
  47. cribl_control_plane/models/output.py +21 -21
  48. cribl_control_plane/models/outputazureblob.py +7 -0
  49. cribl_control_plane/models/outputazuredataexplorer.py +283 -93
  50. cribl_control_plane/models/outputazureeventhub.py +169 -21
  51. cribl_control_plane/models/outputazurelogs.py +49 -21
  52. cribl_control_plane/models/outputchronicle.py +49 -21
  53. cribl_control_plane/models/outputclickhouse.py +49 -21
  54. cribl_control_plane/models/outputcloudwatch.py +49 -21
  55. cribl_control_plane/models/outputconfluentcloud.py +167 -22
  56. cribl_control_plane/models/outputcriblhttp.py +49 -21
  57. cribl_control_plane/models/outputcribltcp.py +49 -21
  58. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -22
  59. cribl_control_plane/models/outputdatabricks.py +7 -0
  60. cribl_control_plane/models/outputdatadog.py +49 -21
  61. cribl_control_plane/models/outputdataset.py +49 -21
  62. cribl_control_plane/models/outputdls3.py +7 -0
  63. cribl_control_plane/models/outputdynatracehttp.py +49 -21
  64. cribl_control_plane/models/outputdynatraceotlp.py +49 -21
  65. cribl_control_plane/models/outputelastic.py +74 -21
  66. cribl_control_plane/models/outputelasticcloud.py +74 -21
  67. cribl_control_plane/models/outputfilesystem.py +7 -0
  68. cribl_control_plane/models/outputgooglechronicle.py +65 -22
  69. cribl_control_plane/models/outputgooglecloudlogging.py +50 -22
  70. cribl_control_plane/models/outputgooglecloudstorage.py +7 -0
  71. cribl_control_plane/models/outputgooglepubsub.py +49 -21
  72. cribl_control_plane/models/outputgrafanacloud.py +98 -42
  73. cribl_control_plane/models/outputgraphite.py +49 -21
  74. cribl_control_plane/models/outputhoneycomb.py +49 -21
  75. cribl_control_plane/models/outputhumiohec.py +49 -21
  76. cribl_control_plane/models/outputinfluxdb.py +49 -21
  77. cribl_control_plane/models/outputkafka.py +162 -19
  78. cribl_control_plane/models/outputkinesis.py +56 -21
  79. cribl_control_plane/models/outputloki.py +47 -19
  80. cribl_control_plane/models/outputminio.py +7 -0
  81. cribl_control_plane/models/outputmsk.py +54 -19
  82. cribl_control_plane/models/outputnewrelic.py +49 -21
  83. cribl_control_plane/models/outputnewrelicevents.py +50 -22
  84. cribl_control_plane/models/outputopentelemetry.py +49 -21
  85. cribl_control_plane/models/outputprometheus.py +49 -21
  86. cribl_control_plane/models/outputs3.py +7 -0
  87. cribl_control_plane/models/outputsentinel.py +49 -21
  88. cribl_control_plane/models/outputsentineloneaisiem.py +50 -22
  89. cribl_control_plane/models/outputservicenow.py +49 -21
  90. cribl_control_plane/models/outputsignalfx.py +49 -21
  91. cribl_control_plane/models/outputsns.py +47 -19
  92. cribl_control_plane/models/outputsplunk.py +49 -21
  93. cribl_control_plane/models/outputsplunkhec.py +124 -21
  94. cribl_control_plane/models/outputsplunklb.py +49 -21
  95. cribl_control_plane/models/outputsqs.py +47 -19
  96. cribl_control_plane/models/outputstatsd.py +49 -21
  97. cribl_control_plane/models/outputstatsdext.py +49 -21
  98. cribl_control_plane/models/outputsumologic.py +49 -21
  99. cribl_control_plane/models/outputsyslog.py +129 -99
  100. cribl_control_plane/models/outputtcpjson.py +49 -21
  101. cribl_control_plane/models/outputwavefront.py +49 -21
  102. cribl_control_plane/models/outputwebhook.py +49 -21
  103. cribl_control_plane/models/outputxsiam.py +47 -19
  104. cribl_control_plane/models/runnablejobcollection.py +12 -8
  105. cribl_control_plane/models/runnablejobexecutor.py +12 -8
  106. cribl_control_plane/models/runnablejobscheduledsearch.py +12 -8
  107. cribl_control_plane/routes_sdk.py +6 -6
  108. cribl_control_plane/tokens.py +23 -15
  109. {cribl_control_plane-0.3.0b2.dist-info → cribl_control_plane-0.3.0b4.dist-info}/METADATA +4 -4
  110. {cribl_control_plane-0.3.0b2.dist-info → cribl_control_plane-0.3.0b4.dist-info}/RECORD +111 -112
  111. cribl_control_plane/models/error.py +0 -16
  112. cribl_control_plane/models/gethealthinfoop.py +0 -17
  113. {cribl_control_plane-0.3.0b2.dist-info → cribl_control_plane-0.3.0b4.dist-info}/WHEEL +0 -0
@@ -113,13 +113,25 @@ class OutputElasticAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)
113
113
 
114
114
  class OutputElasticAuthTypedDict(TypedDict):
115
115
  disabled: NotRequired[bool]
116
+ username: NotRequired[str]
117
+ password: NotRequired[str]
116
118
  auth_type: NotRequired[OutputElasticAuthenticationMethod]
117
119
  r"""Enter credentials directly, or select a stored secret"""
120
+ credentials_secret: NotRequired[str]
121
+ r"""Select or create a secret that references your credentials"""
122
+ manual_api_key: NotRequired[str]
123
+ r"""Enter API key directly"""
124
+ text_secret: NotRequired[str]
125
+ r"""Select or create a stored text secret"""
118
126
 
119
127
 
120
128
  class OutputElasticAuth(BaseModel):
121
129
  disabled: Optional[bool] = True
122
130
 
131
+ username: Optional[str] = None
132
+
133
+ password: Optional[str] = None
134
+
123
135
  auth_type: Annotated[
124
136
  Annotated[
125
137
  Optional[OutputElasticAuthenticationMethod],
@@ -129,6 +141,19 @@ class OutputElasticAuth(BaseModel):
129
141
  ] = OutputElasticAuthenticationMethod.MANUAL
130
142
  r"""Enter credentials directly, or select a stored secret"""
131
143
 
144
+ credentials_secret: Annotated[
145
+ Optional[str], pydantic.Field(alias="credentialsSecret")
146
+ ] = None
147
+ r"""Select or create a secret that references your credentials"""
148
+
149
+ manual_api_key: Annotated[Optional[str], pydantic.Field(alias="manualAPIKey")] = (
150
+ None
151
+ )
152
+ r"""Enter API key directly"""
153
+
154
+ text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
155
+ r"""Select or create a stored text secret"""
156
+
132
157
 
133
158
  class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
134
159
  r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""
@@ -176,6 +201,17 @@ class OutputElasticURL(BaseModel):
176
201
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
177
202
 
178
203
 
204
+ class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
205
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
206
+
207
+ # Error
208
+ ERROR = "error"
209
+ # Always On
210
+ ALWAYS = "always"
211
+ # Backpressure
212
+ BACKPRESSURE = "backpressure"
213
+
214
+
179
215
  class OutputElasticCompression(str, Enum, metaclass=utils.OpenEnumMeta):
180
216
  r"""Codec to use to compress the persisted data"""
181
217
 
@@ -194,17 +230,6 @@ class OutputElasticQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
194
230
  DROP = "drop"
195
231
 
196
232
 
197
- class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
198
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
199
-
200
- # Error
201
- ERROR = "error"
202
- # Backpressure
203
- BACKPRESSURE = "backpressure"
204
- # Always On
205
- ALWAYS = "always"
206
-
207
-
208
233
  class OutputElasticPqControlsTypedDict(TypedDict):
209
234
  pass
210
235
 
@@ -287,6 +312,16 @@ class OutputElasticTypedDict(TypedDict):
287
312
  r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
288
313
  load_balance_stats_period_sec: NotRequired[float]
289
314
  r"""How far back in time to keep traffic stats for load balancing purposes"""
315
+ pq_strict_ordering: NotRequired[bool]
316
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
317
+ pq_rate_per_sec: NotRequired[float]
318
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
319
+ pq_mode: NotRequired[OutputElasticMode]
320
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
321
+ pq_max_buffer_size: NotRequired[float]
322
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
323
+ pq_max_backpressure_sec: NotRequired[float]
324
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
290
325
  pq_max_file_size: NotRequired[str]
291
326
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
292
327
  pq_max_size: NotRequired[str]
@@ -297,8 +332,6 @@ class OutputElasticTypedDict(TypedDict):
297
332
  r"""Codec to use to compress the persisted data"""
298
333
  pq_on_backpressure: NotRequired[OutputElasticQueueFullBehavior]
299
334
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
300
- pq_mode: NotRequired[OutputElasticMode]
301
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
302
335
  pq_controls: NotRequired[OutputElasticPqControlsTypedDict]
303
336
 
304
337
 
@@ -468,6 +501,34 @@ class OutputElastic(BaseModel):
468
501
  ] = 300
469
502
  r"""How far back in time to keep traffic stats for load balancing purposes"""
470
503
 
504
+ pq_strict_ordering: Annotated[
505
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
506
+ ] = True
507
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
508
+
509
+ pq_rate_per_sec: Annotated[
510
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
511
+ ] = 0
512
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
513
+
514
+ pq_mode: Annotated[
515
+ Annotated[
516
+ Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
517
+ ],
518
+ pydantic.Field(alias="pqMode"),
519
+ ] = OutputElasticMode.ERROR
520
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
521
+
522
+ pq_max_buffer_size: Annotated[
523
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
524
+ ] = 42
525
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
526
+
527
+ pq_max_backpressure_sec: Annotated[
528
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
529
+ ] = 30
530
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
531
+
471
532
  pq_max_file_size: Annotated[
472
533
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
473
534
  ] = "1 MB"
@@ -499,14 +560,6 @@ class OutputElastic(BaseModel):
499
560
  ] = OutputElasticQueueFullBehavior.BLOCK
500
561
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
501
562
 
502
- pq_mode: Annotated[
503
- Annotated[
504
- Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
505
- ],
506
- pydantic.Field(alias="pqMode"),
507
- ] = OutputElasticMode.ERROR
508
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
509
-
510
563
  pq_controls: Annotated[
511
564
  Optional[OutputElasticPqControls], pydantic.Field(alias="pqControls")
512
565
  ] = None
@@ -61,13 +61,25 @@ class OutputElasticCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnum
61
61
 
62
62
  class OutputElasticCloudAuthTypedDict(TypedDict):
63
63
  disabled: NotRequired[bool]
64
+ username: NotRequired[str]
65
+ password: NotRequired[str]
64
66
  auth_type: NotRequired[OutputElasticCloudAuthenticationMethod]
65
67
  r"""Enter credentials directly, or select a stored secret"""
68
+ credentials_secret: NotRequired[str]
69
+ r"""Select or create a secret that references your credentials"""
70
+ manual_api_key: NotRequired[str]
71
+ r"""Enter API key directly"""
72
+ text_secret: NotRequired[str]
73
+ r"""Select or create a stored text secret"""
66
74
 
67
75
 
68
76
  class OutputElasticCloudAuth(BaseModel):
69
77
  disabled: Optional[bool] = False
70
78
 
79
+ username: Optional[str] = None
80
+
81
+ password: Optional[str] = None
82
+
71
83
  auth_type: Annotated[
72
84
  Annotated[
73
85
  Optional[OutputElasticCloudAuthenticationMethod],
@@ -77,6 +89,19 @@ class OutputElasticCloudAuth(BaseModel):
77
89
  ] = OutputElasticCloudAuthenticationMethod.MANUAL
78
90
  r"""Enter credentials directly, or select a stored secret"""
79
91
 
92
+ credentials_secret: Annotated[
93
+ Optional[str], pydantic.Field(alias="credentialsSecret")
94
+ ] = None
95
+ r"""Select or create a secret that references your credentials"""
96
+
97
+ manual_api_key: Annotated[Optional[str], pydantic.Field(alias="manualAPIKey")] = (
98
+ None
99
+ )
100
+ r"""Enter API key directly"""
101
+
102
+ text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
103
+ r"""Select or create a stored text secret"""
104
+
80
105
 
81
106
  class OutputElasticCloudResponseRetrySettingTypedDict(TypedDict):
82
107
  http_status: float
@@ -143,6 +168,17 @@ class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnum
143
168
  QUEUE = "queue"
144
169
 
145
170
 
171
+ class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
172
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
173
+
174
+ # Error
175
+ ERROR = "error"
176
+ # Always On
177
+ ALWAYS = "always"
178
+ # Backpressure
179
+ BACKPRESSURE = "backpressure"
180
+
181
+
146
182
  class OutputElasticCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
147
183
  r"""Codec to use to compress the persisted data"""
148
184
 
@@ -161,17 +197,6 @@ class OutputElasticCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMet
161
197
  DROP = "drop"
162
198
 
163
199
 
164
- class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
165
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
166
-
167
- # Error
168
- ERROR = "error"
169
- # Backpressure
170
- BACKPRESSURE = "backpressure"
171
- # Always On
172
- ALWAYS = "always"
173
-
174
-
175
200
  class OutputElasticCloudPqControlsTypedDict(TypedDict):
176
201
  pass
177
202
 
@@ -236,6 +261,16 @@ class OutputElasticCloudTypedDict(TypedDict):
236
261
  on_backpressure: NotRequired[OutputElasticCloudBackpressureBehavior]
237
262
  r"""How to handle events when all receivers are exerting backpressure"""
238
263
  description: NotRequired[str]
264
+ pq_strict_ordering: NotRequired[bool]
265
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
266
+ pq_rate_per_sec: NotRequired[float]
267
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
268
+ pq_mode: NotRequired[OutputElasticCloudMode]
269
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
270
+ pq_max_buffer_size: NotRequired[float]
271
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
272
+ pq_max_backpressure_sec: NotRequired[float]
273
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
239
274
  pq_max_file_size: NotRequired[str]
240
275
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
241
276
  pq_max_size: NotRequired[str]
@@ -246,8 +281,6 @@ class OutputElasticCloudTypedDict(TypedDict):
246
281
  r"""Codec to use to compress the persisted data"""
247
282
  pq_on_backpressure: NotRequired[OutputElasticCloudQueueFullBehavior]
248
283
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
249
- pq_mode: NotRequired[OutputElasticCloudMode]
250
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
251
284
  pq_controls: NotRequired[OutputElasticCloudPqControlsTypedDict]
252
285
 
253
286
 
@@ -374,6 +407,34 @@ class OutputElasticCloud(BaseModel):
374
407
 
375
408
  description: Optional[str] = None
376
409
 
410
+ pq_strict_ordering: Annotated[
411
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
412
+ ] = True
413
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
414
+
415
+ pq_rate_per_sec: Annotated[
416
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
417
+ ] = 0
418
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
419
+
420
+ pq_mode: Annotated[
421
+ Annotated[
422
+ Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
423
+ ],
424
+ pydantic.Field(alias="pqMode"),
425
+ ] = OutputElasticCloudMode.ERROR
426
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
427
+
428
+ pq_max_buffer_size: Annotated[
429
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
430
+ ] = 42
431
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
432
+
433
+ pq_max_backpressure_sec: Annotated[
434
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
435
+ ] = 30
436
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
437
+
377
438
  pq_max_file_size: Annotated[
378
439
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
379
440
  ] = "1 MB"
@@ -405,14 +466,6 @@ class OutputElasticCloud(BaseModel):
405
466
  ] = OutputElasticCloudQueueFullBehavior.BLOCK
406
467
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
407
468
 
408
- pq_mode: Annotated[
409
- Annotated[
410
- Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
411
- ],
412
- pydantic.Field(alias="pqMode"),
413
- ] = OutputElasticCloudMode.ERROR
414
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
415
-
416
469
  pq_controls: Annotated[
417
470
  Optional[OutputElasticCloudPqControls], pydantic.Field(alias="pqControls")
418
471
  ] = None
@@ -146,6 +146,8 @@ class OutputFilesystemTypedDict(TypedDict):
146
146
  r"""Compression level to apply before moving files to final destination"""
147
147
  automatic_schema: NotRequired[bool]
148
148
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
149
+ parquet_schema: NotRequired[str]
150
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
149
151
  parquet_version: NotRequired[OutputFilesystemParquetVersion]
150
152
  r"""Determines which data types are supported and how they are represented"""
151
153
  parquet_data_page_version: NotRequired[OutputFilesystemDataPageVersion]
@@ -304,6 +306,11 @@ class OutputFilesystem(BaseModel):
304
306
  ] = False
305
307
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
306
308
 
309
+ parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
310
+ None
311
+ )
312
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
313
+
307
314
  parquet_version: Annotated[
308
315
  Annotated[
309
316
  Optional[OutputFilesystemParquetVersion],
@@ -155,6 +155,24 @@ class OutputGoogleChronicleCustomLabel(BaseModel):
155
155
  value: str
156
156
 
157
157
 
158
+ class UDMType(str, Enum, metaclass=utils.OpenEnumMeta):
159
+ r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
160
+
161
+ ENTITIES = "entities"
162
+ LOGS = "logs"
163
+
164
+
165
+ class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
166
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
167
+
168
+ # Error
169
+ ERROR = "error"
170
+ # Always On
171
+ ALWAYS = "always"
172
+ # Backpressure
173
+ BACKPRESSURE = "backpressure"
174
+
175
+
158
176
  class OutputGoogleChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
159
177
  r"""Codec to use to compress the persisted data"""
160
178
 
@@ -173,17 +191,6 @@ class OutputGoogleChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnum
173
191
  DROP = "drop"
174
192
 
175
193
 
176
- class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
177
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
178
-
179
- # Error
180
- ERROR = "error"
181
- # Backpressure
182
- BACKPRESSURE = "backpressure"
183
- # Always On
184
- ALWAYS = "always"
185
-
186
-
187
194
  class OutputGoogleChroniclePqControlsTypedDict(TypedDict):
188
195
  pass
189
196
 
@@ -262,6 +269,8 @@ class OutputGoogleChronicleTypedDict(TypedDict):
262
269
  r"""User-configured environment namespace to identify the data domain the logs originated from. Use namespace as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
263
270
  custom_labels: NotRequired[List[OutputGoogleChronicleCustomLabelTypedDict]]
264
271
  r"""Custom labels to be added to every batch"""
272
+ udm_type: NotRequired[UDMType]
273
+ r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
265
274
  api_key: NotRequired[str]
266
275
  r"""Organization's API key in Google SecOps"""
267
276
  api_key_secret: NotRequired[str]
@@ -270,6 +279,16 @@ class OutputGoogleChronicleTypedDict(TypedDict):
270
279
  r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
271
280
  service_account_credentials_secret: NotRequired[str]
272
281
  r"""Select or create a stored text secret"""
282
+ pq_strict_ordering: NotRequired[bool]
283
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
284
+ pq_rate_per_sec: NotRequired[float]
285
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
286
+ pq_mode: NotRequired[OutputGoogleChronicleMode]
287
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
288
+ pq_max_buffer_size: NotRequired[float]
289
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
290
+ pq_max_backpressure_sec: NotRequired[float]
291
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
273
292
  pq_max_file_size: NotRequired[str]
274
293
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
275
294
  pq_max_size: NotRequired[str]
@@ -280,8 +299,6 @@ class OutputGoogleChronicleTypedDict(TypedDict):
280
299
  r"""Codec to use to compress the persisted data"""
281
300
  pq_on_backpressure: NotRequired[OutputGoogleChronicleQueueFullBehavior]
282
301
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
283
- pq_mode: NotRequired[OutputGoogleChronicleMode]
284
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
285
302
  pq_controls: NotRequired[OutputGoogleChroniclePqControlsTypedDict]
286
303
 
287
304
 
@@ -443,6 +460,12 @@ class OutputGoogleChronicle(BaseModel):
443
460
  ] = None
444
461
  r"""Custom labels to be added to every batch"""
445
462
 
463
+ udm_type: Annotated[
464
+ Annotated[Optional[UDMType], PlainValidator(validate_open_enum(False))],
465
+ pydantic.Field(alias="udmType"),
466
+ ] = UDMType.LOGS
467
+ r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
468
+
446
469
  api_key: Annotated[Optional[str], pydantic.Field(alias="apiKey")] = None
447
470
  r"""Organization's API key in Google SecOps"""
448
471
 
@@ -461,6 +484,35 @@ class OutputGoogleChronicle(BaseModel):
461
484
  ] = None
462
485
  r"""Select or create a stored text secret"""
463
486
 
487
+ pq_strict_ordering: Annotated[
488
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
489
+ ] = True
490
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
491
+
492
+ pq_rate_per_sec: Annotated[
493
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
494
+ ] = 0
495
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
496
+
497
+ pq_mode: Annotated[
498
+ Annotated[
499
+ Optional[OutputGoogleChronicleMode],
500
+ PlainValidator(validate_open_enum(False)),
501
+ ],
502
+ pydantic.Field(alias="pqMode"),
503
+ ] = OutputGoogleChronicleMode.ERROR
504
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
505
+
506
+ pq_max_buffer_size: Annotated[
507
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
508
+ ] = 42
509
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
510
+
511
+ pq_max_backpressure_sec: Annotated[
512
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
513
+ ] = 30
514
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
515
+
464
516
  pq_max_file_size: Annotated[
465
517
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
466
518
  ] = "1 MB"
@@ -492,15 +544,6 @@ class OutputGoogleChronicle(BaseModel):
492
544
  ] = OutputGoogleChronicleQueueFullBehavior.BLOCK
493
545
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
494
546
 
495
- pq_mode: Annotated[
496
- Annotated[
497
- Optional[OutputGoogleChronicleMode],
498
- PlainValidator(validate_open_enum(False)),
499
- ],
500
- pydantic.Field(alias="pqMode"),
501
- ] = OutputGoogleChronicleMode.ERROR
502
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
503
-
504
547
  pq_controls: Annotated[
505
548
  Optional[OutputGoogleChroniclePqControls], pydantic.Field(alias="pqControls")
506
549
  ] = None
@@ -91,6 +91,17 @@ class OutputGoogleCloudLoggingBackpressureBehavior(
91
91
  QUEUE = "queue"
92
92
 
93
93
 
94
+ class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
95
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
96
+
97
+ # Error
98
+ ERROR = "error"
99
+ # Backpressure
100
+ ALWAYS = "always"
101
+ # Always On
102
+ BACKPRESSURE = "backpressure"
103
+
104
+
94
105
  class OutputGoogleCloudLoggingCompression(str, Enum, metaclass=utils.OpenEnumMeta):
95
106
  r"""Codec to use to compress the persisted data"""
96
107
 
@@ -111,17 +122,6 @@ class OutputGoogleCloudLoggingQueueFullBehavior(
111
122
  DROP = "drop"
112
123
 
113
124
 
114
- class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
115
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
116
-
117
- # Error
118
- ERROR = "error"
119
- # Backpressure
120
- BACKPRESSURE = "backpressure"
121
- # Always On
122
- ALWAYS = "always"
123
-
124
-
125
125
  class OutputGoogleCloudLoggingPqControlsTypedDict(TypedDict):
126
126
  pass
127
127
 
@@ -243,6 +243,16 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
243
243
  description: NotRequired[str]
244
244
  payload_expression: NotRequired[str]
245
245
  r"""JavaScript expression to compute the value of the payload. Must evaluate to a JavaScript object value. If an invalid value is encountered it will result in the default value instead. Defaults to the entire event."""
246
+ pq_strict_ordering: NotRequired[bool]
247
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
248
+ pq_rate_per_sec: NotRequired[float]
249
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
250
+ pq_mode: NotRequired[OutputGoogleCloudLoggingMode]
251
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
252
+ pq_max_buffer_size: NotRequired[float]
253
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
254
+ pq_max_backpressure_sec: NotRequired[float]
255
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
246
256
  pq_max_file_size: NotRequired[str]
247
257
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
248
258
  pq_max_size: NotRequired[str]
@@ -253,8 +263,6 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
253
263
  r"""Codec to use to compress the persisted data"""
254
264
  pq_on_backpressure: NotRequired[OutputGoogleCloudLoggingQueueFullBehavior]
255
265
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
256
- pq_mode: NotRequired[OutputGoogleCloudLoggingMode]
257
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
258
266
  pq_controls: NotRequired[OutputGoogleCloudLoggingPqControlsTypedDict]
259
267
 
260
268
 
@@ -533,6 +541,35 @@ class OutputGoogleCloudLogging(BaseModel):
533
541
  ] = None
534
542
  r"""JavaScript expression to compute the value of the payload. Must evaluate to a JavaScript object value. If an invalid value is encountered it will result in the default value instead. Defaults to the entire event."""
535
543
 
544
+ pq_strict_ordering: Annotated[
545
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
546
+ ] = True
547
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
548
+
549
+ pq_rate_per_sec: Annotated[
550
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
551
+ ] = 0
552
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
553
+
554
+ pq_mode: Annotated[
555
+ Annotated[
556
+ Optional[OutputGoogleCloudLoggingMode],
557
+ PlainValidator(validate_open_enum(False)),
558
+ ],
559
+ pydantic.Field(alias="pqMode"),
560
+ ] = OutputGoogleCloudLoggingMode.ERROR
561
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
562
+
563
+ pq_max_buffer_size: Annotated[
564
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
565
+ ] = 42
566
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
567
+
568
+ pq_max_backpressure_sec: Annotated[
569
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
570
+ ] = 30
571
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
572
+
536
573
  pq_max_file_size: Annotated[
537
574
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
538
575
  ] = "1 MB"
@@ -564,15 +601,6 @@ class OutputGoogleCloudLogging(BaseModel):
564
601
  ] = OutputGoogleCloudLoggingQueueFullBehavior.BLOCK
565
602
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
566
603
 
567
- pq_mode: Annotated[
568
- Annotated[
569
- Optional[OutputGoogleCloudLoggingMode],
570
- PlainValidator(validate_open_enum(False)),
571
- ],
572
- pydantic.Field(alias="pqMode"),
573
- ] = OutputGoogleCloudLoggingMode.ERROR
574
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
575
-
576
604
  pq_controls: Annotated[
577
605
  Optional[OutputGoogleCloudLoggingPqControls], pydantic.Field(alias="pqControls")
578
606
  ] = None
@@ -217,6 +217,8 @@ class OutputGoogleCloudStorageTypedDict(TypedDict):
217
217
  r"""Compression level to apply before moving files to final destination"""
218
218
  automatic_schema: NotRequired[bool]
219
219
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
220
+ parquet_schema: NotRequired[str]
221
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
220
222
  parquet_version: NotRequired[OutputGoogleCloudStorageParquetVersion]
221
223
  r"""Determines which data types are supported and how they are represented"""
222
224
  parquet_data_page_version: NotRequired[OutputGoogleCloudStorageDataPageVersion]
@@ -445,6 +447,11 @@ class OutputGoogleCloudStorage(BaseModel):
445
447
  ] = False
446
448
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
447
449
 
450
+ parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
451
+ None
452
+ )
453
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
454
+
448
455
  parquet_version: Annotated[
449
456
  Annotated[
450
457
  Optional[OutputGoogleCloudStorageParquetVersion],