cribl-control-plane 0.2.1rc4__py3-none-any.whl → 0.2.1rc5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic; see the registry's advisory page for more details.

Files changed (110):
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/groups_sdk.py +3 -0
  3. cribl_control_plane/mappings.py +1185 -0
  4. cribl_control_plane/models/__init__.py +149 -105
  5. cribl_control_plane/models/createadminproductsmappingsactivatebyproductop.py +52 -0
  6. cribl_control_plane/models/createadminproductsmappingsbyproductop.py +53 -0
  7. cribl_control_plane/models/deleteadminproductsmappingsbyproductandidop.py +51 -0
  8. cribl_control_plane/models/getadminproductsmappingsbyproductandidop.py +51 -0
  9. cribl_control_plane/models/getadminproductsmappingsbyproductop.py +44 -0
  10. cribl_control_plane/models/input.py +14 -14
  11. cribl_control_plane/models/inputappscope.py +16 -20
  12. cribl_control_plane/models/inputconfluentcloud.py +0 -110
  13. cribl_control_plane/models/inputcriblhttp.py +16 -20
  14. cribl_control_plane/models/inputcribllakehttp.py +16 -20
  15. cribl_control_plane/models/inputcribltcp.py +16 -20
  16. cribl_control_plane/models/inputdatadogagent.py +16 -20
  17. cribl_control_plane/models/inputedgeprometheus.py +36 -44
  18. cribl_control_plane/models/inputelastic.py +27 -44
  19. cribl_control_plane/models/inputeventhub.py +0 -118
  20. cribl_control_plane/models/inputfirehose.py +16 -20
  21. cribl_control_plane/models/inputgrafana.py +31 -39
  22. cribl_control_plane/models/inputhttp.py +16 -20
  23. cribl_control_plane/models/inputhttpraw.py +16 -20
  24. cribl_control_plane/models/inputkafka.py +0 -108
  25. cribl_control_plane/models/inputloki.py +16 -20
  26. cribl_control_plane/models/inputmetrics.py +16 -20
  27. cribl_control_plane/models/inputmodeldriventelemetry.py +16 -20
  28. cribl_control_plane/models/inputopentelemetry.py +15 -19
  29. cribl_control_plane/models/inputprometheus.py +36 -44
  30. cribl_control_plane/models/inputprometheusrw.py +16 -20
  31. cribl_control_plane/models/inputsplunk.py +16 -20
  32. cribl_control_plane/models/inputsplunkhec.py +15 -19
  33. cribl_control_plane/models/inputsyslog.py +31 -39
  34. cribl_control_plane/models/inputsystemmetrics.py +10 -20
  35. cribl_control_plane/models/inputtcp.py +16 -30
  36. cribl_control_plane/models/inputtcpjson.py +16 -20
  37. cribl_control_plane/models/inputwindowsmetrics.py +10 -20
  38. cribl_control_plane/models/inputwineventlogs.py +0 -14
  39. cribl_control_plane/models/inputwizwebhook.py +16 -20
  40. cribl_control_plane/models/inputzscalerhec.py +15 -19
  41. cribl_control_plane/models/mappingruleset.py +53 -0
  42. cribl_control_plane/models/mappingrulesetevalmappingfunction.py +71 -0
  43. cribl_control_plane/models/mappingrulesetgenericmappingfunction.py +29 -0
  44. cribl_control_plane/models/output.py +22 -22
  45. cribl_control_plane/models/outputazureblob.py +0 -7
  46. cribl_control_plane/models/outputazuredataexplorer.py +93 -283
  47. cribl_control_plane/models/outputazureeventhub.py +21 -169
  48. cribl_control_plane/models/outputazurelogs.py +21 -49
  49. cribl_control_plane/models/outputchronicle.py +21 -49
  50. cribl_control_plane/models/outputclickhouse.py +21 -49
  51. cribl_control_plane/models/outputcloudwatch.py +21 -49
  52. cribl_control_plane/models/outputconfluentcloud.py +22 -167
  53. cribl_control_plane/models/outputcriblhttp.py +21 -49
  54. cribl_control_plane/models/outputcribltcp.py +21 -49
  55. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +22 -50
  56. cribl_control_plane/models/outputdatabricks.py +0 -7
  57. cribl_control_plane/models/outputdatadog.py +21 -49
  58. cribl_control_plane/models/outputdataset.py +21 -49
  59. cribl_control_plane/models/outputdls3.py +0 -7
  60. cribl_control_plane/models/outputdynatracehttp.py +21 -49
  61. cribl_control_plane/models/outputdynatraceotlp.py +21 -49
  62. cribl_control_plane/models/outputelastic.py +21 -74
  63. cribl_control_plane/models/outputelasticcloud.py +21 -74
  64. cribl_control_plane/models/outputfilesystem.py +0 -7
  65. cribl_control_plane/models/outputgooglechronicle.py +22 -65
  66. cribl_control_plane/models/outputgooglecloudlogging.py +22 -50
  67. cribl_control_plane/models/outputgooglecloudstorage.py +0 -7
  68. cribl_control_plane/models/outputgooglepubsub.py +21 -49
  69. cribl_control_plane/models/outputgrafanacloud.py +42 -98
  70. cribl_control_plane/models/outputgraphite.py +21 -49
  71. cribl_control_plane/models/outputhoneycomb.py +21 -49
  72. cribl_control_plane/models/outputhumiohec.py +21 -49
  73. cribl_control_plane/models/outputinfluxdb.py +21 -49
  74. cribl_control_plane/models/outputkafka.py +19 -162
  75. cribl_control_plane/models/outputkinesis.py +21 -56
  76. cribl_control_plane/models/outputloki.py +19 -47
  77. cribl_control_plane/models/outputminio.py +0 -7
  78. cribl_control_plane/models/outputmsk.py +19 -54
  79. cribl_control_plane/models/outputnewrelic.py +21 -49
  80. cribl_control_plane/models/outputnewrelicevents.py +22 -50
  81. cribl_control_plane/models/outputopentelemetry.py +21 -49
  82. cribl_control_plane/models/outputprometheus.py +21 -49
  83. cribl_control_plane/models/outputs3.py +0 -7
  84. cribl_control_plane/models/outputsentinel.py +21 -49
  85. cribl_control_plane/models/outputsentineloneaisiem.py +22 -50
  86. cribl_control_plane/models/outputservicenow.py +21 -49
  87. cribl_control_plane/models/outputsignalfx.py +21 -49
  88. cribl_control_plane/models/outputsns.py +19 -47
  89. cribl_control_plane/models/outputsplunk.py +21 -49
  90. cribl_control_plane/models/outputsplunkhec.py +21 -49
  91. cribl_control_plane/models/outputsplunklb.py +21 -49
  92. cribl_control_plane/models/outputsqs.py +19 -47
  93. cribl_control_plane/models/outputstatsd.py +21 -49
  94. cribl_control_plane/models/outputstatsdext.py +21 -49
  95. cribl_control_plane/models/outputsumologic.py +21 -49
  96. cribl_control_plane/models/outputsyslog.py +99 -129
  97. cribl_control_plane/models/outputtcpjson.py +21 -49
  98. cribl_control_plane/models/outputwavefront.py +21 -49
  99. cribl_control_plane/models/outputwebhook.py +21 -49
  100. cribl_control_plane/models/outputxsiam.py +19 -47
  101. cribl_control_plane/models/pipeline.py +4 -4
  102. cribl_control_plane/models/rulesetid.py +13 -0
  103. cribl_control_plane/models/runnablejobcollection.py +8 -12
  104. cribl_control_plane/models/runnablejobexecutor.py +8 -12
  105. cribl_control_plane/models/runnablejobscheduledsearch.py +8 -12
  106. cribl_control_plane/models/updateadminproductsmappingsbyproductandidop.py +63 -0
  107. cribl_control_plane/pipelines.py +8 -8
  108. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/METADATA +11 -2
  109. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/RECORD +110 -99
  110. {cribl_control_plane-0.2.1rc4.dist-info → cribl_control_plane-0.2.1rc5.dist-info}/WHEEL +0 -0
@@ -113,25 +113,13 @@ class OutputElasticAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)
113
113
 
114
114
  class OutputElasticAuthTypedDict(TypedDict):
115
115
  disabled: NotRequired[bool]
116
- username: NotRequired[str]
117
- password: NotRequired[str]
118
116
  auth_type: NotRequired[OutputElasticAuthenticationMethod]
119
117
  r"""Enter credentials directly, or select a stored secret"""
120
- credentials_secret: NotRequired[str]
121
- r"""Select or create a secret that references your credentials"""
122
- manual_api_key: NotRequired[str]
123
- r"""Enter API key directly"""
124
- text_secret: NotRequired[str]
125
- r"""Select or create a stored text secret"""
126
118
 
127
119
 
128
120
  class OutputElasticAuth(BaseModel):
129
121
  disabled: Optional[bool] = True
130
122
 
131
- username: Optional[str] = None
132
-
133
- password: Optional[str] = None
134
-
135
123
  auth_type: Annotated[
136
124
  Annotated[
137
125
  Optional[OutputElasticAuthenticationMethod],
@@ -141,19 +129,6 @@ class OutputElasticAuth(BaseModel):
141
129
  ] = OutputElasticAuthenticationMethod.MANUAL
142
130
  r"""Enter credentials directly, or select a stored secret"""
143
131
 
144
- credentials_secret: Annotated[
145
- Optional[str], pydantic.Field(alias="credentialsSecret")
146
- ] = None
147
- r"""Select or create a secret that references your credentials"""
148
-
149
- manual_api_key: Annotated[Optional[str], pydantic.Field(alias="manualAPIKey")] = (
150
- None
151
- )
152
- r"""Enter API key directly"""
153
-
154
- text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
155
- r"""Select or create a stored text secret"""
156
-
157
132
 
158
133
  class ElasticVersion(str, Enum, metaclass=utils.OpenEnumMeta):
159
134
  r"""Optional Elasticsearch version, used to format events. If not specified, will auto-discover version."""
@@ -201,17 +176,6 @@ class OutputElasticURL(BaseModel):
201
176
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""
202
177
 
203
178
 
204
- class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
205
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
206
-
207
- # Error
208
- ERROR = "error"
209
- # Backpressure
210
- ALWAYS = "always"
211
- # Always On
212
- BACKPRESSURE = "backpressure"
213
-
214
-
215
179
  class OutputElasticCompression(str, Enum, metaclass=utils.OpenEnumMeta):
216
180
  r"""Codec to use to compress the persisted data"""
217
181
 
@@ -230,6 +194,17 @@ class OutputElasticQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
230
194
  DROP = "drop"
231
195
 
232
196
 
197
+ class OutputElasticMode(str, Enum, metaclass=utils.OpenEnumMeta):
198
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
199
+
200
+ # Error
201
+ ERROR = "error"
202
+ # Backpressure
203
+ BACKPRESSURE = "backpressure"
204
+ # Always On
205
+ ALWAYS = "always"
206
+
207
+
233
208
  class OutputElasticPqControlsTypedDict(TypedDict):
234
209
  pass
235
210
 
@@ -312,16 +287,6 @@ class OutputElasticTypedDict(TypedDict):
312
287
  r"""The interval in which to re-resolve any hostnames and pick up destinations from A records"""
313
288
  load_balance_stats_period_sec: NotRequired[float]
314
289
  r"""How far back in time to keep traffic stats for load balancing purposes"""
315
- pq_strict_ordering: NotRequired[bool]
316
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
317
- pq_rate_per_sec: NotRequired[float]
318
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
319
- pq_mode: NotRequired[OutputElasticMode]
320
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
321
- pq_max_buffer_size: NotRequired[float]
322
- r"""The maximum number of events to hold in memory before writing the events to disk"""
323
- pq_max_backpressure_sec: NotRequired[float]
324
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
325
290
  pq_max_file_size: NotRequired[str]
326
291
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
327
292
  pq_max_size: NotRequired[str]
@@ -332,6 +297,8 @@ class OutputElasticTypedDict(TypedDict):
332
297
  r"""Codec to use to compress the persisted data"""
333
298
  pq_on_backpressure: NotRequired[OutputElasticQueueFullBehavior]
334
299
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
300
+ pq_mode: NotRequired[OutputElasticMode]
301
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
335
302
  pq_controls: NotRequired[OutputElasticPqControlsTypedDict]
336
303
 
337
304
 
@@ -501,34 +468,6 @@ class OutputElastic(BaseModel):
501
468
  ] = 300
502
469
  r"""How far back in time to keep traffic stats for load balancing purposes"""
503
470
 
504
- pq_strict_ordering: Annotated[
505
- Optional[bool], pydantic.Field(alias="pqStrictOrdering")
506
- ] = True
507
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
508
-
509
- pq_rate_per_sec: Annotated[
510
- Optional[float], pydantic.Field(alias="pqRatePerSec")
511
- ] = 0
512
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
513
-
514
- pq_mode: Annotated[
515
- Annotated[
516
- Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
517
- ],
518
- pydantic.Field(alias="pqMode"),
519
- ] = OutputElasticMode.ERROR
520
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
521
-
522
- pq_max_buffer_size: Annotated[
523
- Optional[float], pydantic.Field(alias="pqMaxBufferSize")
524
- ] = 42
525
- r"""The maximum number of events to hold in memory before writing the events to disk"""
526
-
527
- pq_max_backpressure_sec: Annotated[
528
- Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
529
- ] = 30
530
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
531
-
532
471
  pq_max_file_size: Annotated[
533
472
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
534
473
  ] = "1 MB"
@@ -560,6 +499,14 @@ class OutputElastic(BaseModel):
560
499
  ] = OutputElasticQueueFullBehavior.BLOCK
561
500
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
562
501
 
502
+ pq_mode: Annotated[
503
+ Annotated[
504
+ Optional[OutputElasticMode], PlainValidator(validate_open_enum(False))
505
+ ],
506
+ pydantic.Field(alias="pqMode"),
507
+ ] = OutputElasticMode.ERROR
508
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
509
+
563
510
  pq_controls: Annotated[
564
511
  Optional[OutputElasticPqControls], pydantic.Field(alias="pqControls")
565
512
  ] = None
@@ -61,25 +61,13 @@ class OutputElasticCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnum
61
61
 
62
62
  class OutputElasticCloudAuthTypedDict(TypedDict):
63
63
  disabled: NotRequired[bool]
64
- username: NotRequired[str]
65
- password: NotRequired[str]
66
64
  auth_type: NotRequired[OutputElasticCloudAuthenticationMethod]
67
65
  r"""Enter credentials directly, or select a stored secret"""
68
- credentials_secret: NotRequired[str]
69
- r"""Select or create a secret that references your credentials"""
70
- manual_api_key: NotRequired[str]
71
- r"""Enter API key directly"""
72
- text_secret: NotRequired[str]
73
- r"""Select or create a stored text secret"""
74
66
 
75
67
 
76
68
  class OutputElasticCloudAuth(BaseModel):
77
69
  disabled: Optional[bool] = False
78
70
 
79
- username: Optional[str] = None
80
-
81
- password: Optional[str] = None
82
-
83
71
  auth_type: Annotated[
84
72
  Annotated[
85
73
  Optional[OutputElasticCloudAuthenticationMethod],
@@ -89,19 +77,6 @@ class OutputElasticCloudAuth(BaseModel):
89
77
  ] = OutputElasticCloudAuthenticationMethod.MANUAL
90
78
  r"""Enter credentials directly, or select a stored secret"""
91
79
 
92
- credentials_secret: Annotated[
93
- Optional[str], pydantic.Field(alias="credentialsSecret")
94
- ] = None
95
- r"""Select or create a secret that references your credentials"""
96
-
97
- manual_api_key: Annotated[Optional[str], pydantic.Field(alias="manualAPIKey")] = (
98
- None
99
- )
100
- r"""Enter API key directly"""
101
-
102
- text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
103
- r"""Select or create a stored text secret"""
104
-
105
80
 
106
81
  class OutputElasticCloudResponseRetrySettingTypedDict(TypedDict):
107
82
  http_status: float
@@ -168,17 +143,6 @@ class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnum
168
143
  QUEUE = "queue"
169
144
 
170
145
 
171
- class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
172
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
173
-
174
- # Error
175
- ERROR = "error"
176
- # Backpressure
177
- ALWAYS = "always"
178
- # Always On
179
- BACKPRESSURE = "backpressure"
180
-
181
-
182
146
  class OutputElasticCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
183
147
  r"""Codec to use to compress the persisted data"""
184
148
 
@@ -197,6 +161,17 @@ class OutputElasticCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMet
197
161
  DROP = "drop"
198
162
 
199
163
 
164
+ class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
165
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
166
+
167
+ # Error
168
+ ERROR = "error"
169
+ # Backpressure
170
+ BACKPRESSURE = "backpressure"
171
+ # Always On
172
+ ALWAYS = "always"
173
+
174
+
200
175
  class OutputElasticCloudPqControlsTypedDict(TypedDict):
201
176
  pass
202
177
 
@@ -261,16 +236,6 @@ class OutputElasticCloudTypedDict(TypedDict):
261
236
  on_backpressure: NotRequired[OutputElasticCloudBackpressureBehavior]
262
237
  r"""How to handle events when all receivers are exerting backpressure"""
263
238
  description: NotRequired[str]
264
- pq_strict_ordering: NotRequired[bool]
265
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
266
- pq_rate_per_sec: NotRequired[float]
267
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
268
- pq_mode: NotRequired[OutputElasticCloudMode]
269
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
270
- pq_max_buffer_size: NotRequired[float]
271
- r"""The maximum number of events to hold in memory before writing the events to disk"""
272
- pq_max_backpressure_sec: NotRequired[float]
273
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
274
239
  pq_max_file_size: NotRequired[str]
275
240
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
276
241
  pq_max_size: NotRequired[str]
@@ -281,6 +246,8 @@ class OutputElasticCloudTypedDict(TypedDict):
281
246
  r"""Codec to use to compress the persisted data"""
282
247
  pq_on_backpressure: NotRequired[OutputElasticCloudQueueFullBehavior]
283
248
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
249
+ pq_mode: NotRequired[OutputElasticCloudMode]
250
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
284
251
  pq_controls: NotRequired[OutputElasticCloudPqControlsTypedDict]
285
252
 
286
253
 
@@ -407,34 +374,6 @@ class OutputElasticCloud(BaseModel):
407
374
 
408
375
  description: Optional[str] = None
409
376
 
410
- pq_strict_ordering: Annotated[
411
- Optional[bool], pydantic.Field(alias="pqStrictOrdering")
412
- ] = True
413
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
414
-
415
- pq_rate_per_sec: Annotated[
416
- Optional[float], pydantic.Field(alias="pqRatePerSec")
417
- ] = 0
418
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
419
-
420
- pq_mode: Annotated[
421
- Annotated[
422
- Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
423
- ],
424
- pydantic.Field(alias="pqMode"),
425
- ] = OutputElasticCloudMode.ERROR
426
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
427
-
428
- pq_max_buffer_size: Annotated[
429
- Optional[float], pydantic.Field(alias="pqMaxBufferSize")
430
- ] = 42
431
- r"""The maximum number of events to hold in memory before writing the events to disk"""
432
-
433
- pq_max_backpressure_sec: Annotated[
434
- Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
435
- ] = 30
436
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
437
-
438
377
  pq_max_file_size: Annotated[
439
378
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
440
379
  ] = "1 MB"
@@ -466,6 +405,14 @@ class OutputElasticCloud(BaseModel):
466
405
  ] = OutputElasticCloudQueueFullBehavior.BLOCK
467
406
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
468
407
 
408
+ pq_mode: Annotated[
409
+ Annotated[
410
+ Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
411
+ ],
412
+ pydantic.Field(alias="pqMode"),
413
+ ] = OutputElasticCloudMode.ERROR
414
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
415
+
469
416
  pq_controls: Annotated[
470
417
  Optional[OutputElasticCloudPqControls], pydantic.Field(alias="pqControls")
471
418
  ] = None
@@ -146,8 +146,6 @@ class OutputFilesystemTypedDict(TypedDict):
146
146
  r"""Compression level to apply before moving files to final destination"""
147
147
  automatic_schema: NotRequired[bool]
148
148
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
149
- parquet_schema: NotRequired[str]
150
- r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
151
149
  parquet_version: NotRequired[OutputFilesystemParquetVersion]
152
150
  r"""Determines which data types are supported and how they are represented"""
153
151
  parquet_data_page_version: NotRequired[OutputFilesystemDataPageVersion]
@@ -306,11 +304,6 @@ class OutputFilesystem(BaseModel):
306
304
  ] = False
307
305
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
308
306
 
309
- parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
310
- None
311
- )
312
- r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
313
-
314
307
  parquet_version: Annotated[
315
308
  Annotated[
316
309
  Optional[OutputFilesystemParquetVersion],
@@ -155,24 +155,6 @@ class OutputGoogleChronicleCustomLabel(BaseModel):
155
155
  value: str
156
156
 
157
157
 
158
- class UDMType(str, Enum, metaclass=utils.OpenEnumMeta):
159
- r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
160
-
161
- ENTITIES = "entities"
162
- LOGS = "logs"
163
-
164
-
165
- class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
166
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
167
-
168
- # Error
169
- ERROR = "error"
170
- # Backpressure
171
- ALWAYS = "always"
172
- # Always On
173
- BACKPRESSURE = "backpressure"
174
-
175
-
176
158
  class OutputGoogleChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
177
159
  r"""Codec to use to compress the persisted data"""
178
160
 
@@ -191,6 +173,17 @@ class OutputGoogleChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnum
191
173
  DROP = "drop"
192
174
 
193
175
 
176
+ class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
177
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
178
+
179
+ # Error
180
+ ERROR = "error"
181
+ # Backpressure
182
+ BACKPRESSURE = "backpressure"
183
+ # Always On
184
+ ALWAYS = "always"
185
+
186
+
194
187
  class OutputGoogleChroniclePqControlsTypedDict(TypedDict):
195
188
  pass
196
189
 
@@ -269,8 +262,6 @@ class OutputGoogleChronicleTypedDict(TypedDict):
269
262
  r"""User-configured environment namespace to identify the data domain the logs originated from. Use namespace as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
270
263
  custom_labels: NotRequired[List[OutputGoogleChronicleCustomLabelTypedDict]]
271
264
  r"""Custom labels to be added to every batch"""
272
- udm_type: NotRequired[UDMType]
273
- r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
274
265
  api_key: NotRequired[str]
275
266
  r"""Organization's API key in Google SecOps"""
276
267
  api_key_secret: NotRequired[str]
@@ -279,16 +270,6 @@ class OutputGoogleChronicleTypedDict(TypedDict):
279
270
  r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
280
271
  service_account_credentials_secret: NotRequired[str]
281
272
  r"""Select or create a stored text secret"""
282
- pq_strict_ordering: NotRequired[bool]
283
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
284
- pq_rate_per_sec: NotRequired[float]
285
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
286
- pq_mode: NotRequired[OutputGoogleChronicleMode]
287
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
288
- pq_max_buffer_size: NotRequired[float]
289
- r"""The maximum number of events to hold in memory before writing the events to disk"""
290
- pq_max_backpressure_sec: NotRequired[float]
291
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
292
273
  pq_max_file_size: NotRequired[str]
293
274
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
294
275
  pq_max_size: NotRequired[str]
@@ -299,6 +280,8 @@ class OutputGoogleChronicleTypedDict(TypedDict):
299
280
  r"""Codec to use to compress the persisted data"""
300
281
  pq_on_backpressure: NotRequired[OutputGoogleChronicleQueueFullBehavior]
301
282
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
283
+ pq_mode: NotRequired[OutputGoogleChronicleMode]
284
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
302
285
  pq_controls: NotRequired[OutputGoogleChroniclePqControlsTypedDict]
303
286
 
304
287
 
@@ -460,12 +443,6 @@ class OutputGoogleChronicle(BaseModel):
460
443
  ] = None
461
444
  r"""Custom labels to be added to every batch"""
462
445
 
463
- udm_type: Annotated[
464
- Annotated[Optional[UDMType], PlainValidator(validate_open_enum(False))],
465
- pydantic.Field(alias="udmType"),
466
- ] = UDMType.LOGS
467
- r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
468
-
469
446
  api_key: Annotated[Optional[str], pydantic.Field(alias="apiKey")] = None
470
447
  r"""Organization's API key in Google SecOps"""
471
448
 
@@ -484,35 +461,6 @@ class OutputGoogleChronicle(BaseModel):
484
461
  ] = None
485
462
  r"""Select or create a stored text secret"""
486
463
 
487
- pq_strict_ordering: Annotated[
488
- Optional[bool], pydantic.Field(alias="pqStrictOrdering")
489
- ] = True
490
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
491
-
492
- pq_rate_per_sec: Annotated[
493
- Optional[float], pydantic.Field(alias="pqRatePerSec")
494
- ] = 0
495
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
496
-
497
- pq_mode: Annotated[
498
- Annotated[
499
- Optional[OutputGoogleChronicleMode],
500
- PlainValidator(validate_open_enum(False)),
501
- ],
502
- pydantic.Field(alias="pqMode"),
503
- ] = OutputGoogleChronicleMode.ERROR
504
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
505
-
506
- pq_max_buffer_size: Annotated[
507
- Optional[float], pydantic.Field(alias="pqMaxBufferSize")
508
- ] = 42
509
- r"""The maximum number of events to hold in memory before writing the events to disk"""
510
-
511
- pq_max_backpressure_sec: Annotated[
512
- Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
513
- ] = 30
514
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
515
-
516
464
  pq_max_file_size: Annotated[
517
465
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
518
466
  ] = "1 MB"
@@ -544,6 +492,15 @@ class OutputGoogleChronicle(BaseModel):
544
492
  ] = OutputGoogleChronicleQueueFullBehavior.BLOCK
545
493
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
546
494
 
495
+ pq_mode: Annotated[
496
+ Annotated[
497
+ Optional[OutputGoogleChronicleMode],
498
+ PlainValidator(validate_open_enum(False)),
499
+ ],
500
+ pydantic.Field(alias="pqMode"),
501
+ ] = OutputGoogleChronicleMode.ERROR
502
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
503
+
547
504
  pq_controls: Annotated[
548
505
  Optional[OutputGoogleChroniclePqControls], pydantic.Field(alias="pqControls")
549
506
  ] = None
@@ -91,17 +91,6 @@ class OutputGoogleCloudLoggingBackpressureBehavior(
91
91
  QUEUE = "queue"
92
92
 
93
93
 
94
- class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
95
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
96
-
97
- # Error
98
- ERROR = "error"
99
- # Backpressure
100
- ALWAYS = "always"
101
- # Always On
102
- BACKPRESSURE = "backpressure"
103
-
104
-
105
94
  class OutputGoogleCloudLoggingCompression(str, Enum, metaclass=utils.OpenEnumMeta):
106
95
  r"""Codec to use to compress the persisted data"""
107
96
 
@@ -122,6 +111,17 @@ class OutputGoogleCloudLoggingQueueFullBehavior(
122
111
  DROP = "drop"
123
112
 
124
113
 
114
+ class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
115
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
116
+
117
+ # Error
118
+ ERROR = "error"
119
+ # Backpressure
120
+ BACKPRESSURE = "backpressure"
121
+ # Always On
122
+ ALWAYS = "always"
123
+
124
+
125
125
  class OutputGoogleCloudLoggingPqControlsTypedDict(TypedDict):
126
126
  pass
127
127
 
@@ -243,16 +243,6 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
243
243
  description: NotRequired[str]
244
244
  payload_expression: NotRequired[str]
245
245
  r"""JavaScript expression to compute the value of the payload. Must evaluate to a JavaScript object value. If an invalid value is encountered it will result in the default value instead. Defaults to the entire event."""
246
- pq_strict_ordering: NotRequired[bool]
247
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
248
- pq_rate_per_sec: NotRequired[float]
249
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
250
- pq_mode: NotRequired[OutputGoogleCloudLoggingMode]
251
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
252
- pq_max_buffer_size: NotRequired[float]
253
- r"""The maximum number of events to hold in memory before writing the events to disk"""
254
- pq_max_backpressure_sec: NotRequired[float]
255
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
256
246
  pq_max_file_size: NotRequired[str]
257
247
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
258
248
  pq_max_size: NotRequired[str]
@@ -263,6 +253,8 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
263
253
  r"""Codec to use to compress the persisted data"""
264
254
  pq_on_backpressure: NotRequired[OutputGoogleCloudLoggingQueueFullBehavior]
265
255
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
256
+ pq_mode: NotRequired[OutputGoogleCloudLoggingMode]
257
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
266
258
  pq_controls: NotRequired[OutputGoogleCloudLoggingPqControlsTypedDict]
267
259
 
268
260
 
@@ -541,35 +533,6 @@ class OutputGoogleCloudLogging(BaseModel):
541
533
  ] = None
542
534
  r"""JavaScript expression to compute the value of the payload. Must evaluate to a JavaScript object value. If an invalid value is encountered it will result in the default value instead. Defaults to the entire event."""
543
535
 
544
- pq_strict_ordering: Annotated[
545
- Optional[bool], pydantic.Field(alias="pqStrictOrdering")
546
- ] = True
547
- r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
548
-
549
- pq_rate_per_sec: Annotated[
550
- Optional[float], pydantic.Field(alias="pqRatePerSec")
551
- ] = 0
552
- r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
553
-
554
- pq_mode: Annotated[
555
- Annotated[
556
- Optional[OutputGoogleCloudLoggingMode],
557
- PlainValidator(validate_open_enum(False)),
558
- ],
559
- pydantic.Field(alias="pqMode"),
560
- ] = OutputGoogleCloudLoggingMode.ERROR
561
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
562
-
563
- pq_max_buffer_size: Annotated[
564
- Optional[float], pydantic.Field(alias="pqMaxBufferSize")
565
- ] = 42
566
- r"""The maximum number of events to hold in memory before writing the events to disk"""
567
-
568
- pq_max_backpressure_sec: Annotated[
569
- Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
570
- ] = 30
571
- r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
572
-
573
536
  pq_max_file_size: Annotated[
574
537
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
575
538
  ] = "1 MB"
@@ -601,6 +564,15 @@ class OutputGoogleCloudLogging(BaseModel):
601
564
  ] = OutputGoogleCloudLoggingQueueFullBehavior.BLOCK
602
565
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
603
566
 
567
+ pq_mode: Annotated[
568
+ Annotated[
569
+ Optional[OutputGoogleCloudLoggingMode],
570
+ PlainValidator(validate_open_enum(False)),
571
+ ],
572
+ pydantic.Field(alias="pqMode"),
573
+ ] = OutputGoogleCloudLoggingMode.ERROR
574
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
575
+
604
576
  pq_controls: Annotated[
605
577
  Optional[OutputGoogleCloudLoggingPqControls], pydantic.Field(alias="pqControls")
606
578
  ] = None
@@ -217,8 +217,6 @@ class OutputGoogleCloudStorageTypedDict(TypedDict):
217
217
  r"""Compression level to apply before moving files to final destination"""
218
218
  automatic_schema: NotRequired[bool]
219
219
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
220
- parquet_schema: NotRequired[str]
221
- r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
222
220
  parquet_version: NotRequired[OutputGoogleCloudStorageParquetVersion]
223
221
  r"""Determines which data types are supported and how they are represented"""
224
222
  parquet_data_page_version: NotRequired[OutputGoogleCloudStorageDataPageVersion]
@@ -447,11 +445,6 @@ class OutputGoogleCloudStorage(BaseModel):
447
445
  ] = False
448
446
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
449
447
 
450
- parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
451
- None
452
- )
453
- r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
454
-
455
448
  parquet_version: Annotated[
456
449
  Annotated[
457
450
  Optional[OutputGoogleCloudStorageParquetVersion],