cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic. Click here for more details.

Files changed (158) hide show
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -61,13 +62,25 @@ class OutputElasticCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnum
61
62
 
62
63
  class OutputElasticCloudAuthTypedDict(TypedDict):
63
64
  disabled: NotRequired[bool]
65
+ username: NotRequired[str]
66
+ password: NotRequired[str]
64
67
  auth_type: NotRequired[OutputElasticCloudAuthenticationMethod]
65
68
  r"""Enter credentials directly, or select a stored secret"""
69
+ credentials_secret: NotRequired[str]
70
+ r"""Select or create a secret that references your credentials"""
71
+ manual_api_key: NotRequired[str]
72
+ r"""Enter API key directly"""
73
+ text_secret: NotRequired[str]
74
+ r"""Select or create a stored text secret"""
66
75
 
67
76
 
68
77
  class OutputElasticCloudAuth(BaseModel):
69
78
  disabled: Optional[bool] = False
70
79
 
80
+ username: Optional[str] = None
81
+
82
+ password: Optional[str] = None
83
+
71
84
  auth_type: Annotated[
72
85
  Annotated[
73
86
  Optional[OutputElasticCloudAuthenticationMethod],
@@ -77,6 +90,28 @@ class OutputElasticCloudAuth(BaseModel):
77
90
  ] = OutputElasticCloudAuthenticationMethod.MANUAL
78
91
  r"""Enter credentials directly, or select a stored secret"""
79
92
 
93
+ credentials_secret: Annotated[
94
+ Optional[str], pydantic.Field(alias="credentialsSecret")
95
+ ] = None
96
+ r"""Select or create a secret that references your credentials"""
97
+
98
+ manual_api_key: Annotated[Optional[str], pydantic.Field(alias="manualAPIKey")] = (
99
+ None
100
+ )
101
+ r"""Enter API key directly"""
102
+
103
+ text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
104
+ r"""Select or create a stored text secret"""
105
+
106
+ @field_serializer("auth_type")
107
+ def serialize_auth_type(self, value):
108
+ if isinstance(value, str):
109
+ try:
110
+ return models.OutputElasticCloudAuthenticationMethod(value)
111
+ except ValueError:
112
+ return value
113
+ return value
114
+
80
115
 
81
116
  class OutputElasticCloudResponseRetrySettingTypedDict(TypedDict):
82
117
  http_status: float
@@ -143,6 +178,17 @@ class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnum
143
178
  QUEUE = "queue"
144
179
 
145
180
 
181
+ class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
182
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
183
+
184
+ # Error
185
+ ERROR = "error"
186
+ # Always On
187
+ ALWAYS = "always"
188
+ # Backpressure
189
+ BACKPRESSURE = "backpressure"
190
+
191
+
146
192
  class OutputElasticCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
147
193
  r"""Codec to use to compress the persisted data"""
148
194
 
@@ -161,17 +207,6 @@ class OutputElasticCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMet
161
207
  DROP = "drop"
162
208
 
163
209
 
164
- class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
165
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
166
-
167
- # Error
168
- ERROR = "error"
169
- # Backpressure
170
- BACKPRESSURE = "backpressure"
171
- # Always On
172
- ALWAYS = "always"
173
-
174
-
175
210
  class OutputElasticCloudPqControlsTypedDict(TypedDict):
176
211
  pass
177
212
 
@@ -236,6 +271,16 @@ class OutputElasticCloudTypedDict(TypedDict):
236
271
  on_backpressure: NotRequired[OutputElasticCloudBackpressureBehavior]
237
272
  r"""How to handle events when all receivers are exerting backpressure"""
238
273
  description: NotRequired[str]
274
+ pq_strict_ordering: NotRequired[bool]
275
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
276
+ pq_rate_per_sec: NotRequired[float]
277
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
278
+ pq_mode: NotRequired[OutputElasticCloudMode]
279
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
280
+ pq_max_buffer_size: NotRequired[float]
281
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
282
+ pq_max_backpressure_sec: NotRequired[float]
283
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
239
284
  pq_max_file_size: NotRequired[str]
240
285
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
241
286
  pq_max_size: NotRequired[str]
@@ -246,8 +291,6 @@ class OutputElasticCloudTypedDict(TypedDict):
246
291
  r"""Codec to use to compress the persisted data"""
247
292
  pq_on_backpressure: NotRequired[OutputElasticCloudQueueFullBehavior]
248
293
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
249
- pq_mode: NotRequired[OutputElasticCloudMode]
250
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
251
294
  pq_controls: NotRequired[OutputElasticCloudPqControlsTypedDict]
252
295
 
253
296
 
@@ -374,6 +417,34 @@ class OutputElasticCloud(BaseModel):
374
417
 
375
418
  description: Optional[str] = None
376
419
 
420
+ pq_strict_ordering: Annotated[
421
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
422
+ ] = True
423
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
424
+
425
+ pq_rate_per_sec: Annotated[
426
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
427
+ ] = 0
428
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
429
+
430
+ pq_mode: Annotated[
431
+ Annotated[
432
+ Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
433
+ ],
434
+ pydantic.Field(alias="pqMode"),
435
+ ] = OutputElasticCloudMode.ERROR
436
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
437
+
438
+ pq_max_buffer_size: Annotated[
439
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
440
+ ] = 42
441
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
442
+
443
+ pq_max_backpressure_sec: Annotated[
444
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
445
+ ] = 30
446
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
447
+
377
448
  pq_max_file_size: Annotated[
378
449
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
379
450
  ] = "1 MB"
@@ -405,14 +476,51 @@ class OutputElasticCloud(BaseModel):
405
476
  ] = OutputElasticCloudQueueFullBehavior.BLOCK
406
477
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
407
478
 
408
- pq_mode: Annotated[
409
- Annotated[
410
- Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
411
- ],
412
- pydantic.Field(alias="pqMode"),
413
- ] = OutputElasticCloudMode.ERROR
414
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
415
-
416
479
  pq_controls: Annotated[
417
480
  Optional[OutputElasticCloudPqControls], pydantic.Field(alias="pqControls")
418
481
  ] = None
482
+
483
+ @field_serializer("failed_request_logging_mode")
484
+ def serialize_failed_request_logging_mode(self, value):
485
+ if isinstance(value, str):
486
+ try:
487
+ return models.OutputElasticCloudFailedRequestLoggingMode(value)
488
+ except ValueError:
489
+ return value
490
+ return value
491
+
492
+ @field_serializer("on_backpressure")
493
+ def serialize_on_backpressure(self, value):
494
+ if isinstance(value, str):
495
+ try:
496
+ return models.OutputElasticCloudBackpressureBehavior(value)
497
+ except ValueError:
498
+ return value
499
+ return value
500
+
501
+ @field_serializer("pq_mode")
502
+ def serialize_pq_mode(self, value):
503
+ if isinstance(value, str):
504
+ try:
505
+ return models.OutputElasticCloudMode(value)
506
+ except ValueError:
507
+ return value
508
+ return value
509
+
510
+ @field_serializer("pq_compress")
511
+ def serialize_pq_compress(self, value):
512
+ if isinstance(value, str):
513
+ try:
514
+ return models.OutputElasticCloudCompression(value)
515
+ except ValueError:
516
+ return value
517
+ return value
518
+
519
+ @field_serializer("pq_on_backpressure")
520
+ def serialize_pq_on_backpressure(self, value):
521
+ if isinstance(value, str):
522
+ try:
523
+ return models.OutputElasticCloudQueueFullBehavior(value)
524
+ except ValueError:
525
+ return value
526
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -307,3 +308,48 @@ class OutputExabeam(BaseModel):
307
308
 
308
309
  max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
309
310
  r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
311
+
312
+ @field_serializer("signature_version")
313
+ def serialize_signature_version(self, value):
314
+ if isinstance(value, str):
315
+ try:
316
+ return models.OutputExabeamSignatureVersion(value)
317
+ except ValueError:
318
+ return value
319
+ return value
320
+
321
+ @field_serializer("object_acl")
322
+ def serialize_object_acl(self, value):
323
+ if isinstance(value, str):
324
+ try:
325
+ return models.OutputExabeamObjectACL(value)
326
+ except ValueError:
327
+ return value
328
+ return value
329
+
330
+ @field_serializer("storage_class")
331
+ def serialize_storage_class(self, value):
332
+ if isinstance(value, str):
333
+ try:
334
+ return models.OutputExabeamStorageClass(value)
335
+ except ValueError:
336
+ return value
337
+ return value
338
+
339
+ @field_serializer("on_backpressure")
340
+ def serialize_on_backpressure(self, value):
341
+ if isinstance(value, str):
342
+ try:
343
+ return models.OutputExabeamBackpressureBehavior(value)
344
+ except ValueError:
345
+ return value
346
+ return value
347
+
348
+ @field_serializer("on_disk_full_backpressure")
349
+ def serialize_on_disk_full_backpressure(self, value):
350
+ if isinstance(value, str):
351
+ try:
352
+ return models.OutputExabeamDiskSpaceProtection(value)
353
+ except ValueError:
354
+ return value
355
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -146,6 +147,8 @@ class OutputFilesystemTypedDict(TypedDict):
146
147
  r"""Compression level to apply before moving files to final destination"""
147
148
  automatic_schema: NotRequired[bool]
148
149
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
150
+ parquet_schema: NotRequired[str]
151
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
149
152
  parquet_version: NotRequired[OutputFilesystemParquetVersion]
150
153
  r"""Determines which data types are supported and how they are represented"""
151
154
  parquet_data_page_version: NotRequired[OutputFilesystemDataPageVersion]
@@ -304,6 +307,11 @@ class OutputFilesystem(BaseModel):
304
307
  ] = False
305
308
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
306
309
 
310
+ parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
311
+ None
312
+ )
313
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
314
+
307
315
  parquet_version: Annotated[
308
316
  Annotated[
309
317
  Optional[OutputFilesystemParquetVersion],
@@ -370,3 +378,66 @@ class OutputFilesystem(BaseModel):
370
378
 
371
379
  max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
372
380
  r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
381
+
382
+ @field_serializer("format_")
383
+ def serialize_format_(self, value):
384
+ if isinstance(value, str):
385
+ try:
386
+ return models.OutputFilesystemDataFormat(value)
387
+ except ValueError:
388
+ return value
389
+ return value
390
+
391
+ @field_serializer("on_backpressure")
392
+ def serialize_on_backpressure(self, value):
393
+ if isinstance(value, str):
394
+ try:
395
+ return models.OutputFilesystemBackpressureBehavior(value)
396
+ except ValueError:
397
+ return value
398
+ return value
399
+
400
+ @field_serializer("on_disk_full_backpressure")
401
+ def serialize_on_disk_full_backpressure(self, value):
402
+ if isinstance(value, str):
403
+ try:
404
+ return models.OutputFilesystemDiskSpaceProtection(value)
405
+ except ValueError:
406
+ return value
407
+ return value
408
+
409
+ @field_serializer("compress")
410
+ def serialize_compress(self, value):
411
+ if isinstance(value, str):
412
+ try:
413
+ return models.OutputFilesystemCompression(value)
414
+ except ValueError:
415
+ return value
416
+ return value
417
+
418
+ @field_serializer("compression_level")
419
+ def serialize_compression_level(self, value):
420
+ if isinstance(value, str):
421
+ try:
422
+ return models.OutputFilesystemCompressionLevel(value)
423
+ except ValueError:
424
+ return value
425
+ return value
426
+
427
+ @field_serializer("parquet_version")
428
+ def serialize_parquet_version(self, value):
429
+ if isinstance(value, str):
430
+ try:
431
+ return models.OutputFilesystemParquetVersion(value)
432
+ except ValueError:
433
+ return value
434
+ return value
435
+
436
+ @field_serializer("parquet_data_page_version")
437
+ def serialize_parquet_data_page_version(self, value):
438
+ if isinstance(value, str):
439
+ try:
440
+ return models.OutputFilesystemDataPageVersion(value)
441
+ except ValueError:
442
+ return value
443
+ return value
@@ -1,11 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
4
+ from cribl_control_plane import models, utils
5
5
  from cribl_control_plane.types import BaseModel
6
6
  from cribl_control_plane.utils import validate_open_enum
7
7
  from enum import Enum
8
8
  import pydantic
9
+ from pydantic import field_serializer
9
10
  from pydantic.functional_validators import PlainValidator
10
11
  from typing import List, Optional
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -155,6 +156,24 @@ class OutputGoogleChronicleCustomLabel(BaseModel):
155
156
  value: str
156
157
 
157
158
 
159
+ class UDMType(str, Enum, metaclass=utils.OpenEnumMeta):
160
+ r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
161
+
162
+ ENTITIES = "entities"
163
+ LOGS = "logs"
164
+
165
+
166
+ class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
167
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
168
+
169
+ # Error
170
+ ERROR = "error"
171
+ # Always On
172
+ ALWAYS = "always"
173
+ # Backpressure
174
+ BACKPRESSURE = "backpressure"
175
+
176
+
158
177
  class OutputGoogleChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
159
178
  r"""Codec to use to compress the persisted data"""
160
179
 
@@ -173,17 +192,6 @@ class OutputGoogleChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnum
173
192
  DROP = "drop"
174
193
 
175
194
 
176
- class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
177
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
178
-
179
- # Error
180
- ERROR = "error"
181
- # Backpressure
182
- BACKPRESSURE = "backpressure"
183
- # Always On
184
- ALWAYS = "always"
185
-
186
-
187
195
  class OutputGoogleChroniclePqControlsTypedDict(TypedDict):
188
196
  pass
189
197
 
@@ -262,6 +270,8 @@ class OutputGoogleChronicleTypedDict(TypedDict):
262
270
  r"""User-configured environment namespace to identify the data domain the logs originated from. Use namespace as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
263
271
  custom_labels: NotRequired[List[OutputGoogleChronicleCustomLabelTypedDict]]
264
272
  r"""Custom labels to be added to every batch"""
273
+ udm_type: NotRequired[UDMType]
274
+ r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
265
275
  api_key: NotRequired[str]
266
276
  r"""Organization's API key in Google SecOps"""
267
277
  api_key_secret: NotRequired[str]
@@ -270,6 +280,16 @@ class OutputGoogleChronicleTypedDict(TypedDict):
270
280
  r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
271
281
  service_account_credentials_secret: NotRequired[str]
272
282
  r"""Select or create a stored text secret"""
283
+ pq_strict_ordering: NotRequired[bool]
284
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
285
+ pq_rate_per_sec: NotRequired[float]
286
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
287
+ pq_mode: NotRequired[OutputGoogleChronicleMode]
288
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
289
+ pq_max_buffer_size: NotRequired[float]
290
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
291
+ pq_max_backpressure_sec: NotRequired[float]
292
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
273
293
  pq_max_file_size: NotRequired[str]
274
294
  r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
275
295
  pq_max_size: NotRequired[str]
@@ -280,8 +300,6 @@ class OutputGoogleChronicleTypedDict(TypedDict):
280
300
  r"""Codec to use to compress the persisted data"""
281
301
  pq_on_backpressure: NotRequired[OutputGoogleChronicleQueueFullBehavior]
282
302
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
283
- pq_mode: NotRequired[OutputGoogleChronicleMode]
284
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
285
303
  pq_controls: NotRequired[OutputGoogleChroniclePqControlsTypedDict]
286
304
 
287
305
 
@@ -443,6 +461,12 @@ class OutputGoogleChronicle(BaseModel):
443
461
  ] = None
444
462
  r"""Custom labels to be added to every batch"""
445
463
 
464
+ udm_type: Annotated[
465
+ Annotated[Optional[UDMType], PlainValidator(validate_open_enum(False))],
466
+ pydantic.Field(alias="udmType"),
467
+ ] = UDMType.LOGS
468
+ r"""Defines the specific format for UDM events sent to Google SecOps. This must match the type of UDM data being sent."""
469
+
446
470
  api_key: Annotated[Optional[str], pydantic.Field(alias="apiKey")] = None
447
471
  r"""Organization's API key in Google SecOps"""
448
472
 
@@ -461,6 +485,35 @@ class OutputGoogleChronicle(BaseModel):
461
485
  ] = None
462
486
  r"""Select or create a stored text secret"""
463
487
 
488
+ pq_strict_ordering: Annotated[
489
+ Optional[bool], pydantic.Field(alias="pqStrictOrdering")
490
+ ] = True
491
+ r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
492
+
493
+ pq_rate_per_sec: Annotated[
494
+ Optional[float], pydantic.Field(alias="pqRatePerSec")
495
+ ] = 0
496
+ r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
497
+
498
+ pq_mode: Annotated[
499
+ Annotated[
500
+ Optional[OutputGoogleChronicleMode],
501
+ PlainValidator(validate_open_enum(False)),
502
+ ],
503
+ pydantic.Field(alias="pqMode"),
504
+ ] = OutputGoogleChronicleMode.ERROR
505
+ r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
506
+
507
+ pq_max_buffer_size: Annotated[
508
+ Optional[float], pydantic.Field(alias="pqMaxBufferSize")
509
+ ] = 42
510
+ r"""The maximum number of events to hold in memory before writing the events to disk"""
511
+
512
+ pq_max_backpressure_sec: Annotated[
513
+ Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
514
+ ] = 30
515
+ r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
516
+
464
517
  pq_max_file_size: Annotated[
465
518
  Optional[str], pydantic.Field(alias="pqMaxFileSize")
466
519
  ] = "1 MB"
@@ -492,15 +545,87 @@ class OutputGoogleChronicle(BaseModel):
492
545
  ] = OutputGoogleChronicleQueueFullBehavior.BLOCK
493
546
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
494
547
 
495
- pq_mode: Annotated[
496
- Annotated[
497
- Optional[OutputGoogleChronicleMode],
498
- PlainValidator(validate_open_enum(False)),
499
- ],
500
- pydantic.Field(alias="pqMode"),
501
- ] = OutputGoogleChronicleMode.ERROR
502
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
503
-
504
548
  pq_controls: Annotated[
505
549
  Optional[OutputGoogleChroniclePqControls], pydantic.Field(alias="pqControls")
506
550
  ] = None
551
+
552
+ @field_serializer("api_version")
553
+ def serialize_api_version(self, value):
554
+ if isinstance(value, str):
555
+ try:
556
+ return models.OutputGoogleChronicleAPIVersion(value)
557
+ except ValueError:
558
+ return value
559
+ return value
560
+
561
+ @field_serializer("authentication_method")
562
+ def serialize_authentication_method(self, value):
563
+ if isinstance(value, str):
564
+ try:
565
+ return models.OutputGoogleChronicleAuthenticationMethod(value)
566
+ except ValueError:
567
+ return value
568
+ return value
569
+
570
+ @field_serializer("log_format_type")
571
+ def serialize_log_format_type(self, value):
572
+ if isinstance(value, str):
573
+ try:
574
+ return models.SendEventsAs(value)
575
+ except ValueError:
576
+ return value
577
+ return value
578
+
579
+ @field_serializer("failed_request_logging_mode")
580
+ def serialize_failed_request_logging_mode(self, value):
581
+ if isinstance(value, str):
582
+ try:
583
+ return models.OutputGoogleChronicleFailedRequestLoggingMode(value)
584
+ except ValueError:
585
+ return value
586
+ return value
587
+
588
+ @field_serializer("on_backpressure")
589
+ def serialize_on_backpressure(self, value):
590
+ if isinstance(value, str):
591
+ try:
592
+ return models.OutputGoogleChronicleBackpressureBehavior(value)
593
+ except ValueError:
594
+ return value
595
+ return value
596
+
597
+ @field_serializer("udm_type")
598
+ def serialize_udm_type(self, value):
599
+ if isinstance(value, str):
600
+ try:
601
+ return models.UDMType(value)
602
+ except ValueError:
603
+ return value
604
+ return value
605
+
606
+ @field_serializer("pq_mode")
607
+ def serialize_pq_mode(self, value):
608
+ if isinstance(value, str):
609
+ try:
610
+ return models.OutputGoogleChronicleMode(value)
611
+ except ValueError:
612
+ return value
613
+ return value
614
+
615
+ @field_serializer("pq_compress")
616
+ def serialize_pq_compress(self, value):
617
+ if isinstance(value, str):
618
+ try:
619
+ return models.OutputGoogleChronicleCompression(value)
620
+ except ValueError:
621
+ return value
622
+ return value
623
+
624
+ @field_serializer("pq_on_backpressure")
625
+ def serialize_pq_on_backpressure(self, value):
626
+ if isinstance(value, str):
627
+ try:
628
+ return models.OutputGoogleChronicleQueueFullBehavior(value)
629
+ except ValueError:
630
+ return value
631
+ return value