cribl-control-plane 0.2.1rc7__py3-none-any.whl → 0.3.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane has been flagged by the registry as possibly problematic.

Files changed (179)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/errors/__init__.py +5 -8
  3. cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
  4. cribl_control_plane/groups_sdk.py +28 -52
  5. cribl_control_plane/health.py +16 -22
  6. cribl_control_plane/models/__init__.py +54 -217
  7. cribl_control_plane/models/appmode.py +14 -0
  8. cribl_control_plane/models/authtoken.py +1 -5
  9. cribl_control_plane/models/cacheconnection.py +0 -20
  10. cribl_control_plane/models/configgroup.py +7 -55
  11. cribl_control_plane/models/configgroupcloud.py +1 -11
  12. cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
  13. cribl_control_plane/models/createroutesappendbyidop.py +2 -2
  14. cribl_control_plane/models/createversionundoop.py +3 -3
  15. cribl_control_plane/models/cribllakedataset.py +1 -11
  16. cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
  17. cribl_control_plane/models/datasetmetadata.py +1 -11
  18. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
  19. cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
  20. cribl_control_plane/models/distributedsummary.py +0 -6
  21. cribl_control_plane/models/error.py +16 -0
  22. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
  23. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
  24. cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
  25. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
  26. cribl_control_plane/models/gethealthinfoop.py +17 -0
  27. cribl_control_plane/models/getsummaryop.py +0 -11
  28. cribl_control_plane/models/hbcriblinfo.py +3 -24
  29. cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
  30. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  31. cribl_control_plane/models/input.py +78 -80
  32. cribl_control_plane/models/inputappscope.py +17 -80
  33. cribl_control_plane/models/inputazureblob.py +1 -33
  34. cribl_control_plane/models/inputcollection.py +1 -24
  35. cribl_control_plane/models/inputconfluentcloud.py +18 -195
  36. cribl_control_plane/models/inputcribl.py +1 -24
  37. cribl_control_plane/models/inputcriblhttp.py +17 -62
  38. cribl_control_plane/models/inputcribllakehttp.py +17 -62
  39. cribl_control_plane/models/inputcriblmetrics.py +1 -24
  40. cribl_control_plane/models/inputcribltcp.py +17 -62
  41. cribl_control_plane/models/inputcrowdstrike.py +1 -54
  42. cribl_control_plane/models/inputdatadogagent.py +17 -62
  43. cribl_control_plane/models/inputdatagen.py +1 -24
  44. cribl_control_plane/models/inputedgeprometheus.py +34 -147
  45. cribl_control_plane/models/inputelastic.py +27 -119
  46. cribl_control_plane/models/inputeventhub.py +1 -182
  47. cribl_control_plane/models/inputexec.py +1 -33
  48. cribl_control_plane/models/inputfile.py +3 -42
  49. cribl_control_plane/models/inputfirehose.py +17 -62
  50. cribl_control_plane/models/inputgooglepubsub.py +1 -36
  51. cribl_control_plane/models/inputgrafana.py +32 -157
  52. cribl_control_plane/models/inputhttp.py +17 -62
  53. cribl_control_plane/models/inputhttpraw.py +17 -62
  54. cribl_control_plane/models/inputjournalfiles.py +1 -24
  55. cribl_control_plane/models/inputkafka.py +17 -189
  56. cribl_control_plane/models/inputkinesis.py +1 -80
  57. cribl_control_plane/models/inputkubeevents.py +1 -24
  58. cribl_control_plane/models/inputkubelogs.py +1 -33
  59. cribl_control_plane/models/inputkubemetrics.py +1 -33
  60. cribl_control_plane/models/inputloki.py +17 -71
  61. cribl_control_plane/models/inputmetrics.py +17 -62
  62. cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
  63. cribl_control_plane/models/inputmsk.py +18 -81
  64. cribl_control_plane/models/inputnetflow.py +1 -24
  65. cribl_control_plane/models/inputoffice365mgmt.py +1 -67
  66. cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
  67. cribl_control_plane/models/inputoffice365service.py +1 -67
  68. cribl_control_plane/models/inputopentelemetry.py +16 -92
  69. cribl_control_plane/models/inputprometheus.py +34 -138
  70. cribl_control_plane/models/inputprometheusrw.py +17 -71
  71. cribl_control_plane/models/inputrawudp.py +1 -24
  72. cribl_control_plane/models/inputs3.py +1 -45
  73. cribl_control_plane/models/inputs3inventory.py +1 -54
  74. cribl_control_plane/models/inputsecuritylake.py +1 -54
  75. cribl_control_plane/models/inputsnmp.py +1 -40
  76. cribl_control_plane/models/inputsplunk.py +17 -85
  77. cribl_control_plane/models/inputsplunkhec.py +16 -70
  78. cribl_control_plane/models/inputsplunksearch.py +1 -63
  79. cribl_control_plane/models/inputsqs.py +1 -56
  80. cribl_control_plane/models/inputsyslog.py +32 -121
  81. cribl_control_plane/models/inputsystemmetrics.py +9 -142
  82. cribl_control_plane/models/inputsystemstate.py +1 -33
  83. cribl_control_plane/models/inputtcp.py +17 -81
  84. cribl_control_plane/models/inputtcpjson.py +17 -71
  85. cribl_control_plane/models/inputwef.py +1 -71
  86. cribl_control_plane/models/inputwindowsmetrics.py +9 -129
  87. cribl_control_plane/models/inputwineventlogs.py +1 -60
  88. cribl_control_plane/models/inputwiz.py +1 -45
  89. cribl_control_plane/models/inputwizwebhook.py +17 -62
  90. cribl_control_plane/models/inputzscalerhec.py +16 -70
  91. cribl_control_plane/models/jobinfo.py +1 -4
  92. cribl_control_plane/models/jobstatus.py +3 -34
  93. cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
  94. cribl_control_plane/models/logininfo.py +3 -3
  95. cribl_control_plane/models/masterworkerentry.py +1 -11
  96. cribl_control_plane/models/nodeprovidedinfo.py +1 -11
  97. cribl_control_plane/models/nodeupgradestatus.py +0 -38
  98. cribl_control_plane/models/output.py +88 -93
  99. cribl_control_plane/models/outputazureblob.py +1 -110
  100. cribl_control_plane/models/outputazuredataexplorer.py +87 -452
  101. cribl_control_plane/models/outputazureeventhub.py +19 -281
  102. cribl_control_plane/models/outputazurelogs.py +19 -115
  103. cribl_control_plane/models/outputchronicle.py +19 -115
  104. cribl_control_plane/models/outputclickhouse.py +19 -155
  105. cribl_control_plane/models/outputcloudwatch.py +19 -106
  106. cribl_control_plane/models/outputconfluentcloud.py +38 -311
  107. cribl_control_plane/models/outputcriblhttp.py +19 -135
  108. cribl_control_plane/models/outputcribllake.py +1 -97
  109. cribl_control_plane/models/outputcribltcp.py +19 -132
  110. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
  111. cribl_control_plane/models/outputdatadog.py +19 -159
  112. cribl_control_plane/models/outputdataset.py +19 -143
  113. cribl_control_plane/models/outputdiskspool.py +1 -11
  114. cribl_control_plane/models/outputdls3.py +1 -152
  115. cribl_control_plane/models/outputdynatracehttp.py +19 -160
  116. cribl_control_plane/models/outputdynatraceotlp.py +19 -160
  117. cribl_control_plane/models/outputelastic.py +19 -163
  118. cribl_control_plane/models/outputelasticcloud.py +19 -140
  119. cribl_control_plane/models/outputexabeam.py +1 -61
  120. cribl_control_plane/models/outputfilesystem.py +1 -87
  121. cribl_control_plane/models/outputgooglechronicle.py +20 -166
  122. cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
  123. cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
  124. cribl_control_plane/models/outputgooglepubsub.py +19 -106
  125. cribl_control_plane/models/outputgrafanacloud.py +37 -288
  126. cribl_control_plane/models/outputgraphite.py +19 -105
  127. cribl_control_plane/models/outputhoneycomb.py +19 -115
  128. cribl_control_plane/models/outputhumiohec.py +19 -126
  129. cribl_control_plane/models/outputinfluxdb.py +19 -130
  130. cribl_control_plane/models/outputkafka.py +34 -302
  131. cribl_control_plane/models/outputkinesis.py +19 -133
  132. cribl_control_plane/models/outputloki.py +17 -129
  133. cribl_control_plane/models/outputminio.py +1 -145
  134. cribl_control_plane/models/outputmsk.py +34 -193
  135. cribl_control_plane/models/outputnewrelic.py +19 -136
  136. cribl_control_plane/models/outputnewrelicevents.py +20 -128
  137. cribl_control_plane/models/outputopentelemetry.py +19 -178
  138. cribl_control_plane/models/outputprometheus.py +19 -115
  139. cribl_control_plane/models/outputring.py +1 -31
  140. cribl_control_plane/models/outputs3.py +1 -152
  141. cribl_control_plane/models/outputsecuritylake.py +1 -114
  142. cribl_control_plane/models/outputsentinel.py +19 -135
  143. cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
  144. cribl_control_plane/models/outputservicenow.py +19 -168
  145. cribl_control_plane/models/outputsignalfx.py +19 -115
  146. cribl_control_plane/models/outputsns.py +17 -113
  147. cribl_control_plane/models/outputsplunk.py +19 -153
  148. cribl_control_plane/models/outputsplunkhec.py +19 -208
  149. cribl_control_plane/models/outputsplunklb.py +19 -182
  150. cribl_control_plane/models/outputsqs.py +17 -124
  151. cribl_control_plane/models/outputstatsd.py +19 -105
  152. cribl_control_plane/models/outputstatsdext.py +19 -105
  153. cribl_control_plane/models/outputsumologic.py +19 -117
  154. cribl_control_plane/models/outputsyslog.py +96 -259
  155. cribl_control_plane/models/outputtcpjson.py +19 -141
  156. cribl_control_plane/models/outputwavefront.py +19 -115
  157. cribl_control_plane/models/outputwebhook.py +19 -161
  158. cribl_control_plane/models/outputxsiam.py +17 -113
  159. cribl_control_plane/models/packinfo.py +5 -8
  160. cribl_control_plane/models/packinstallinfo.py +5 -8
  161. cribl_control_plane/models/resourcepolicy.py +0 -11
  162. cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
  163. cribl_control_plane/models/routeconf.py +4 -3
  164. cribl_control_plane/models/runnablejobcollection.py +9 -72
  165. cribl_control_plane/models/runnablejobexecutor.py +9 -32
  166. cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
  167. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
  168. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
  169. cribl_control_plane/packs.py +7 -202
  170. cribl_control_plane/routes_sdk.py +6 -6
  171. cribl_control_plane/tokens.py +15 -23
  172. {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
  173. cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
  174. cribl_control_plane/models/groupcreaterequest.py +0 -171
  175. cribl_control_plane/models/outpostnodeinfo.py +0 -16
  176. cribl_control_plane/models/outputdatabricks.py +0 -482
  177. cribl_control_plane/models/updatepacksop.py +0 -25
  178. cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
  179. {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
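
Three renames stand out in the list above (`healthserverstatus_error.py → healthstatus_error.py`, `healthserverstatus.py → healthstatus.py`, and `uploadpackresponse.py → routecloneconf.py`), and the large negative line counts across the input/output models are consistent with the three repeated patterns visible in the hunks below: dropped per-value enum comments, dropped @field_serializer hooks, and removed or relocated persistent-queue fields. A minimal import-migration sketch for the health-status rename follows; the module path comes from the file list, but the exported class name is an assumption inferred from the rename, so verify it against the generated models/__init__.py:

# Hypothetical migration for the healthserverstatus.py -> healthstatus.py rename.
# The module path is taken from the file list above; the class name is an
# assumed example, not confirmed by this diff.

# 0.2.1rc7:
# from cribl_control_plane.models.healthserverstatus import HealthServerStatus

# 0.3.0a1:
from cribl_control_plane.models.healthstatus import HealthStatus  # assumed symbol name
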
cribl_control_plane/models/outputgooglecloudlogging.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -17,22 +16,16 @@ class OutputGoogleCloudLoggingType(str, Enum):


 class LogLocationType(str, Enum, metaclass=utils.OpenEnumMeta):
-    # Project
     PROJECT = "project"
-    # Organization
     ORGANIZATION = "organization"
-    # Billing Account
     BILLING_ACCOUNT = "billingAccount"
-    # Folder
     FOLDER = "folder"


 class PayloadFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use when sending payload. Defaults to Text."""

-    # Text
     TEXT = "text"
-    # JSON
     JSON = "json"


@@ -71,11 +64,8 @@ class OutputGoogleCloudLoggingGoogleAuthenticationMethod(
 ):
     r"""Choose Auto to use Google Application Default Credentials (ADC), Manual to enter Google service account credentials directly, or Secret to select or create a stored secret that references Google service account credentials."""

-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret
     SECRET = "secret"


@@ -84,31 +74,15 @@ class OutputGoogleCloudLoggingBackpressureBehavior(
 ):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


-class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputGoogleCloudLoggingCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


@@ -117,12 +91,18 @@ class OutputGoogleCloudLoggingQueueFullBehavior(
 ):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


+class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputGoogleCloudLoggingPqControlsTypedDict(TypedDict):
     pass

@@ -244,16 +224,6 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
     description: NotRequired[str]
     payload_expression: NotRequired[str]
     r"""JavaScript expression to compute the value of the payload. Must evaluate to a JavaScript object value. If an invalid value is encountered it will result in the default value instead. Defaults to the entire event."""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputGoogleCloudLoggingMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -264,6 +234,8 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputGoogleCloudLoggingQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputGoogleCloudLoggingMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputGoogleCloudLoggingPqControlsTypedDict]


@@ -542,35 +514,6 @@ class OutputGoogleCloudLogging(BaseModel):
     ] = None
     r"""JavaScript expression to compute the value of the payload. Must evaluate to a JavaScript object value. If an invalid value is encountered it will result in the default value instead. Defaults to the entire event."""

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputGoogleCloudLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputGoogleCloudLoggingMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -602,69 +545,15 @@ class OutputGoogleCloudLogging(BaseModel):
     ] = OutputGoogleCloudLoggingQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputGoogleCloudLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputGoogleCloudLoggingMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputGoogleCloudLoggingPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("log_location_type")
-    def serialize_log_location_type(self, value):
-        if isinstance(value, str):
-            try:
-                return models.LogLocationType(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("payload_format")
-    def serialize_payload_format(self, value):
-        if isinstance(value, str):
-            try:
-                return models.PayloadFormat(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("google_auth_method")
-    def serialize_google_auth_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudLoggingGoogleAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudLoggingBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudLoggingMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudLoggingCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudLoggingQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
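
The net schema change for SDK users in this file: four persistent-queue tuning fields are gone (pq_strict_ordering, pq_rate_per_sec, pq_max_buffer_size, pq_max_backpressure_sec), while pq_mode survives but is now declared after pq_on_backpressure. A hedged before/after sketch of the PQ portion of a destination payload; the wire-format keys are the pydantic.Field aliases shown in the diff, and the additional keys a real config would need are omitted:

# Keys taken from the aliases in the diff above; a real OutputGoogleCloudLogging
# config would carry further required fields not shown here.
pq_settings_0_2_1rc7 = {
    "pqMode": "error",           # still present in 0.3.0a1
    "pqStrictOrdering": True,    # removed in 0.3.0a1
    "pqRatePerSec": 0,           # removed in 0.3.0a1
    "pqMaxBufferSize": 42,       # removed in 0.3.0a1
    "pqMaxBackpressureSec": 30,  # removed in 0.3.0a1
    "pqMaxFileSize": "1 MB",
}

removed_keys = {
    "pqStrictOrdering", "pqRatePerSec", "pqMaxBufferSize", "pqMaxBackpressureSec",
}
pq_settings_0_3_0a1 = {
    k: v for k, v in pq_settings_0_2_1rc7.items() if k not in removed_keys
}
assert set(pq_settings_0_3_0a1) == {"pqMode", "pqMaxFileSize"}
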
cribl_control_plane/models/outputgooglecloudstorage.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -26,52 +25,36 @@ class OutputGoogleCloudStorageSignatureVersion(str, Enum, metaclass=utils.OpenEn
 class OutputGoogleCloudStorageAuthenticationMethod(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
-    # auto
     AUTO = "auto"
-    # manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"


 class OutputGoogleCloudStorageObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""

-    # private
     PRIVATE = "private"
-    # bucket-owner-read
     BUCKET_OWNER_READ = "bucket-owner-read"
-    # bucket-owner-full-control
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
-    # project-private
     PROJECT_PRIVATE = "project-private"
-    # authenticated-read
     AUTHENTICATED_READ = "authenticated-read"
-    # public-read
     PUBLIC_READ = "public-read"


 class OutputGoogleCloudStorageStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""

-    # Standard Storage
     STANDARD = "STANDARD"
-    # Nearline Storage
     NEARLINE = "NEARLINE"
-    # Coldline Storage
     COLDLINE = "COLDLINE"
-    # Archive Storage
     ARCHIVE = "ARCHIVE"


 class OutputGoogleCloudStorageDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""

-    # JSON
     JSON = "json"
-    # Raw
     RAW = "raw"
-    # Parquet
     PARQUET = "parquet"


@@ -80,9 +63,7 @@ class OutputGoogleCloudStorageBackpressureBehavior(
 ):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"


@@ -91,9 +72,7 @@ class OutputGoogleCloudStorageDiskSpaceProtection(
 ):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"


@@ -107,31 +86,23 @@ class OutputGoogleCloudStorageCompression(str, Enum, metaclass=utils.OpenEnumMet
 class OutputGoogleCloudStorageCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""

-    # Best Speed
     BEST_SPEED = "best_speed"
-    # Normal
     NORMAL = "normal"
-    # Best Compression
     BEST_COMPRESSION = "best_compression"


 class OutputGoogleCloudStorageParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""

-    # 1.0
     PARQUET_1_0 = "PARQUET_1_0"
-    # 2.4
     PARQUET_2_4 = "PARQUET_2_4"
-    # 2.6
     PARQUET_2_6 = "PARQUET_2_6"


 class OutputGoogleCloudStorageDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

-    # V1
     DATA_PAGE_V1 = "DATA_PAGE_V1"
-    # V2
     DATA_PAGE_V2 = "DATA_PAGE_V2"


@@ -218,8 +189,6 @@ class OutputGoogleCloudStorageTypedDict(TypedDict):
     r"""Compression level to apply before moving files to final destination"""
     automatic_schema: NotRequired[bool]
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
-    parquet_schema: NotRequired[str]
-    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
     parquet_version: NotRequired[OutputGoogleCloudStorageParquetVersion]
     r"""Determines which data types are supported and how they are represented"""
     parquet_data_page_version: NotRequired[OutputGoogleCloudStorageDataPageVersion]
@@ -448,11 +417,6 @@ class OutputGoogleCloudStorage(BaseModel):
     ] = False
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""

-    parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
-        None
-    )
-    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
-
     parquet_version: Annotated[
         Annotated[
             Optional[OutputGoogleCloudStorageParquetVersion],
@@ -530,102 +494,3 @@ class OutputGoogleCloudStorage(BaseModel):

     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""
-
-    @field_serializer("signature_version")
-    def serialize_signature_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageSignatureVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("aws_authentication_method")
-    def serialize_aws_authentication_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("object_acl")
-    def serialize_object_acl(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageObjectACL(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("storage_class")
-    def serialize_storage_class(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageStorageClass(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("format_")
-    def serialize_format_(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageDataFormat(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_disk_full_backpressure")
-    def serialize_on_disk_full_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageDiskSpaceProtection(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("compress")
-    def serialize_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("compression_level")
-    def serialize_compression_level(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageCompressionLevel(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("parquet_version")
-    def serialize_parquet_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageParquetVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("parquet_data_page_version")
-    def serialize_parquet_data_page_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGoogleCloudStorageDataPageVersion(value)
-            except ValueError:
-                return value
-        return value
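
Beyond the parquet_schema removal, the bulk of this file's shrinkage is the deletion of eleven @field_serializer hooks that coerced plain strings back into enum members at dump time. A self-contained sketch of the retired pattern using plain pydantic; Compression here is a stand-in enum rather than the SDK class, and note that the generated models additionally paired these fields with PlainValidator(validate_open_enum(False)) so that out-of-range strings could survive validation:

from enum import Enum
from typing import Optional

import pydantic
from pydantic import field_serializer


class Compression(str, Enum):
    # Stand-in for a generated enum such as OutputGoogleCloudStorageCompression.
    NONE = "none"
    GZIP = "gzip"


class Model(pydantic.BaseModel):
    compress: Optional[Compression] = Compression.NONE

    # The hook shape removed throughout 0.3.0a1: if the runtime value is a bare
    # string, try to re-wrap it in the enum; fall back to the raw string for
    # out-of-range values (which the SDK's open-enum validator lets through).
    @field_serializer("compress")
    def serialize_compress(self, value):
        if isinstance(value, str):
            try:
                return Compression(value)
            except ValueError:
                return value
        return value


assert Model(compress="gzip").model_dump()["compress"] is Compression.GZIP
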
cribl_control_plane/models/outputgooglepubsub.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -21,54 +20,41 @@ class OutputGooglePubsubGoogleAuthenticationMethod(
 ):
     r"""Choose Auto to use Google Application Default Credentials (ADC), Manual to enter Google service account credentials directly, or Secret to select or create a stored secret that references Google service account credentials."""

-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret
     SECRET = "secret"


 class OutputGooglePubsubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


-class OutputGooglePubsubMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputGooglePubsubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputGooglePubsubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


+class OutputGooglePubsubMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputGooglePubsubPqControlsTypedDict(TypedDict):
     pass

@@ -118,16 +104,6 @@ class OutputGooglePubsubTypedDict(TypedDict):
     on_backpressure: NotRequired[OutputGooglePubsubBackpressureBehavior]
     r"""How to handle events when all receivers are exerting backpressure"""
     description: NotRequired[str]
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputGooglePubsubMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -138,6 +114,8 @@ class OutputGooglePubsubTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputGooglePubsubQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputGooglePubsubMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputGooglePubsubPqControlsTypedDict]


@@ -229,34 +207,6 @@ class OutputGooglePubsub(BaseModel):

     description: Optional[str] = None

-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputGooglePubsubMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputGooglePubsubMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -288,51 +238,14 @@ class OutputGooglePubsub(BaseModel):
     ] = OutputGooglePubsubQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputGooglePubsubMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputGooglePubsubMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputGooglePubsubPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("google_auth_method")
-    def serialize_google_auth_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGooglePubsubGoogleAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGooglePubsubBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGooglePubsubMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGooglePubsubCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputGooglePubsubQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
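
One detail worth flagging in the OutputGooglePubsubMode move (the same applies to OutputGoogleCloudLoggingMode above): in 0.2.1rc7 the per-member comments were misaligned, with # Backpressure sitting over ALWAYS = "always" and # Always On over BACKPRESSURE = "backpressure". 0.3.0a1 drops the comments and declares the members in ERROR, BACKPRESSURE, ALWAYS order. The string values the API sees are unchanged, so the reordering is cosmetic for existing configs, as this quick check shows (values copied from the diff; the SDK's OpenEnumMeta metaclass is omitted for a self-contained example):

from enum import Enum


class OutputGooglePubsubMode(str, Enum):
    # Member order as declared in 0.3.0a1; values identical to 0.2.1rc7.
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


# Lookups go by value, not declaration order, so existing configs are unaffected.
assert OutputGooglePubsubMode("backpressure") is OutputGooglePubsubMode.BACKPRESSURE
assert OutputGooglePubsubMode("always") is OutputGooglePubsubMode.ALWAYS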