cribl-control-plane 0.0.49__py3-none-any.whl → 0.0.50rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic; see the package registry's advisory page for more details.

Files changed (173)
  1. cribl_control_plane/_version.py +4 -6
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/health.py +6 -2
  4. cribl_control_plane/models/__init__.py +21 -4
  5. cribl_control_plane/models/appmode.py +2 -1
  6. cribl_control_plane/models/cacheconnection.py +10 -2
  7. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  8. cribl_control_plane/models/cloudprovider.py +2 -1
  9. cribl_control_plane/models/configgroup.py +7 -2
  10. cribl_control_plane/models/configgroupcloud.py +6 -2
  11. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  12. cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
  13. cribl_control_plane/models/createversionpushop.py +5 -5
  14. cribl_control_plane/models/createversionundoop.py +3 -3
  15. cribl_control_plane/models/cribllakedataset.py +8 -2
  16. cribl_control_plane/models/datasetmetadata.py +8 -2
  17. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  18. cribl_control_plane/models/error.py +16 -0
  19. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  20. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  21. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  22. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  23. cribl_control_plane/models/gethealthinfoop.py +17 -0
  24. cribl_control_plane/models/getsummaryop.py +7 -2
  25. cribl_control_plane/models/getversionshowop.py +6 -5
  26. cribl_control_plane/models/gitinfo.py +14 -3
  27. cribl_control_plane/models/gitshowresult.py +19 -0
  28. cribl_control_plane/models/hbcriblinfo.py +11 -1
  29. cribl_control_plane/models/healthstatus.py +7 -4
  30. cribl_control_plane/models/inputappscope.py +34 -14
  31. cribl_control_plane/models/inputazureblob.py +17 -6
  32. cribl_control_plane/models/inputcollection.py +11 -4
  33. cribl_control_plane/models/inputconfluentcloud.py +47 -20
  34. cribl_control_plane/models/inputcribl.py +11 -4
  35. cribl_control_plane/models/inputcriblhttp.py +23 -8
  36. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  37. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  38. cribl_control_plane/models/inputcribltcp.py +23 -8
  39. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  40. cribl_control_plane/models/inputdatadogagent.py +24 -8
  41. cribl_control_plane/models/inputdatagen.py +11 -4
  42. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  43. cribl_control_plane/models/inputelastic.py +40 -14
  44. cribl_control_plane/models/inputeventhub.py +15 -6
  45. cribl_control_plane/models/inputexec.py +14 -6
  46. cribl_control_plane/models/inputfile.py +15 -6
  47. cribl_control_plane/models/inputfirehose.py +23 -8
  48. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  49. cribl_control_plane/models/inputgrafana.py +67 -24
  50. cribl_control_plane/models/inputhttp.py +23 -8
  51. cribl_control_plane/models/inputhttpraw.py +23 -8
  52. cribl_control_plane/models/inputjournalfiles.py +12 -4
  53. cribl_control_plane/models/inputkafka.py +46 -16
  54. cribl_control_plane/models/inputkinesis.py +38 -14
  55. cribl_control_plane/models/inputkubeevents.py +11 -4
  56. cribl_control_plane/models/inputkubelogs.py +16 -8
  57. cribl_control_plane/models/inputkubemetrics.py +16 -8
  58. cribl_control_plane/models/inputloki.py +29 -10
  59. cribl_control_plane/models/inputmetrics.py +23 -8
  60. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  61. cribl_control_plane/models/inputmsk.py +53 -18
  62. cribl_control_plane/models/inputnetflow.py +11 -4
  63. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  64. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  65. cribl_control_plane/models/inputoffice365service.py +35 -16
  66. cribl_control_plane/models/inputopentelemetry.py +38 -16
  67. cribl_control_plane/models/inputprometheus.py +50 -18
  68. cribl_control_plane/models/inputprometheusrw.py +30 -10
  69. cribl_control_plane/models/inputrawudp.py +11 -4
  70. cribl_control_plane/models/inputs3.py +21 -8
  71. cribl_control_plane/models/inputs3inventory.py +26 -10
  72. cribl_control_plane/models/inputsecuritylake.py +27 -10
  73. cribl_control_plane/models/inputsnmp.py +16 -6
  74. cribl_control_plane/models/inputsplunk.py +33 -12
  75. cribl_control_plane/models/inputsplunkhec.py +29 -10
  76. cribl_control_plane/models/inputsplunksearch.py +33 -14
  77. cribl_control_plane/models/inputsqs.py +27 -10
  78. cribl_control_plane/models/inputsyslog.py +43 -16
  79. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  80. cribl_control_plane/models/inputsystemstate.py +16 -8
  81. cribl_control_plane/models/inputtcp.py +29 -10
  82. cribl_control_plane/models/inputtcpjson.py +29 -10
  83. cribl_control_plane/models/inputwef.py +37 -14
  84. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  85. cribl_control_plane/models/inputwineventlogs.py +20 -10
  86. cribl_control_plane/models/inputwiz.py +21 -8
  87. cribl_control_plane/models/inputwizwebhook.py +23 -8
  88. cribl_control_plane/models/inputzscalerhec.py +29 -10
  89. cribl_control_plane/models/jobinfo.py +4 -1
  90. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  91. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  92. cribl_control_plane/models/masterworkerentry.py +7 -2
  93. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  94. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  95. cribl_control_plane/models/nodeprovidedinfo.py +4 -1
  96. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  97. cribl_control_plane/models/nodeupgradestate.py +2 -1
  98. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  99. cribl_control_plane/models/outputazureblob.py +48 -18
  100. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  101. cribl_control_plane/models/outputazureeventhub.py +40 -18
  102. cribl_control_plane/models/outputazurelogs.py +35 -12
  103. cribl_control_plane/models/outputclickhouse.py +55 -20
  104. cribl_control_plane/models/outputcloudwatch.py +29 -10
  105. cribl_control_plane/models/outputconfluentcloud.py +77 -32
  106. cribl_control_plane/models/outputcriblhttp.py +44 -16
  107. cribl_control_plane/models/outputcribllake.py +46 -16
  108. cribl_control_plane/models/outputcribltcp.py +45 -18
  109. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  110. cribl_control_plane/models/outputdatadog.py +48 -20
  111. cribl_control_plane/models/outputdataset.py +46 -18
  112. cribl_control_plane/models/outputdiskspool.py +7 -2
  113. cribl_control_plane/models/outputdls3.py +68 -24
  114. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  115. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  116. cribl_control_plane/models/outputelastic.py +43 -18
  117. cribl_control_plane/models/outputelasticcloud.py +36 -12
  118. cribl_control_plane/models/outputexabeam.py +29 -10
  119. cribl_control_plane/models/outputfilesystem.py +39 -14
  120. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  121. cribl_control_plane/models/outputgooglecloudlogging.py +50 -18
  122. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  123. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  124. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  125. cribl_control_plane/models/outputgraphite.py +31 -14
  126. cribl_control_plane/models/outputhoneycomb.py +35 -12
  127. cribl_control_plane/models/outputhumiohec.py +43 -16
  128. cribl_control_plane/models/outputinfluxdb.py +42 -16
  129. cribl_control_plane/models/outputkafka.py +74 -28
  130. cribl_control_plane/models/outputkinesis.py +40 -16
  131. cribl_control_plane/models/outputloki.py +41 -16
  132. cribl_control_plane/models/outputminio.py +65 -24
  133. cribl_control_plane/models/outputmsk.py +82 -30
  134. cribl_control_plane/models/outputnewrelic.py +43 -18
  135. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  136. cribl_control_plane/models/outputopentelemetry.py +67 -26
  137. cribl_control_plane/models/outputprometheus.py +35 -12
  138. cribl_control_plane/models/outputring.py +19 -8
  139. cribl_control_plane/models/outputs3.py +68 -26
  140. cribl_control_plane/models/outputsecuritylake.py +52 -18
  141. cribl_control_plane/models/outputsentinel.py +45 -18
  142. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  143. cribl_control_plane/models/outputservicenow.py +60 -24
  144. cribl_control_plane/models/outputsignalfx.py +37 -14
  145. cribl_control_plane/models/outputsns.py +36 -14
  146. cribl_control_plane/models/outputsplunk.py +60 -24
  147. cribl_control_plane/models/outputsplunkhec.py +35 -12
  148. cribl_control_plane/models/outputsplunklb.py +77 -30
  149. cribl_control_plane/models/outputsqs.py +41 -16
  150. cribl_control_plane/models/outputstatsd.py +30 -14
  151. cribl_control_plane/models/outputstatsdext.py +29 -12
  152. cribl_control_plane/models/outputsumologic.py +35 -12
  153. cribl_control_plane/models/outputsyslog.py +58 -24
  154. cribl_control_plane/models/outputtcpjson.py +52 -20
  155. cribl_control_plane/models/outputwavefront.py +35 -12
  156. cribl_control_plane/models/outputwebhook.py +58 -22
  157. cribl_control_plane/models/outputxsiam.py +35 -14
  158. cribl_control_plane/models/packinfo.py +3 -0
  159. cribl_control_plane/models/packinstallinfo.py +3 -0
  160. cribl_control_plane/models/productscore.py +2 -1
  161. cribl_control_plane/models/rbacresource.py +2 -1
  162. cribl_control_plane/models/resourcepolicy.py +4 -2
  163. cribl_control_plane/models/runnablejobcollection.py +30 -13
  164. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  165. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  166. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  167. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  168. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
  169. cribl_control_plane/models/workertypes.py +2 -1
  170. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.0.50rc1.dist-info}/METADATA +1 -1
  171. cribl_control_plane-0.0.50rc1.dist-info/RECORD +328 -0
  172. cribl_control_plane-0.0.49.dist-info/RECORD +0 -325
  173. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.0.50rc1.dist-info}/WHEEL +0 -0
@@ -1,10 +1,11 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
+ from cribl_control_plane import utils
4
5
  from enum import Enum
5
6
 
6
7
 
7
- class NodeActiveUpgradeStatus(int, Enum):
8
+ class NodeActiveUpgradeStatus(int, Enum, metaclass=utils.OpenEnumMeta):
8
9
  ZERO = 0
9
10
  ONE = 1
10
11
  TWO = 2
@@ -1,9 +1,10 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
+ from cribl_control_plane import utils
4
5
  from enum import Enum
5
6
 
6
7
 
7
- class NodeFailedUpgradeStatus(int, Enum):
8
+ class NodeFailedUpgradeStatus(int, Enum, metaclass=utils.OpenEnumMeta):
8
9
  ZERO = 0
9
10
  ONE = 1
@@ -5,7 +5,7 @@ from .hbcriblinfo import HBCriblInfo, HBCriblInfoTypedDict
5
5
  from .heartbeatmetadata import HeartbeatMetadata, HeartbeatMetadataTypedDict
6
6
  from cribl_control_plane.types import BaseModel
7
7
  import pydantic
8
- from typing import List, Optional, Union
8
+ from typing import Dict, List, Optional, Union
9
9
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
10
10
 
11
11
 
@@ -125,6 +125,7 @@ class NodeProvidedInfoTypedDict(TypedDict):
125
125
  architecture: str
126
126
  cpus: float
127
127
  cribl: HBCriblInfoTypedDict
128
+ env: Dict[str, str]
128
129
  free_disk_space: float
129
130
  hostname: str
130
131
  node: str
@@ -149,6 +150,8 @@ class NodeProvidedInfo(BaseModel):
149
150
 
150
151
  cribl: HBCriblInfo
151
152
 
153
+ env: Dict[str, str]
154
+
152
155
  free_disk_space: Annotated[float, pydantic.Field(alias="freeDiskSpace")]
153
156
 
154
157
  hostname: str
@@ -1,10 +1,11 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
+ from cribl_control_plane import utils
4
5
  from enum import Enum
5
6
 
6
7
 
7
- class NodeSkippedUpgradeStatus(int, Enum):
8
+ class NodeSkippedUpgradeStatus(int, Enum, metaclass=utils.OpenEnumMeta):
8
9
  ZERO = 0
9
10
  ONE = 1
10
11
  TWO = 2
@@ -1,10 +1,11 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
+ from cribl_control_plane import utils
4
5
  from enum import Enum
5
6
 
6
7
 
7
- class NodeUpgradeState(int, Enum):
8
+ class NodeUpgradeState(int, Enum, metaclass=utils.OpenEnumMeta):
8
9
  ZERO = 0
9
10
  ONE = 1
10
11
  TWO = 2
@@ -6,8 +6,10 @@ from .nodefailedupgradestatus import NodeFailedUpgradeStatus
6
6
  from .nodeskippedupgradestatus import NodeSkippedUpgradeStatus
7
7
  from .nodeupgradestate import NodeUpgradeState
8
8
  from cribl_control_plane.types import BaseModel
9
+ from cribl_control_plane.utils import validate_open_enum
10
+ from pydantic.functional_validators import PlainValidator
9
11
  from typing import Optional
10
- from typing_extensions import NotRequired, TypedDict
12
+ from typing_extensions import Annotated, NotRequired, TypedDict
11
13
 
12
14
 
13
15
  class NodeUpgradeStatusTypedDict(TypedDict):
@@ -19,12 +21,18 @@ class NodeUpgradeStatusTypedDict(TypedDict):
19
21
 
20
22
 
21
23
  class NodeUpgradeStatus(BaseModel):
22
- state: NodeUpgradeState
24
+ state: Annotated[NodeUpgradeState, PlainValidator(validate_open_enum(True))]
23
25
 
24
26
  timestamp: float
25
27
 
26
- active: Optional[NodeActiveUpgradeStatus] = None
28
+ active: Annotated[
29
+ Optional[NodeActiveUpgradeStatus], PlainValidator(validate_open_enum(True))
30
+ ] = None
27
31
 
28
- failed: Optional[NodeFailedUpgradeStatus] = None
32
+ failed: Annotated[
33
+ Optional[NodeFailedUpgradeStatus], PlainValidator(validate_open_enum(True))
34
+ ] = None
29
35
 
30
- skipped: Optional[NodeSkippedUpgradeStatus] = None
36
+ skipped: Annotated[
37
+ Optional[NodeSkippedUpgradeStatus], PlainValidator(validate_open_enum(True))
38
+ ] = None
@@ -1,9 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
+ from cribl_control_plane import utils
4
5
  from cribl_control_plane.types import BaseModel
6
+ from cribl_control_plane.utils import validate_open_enum
5
7
  from enum import Enum
6
8
  import pydantic
9
+ from pydantic.functional_validators import PlainValidator
7
10
  from typing import List, Optional
8
11
  from typing_extensions import Annotated, NotRequired, TypedDict
9
12
 
@@ -12,7 +15,7 @@ class OutputAzureBlobType(str, Enum):
12
15
  AZURE_BLOB = "azure_blob"
13
16
 
14
17
 
15
- class OutputAzureBlobDataFormat(str, Enum):
18
+ class OutputAzureBlobDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
16
19
  r"""Format of the output data"""
17
20
 
18
21
  JSON = "json"
@@ -20,28 +23,28 @@ class OutputAzureBlobDataFormat(str, Enum):
20
23
  PARQUET = "parquet"
21
24
 
22
25
 
23
- class OutputAzureBlobBackpressureBehavior(str, Enum):
26
+ class OutputAzureBlobBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
24
27
  r"""How to handle events when all receivers are exerting backpressure"""
25
28
 
26
29
  BLOCK = "block"
27
30
  DROP = "drop"
28
31
 
29
32
 
30
- class OutputAzureBlobDiskSpaceProtection(str, Enum):
33
+ class OutputAzureBlobDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
31
34
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
32
35
 
33
36
  BLOCK = "block"
34
37
  DROP = "drop"
35
38
 
36
39
 
37
- class OutputAzureBlobAuthenticationMethod(str, Enum):
40
+ class OutputAzureBlobAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
38
41
  MANUAL = "manual"
39
42
  SECRET = "secret"
40
43
  CLIENT_SECRET = "clientSecret"
41
44
  CLIENT_CERT = "clientCert"
42
45
 
43
46
 
44
- class BlobAccessTier(str, Enum):
47
+ class BlobAccessTier(str, Enum, metaclass=utils.OpenEnumMeta):
45
48
  INFERRED = "Inferred"
46
49
  HOT = "Hot"
47
50
  COOL = "Cool"
@@ -49,14 +52,14 @@ class BlobAccessTier(str, Enum):
49
52
  ARCHIVE = "Archive"
50
53
 
51
54
 
52
- class OutputAzureBlobCompression(str, Enum):
55
+ class OutputAzureBlobCompression(str, Enum, metaclass=utils.OpenEnumMeta):
53
56
  r"""Data compression format to apply to HTTP content before it is delivered"""
54
57
 
55
58
  NONE = "none"
56
59
  GZIP = "gzip"
57
60
 
58
61
 
59
- class OutputAzureBlobCompressionLevel(str, Enum):
62
+ class OutputAzureBlobCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
60
63
  r"""Compression level to apply before moving files to final destination"""
61
64
 
62
65
  BEST_SPEED = "best_speed"
@@ -64,7 +67,7 @@ class OutputAzureBlobCompressionLevel(str, Enum):
64
67
  BEST_COMPRESSION = "best_compression"
65
68
 
66
69
 
67
- class OutputAzureBlobParquetVersion(str, Enum):
70
+ class OutputAzureBlobParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
68
71
  r"""Determines which data types are supported and how they are represented"""
69
72
 
70
73
  PARQUET_1_0 = "PARQUET_1_0"
@@ -72,7 +75,7 @@ class OutputAzureBlobParquetVersion(str, Enum):
72
75
  PARQUET_2_6 = "PARQUET_2_6"
73
76
 
74
77
 
75
- class OutputAzureBlobDataPageVersion(str, Enum):
78
+ class OutputAzureBlobDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
76
79
  r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
77
80
 
78
81
  DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -261,7 +264,11 @@ class OutputAzureBlob(BaseModel):
261
264
  r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
262
265
 
263
266
  format_: Annotated[
264
- Optional[OutputAzureBlobDataFormat], pydantic.Field(alias="format")
267
+ Annotated[
268
+ Optional[OutputAzureBlobDataFormat],
269
+ PlainValidator(validate_open_enum(False)),
270
+ ],
271
+ pydantic.Field(alias="format"),
265
272
  ] = OutputAzureBlobDataFormat.JSON
266
273
  r"""Format of the output data"""
267
274
 
@@ -304,7 +311,10 @@ class OutputAzureBlob(BaseModel):
304
311
  r"""Buffer size used to write to a file"""
305
312
 
306
313
  on_backpressure: Annotated[
307
- Optional[OutputAzureBlobBackpressureBehavior],
314
+ Annotated[
315
+ Optional[OutputAzureBlobBackpressureBehavior],
316
+ PlainValidator(validate_open_enum(False)),
317
+ ],
308
318
  pydantic.Field(alias="onBackpressure"),
309
319
  ] = OutputAzureBlobBackpressureBehavior.BLOCK
310
320
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -315,26 +325,39 @@ class OutputAzureBlob(BaseModel):
315
325
  r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
316
326
 
317
327
  on_disk_full_backpressure: Annotated[
318
- Optional[OutputAzureBlobDiskSpaceProtection],
328
+ Annotated[
329
+ Optional[OutputAzureBlobDiskSpaceProtection],
330
+ PlainValidator(validate_open_enum(False)),
331
+ ],
319
332
  pydantic.Field(alias="onDiskFullBackpressure"),
320
333
  ] = OutputAzureBlobDiskSpaceProtection.BLOCK
321
334
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
322
335
 
323
336
  auth_type: Annotated[
324
- Optional[OutputAzureBlobAuthenticationMethod], pydantic.Field(alias="authType")
337
+ Annotated[
338
+ Optional[OutputAzureBlobAuthenticationMethod],
339
+ PlainValidator(validate_open_enum(False)),
340
+ ],
341
+ pydantic.Field(alias="authType"),
325
342
  ] = OutputAzureBlobAuthenticationMethod.MANUAL
326
343
 
327
344
  storage_class: Annotated[
328
- Optional[BlobAccessTier], pydantic.Field(alias="storageClass")
345
+ Annotated[Optional[BlobAccessTier], PlainValidator(validate_open_enum(False))],
346
+ pydantic.Field(alias="storageClass"),
329
347
  ] = BlobAccessTier.INFERRED
330
348
 
331
349
  description: Optional[str] = None
332
350
 
333
- compress: Optional[OutputAzureBlobCompression] = OutputAzureBlobCompression.GZIP
351
+ compress: Annotated[
352
+ Optional[OutputAzureBlobCompression], PlainValidator(validate_open_enum(False))
353
+ ] = OutputAzureBlobCompression.GZIP
334
354
  r"""Data compression format to apply to HTTP content before it is delivered"""
335
355
 
336
356
  compression_level: Annotated[
337
- Optional[OutputAzureBlobCompressionLevel],
357
+ Annotated[
358
+ Optional[OutputAzureBlobCompressionLevel],
359
+ PlainValidator(validate_open_enum(False)),
360
+ ],
338
361
  pydantic.Field(alias="compressionLevel"),
339
362
  ] = OutputAzureBlobCompressionLevel.BEST_SPEED
340
363
  r"""Compression level to apply before moving files to final destination"""
@@ -345,12 +368,19 @@ class OutputAzureBlob(BaseModel):
345
368
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
346
369
 
347
370
  parquet_version: Annotated[
348
- Optional[OutputAzureBlobParquetVersion], pydantic.Field(alias="parquetVersion")
371
+ Annotated[
372
+ Optional[OutputAzureBlobParquetVersion],
373
+ PlainValidator(validate_open_enum(False)),
374
+ ],
375
+ pydantic.Field(alias="parquetVersion"),
349
376
  ] = OutputAzureBlobParquetVersion.PARQUET_2_6
350
377
  r"""Determines which data types are supported and how they are represented"""
351
378
 
352
379
  parquet_data_page_version: Annotated[
353
- Optional[OutputAzureBlobDataPageVersion],
380
+ Annotated[
381
+ Optional[OutputAzureBlobDataPageVersion],
382
+ PlainValidator(validate_open_enum(False)),
383
+ ],
354
384
  pydantic.Field(alias="parquetDataPageVersion"),
355
385
  ] = OutputAzureBlobDataPageVersion.DATA_PAGE_V2
356
386
  r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
@@ -1,9 +1,12 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
+ from cribl_control_plane import utils
4
5
  from cribl_control_plane.types import BaseModel
6
+ from cribl_control_plane.utils import validate_open_enum
5
7
  from enum import Enum
6
8
  import pydantic
9
+ from pydantic.functional_validators import PlainValidator
7
10
  from typing import List, Optional
8
11
  from typing_extensions import Annotated, NotRequired, TypedDict
9
12
 
@@ -12,12 +15,12 @@ class OutputAzureDataExplorerType(str, Enum):
12
15
  AZURE_DATA_EXPLORER = "azure_data_explorer"
13
16
 
14
17
 
15
- class IngestionMode(str, Enum):
18
+ class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
16
19
  BATCHING = "batching"
17
20
  STREAMING = "streaming"
18
21
 
19
22
 
20
- class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
23
+ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
21
24
  r"""Endpoint used to acquire authentication tokens from Azure"""
22
25
 
23
26
  HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
@@ -25,7 +28,9 @@ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
25
28
  HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"
26
29
 
27
30
 
28
- class OutputAzureDataExplorerAuthenticationMethod(str, Enum):
31
+ class OutputAzureDataExplorerAuthenticationMethod(
32
+ str, Enum, metaclass=utils.OpenEnumMeta
33
+ ):
29
34
  r"""The type of OAuth 2.0 client credentials grant flow to use"""
30
35
 
31
36
  CLIENT_SECRET = "clientSecret"
@@ -45,7 +50,9 @@ class OutputAzureDataExplorerCertificate(BaseModel):
45
50
  r"""The certificate you registered as credentials for your app in the Azure portal"""
46
51
 
47
52
 
48
- class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
53
+ class OutputAzureDataExplorerBackpressureBehavior(
54
+ str, Enum, metaclass=utils.OpenEnumMeta
55
+ ):
49
56
  r"""How to handle events when all receivers are exerting backpressure"""
50
57
 
51
58
  BLOCK = "block"
@@ -53,7 +60,7 @@ class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
53
60
  QUEUE = "queue"
54
61
 
55
62
 
56
- class OutputAzureDataExplorerDataFormat(str, Enum):
63
+ class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
57
64
  r"""Format of the output data"""
58
65
 
59
66
  JSON = "json"
@@ -61,14 +68,16 @@ class OutputAzureDataExplorerDataFormat(str, Enum):
61
68
  PARQUET = "parquet"
62
69
 
63
70
 
64
- class OutputAzureDataExplorerDiskSpaceProtection(str, Enum):
71
+ class OutputAzureDataExplorerDiskSpaceProtection(
72
+ str, Enum, metaclass=utils.OpenEnumMeta
73
+ ):
65
74
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
66
75
 
67
76
  BLOCK = "block"
68
77
  DROP = "drop"
69
78
 
70
79
 
71
- class PrefixOptional(str, Enum):
80
+ class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
72
81
  DROP_BY = "dropBy"
73
82
  INGEST_BY = "ingestBy"
74
83
 
@@ -81,7 +90,9 @@ class ExtentTagTypedDict(TypedDict):
81
90
  class ExtentTag(BaseModel):
82
91
  value: str
83
92
 
84
- prefix: Optional[PrefixOptional] = None
93
+ prefix: Annotated[
94
+ Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
95
+ ] = None
85
96
 
86
97
 
87
98
  class IngestIfNotExistTypedDict(TypedDict):
@@ -92,7 +103,7 @@ class IngestIfNotExist(BaseModel):
92
103
  value: str
93
104
 
94
105
 
95
- class ReportLevel(str, Enum):
106
+ class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
96
107
  r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
97
108
 
98
109
  FAILURES_ONLY = "failuresOnly"
@@ -100,7 +111,7 @@ class ReportLevel(str, Enum):
100
111
  FAILURES_AND_SUCCESSES = "failuresAndSuccesses"
101
112
 
102
113
 
103
- class ReportMethod(str, Enum):
114
+ class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
104
115
  r"""Target of the ingestion status reporting. Defaults to Queue."""
105
116
 
106
117
  QUEUE = "queue"
@@ -173,28 +184,32 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
173
184
  r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
174
185
 
175
186
 
176
- class OutputAzureDataExplorerCompressCompression(str, Enum):
187
+ class OutputAzureDataExplorerCompressCompression(
188
+ str, Enum, metaclass=utils.OpenEnumMeta
189
+ ):
177
190
  r"""Data compression format to apply to HTTP content before it is delivered"""
178
191
 
179
192
  NONE = "none"
180
193
  GZIP = "gzip"
181
194
 
182
195
 
183
- class OutputAzureDataExplorerPqCompressCompression(str, Enum):
196
+ class OutputAzureDataExplorerPqCompressCompression(
197
+ str, Enum, metaclass=utils.OpenEnumMeta
198
+ ):
184
199
  r"""Codec to use to compress the persisted data"""
185
200
 
186
201
  NONE = "none"
187
202
  GZIP = "gzip"
188
203
 
189
204
 
190
- class OutputAzureDataExplorerQueueFullBehavior(str, Enum):
205
+ class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
191
206
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
192
207
 
193
208
  BLOCK = "block"
194
209
  DROP = "drop"
195
210
 
196
211
 
197
- class OutputAzureDataExplorerMode(str, Enum):
212
+ class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
198
213
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
199
214
 
200
215
  ERROR = "error"
@@ -384,17 +399,24 @@ class OutputAzureDataExplorer(BaseModel):
384
399
  r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
385
400
 
386
401
  ingest_mode: Annotated[
387
- Optional[IngestionMode], pydantic.Field(alias="ingestMode")
402
+ Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
403
+ pydantic.Field(alias="ingestMode"),
388
404
  ] = IngestionMode.BATCHING
389
405
 
390
406
  oauth_endpoint: Annotated[
391
- Optional[MicrosoftEntraIDAuthenticationEndpoint],
407
+ Annotated[
408
+ Optional[MicrosoftEntraIDAuthenticationEndpoint],
409
+ PlainValidator(validate_open_enum(False)),
410
+ ],
392
411
  pydantic.Field(alias="oauthEndpoint"),
393
412
  ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
394
413
  r"""Endpoint used to acquire authentication tokens from Azure"""
395
414
 
396
415
  oauth_type: Annotated[
397
- Optional[OutputAzureDataExplorerAuthenticationMethod],
416
+ Annotated[
417
+ Optional[OutputAzureDataExplorerAuthenticationMethod],
418
+ PlainValidator(validate_open_enum(False)),
419
+ ],
398
420
  pydantic.Field(alias="oauthType"),
399
421
  ] = OutputAzureDataExplorerAuthenticationMethod.CLIENT_SECRET
400
422
  r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -413,7 +435,10 @@ class OutputAzureDataExplorer(BaseModel):
413
435
  r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
414
436
 
415
437
  on_backpressure: Annotated[
416
- Optional[OutputAzureDataExplorerBackpressureBehavior],
438
+ Annotated[
439
+ Optional[OutputAzureDataExplorerBackpressureBehavior],
440
+ PlainValidator(validate_open_enum(False)),
441
+ ],
417
442
  pydantic.Field(alias="onBackpressure"),
418
443
  ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
419
444
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -424,7 +449,11 @@ class OutputAzureDataExplorer(BaseModel):
424
449
  r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
425
450
 
426
451
  format_: Annotated[
427
- Optional[OutputAzureDataExplorerDataFormat], pydantic.Field(alias="format")
452
+ Annotated[
453
+ Optional[OutputAzureDataExplorerDataFormat],
454
+ PlainValidator(validate_open_enum(False)),
455
+ ],
456
+ pydantic.Field(alias="format"),
428
457
  ] = OutputAzureDataExplorerDataFormat.JSON
429
458
  r"""Format of the output data"""
430
459
 
@@ -464,7 +493,10 @@ class OutputAzureDataExplorer(BaseModel):
464
493
  r"""Maximum number of parts to upload in parallel per file"""
465
494
 
466
495
  on_disk_full_backpressure: Annotated[
467
- Optional[OutputAzureDataExplorerDiskSpaceProtection],
496
+ Annotated[
497
+ Optional[OutputAzureDataExplorerDiskSpaceProtection],
498
+ PlainValidator(validate_open_enum(False)),
499
+ ],
468
500
  pydantic.Field(alias="onDiskFullBackpressure"),
469
501
  ] = OutputAzureDataExplorerDiskSpaceProtection.BLOCK
470
502
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -508,12 +540,14 @@ class OutputAzureDataExplorer(BaseModel):
508
540
  r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""
509
541
 
510
542
  report_level: Annotated[
511
- Optional[ReportLevel], pydantic.Field(alias="reportLevel")
543
+ Annotated[Optional[ReportLevel], PlainValidator(validate_open_enum(False))],
544
+ pydantic.Field(alias="reportLevel"),
512
545
  ] = ReportLevel.FAILURES_ONLY
513
546
  r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
514
547
 
515
548
  report_method: Annotated[
516
- Optional[ReportMethod], pydantic.Field(alias="reportMethod")
549
+ Annotated[Optional[ReportMethod], PlainValidator(validate_open_enum(False))],
550
+ pydantic.Field(alias="reportMethod"),
517
551
  ] = ReportMethod.QUEUE
518
552
  r"""Target of the ingestion status reporting. Defaults to Queue."""
519
553
 
@@ -538,9 +572,10 @@ class OutputAzureDataExplorer(BaseModel):
538
572
  ] = True
539
573
  r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
540
574
 
541
- compress: Optional[OutputAzureDataExplorerCompressCompression] = (
542
- OutputAzureDataExplorerCompressCompression.GZIP
543
- )
575
+ compress: Annotated[
576
+ Optional[OutputAzureDataExplorerCompressCompression],
577
+ PlainValidator(validate_open_enum(False)),
578
+ ] = OutputAzureDataExplorerCompressCompression.GZIP
544
579
  r"""Data compression format to apply to HTTP content before it is delivered"""
545
580
 
546
581
  mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
@@ -594,19 +629,29 @@ class OutputAzureDataExplorer(BaseModel):
594
629
  r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
595
630
 
596
631
  pq_compress: Annotated[
597
- Optional[OutputAzureDataExplorerPqCompressCompression],
632
+ Annotated[
633
+ Optional[OutputAzureDataExplorerPqCompressCompression],
634
+ PlainValidator(validate_open_enum(False)),
635
+ ],
598
636
  pydantic.Field(alias="pqCompress"),
599
637
  ] = OutputAzureDataExplorerPqCompressCompression.NONE
600
638
  r"""Codec to use to compress the persisted data"""
601
639
 
602
640
  pq_on_backpressure: Annotated[
603
- Optional[OutputAzureDataExplorerQueueFullBehavior],
641
+ Annotated[
642
+ Optional[OutputAzureDataExplorerQueueFullBehavior],
643
+ PlainValidator(validate_open_enum(False)),
644
+ ],
604
645
  pydantic.Field(alias="pqOnBackpressure"),
605
646
  ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
606
647
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
607
648
 
608
649
  pq_mode: Annotated[
609
- Optional[OutputAzureDataExplorerMode], pydantic.Field(alias="pqMode")
650
+ Annotated[
651
+ Optional[OutputAzureDataExplorerMode],
652
+ PlainValidator(validate_open_enum(False)),
653
+ ],
654
+ pydantic.Field(alias="pqMode"),
610
655
  ] = OutputAzureDataExplorerMode.ERROR
611
656
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
612
657