cribl-control-plane 0.2.0rc1__py3-none-any.whl → 0.3.0b1__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as published in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic.

Files changed (144)
  1. cribl_control_plane/_version.py +3 -5
  2. cribl_control_plane/groups_sdk.py +8 -8
  3. cribl_control_plane/models/__init__.py +65 -27
  4. cribl_control_plane/models/authtoken.py +8 -1
  5. cribl_control_plane/models/configgroup.py +30 -2
  6. cribl_control_plane/models/createversionundoop.py +3 -3
  7. cribl_control_plane/models/distributedsummary.py +6 -0
  8. cribl_control_plane/models/hbcriblinfo.py +14 -3
  9. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  10. cribl_control_plane/models/input.py +65 -63
  11. cribl_control_plane/models/inputappscope.py +4 -0
  12. cribl_control_plane/models/inputazureblob.py +4 -0
  13. cribl_control_plane/models/inputcollection.py +4 -0
  14. cribl_control_plane/models/inputconfluentcloud.py +8 -18
  15. cribl_control_plane/models/inputcribl.py +4 -0
  16. cribl_control_plane/models/inputcriblhttp.py +4 -0
  17. cribl_control_plane/models/inputcribllakehttp.py +4 -0
  18. cribl_control_plane/models/inputcriblmetrics.py +4 -0
  19. cribl_control_plane/models/inputcribltcp.py +4 -0
  20. cribl_control_plane/models/inputcrowdstrike.py +7 -0
  21. cribl_control_plane/models/inputdatadogagent.py +4 -0
  22. cribl_control_plane/models/inputdatagen.py +4 -0
  23. cribl_control_plane/models/inputedgeprometheus.py +12 -0
  24. cribl_control_plane/models/inputelastic.py +11 -0
  25. cribl_control_plane/models/inputeventhub.py +6 -0
  26. cribl_control_plane/models/inputexec.py +4 -0
  27. cribl_control_plane/models/inputfile.py +6 -0
  28. cribl_control_plane/models/inputfirehose.py +4 -0
  29. cribl_control_plane/models/inputgooglepubsub.py +7 -0
  30. cribl_control_plane/models/inputgrafana.py +8 -0
  31. cribl_control_plane/models/inputhttp.py +4 -0
  32. cribl_control_plane/models/inputhttpraw.py +4 -0
  33. cribl_control_plane/models/inputjournalfiles.py +4 -0
  34. cribl_control_plane/models/inputkafka.py +8 -17
  35. cribl_control_plane/models/inputkinesis.py +15 -0
  36. cribl_control_plane/models/inputkubeevents.py +4 -0
  37. cribl_control_plane/models/inputkubelogs.py +4 -0
  38. cribl_control_plane/models/inputkubemetrics.py +4 -0
  39. cribl_control_plane/models/inputloki.py +4 -0
  40. cribl_control_plane/models/inputmetrics.py +4 -0
  41. cribl_control_plane/models/inputmodeldriventelemetry.py +4 -0
  42. cribl_control_plane/models/inputmsk.py +7 -17
  43. cribl_control_plane/models/inputnetflow.py +4 -0
  44. cribl_control_plane/models/inputoffice365mgmt.py +11 -0
  45. cribl_control_plane/models/inputoffice365msgtrace.py +11 -0
  46. cribl_control_plane/models/inputoffice365service.py +11 -0
  47. cribl_control_plane/models/inputopentelemetry.py +8 -0
  48. cribl_control_plane/models/inputprometheus.py +10 -0
  49. cribl_control_plane/models/inputprometheusrw.py +4 -0
  50. cribl_control_plane/models/inputrawudp.py +4 -0
  51. cribl_control_plane/models/inputs3.py +7 -0
  52. cribl_control_plane/models/inputs3inventory.py +7 -0
  53. cribl_control_plane/models/inputsecuritylake.py +7 -0
  54. cribl_control_plane/models/inputsnmp.py +11 -0
  55. cribl_control_plane/models/inputsplunk.py +9 -0
  56. cribl_control_plane/models/inputsplunkhec.py +4 -0
  57. cribl_control_plane/models/inputsplunksearch.py +7 -0
  58. cribl_control_plane/models/inputsqs.py +9 -0
  59. cribl_control_plane/models/inputsyslog.py +8 -0
  60. cribl_control_plane/models/inputsystemmetrics.py +32 -0
  61. cribl_control_plane/models/inputsystemstate.py +4 -0
  62. cribl_control_plane/models/inputtcp.py +4 -0
  63. cribl_control_plane/models/inputtcpjson.py +4 -0
  64. cribl_control_plane/models/inputwef.py +6 -0
  65. cribl_control_plane/models/inputwindowsmetrics.py +28 -0
  66. cribl_control_plane/models/inputwineventlogs.py +8 -0
  67. cribl_control_plane/models/inputwiz.py +7 -0
  68. cribl_control_plane/models/inputwizwebhook.py +4 -0
  69. cribl_control_plane/models/inputzscalerhec.py +4 -0
  70. cribl_control_plane/models/jobinfo.py +4 -1
  71. cribl_control_plane/models/nodeprovidedinfo.py +11 -1
  72. cribl_control_plane/models/outpostnodeinfo.py +16 -0
  73. cribl_control_plane/models/output.py +77 -72
  74. cribl_control_plane/models/outputazureblob.py +20 -0
  75. cribl_control_plane/models/outputazuredataexplorer.py +28 -0
  76. cribl_control_plane/models/outputazureeventhub.py +17 -0
  77. cribl_control_plane/models/outputazurelogs.py +13 -0
  78. cribl_control_plane/models/outputchronicle.py +13 -0
  79. cribl_control_plane/models/outputclickhouse.py +17 -0
  80. cribl_control_plane/models/outputcloudwatch.py +13 -0
  81. cribl_control_plane/models/outputconfluentcloud.py +24 -18
  82. cribl_control_plane/models/outputcriblhttp.py +15 -0
  83. cribl_control_plane/models/outputcribllake.py +21 -0
  84. cribl_control_plane/models/outputcribltcp.py +12 -0
  85. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +15 -0
  86. cribl_control_plane/models/outputdatabricks.py +411 -0
  87. cribl_control_plane/models/outputdatadog.py +30 -0
  88. cribl_control_plane/models/outputdataset.py +23 -0
  89. cribl_control_plane/models/outputdls3.py +35 -0
  90. cribl_control_plane/models/outputdynatracehttp.py +22 -0
  91. cribl_control_plane/models/outputdynatraceotlp.py +22 -0
  92. cribl_control_plane/models/outputelastic.py +18 -0
  93. cribl_control_plane/models/outputelasticcloud.py +13 -0
  94. cribl_control_plane/models/outputexabeam.py +14 -0
  95. cribl_control_plane/models/outputfilesystem.py +15 -0
  96. cribl_control_plane/models/outputgooglechronicle.py +21 -0
  97. cribl_control_plane/models/outputgooglecloudlogging.py +19 -0
  98. cribl_control_plane/models/outputgooglecloudstorage.py +28 -0
  99. cribl_control_plane/models/outputgooglepubsub.py +13 -0
  100. cribl_control_plane/models/outputgrafanacloud.py +50 -0
  101. cribl_control_plane/models/outputgraphite.py +12 -0
  102. cribl_control_plane/models/outputhoneycomb.py +13 -0
  103. cribl_control_plane/models/outputhumiohec.py +15 -0
  104. cribl_control_plane/models/outputinfluxdb.py +19 -0
  105. cribl_control_plane/models/outputkafka.py +24 -17
  106. cribl_control_plane/models/outputkinesis.py +15 -0
  107. cribl_control_plane/models/outputloki.py +20 -0
  108. cribl_control_plane/models/outputminio.py +28 -0
  109. cribl_control_plane/models/outputmsk.py +23 -17
  110. cribl_control_plane/models/outputnewrelic.py +16 -0
  111. cribl_control_plane/models/outputnewrelicevents.py +16 -0
  112. cribl_control_plane/models/outputopentelemetry.py +22 -0
  113. cribl_control_plane/models/outputprometheus.py +13 -0
  114. cribl_control_plane/models/outputring.py +2 -0
  115. cribl_control_plane/models/outputs3.py +35 -0
  116. cribl_control_plane/models/outputsecuritylake.py +29 -0
  117. cribl_control_plane/models/outputsentinel.py +15 -0
  118. cribl_control_plane/models/outputsentineloneaisiem.py +13 -0
  119. cribl_control_plane/models/outputservicenow.py +21 -0
  120. cribl_control_plane/models/outputsignalfx.py +13 -0
  121. cribl_control_plane/models/outputsns.py +13 -0
  122. cribl_control_plane/models/outputsplunk.py +15 -0
  123. cribl_control_plane/models/outputsplunkhec.py +13 -0
  124. cribl_control_plane/models/outputsplunklb.py +15 -0
  125. cribl_control_plane/models/outputsqs.py +15 -0
  126. cribl_control_plane/models/outputstatsd.py +12 -0
  127. cribl_control_plane/models/outputstatsdext.py +12 -0
  128. cribl_control_plane/models/outputsumologic.py +15 -0
  129. cribl_control_plane/models/outputsyslog.py +24 -0
  130. cribl_control_plane/models/outputtcpjson.py +12 -0
  131. cribl_control_plane/models/outputwavefront.py +13 -0
  132. cribl_control_plane/models/outputwebhook.py +23 -0
  133. cribl_control_plane/models/outputxsiam.py +13 -0
  134. cribl_control_plane/models/packinfo.py +8 -5
  135. cribl_control_plane/models/packinstallinfo.py +8 -5
  136. cribl_control_plane/models/routeconf.py +3 -4
  137. cribl_control_plane/models/runnablejobcollection.py +4 -0
  138. cribl_control_plane/models/updatepacksop.py +25 -0
  139. cribl_control_plane/models/{routecloneconf.py → uploadpackresponse.py} +4 -4
  140. cribl_control_plane/packs.py +202 -7
  141. {cribl_control_plane-0.2.0rc1.dist-info → cribl_control_plane-0.3.0b1.dist-info}/METADATA +39 -16
  142. {cribl_control_plane-0.2.0rc1.dist-info → cribl_control_plane-0.3.0b1.dist-info}/RECORD +143 -141
  143. cribl_control_plane/models/appmode.py +0 -14
  144. {cribl_control_plane-0.2.0rc1.dist-info → cribl_control_plane-0.3.0b1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/inputwiz.py

@@ -29,14 +29,18 @@ class InputWizConnection(BaseModel):
 class InputWizMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
+    # Smart
     SMART = "smart"
+    # Always On
     ALWAYS = "always"
 
 
 class InputWizCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
@@ -137,8 +141,11 @@ class InputWizMetadatum(BaseModel):
 class InputWizRetryType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The algorithm to use when performing HTTP retries"""
 
+    # Disabled
     NONE = "none"
+    # Backoff
     BACKOFF = "backoff"
+    # Static
     STATIC = "static"
 
 
cribl_control_plane/models/inputwizwebhook.py

@@ -29,14 +29,18 @@ class InputWizWebhookConnection(BaseModel):
 class InputWizWebhookMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
+    # Smart
     SMART = "smart"
+    # Always On
     ALWAYS = "always"
 
 
 class InputWizWebhookCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
cribl_control_plane/models/inputzscalerhec.py

@@ -29,14 +29,18 @@ class InputZscalerHecConnection(BaseModel):
 class InputZscalerHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""With Smart mode, PQ will write events to the filesystem only when it detects backpressure from the processing engine. With Always On mode, PQ will always write events directly to the queue before forwarding them to the processing engine."""
 
+    # Smart
     SMART = "smart"
+    # Always On
     ALWAYS = "always"
 
 
 class InputZscalerHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
cribl_control_plane/models/jobinfo.py

@@ -4,13 +4,14 @@ from __future__ import annotations
 from .jobstatus import JobStatus, JobStatusTypedDict
 from .runnablejob import RunnableJob, RunnableJobTypedDict
 from cribl_control_plane.types import BaseModel
-from typing import Optional
+from typing import Dict, Optional
 from typing_extensions import NotRequired, TypedDict
 
 
 class JobInfoTypedDict(TypedDict):
     args: RunnableJobTypedDict
     id: str
+    stats: Dict[str, float]
     status: JobStatusTypedDict
     keep: NotRequired[bool]
 
@@ -20,6 +21,8 @@ class JobInfo(BaseModel):
 
     id: str
 
+    stats: Dict[str, float]
+
     status: JobStatus
 
     keep: Optional[bool] = None
cribl_control_plane/models/nodeprovidedinfo.py

@@ -3,9 +3,10 @@
 from __future__ import annotations
 from .hbcriblinfo import HBCriblInfo, HBCriblInfoTypedDict
 from .heartbeatmetadata import HeartbeatMetadata, HeartbeatMetadataTypedDict
+from .outpostnodeinfo import OutpostNodeInfo, OutpostNodeInfoTypedDict
 from cribl_control_plane.types import BaseModel
 import pydantic
-from typing import List, Optional, Union
+from typing import Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
@@ -19,6 +20,7 @@ class NodeProvidedInfoTags(BaseModel):
 
 class NodeProvidedInfoAwsTypedDict(TypedDict):
     enabled: bool
+    instance_id: str
     region: str
     type: str
     zone: str
@@ -28,6 +30,8 @@ class NodeProvidedInfoAwsTypedDict(TypedDict):
 class NodeProvidedInfoAws(BaseModel):
     enabled: bool
 
+    instance_id: Annotated[str, pydantic.Field(alias="instanceId")]
+
     region: str
 
     type: str
@@ -125,6 +129,7 @@ class NodeProvidedInfoTypedDict(TypedDict):
     architecture: str
     cpus: float
     cribl: HBCriblInfoTypedDict
+    env: Dict[str, str]
     free_disk_space: float
     hostname: str
     node: str
@@ -140,6 +145,7 @@ class NodeProvidedInfoTypedDict(TypedDict):
     local_time: NotRequired[float]
     metadata: NotRequired[HeartbeatMetadataTypedDict]
     os: NotRequired[OsTypedDict]
+    outpost: NotRequired[OutpostNodeInfoTypedDict]
 
 
 class NodeProvidedInfo(BaseModel):
@@ -149,6 +155,8 @@ class NodeProvidedInfo(BaseModel):
 
     cribl: HBCriblInfo
 
+    env: Dict[str, str]
+
     free_disk_space: Annotated[float, pydantic.Field(alias="freeDiskSpace")]
 
     hostname: str
@@ -182,3 +190,5 @@ class NodeProvidedInfo(BaseModel):
     metadata: Optional[HeartbeatMetadata] = None
 
     os: Optional[Os] = None
+
+    outpost: Optional[OutpostNodeInfo] = None
cribl_control_plane/models/outpostnodeinfo.py

@@ -0,0 +1,16 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class OutpostNodeInfoTypedDict(TypedDict):
+    guid: str
+    host: str
+
+
+class OutpostNodeInfo(BaseModel):
+    guid: str
+
+    host: str
cribl_control_plane/models/output.py

@@ -19,6 +19,7 @@ from .outputcrowdstrikenextgensiem import (
     OutputCrowdstrikeNextGenSiem,
     OutputCrowdstrikeNextGenSiemTypedDict,
 )
+from .outputdatabricks import OutputDatabricks, OutputDatabricksTypedDict
 from .outputdatadog import OutputDatadog, OutputDatadogTypedDict
 from .outputdataset import OutputDataset, OutputDatasetTypedDict
 from .outputdefault import OutputDefault, OutputDefaultTypedDict
@@ -81,8 +82,10 @@ from .outputtcpjson import OutputTcpjson, OutputTcpjsonTypedDict
 from .outputwavefront import OutputWavefront, OutputWavefrontTypedDict
 from .outputwebhook import OutputWebhook, OutputWebhookTypedDict
 from .outputxsiam import OutputXsiam, OutputXsiamTypedDict
+from cribl_control_plane.utils import get_discriminator
+from pydantic import Discriminator, Tag
 from typing import Union
-from typing_extensions import TypeAliasType
+from typing_extensions import Annotated, TypeAliasType
 
 
 OutputTypedDict = TypeAliasType(
@@ -132,10 +135,11 @@ OutputTypedDict = TypeAliasType(
         OutputServiceNowTypedDict,
         OutputChronicleTypedDict,
         OutputDynatraceOtlpTypedDict,
-        OutputElasticTypedDict,
         OutputGoogleChronicleTypedDict,
-        OutputCriblLakeTypedDict,
+        OutputElasticTypedDict,
         OutputDatadogTypedDict,
+        OutputCriblLakeTypedDict,
+        OutputDatabricksTypedDict,
         OutputPrometheusTypedDict,
         OutputMskTypedDict,
         OutputSentinelOneAiSiemTypedDict,
@@ -147,8 +151,8 @@ OutputTypedDict = TypeAliasType(
         OutputMinioTypedDict,
         OutputClickHouseTypedDict,
         OutputSecurityLakeTypedDict,
-        OutputDlS3TypedDict,
         OutputS3TypedDict,
+        OutputDlS3TypedDict,
         OutputWebhookTypedDict,
         OutputAzureDataExplorerTypedDict,
         OutputGoogleCloudLoggingTypedDict,
@@ -157,73 +161,74 @@ OutputTypedDict = TypeAliasType(
 )
 
 
-Output = TypeAliasType(
-    "Output",
+Output = Annotated[
     Union[
-        OutputDevnull,
-        OutputDefault,
-        OutputRouter,
-        OutputSnmp,
-        OutputNetflow,
-        OutputDiskSpool,
-        OutputRing,
-        OutputStatsdExt,
-        OutputGraphite,
-        OutputStatsd,
-        OutputGooglePubsub,
-        OutputCriblTCP,
-        OutputSplunk,
-        OutputSns,
-        OutputCloudwatch,
-        OutputAzureEventhub,
-        OutputWavefront,
-        OutputSignalfx,
-        OutputHoneycomb,
-        OutputSumoLogic,
-        OutputCrowdstrikeNextGenSiem,
-        OutputHumioHec,
-        OutputTcpjson,
-        OutputElasticCloud,
-        OutputKinesis,
-        OutputConfluentCloud,
-        OutputKafka,
-        OutputExabeam,
-        OutputNewrelicEvents,
-        OutputAzureLogs,
-        OutputSplunkLb,
-        OutputSyslog,
-        OutputSqs,
-        OutputNewrelic,
-        OutputCriblHTTP,
-        OutputXsiam,
-        OutputFilesystem,
-        OutputDataset,
-        OutputLoki,
-        OutputSplunkHec,
-        OutputDynatraceHTTP,
-        OutputServiceNow,
-        OutputChronicle,
-        OutputDynatraceOtlp,
-        OutputElastic,
-        OutputGoogleChronicle,
-        OutputCriblLake,
-        OutputDatadog,
-        OutputPrometheus,
-        OutputMsk,
-        OutputSentinelOneAiSiem,
-        OutputSentinel,
-        OutputInfluxdb,
-        OutputGoogleCloudStorage,
-        OutputAzureBlob,
-        OutputOpenTelemetry,
-        OutputMinio,
-        OutputClickHouse,
-        OutputSecurityLake,
-        OutputDlS3,
-        OutputS3,
-        OutputWebhook,
-        OutputAzureDataExplorer,
-        OutputGoogleCloudLogging,
-        OutputGrafanaCloud,
+        Annotated[OutputDefault, Tag("default")],
+        Annotated[OutputWebhook, Tag("webhook")],
+        Annotated[OutputSentinel, Tag("sentinel")],
+        Annotated[OutputDevnull, Tag("devnull")],
+        Annotated[OutputSyslog, Tag("syslog")],
+        Annotated[OutputSplunk, Tag("splunk")],
+        Annotated[OutputSplunkLb, Tag("splunk_lb")],
+        Annotated[OutputSplunkHec, Tag("splunk_hec")],
+        Annotated[OutputTcpjson, Tag("tcpjson")],
+        Annotated[OutputWavefront, Tag("wavefront")],
+        Annotated[OutputSignalfx, Tag("signalfx")],
+        Annotated[OutputFilesystem, Tag("filesystem")],
+        Annotated[OutputS3, Tag("s3")],
+        Annotated[OutputAzureBlob, Tag("azure_blob")],
+        Annotated[OutputAzureDataExplorer, Tag("azure_data_explorer")],
+        Annotated[OutputAzureLogs, Tag("azure_logs")],
+        Annotated[OutputKinesis, Tag("kinesis")],
+        Annotated[OutputHoneycomb, Tag("honeycomb")],
+        Annotated[OutputAzureEventhub, Tag("azure_eventhub")],
+        Annotated[OutputGoogleChronicle, Tag("google_chronicle")],
+        Annotated[OutputGoogleCloudStorage, Tag("google_cloud_storage")],
+        Annotated[OutputGoogleCloudLogging, Tag("google_cloud_logging")],
+        Annotated[OutputGooglePubsub, Tag("google_pubsub")],
+        Annotated[OutputExabeam, Tag("exabeam")],
+        Annotated[OutputKafka, Tag("kafka")],
+        Annotated[OutputConfluentCloud, Tag("confluent_cloud")],
+        Annotated[OutputMsk, Tag("msk")],
+        Annotated[OutputElastic, Tag("elastic")],
+        Annotated[OutputElasticCloud, Tag("elastic_cloud")],
+        Annotated[OutputNewrelic, Tag("newrelic")],
+        Annotated[OutputNewrelicEvents, Tag("newrelic_events")],
+        Annotated[OutputInfluxdb, Tag("influxdb")],
+        Annotated[OutputCloudwatch, Tag("cloudwatch")],
+        Annotated[OutputMinio, Tag("minio")],
+        Annotated[OutputStatsd, Tag("statsd")],
+        Annotated[OutputStatsdExt, Tag("statsd_ext")],
+        Annotated[OutputGraphite, Tag("graphite")],
+        Annotated[OutputRouter, Tag("router")],
+        Annotated[OutputSns, Tag("sns")],
+        Annotated[OutputSqs, Tag("sqs")],
+        Annotated[OutputSnmp, Tag("snmp")],
+        Annotated[OutputSumoLogic, Tag("sumo_logic")],
+        Annotated[OutputDatadog, Tag("datadog")],
+        Annotated[OutputGrafanaCloud, Tag("grafana_cloud")],
+        Annotated[OutputLoki, Tag("loki")],
+        Annotated[OutputPrometheus, Tag("prometheus")],
+        Annotated[OutputRing, Tag("ring")],
+        Annotated[OutputOpenTelemetry, Tag("open_telemetry")],
+        Annotated[OutputServiceNow, Tag("service_now")],
+        Annotated[OutputDataset, Tag("dataset")],
+        Annotated[OutputCriblTCP, Tag("cribl_tcp")],
+        Annotated[OutputCriblHTTP, Tag("cribl_http")],
+        Annotated[OutputHumioHec, Tag("humio_hec")],
+        Annotated[OutputCrowdstrikeNextGenSiem, Tag("crowdstrike_next_gen_siem")],
+        Annotated[OutputDlS3, Tag("dl_s3")],
+        Annotated[OutputSecurityLake, Tag("security_lake")],
+        Annotated[OutputCriblLake, Tag("cribl_lake")],
+        Annotated[OutputDiskSpool, Tag("disk_spool")],
+        Annotated[OutputClickHouse, Tag("click_house")],
+        Annotated[OutputXsiam, Tag("xsiam")],
+        Annotated[OutputNetflow, Tag("netflow")],
+        Annotated[OutputDynatraceHTTP, Tag("dynatrace_http")],
+        Annotated[OutputDynatraceOtlp, Tag("dynatrace_otlp")],
+        Annotated[OutputSentinelOneAiSiem, Tag("sentinel_one_ai_siem")],
+        Annotated[OutputChronicle, Tag("chronicle")],
+        Annotated[OutputDatabricks, Tag("databricks")],
     ],
-)
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
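The most consequential change in output.py is the switch from a plain `Union` alias to pydantic v2's tagged, callable-discriminated union: every Destination model is wrapped in `Annotated[..., Tag("<type>")]`, and the `Discriminator(lambda m: get_discriminator(m, "type", "type"))` callable (which presumably reads the `type` field from either a raw dict or a model instance) picks the matching tag before validation, instead of pydantic attempting all 66 members. Below is a minimal, self-contained sketch of the same pattern; the `S3Sink`/`WebhookSink` models and the `pick_type` helper are illustrative stand-ins, not part of this SDK.

from typing import Literal, Union

from pydantic import BaseModel, Discriminator, Tag, TypeAdapter
from typing_extensions import Annotated


class S3Sink(BaseModel):
    type: Literal["s3"]
    bucket: str


class WebhookSink(BaseModel):
    type: Literal["webhook"]
    url: str


def pick_type(value):
    # The discriminator callable receives either the raw input (a dict during
    # validation) or a model instance (during serialization); return its tag.
    if isinstance(value, dict):
        return value.get("type")
    return getattr(value, "type", None)


Sink = Annotated[
    Union[
        Annotated[S3Sink, Tag("s3")],
        Annotated[WebhookSink, Tag("webhook")],
    ],
    Discriminator(pick_type),
]

# Validation dispatches on the tag returned by pick_type.
sink = TypeAdapter(Sink).validate_python({"type": "webhook", "url": "https://example.com"})
print(type(sink).__name__)  # WebhookSink

A practical effect of the discriminated form is that an invalid payload produces errors scoped to the single model selected by its `type` value, rather than one error per union member.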
cribl_control_plane/models/outputazureblob.py

@@ -18,22 +18,29 @@ class OutputAzureBlobType(str, Enum):
 class OutputAzureBlobDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""
 
+    # JSON
     JSON = "json"
+    # Raw
     RAW = "raw"
+    # Parquet
     PARQUET = "parquet"
 
 
 class OutputAzureBlobBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
 class OutputAzureBlobDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
@@ -45,10 +52,15 @@ class OutputAzureBlobAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMet
 
 
 class BlobAccessTier(str, Enum, metaclass=utils.OpenEnumMeta):
+    # Default account access tier
     INFERRED = "Inferred"
+    # Hot tier
     HOT = "Hot"
+    # Cool tier
     COOL = "Cool"
+    # Cold tier
     COLD = "Cold"
+    # Archive tier
     ARCHIVE = "Archive"
 
 
@@ -62,23 +74,31 @@ class OutputAzureBlobCompression(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputAzureBlobCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""
 
+    # Best Speed
     BEST_SPEED = "best_speed"
+    # Normal
     NORMAL = "normal"
+    # Best Compression
     BEST_COMPRESSION = "best_compression"
 
 
 class OutputAzureBlobParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""
 
+    # 1.0
     PARQUET_1_0 = "PARQUET_1_0"
+    # 2.4
     PARQUET_2_4 = "PARQUET_2_4"
+    # 2.6
     PARQUET_2_6 = "PARQUET_2_6"
 
 
 class OutputAzureBlobDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
 
+    # V1
     DATA_PAGE_V1 = "DATA_PAGE_V1"
+    # V2
     DATA_PAGE_V2 = "DATA_PAGE_V2"
 
 
cribl_control_plane/models/outputazuredataexplorer.py

@@ -16,7 +16,9 @@ class OutputAzureDataExplorerType(str, Enum):
 
 
 class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    # Batching
     BATCHING = "batching"
+    # Streaming
     STREAMING = "streaming"
 
 
@@ -33,8 +35,11 @@ class OutputAzureDataExplorerAuthenticationMethod(
 ):
     r"""The type of OAuth 2.0 client credentials grant flow to use"""
 
+    # Client secret
     CLIENT_SECRET = "clientSecret"
+    # Client secret (text secret)
     CLIENT_TEXT_SECRET = "clientTextSecret"
+    # Certificate
     CERTIFICATE = "certificate"
 
 
@@ -55,16 +60,22 @@ class OutputAzureDataExplorerBackpressureBehavior(
 ):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
 class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""
 
+    # JSON
     JSON = "json"
+    # Raw
     RAW = "raw"
+    # Parquet
     PARQUET = "parquet"
 
 
@@ -73,12 +84,16 @@ class OutputAzureDataExplorerDiskSpaceProtection(
 ):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
 class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
+    # drop-by
     DROP_BY = "dropBy"
+    # ingest-by
     INGEST_BY = "ingestBy"
 
 
@@ -106,16 +121,22 @@ class IngestIfNotExist(BaseModel):
 class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
 
+    # FailuresOnly
     FAILURES_ONLY = "failuresOnly"
+    # DoNotReport
     DO_NOT_REPORT = "doNotReport"
+    # FailuresAndSuccesses
     FAILURES_AND_SUCCESSES = "failuresAndSuccesses"
 
 
 class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Target of the ingestion status reporting. Defaults to Queue."""
 
+    # Queue
     QUEUE = "queue"
+    # Table
     TABLE = "table"
+    # QueueAndTable
     QUEUE_AND_TABLE = "queueAndTable"
 
 
@@ -198,22 +219,29 @@ class OutputAzureDataExplorerPqCompressCompression(
 ):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 
cribl_control_plane/models/outputazureeventhub.py

@@ -18,20 +18,27 @@ class OutputAzureEventhubType(str, Enum):
 class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments"""
 
+    # Leader
     ONE = 1
+    # None
     ZERO = 0
+    # All
     MINUS_1 = -1
 
 
 class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
 
+    # JSON
     JSON = "json"
+    # Field _raw
     RAW = "raw"
 
 
 class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+    # PLAIN
     PLAIN = "plain"
+    # OAUTHBEARER
     OAUTHBEARER = "oauthbearer"
 
 
@@ -71,30 +78,40 @@ class OutputAzureEventhubTLSSettingsClientSide(BaseModel):
 class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
 class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 
cribl_control_plane/models/outputazurelogs.py

@@ -29,8 +29,11 @@ class OutputAzureLogsExtraHTTPHeader(BaseModel):
 class OutputAzureLogsFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
@@ -91,8 +94,11 @@ class OutputAzureLogsTimeoutRetrySettings(BaseModel):
 class OutputAzureLogsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -106,22 +112,29 @@ class OutputAzureLogsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMet
 class OutputAzureLogsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputAzureLogsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
 
 