cribl-control-plane 0.1.0b2__py3-none-any.whl → 0.2.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of cribl-control-plane has been flagged as a potentially problematic release.

Files changed (142)
  1. cribl_control_plane/_hooks/clientcredentials.py +91 -41
  2. cribl_control_plane/_version.py +4 -4
  3. cribl_control_plane/errors/apierror.py +1 -1
  4. cribl_control_plane/errors/criblcontrolplaneerror.py +1 -1
  5. cribl_control_plane/errors/error.py +1 -1
  6. cribl_control_plane/errors/healthstatus_error.py +1 -1
  7. cribl_control_plane/errors/no_response_error.py +1 -1
  8. cribl_control_plane/errors/responsevalidationerror.py +1 -1
  9. cribl_control_plane/httpclient.py +0 -1
  10. cribl_control_plane/models/__init__.py +68 -7
  11. cribl_control_plane/models/createversionundoop.py +3 -3
  12. cribl_control_plane/models/distributedsummary.py +6 -0
  13. cribl_control_plane/models/gitinfo.py +14 -3
  14. cribl_control_plane/models/input.py +65 -63
  15. cribl_control_plane/models/inputappscope.py +4 -0
  16. cribl_control_plane/models/inputazureblob.py +4 -0
  17. cribl_control_plane/models/inputcollection.py +4 -0
  18. cribl_control_plane/models/inputconfluentcloud.py +8 -0
  19. cribl_control_plane/models/inputcribl.py +4 -0
  20. cribl_control_plane/models/inputcriblhttp.py +4 -0
  21. cribl_control_plane/models/inputcribllakehttp.py +4 -0
  22. cribl_control_plane/models/inputcriblmetrics.py +4 -0
  23. cribl_control_plane/models/inputcribltcp.py +4 -0
  24. cribl_control_plane/models/inputcrowdstrike.py +7 -0
  25. cribl_control_plane/models/inputdatadogagent.py +4 -0
  26. cribl_control_plane/models/inputdatagen.py +4 -0
  27. cribl_control_plane/models/inputedgeprometheus.py +12 -0
  28. cribl_control_plane/models/inputelastic.py +11 -0
  29. cribl_control_plane/models/inputeventhub.py +6 -0
  30. cribl_control_plane/models/inputexec.py +4 -0
  31. cribl_control_plane/models/inputfile.py +6 -0
  32. cribl_control_plane/models/inputfirehose.py +4 -0
  33. cribl_control_plane/models/inputgooglepubsub.py +7 -0
  34. cribl_control_plane/models/inputgrafana.py +8 -0
  35. cribl_control_plane/models/inputhttp.py +4 -0
  36. cribl_control_plane/models/inputhttpraw.py +4 -0
  37. cribl_control_plane/models/inputjournalfiles.py +4 -0
  38. cribl_control_plane/models/inputkafka.py +8 -0
  39. cribl_control_plane/models/inputkinesis.py +15 -0
  40. cribl_control_plane/models/inputkubeevents.py +4 -0
  41. cribl_control_plane/models/inputkubelogs.py +4 -0
  42. cribl_control_plane/models/inputkubemetrics.py +4 -0
  43. cribl_control_plane/models/inputloki.py +4 -0
  44. cribl_control_plane/models/inputmetrics.py +4 -0
  45. cribl_control_plane/models/inputmodeldriventelemetry.py +4 -0
  46. cribl_control_plane/models/inputmsk.py +7 -0
  47. cribl_control_plane/models/inputnetflow.py +4 -0
  48. cribl_control_plane/models/inputoffice365mgmt.py +11 -0
  49. cribl_control_plane/models/inputoffice365msgtrace.py +11 -0
  50. cribl_control_plane/models/inputoffice365service.py +11 -0
  51. cribl_control_plane/models/inputopentelemetry.py +8 -0
  52. cribl_control_plane/models/inputprometheus.py +10 -0
  53. cribl_control_plane/models/inputprometheusrw.py +4 -0
  54. cribl_control_plane/models/inputrawudp.py +4 -0
  55. cribl_control_plane/models/inputs3.py +7 -0
  56. cribl_control_plane/models/inputs3inventory.py +7 -0
  57. cribl_control_plane/models/inputsecuritylake.py +7 -0
  58. cribl_control_plane/models/inputsnmp.py +11 -0
  59. cribl_control_plane/models/inputsplunk.py +9 -0
  60. cribl_control_plane/models/inputsplunkhec.py +4 -0
  61. cribl_control_plane/models/inputsplunksearch.py +7 -0
  62. cribl_control_plane/models/inputsqs.py +17 -10
  63. cribl_control_plane/models/inputsyslog.py +8 -0
  64. cribl_control_plane/models/inputsystemmetrics.py +32 -0
  65. cribl_control_plane/models/inputsystemstate.py +4 -0
  66. cribl_control_plane/models/inputtcp.py +4 -0
  67. cribl_control_plane/models/inputtcpjson.py +4 -0
  68. cribl_control_plane/models/inputwef.py +6 -0
  69. cribl_control_plane/models/inputwindowsmetrics.py +28 -0
  70. cribl_control_plane/models/inputwineventlogs.py +8 -0
  71. cribl_control_plane/models/inputwiz.py +7 -0
  72. cribl_control_plane/models/inputwizwebhook.py +4 -0
  73. cribl_control_plane/models/inputzscalerhec.py +4 -0
  74. cribl_control_plane/models/jobinfo.py +4 -1
  75. cribl_control_plane/models/nodeprovidedinfo.py +4 -1
  76. cribl_control_plane/models/output.py +74 -69
  77. cribl_control_plane/models/outputazureblob.py +20 -0
  78. cribl_control_plane/models/outputazuredataexplorer.py +28 -0
  79. cribl_control_plane/models/outputazureeventhub.py +17 -0
  80. cribl_control_plane/models/outputazurelogs.py +13 -0
  81. cribl_control_plane/models/outputchronicle.py +444 -0
  82. cribl_control_plane/models/outputclickhouse.py +17 -0
  83. cribl_control_plane/models/outputcloudwatch.py +13 -0
  84. cribl_control_plane/models/outputconfluentcloud.py +24 -0
  85. cribl_control_plane/models/outputcriblhttp.py +15 -0
  86. cribl_control_plane/models/outputcribllake.py +21 -0
  87. cribl_control_plane/models/outputcribltcp.py +12 -0
  88. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +15 -0
  89. cribl_control_plane/models/outputdatabricks.py +9 -0
  90. cribl_control_plane/models/outputdatadog.py +30 -0
  91. cribl_control_plane/models/outputdataset.py +23 -0
  92. cribl_control_plane/models/outputdls3.py +35 -0
  93. cribl_control_plane/models/outputdynatracehttp.py +22 -0
  94. cribl_control_plane/models/outputdynatraceotlp.py +22 -0
  95. cribl_control_plane/models/outputelastic.py +18 -0
  96. cribl_control_plane/models/outputelasticcloud.py +13 -0
  97. cribl_control_plane/models/outputexabeam.py +14 -0
  98. cribl_control_plane/models/outputfilesystem.py +15 -0
  99. cribl_control_plane/models/outputgooglechronicle.py +26 -4
  100. cribl_control_plane/models/outputgooglecloudlogging.py +28 -4
  101. cribl_control_plane/models/outputgooglecloudstorage.py +28 -0
  102. cribl_control_plane/models/outputgooglepubsub.py +13 -0
  103. cribl_control_plane/models/outputgrafanacloud.py +50 -0
  104. cribl_control_plane/models/outputgraphite.py +12 -0
  105. cribl_control_plane/models/outputhoneycomb.py +13 -0
  106. cribl_control_plane/models/outputhumiohec.py +15 -0
  107. cribl_control_plane/models/outputinfluxdb.py +19 -0
  108. cribl_control_plane/models/outputkafka.py +24 -0
  109. cribl_control_plane/models/outputkinesis.py +15 -0
  110. cribl_control_plane/models/outputloki.py +20 -0
  111. cribl_control_plane/models/outputminio.py +28 -0
  112. cribl_control_plane/models/outputmsk.py +23 -0
  113. cribl_control_plane/models/outputnewrelic.py +16 -0
  114. cribl_control_plane/models/outputnewrelicevents.py +16 -0
  115. cribl_control_plane/models/outputopentelemetry.py +22 -0
  116. cribl_control_plane/models/outputprometheus.py +13 -0
  117. cribl_control_plane/models/outputring.py +2 -0
  118. cribl_control_plane/models/outputs3.py +35 -0
  119. cribl_control_plane/models/outputsecuritylake.py +29 -0
  120. cribl_control_plane/models/outputsentinel.py +15 -0
  121. cribl_control_plane/models/outputsentineloneaisiem.py +13 -0
  122. cribl_control_plane/models/outputservicenow.py +21 -0
  123. cribl_control_plane/models/outputsignalfx.py +13 -0
  124. cribl_control_plane/models/outputsns.py +13 -0
  125. cribl_control_plane/models/outputsplunk.py +15 -0
  126. cribl_control_plane/models/outputsplunkhec.py +13 -0
  127. cribl_control_plane/models/outputsplunklb.py +15 -0
  128. cribl_control_plane/models/outputsqs.py +23 -10
  129. cribl_control_plane/models/outputstatsd.py +12 -0
  130. cribl_control_plane/models/outputstatsdext.py +12 -0
  131. cribl_control_plane/models/outputsumologic.py +15 -0
  132. cribl_control_plane/models/outputsyslog.py +24 -0
  133. cribl_control_plane/models/outputtcpjson.py +12 -0
  134. cribl_control_plane/models/outputwavefront.py +13 -0
  135. cribl_control_plane/models/outputwebhook.py +23 -0
  136. cribl_control_plane/models/outputxsiam.py +13 -0
  137. cribl_control_plane/models/packinfo.py +3 -0
  138. cribl_control_plane/models/packinstallinfo.py +3 -0
  139. cribl_control_plane/models/runnablejobcollection.py +4 -0
  140. {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.2.0b1.dist-info}/METADATA +1 -8
  141. {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.2.0b1.dist-info}/RECORD +142 -141
  142. {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.2.0b1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputchronicle.py
@@ -0,0 +1,444 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputChronicleType(str, Enum):
+    CHRONICLE = "chronicle"
+
+
+class OutputChronicleAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    SERVICE_ACCOUNT = "serviceAccount"
+    SERVICE_ACCOUNT_SECRET = "serviceAccountSecret"
+
+
+class OutputChronicleResponseRetrySettingTypedDict(TypedDict):
+    http_status: float
+    r"""The HTTP response status code that will trigger retries"""
+    initial_backoff: NotRequired[float]
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+    backoff_rate: NotRequired[float]
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+    max_backoff: NotRequired[float]
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleResponseRetrySetting(BaseModel):
+    http_status: Annotated[float, pydantic.Field(alias="httpStatus")]
+    r"""The HTTP response status code that will trigger retries"""
+
+    initial_backoff: Annotated[
+        Optional[float], pydantic.Field(alias="initialBackoff")
+    ] = 1000
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+
+    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+
+    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleTimeoutRetrySettingsTypedDict(TypedDict):
+    timeout_retry: NotRequired[bool]
+    initial_backoff: NotRequired[float]
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+    backoff_rate: NotRequired[float]
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+    max_backoff: NotRequired[float]
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleTimeoutRetrySettings(BaseModel):
+    timeout_retry: Annotated[Optional[bool], pydantic.Field(alias="timeoutRetry")] = (
+        False
+    )
+
+    initial_backoff: Annotated[
+        Optional[float], pydantic.Field(alias="initialBackoff")
+    ] = 1000
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+
+    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+
+    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleExtraHTTPHeaderTypedDict(TypedDict):
+    value: str
+    name: NotRequired[str]
+
+
+class OutputChronicleExtraHTTPHeader(BaseModel):
+    value: str
+
+    name: Optional[str] = None
+
+
+class OutputChronicleFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+
+    # Payload
+    PAYLOAD = "payload"
+    # Payload + Headers
+    PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
+    NONE = "none"
+
+
+class OutputChronicleBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    # Block
+    BLOCK = "block"
+    # Drop
+    DROP = "drop"
+    # Persistent Queue
+    QUEUE = "queue"
+
+
+class OutputChronicleCustomLabelTypedDict(TypedDict):
+    key: str
+    value: str
+
+
+class OutputChronicleCustomLabel(BaseModel):
+    key: str
+
+    value: str
+
+
+class OutputChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    # None
+    NONE = "none"
+    # Gzip
+    GZIP = "gzip"
+
+
+class OutputChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    # Block
+    BLOCK = "block"
+    # Drop new data
+    DROP = "drop"
+
+
+class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
+class OutputChroniclePqControlsTypedDict(TypedDict):
+    pass
+
+
+class OutputChroniclePqControls(BaseModel):
+    pass
+
+
+class OutputChronicleTypedDict(TypedDict):
+    type: OutputChronicleType
+    region: str
+    r"""Regional endpoint to send events to"""
+    log_type: str
+    r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
+    gcp_project_id: str
+    r"""The Google Cloud Platform (GCP) project ID to send events to"""
+    gcp_instance: str
+    r"""The Google Cloud Platform (GCP) instance to send events to. This is the Chronicle customer uuid."""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    api_version: NotRequired[str]
+    authentication_method: NotRequired[OutputChronicleAuthenticationMethod]
+    response_retry_settings: NotRequired[
+        List[OutputChronicleResponseRetrySettingTypedDict]
+    ]
+    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
+    timeout_retry_settings: NotRequired[OutputChronicleTimeoutRetrySettingsTypedDict]
+    response_honor_retry_after_header: NotRequired[bool]
+    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+    concurrency: NotRequired[float]
+    r"""Maximum number of ongoing requests before blocking"""
+    max_payload_size_kb: NotRequired[float]
+    r"""Maximum size, in KB, of the request body"""
+    max_payload_events: NotRequired[float]
+    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
+    compress: NotRequired[bool]
+    r"""Compress the payload body before sending"""
+    reject_unauthorized: NotRequired[bool]
+    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
+    Enabled by default. When this setting is also present in TLS Settings (Client Side),
+    that value will take precedence.
+    """
+    timeout_sec: NotRequired[float]
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+    flush_period_sec: NotRequired[float]
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
+    extra_http_headers: NotRequired[List[OutputChronicleExtraHTTPHeaderTypedDict]]
+    r"""Headers to add to all events"""
+    failed_request_logging_mode: NotRequired[OutputChronicleFailedRequestLoggingMode]
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+    safe_headers: NotRequired[List[str]]
+    r"""List of headers that are safe to log in plain text"""
+    use_round_robin_dns: NotRequired[bool]
+    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
+    on_backpressure: NotRequired[OutputChronicleBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    total_memory_limit_kb: NotRequired[float]
+    r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
+    ingestion_method: NotRequired[str]
+    namespace: NotRequired[str]
+    r"""User-configured environment namespace to identify the data domain the logs originated from. This namespace is used as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
+    log_text_field: NotRequired[str]
+    r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
+    custom_labels: NotRequired[List[OutputChronicleCustomLabelTypedDict]]
+    r"""Custom labels to be added to every event"""
+    description: NotRequired[str]
+    service_account_credentials: NotRequired[str]
+    r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
+    service_account_credentials_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    pq_max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+    pq_max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    pq_path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+    pq_compress: NotRequired[OutputChronicleCompression]
+    r"""Codec to use to compress the persisted data"""
+    pq_on_backpressure: NotRequired[OutputChronicleQueueFullBehavior]
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputChronicleMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_controls: NotRequired[OutputChroniclePqControlsTypedDict]
+
+
+class OutputChronicle(BaseModel):
+    type: OutputChronicleType
+
+    region: str
+    r"""Regional endpoint to send events to"""
+
+    log_type: Annotated[str, pydantic.Field(alias="logType")]
+    r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
+
+    gcp_project_id: Annotated[str, pydantic.Field(alias="gcpProjectId")]
+    r"""The Google Cloud Platform (GCP) project ID to send events to"""
+
+    gcp_instance: Annotated[str, pydantic.Field(alias="gcpInstance")]
+    r"""The Google Cloud Platform (GCP) instance to send events to. This is the Chronicle customer uuid."""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    api_version: Annotated[Optional[str], pydantic.Field(alias="apiVersion")] = (
+        "v1alpha"
+    )
+
+    authentication_method: Annotated[
+        Annotated[
+            Optional[OutputChronicleAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authenticationMethod"),
+    ] = OutputChronicleAuthenticationMethod.SERVICE_ACCOUNT
+
+    response_retry_settings: Annotated[
+        Optional[List[OutputChronicleResponseRetrySetting]],
+        pydantic.Field(alias="responseRetrySettings"),
+    ] = None
+    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
+
+    timeout_retry_settings: Annotated[
+        Optional[OutputChronicleTimeoutRetrySettings],
+        pydantic.Field(alias="timeoutRetrySettings"),
+    ] = None
+
+    response_honor_retry_after_header: Annotated[
+        Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
+    ] = True
+    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+
+    concurrency: Optional[float] = 5
+    r"""Maximum number of ongoing requests before blocking"""
+
+    max_payload_size_kb: Annotated[
+        Optional[float], pydantic.Field(alias="maxPayloadSizeKB")
+    ] = 1024
+    r"""Maximum size, in KB, of the request body"""
+
+    max_payload_events: Annotated[
+        Optional[float], pydantic.Field(alias="maxPayloadEvents")
+    ] = 0
+    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
+
+    compress: Optional[bool] = True
+    r"""Compress the payload body before sending"""
+
+    reject_unauthorized: Annotated[
+        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+    ] = True
+    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
+    Enabled by default. When this setting is also present in TLS Settings (Client Side),
+    that value will take precedence.
+    """
+
+    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 90
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+
+    flush_period_sec: Annotated[
+        Optional[float], pydantic.Field(alias="flushPeriodSec")
+    ] = 1
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
+
+    extra_http_headers: Annotated[
+        Optional[List[OutputChronicleExtraHTTPHeader]],
+        pydantic.Field(alias="extraHttpHeaders"),
+    ] = None
+    r"""Headers to add to all events"""
+
+    failed_request_logging_mode: Annotated[
+        Annotated[
+            Optional[OutputChronicleFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="failedRequestLoggingMode"),
+    ] = OutputChronicleFailedRequestLoggingMode.NONE
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+
+    safe_headers: Annotated[
+        Optional[List[str]], pydantic.Field(alias="safeHeaders")
+    ] = None
+    r"""List of headers that are safe to log in plain text"""
+
+    use_round_robin_dns: Annotated[
+        Optional[bool], pydantic.Field(alias="useRoundRobinDns")
+    ] = False
+    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputChronicleBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputChronicleBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    total_memory_limit_kb: Annotated[
+        Optional[float], pydantic.Field(alias="totalMemoryLimitKB")
+    ] = None
+    r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
+
+    ingestion_method: Annotated[
+        Optional[str], pydantic.Field(alias="ingestionMethod")
+    ] = "ImportLogs"
+
+    namespace: Optional[str] = None
+    r"""User-configured environment namespace to identify the data domain the logs originated from. This namespace is used as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
+
+    log_text_field: Annotated[Optional[str], pydantic.Field(alias="logTextField")] = (
+        None
+    )
+    r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
+
+    custom_labels: Annotated[
+        Optional[List[OutputChronicleCustomLabel]], pydantic.Field(alias="customLabels")
+    ] = None
+    r"""Custom labels to be added to every event"""
+
+    description: Optional[str] = None
+
+    service_account_credentials: Annotated[
+        Optional[str], pydantic.Field(alias="serviceAccountCredentials")
+    ] = None
+    r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
+
+    service_account_credentials_secret: Annotated[
+        Optional[str], pydantic.Field(alias="serviceAccountCredentialsSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
+
+    pq_max_file_size: Annotated[
+        Optional[str], pydantic.Field(alias="pqMaxFileSize")
+    ] = "1 MB"
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+
+    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
+        "$CRIBL_HOME/state/queues"
+    )
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+
+    pq_compress: Annotated[
+        Annotated[
+            Optional[OutputChronicleCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
+    ] = OutputChronicleCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+    pq_on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputChronicleQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
+    ] = OutputChronicleQueueFullBehavior.BLOCK
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputChronicleMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_controls: Annotated[
+        Optional[OutputChroniclePqControls], pydantic.Field(alias="pqControls")
+    ] = None
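For orientation, the only required fields on the new OutputChronicle model are the destination type plus region, logType, gcpProjectId, and gcpInstance; everything else is optional or carries a generated default. A minimal construction sketch follows (hypothetical usage, not part of the diff: it assumes the class is re-exported from cribl_control_plane.models and that the generated BaseModel accepts the snake_case field names shown above in place of the wire aliases):

# Hypothetical usage sketch for the new OutputChronicle model.
# Assumes re-export from cribl_control_plane.models and that the generated
# BaseModel allows population by field name as well as by alias.
from cribl_control_plane.models import OutputChronicle, OutputChronicleType

dest = OutputChronicle(
    type=OutputChronicleType.CHRONICLE,
    region="us",                                    # regional endpoint (example value)
    log_type="CRIBL_LOGS",                          # serialized as "logType"
    gcp_project_id="my-gcp-project",                # serialized as "gcpProjectId"
    gcp_instance="00000000-0000-0000-0000-000000000000",  # Chronicle customer UUID (placeholder)
)

# Unset optional fields keep their generated defaults, e.g. api_version="v1alpha",
# ingestion_method="ImportLogs", on_backpressure=BLOCK, pq_mode=ERROR.
print(dest.model_dump(by_alias=True, exclude_none=True))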
cribl_control_plane/models/outputclickhouse.py
@@ -28,14 +28,18 @@ class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta
 class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

+    # JSONCompactEachRowWithNames
     JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
+    # JSONEachRow
     JSON_EACH_ROW = "json-each-row"


 class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How event fields are mapped to ClickHouse columns."""

+    # Automatic
     AUTOMATIC = "automatic"
+    # Custom
     CUSTOM = "custom"


@@ -125,8 +129,11 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
 class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"


@@ -187,8 +194,11 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
 class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"


@@ -247,22 +257,29 @@ class ColumnMapping(BaseModel):
 class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"


 class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"


 class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"


cribl_control_plane/models/outputcloudwatch.py
@@ -18,38 +18,51 @@ class OutputCloudwatchType(str, Enum):
 class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

+    # Auto
     AUTO = "auto"
+    # Manual
     MANUAL = "manual"
+    # Secret Key pair
     SECRET = "secret"


 class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"


 class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"


 class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"


 class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"


cribl_control_plane/models/outputconfluentcloud.py
@@ -101,25 +101,35 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
 class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""

+    # Leader
     ONE = 1
+    # None
     ZERO = 0
+    # All
     MINUS_1 = -1


 class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""

+    # JSON
     JSON = "json"
+    # Field _raw
     RAW = "raw"
+    # Protobuf
     PROTOBUF = "protobuf"


 class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""

+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
+    # Snappy
     SNAPPY = "snappy"
+    # LZ4
     LZ4 = "lz4"


@@ -288,9 +298,13 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):


 class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+    # PLAIN
     PLAIN = "plain"
+    # SCRAM-SHA-256
     SCRAM_SHA_256 = "scram-sha-256"
+    # SCRAM-SHA-512
     SCRAM_SHA_512 = "scram-sha-512"
+    # GSSAPI/Kerberos
     KERBEROS = "kerberos"


@@ -322,8 +336,11 @@ class OutputConfluentCloudAuthentication(BaseModel):
 class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"


@@ -332,22 +349,29 @@ class OutputConfluentCloudPqCompressCompression(
 ):
     r"""Codec to use to compress the persisted data"""

+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"


 class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"


 class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"


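The hunks above only add comments recording the UI label for each enum member; the member names and values are unchanged. As a quick sanity check of the labeled acknowledgment values (hypothetical usage, assuming the enum is re-exported from cribl_control_plane.models; the values are taken directly from the diff):

# Hypothetical check of the Confluent Cloud acknowledgment values.
from cribl_control_plane.models import OutputConfluentCloudAcknowledgments as Acks

assert Acks.ONE == 1        # labeled "Leader": wait for the partition leader only
assert Acks.ZERO == 0       # labeled "None": fire and forget
assert Acks.MINUS_1 == -1   # labeled "All": wait for all in-sync replicas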