feldera 0.27.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
feldera/rest/errors.py ADDED
@@ -0,0 +1,55 @@
+ from requests import Response
+ import json
+
+
+ class FelderaError(Exception):
+     """
+     Generic class for Feldera error handling
+     """
+
+     def __init__(self, message: str) -> None:
+         self.message = message
+         super().__init__(self.message)
+
+     def __str__(self) -> str:
+         return f"FelderaError. Error message: {self.message}"
+
+
+ class FelderaAPIError(FelderaError):
+     """Error sent by the Feldera API"""
+
+     def __init__(self, error: str, request: Response) -> None:
+         self.status_code = request.status_code
+         self.error = error
+         self.error_code = None
+         self.message = None
+         self.details = None
+
+         if request.text:
+             try:
+                 json_data = json.loads(request.text)
+                 self.message = json_data.get("message")
+                 self.details = json_data.get("details")
+                 self.error_code = json_data.get("error_code")
+             except json.JSONDecodeError:
+                 self.message = request.text
+
+     def __str__(self) -> str:
+         if self.error_code:
+             return f"FelderaAPIError: {self.error}\n Error code: {self.error_code}\n Error message: {self.message}\n Details: {self.details}"
+         else:
+             return f"FelderaAPIError: {self.error}\n {self.message}"
+
+
+ class FelderaTimeoutError(FelderaError):
+     """Error when a Feldera operation takes longer than expected"""
+
+     def __str__(self) -> str:
+         return f"FelderaTimeoutError: {self.message}"
+
+
+ class FelderaCommunicationError(FelderaError):
+     """Error when the connection to Feldera fails"""
+
+     def __str__(self) -> str:
+         return f"FelderaCommunicationError: {self.message}"
feldera/rest/feldera_client.py ADDED
@@ -0,0 +1,377 @@
+ from typing import Optional
+ import logging
+ import time
+ import json
+ from decimal import Decimal
+
+ from feldera.rest.config import Config
+ from feldera.rest.pipeline import Pipeline
+ from feldera.rest._httprequests import HttpRequests
+
+
+ def _prepare_boolean_input(value: bool) -> str:
+     return "true" if value else "false"
+
+
+ class FelderaClient:
+     """
+     A client for the Feldera HTTP API
+
+     A client instance is needed for every Feldera API method to know the location of
+     Feldera and its permissions.
+     """
+
+     def __init__(
+         self,
+         url: str,
+         api_key: Optional[str] = None,
+         timeout: Optional[int] = None,
+     ) -> None:
+         """
+         :param url: The URL of the Feldera API (e.g., https://try.feldera.com)
+         :param api_key: The optional API key for Feldera
+         :param timeout: (optional) The amount of time in seconds that the client will wait for a response before timing
+             out.
+         """
+
+         self.config = Config(url, api_key, timeout)
+         self.http = HttpRequests(self.config)
+
+         try:
+             self.pipelines()
+         except Exception as e:
+             logging.error(f"Failed to connect to Feldera API: {e}")
+             raise
+
+     def get_pipeline(self, pipeline_name: str) -> Pipeline:
+         """
+         Get a pipeline by name
+
+         :param pipeline_name: The name of the pipeline
+         """
+
+         resp = self.http.get(f"/pipelines/{pipeline_name}")
+
+         return Pipeline.from_dict(resp)
+
+     def get_runtime_config(self, pipeline_name: str) -> dict:
+         """
+         Get the runtime config of a pipeline by name
+
+         :param pipeline_name: The name of the pipeline
+         """
+
+         resp: dict = self.http.get(f"/pipelines/{pipeline_name}")
+
+         return resp.get("runtime_config")
+
+     def pipelines(self) -> list[Pipeline]:
+         """
+         Get all pipelines
+         """
+
+         resp = self.http.get(
+             path="/pipelines",
+         )
+
+         return [Pipeline.from_dict(pipeline) for pipeline in resp]
+
+     def __wait_for_compilation(self, name: str) -> Pipeline:
+         wait = ["Pending", "CompilingSql", "CompilingRust"]
+
+         while True:
+             p = self.get_pipeline(name)
+             status = p.program_status
+
+             if status == "Success":
+                 return p
+             elif status not in wait:
+                 # TODO: return a more detailed error message
+                 raise RuntimeError(f"The program failed to compile: {status}")
+
+             logging.debug("still compiling %s, waiting for 100 more milliseconds", name)
+             time.sleep(0.1)
+
+     def create_pipeline(self, pipeline: Pipeline) -> Pipeline:
+         """
+         Create a pipeline if it doesn't exist and wait for it to compile
+
+
+         :param pipeline: The pipeline to create
+         """
+
+         body = {
+             "name": pipeline.name,
+             "program_code": pipeline.program_code,
+             "program_config": pipeline.program_config,
+             "runtime_config": pipeline.runtime_config,
+             "description": pipeline.description or "",
+         }
+
+         self.http.post(
+             path="/pipelines",
+             body=body,
+         )
+
+         return self.__wait_for_compilation(pipeline.name)
+
+     def create_or_update_pipeline(self, pipeline: Pipeline) -> Pipeline:
+         """
+         Create a pipeline if it doesn't exist, or update it, and wait for it to compile
+         """
+
+         body = {
+             "name": pipeline.name,
+             "program_code": pipeline.program_code,
+             "program_config": pipeline.program_config,
+             "runtime_config": pipeline.runtime_config,
+             "description": pipeline.description or "",
+         }
+
+         self.http.put(
+             path=f"/pipelines/{pipeline.name}",
+             body=body,
+         )
+
+         return self.__wait_for_compilation(pipeline.name)
+
+     def patch_pipeline(self, name: str, sql: str):
+         """
+         Partially update a pipeline: replace its SQL code, leaving other fields unchanged
+
+         :param name: The name of the pipeline
+         :param sql: The new SQL code; replaces the pipeline's existing SQL.
+         """
+
+         self.http.patch(
+             path=f"/pipelines/{name}",
+             body={"program_code": sql},
+         )
+
+     def delete_pipeline(self, name: str):
+         """
+         Delete a pipeline by name
+
+         :param name: The name of the pipeline
+         """
+         self.http.delete(
+             path=f"/pipelines/{name}",
+         )
+
+     def get_pipeline_stats(self, name: str) -> dict:
+         """
+         Get the pipeline metrics and performance counters
+
+         :param name: The name of the pipeline
+         """
+
+         resp = self.http.get(
+             path=f"/pipelines/{name}/stats",
+         )
+
+         return resp
+
+     def start_pipeline(self, pipeline_name: str):
+         """
+         Start a pipeline and wait until it is running
+
+         :param pipeline_name: The name of the pipeline to start
+         """
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/start",
+         )
+
+         while True:
+             status = self.get_pipeline(pipeline_name).deployment_status
+
+             if status == "Running":
+                 break
+             elif status == "Failed":
+                 raise RuntimeError(f"Failed to start pipeline {pipeline_name}")
+
+             logging.debug("still starting %s, waiting for 100 more milliseconds", pipeline_name)
+             time.sleep(0.1)
+
+     def pause_pipeline(self, pipeline_name: str):
+         """
+         Pause a pipeline and wait until it is paused
+
+         :param pipeline_name: The name of the pipeline to pause
+         """
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/pause",
+         )
+
+         while True:
+             status = self.get_pipeline(pipeline_name).deployment_status
+
+             if status == "Paused":
+                 break
+             elif status == "Failed":
+                 # TODO: return a more detailed error message
+                 raise RuntimeError(f"Failed to pause pipeline {pipeline_name}")
+
+             logging.debug("still pausing %s, waiting for 100 more milliseconds", pipeline_name)
+             time.sleep(0.1)
+
+     def shutdown_pipeline(self, pipeline_name: str):
+         """
+         Shut down a pipeline and wait until it has shut down
+
+         :param pipeline_name: The name of the pipeline to shut down
+         """
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/shutdown",
+         )
+
+         start = time.time()
+         timeout = 15
+
+         while time.time() - start < timeout:
+             status = self.get_pipeline(pipeline_name).deployment_status
+
+             if status == "Shutdown":
+                 return
+
+             logging.debug("still shutting down %s, waiting for 100 more milliseconds", pipeline_name)
+             time.sleep(0.1)
+
+         # retry sending the shutdown request, as the pipeline hasn't shut down yet
+         logging.debug("pipeline %s hasn't shut down after %s s, retrying", pipeline_name, timeout)
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/shutdown",
+         )
+
+         start = time.time()
+         timeout = 5
+
+         while time.time() - start < timeout:
+             status = self.get_pipeline(pipeline_name).deployment_status
+
+             if status == "Shutdown":
+                 return
+
+             logging.debug("still shutting down %s, waiting for 100 more milliseconds", pipeline_name)
+             time.sleep(0.1)
+
+         raise RuntimeError(f"Failed to shut down pipeline {pipeline_name}")
+
+     def push_to_pipeline(
+         self,
+         pipeline_name: str,
+         table_name: str,
+         format: str,
+         data: list[list | str | dict],
+         array: bool = False,
+         force: bool = False,
+         update_format: str = "raw",
+         json_flavor: Optional[str] = None,
+         serialize: bool = True,
+     ):
+         """
+         Insert data into a pipeline
+
+         :param pipeline_name: The name of the pipeline
+         :param table_name: The name of the table
+         :param format: The format of the data, either "json" or "csv"
+         :param array: True if updates in this stream are packed into JSON arrays, used in conjunction with the
+             "json" format
+
+         :param force: If True, the data will be inserted even if the pipeline is paused
+         :param update_format: JSON data change event format, used in conjunction with the "json" format;
+             the default value is "raw", other supported formats: "insert_delete", "weighted", "debezium", "snowflake"
+         :param json_flavor: JSON encoding used for individual table records, the default value is "default", other supported encodings:
+             "debezium_mysql", "snowflake", "kafka_connect_json_converter", "pandas"
+         :param data: The data to insert
+         :param serialize: If True, the data will be serialized to JSON. True by default
+         """
+
+         if format not in ["json", "csv"]:
+             raise ValueError("format must be either 'json' or 'csv'")
+
+         if update_format not in ["insert_delete", "weighted", "debezium", "snowflake", "raw"]:
+             raise ValueError("update_format must be one of 'insert_delete', 'weighted', 'debezium', 'snowflake', 'raw'")
+
+         if json_flavor is not None and json_flavor not in ["default", "debezium_mysql", "snowflake", "kafka_connect_json_converter", "pandas"]:
+             raise ValueError("json_flavor must be one of 'default', 'debezium_mysql', 'snowflake', 'kafka_connect_json_converter', 'pandas'")
+
+         # Python sends `True`, which isn't accepted by the backend; convert to "true"/"false"
+         array = _prepare_boolean_input(array)
+         force = _prepare_boolean_input(force)
+
+         params = {
+             "force": force,
+             "format": format,
+         }
+
+         if format == "json":
+             params["array"] = array
+             params["update_format"] = update_format
+
+         if json_flavor is not None:
+             params["json_flavor"] = json_flavor
+
+         content_type = "application/json"
+
+         if format == "csv":
+             content_type = "text/csv"
+             data = bytes(str(data), "utf-8")
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/ingress/{table_name}",
+             params=params,
+             content_type=content_type,
+             body=data,
+             serialize=serialize,
+         )
+
+     def listen_to_pipeline(
+         self,
+         pipeline_name: str,
+         table_name: str,
+         format: str,
+         backpressure: bool = True,
+         array: bool = False,
+         timeout: Optional[float] = None,
+     ):
+         """
+         Listen for updates to views of a pipeline, yielding chunks of data
+
+         :param pipeline_name: The name of the pipeline
+         :param table_name: The name of the table or view to listen to
+         :param format: The format of the data, either "json" or "csv"
+         :param backpressure: When the flag is True (the default), this method waits for the consumer to receive each
+             chunk and blocks the pipeline if the consumer cannot keep up. When this flag is False, the pipeline drops
+             data chunks if the consumer is not keeping up with its output. This prevents a slow consumer from slowing
+             down the entire pipeline.
+         :param array: Set True to group updates in this stream into JSON arrays, used in conjunction with the
+             "json" format; the default value is False
+
+         :param timeout: The amount of time in seconds to listen to the stream for
+         """
+
+         params = {
+             "format": format,
+             "backpressure": _prepare_boolean_input(backpressure),
+         }
+
+         if format == "json":
+             params["array"] = _prepare_boolean_input(array)
+
+         resp = self.http.post(
+             path=f"/pipelines/{pipeline_name}/egress/{table_name}",
+             params=params,
+             stream=True,
+         )
+
+         end = time.time() + timeout if timeout else None
+
+         # Using the default chunk size below makes `iter_lines` extremely
+         # inefficient when dealing with long lines.
+         for chunk in resp.iter_lines(chunk_size=50000000):
+             if end and time.time() > end:
+                 break
+             if chunk:
+                 yield json.loads(chunk, parse_float=Decimal)
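End to end, the client methods above compose into the usual lifecycle: create the pipeline (blocking until SQL and Rust compilation finish), start it, push data, stream results, and shut it down. A hedged sketch under assumed names — the import path, URL, API key, and schema are illustrative rather than taken from this diff:

    from feldera.rest.feldera_client import FelderaClient  # import path assumed
    from feldera.rest.pipeline import Pipeline

    client = FelderaClient("https://try.feldera.com", api_key="apikey:...")  # placeholder key

    sql = """
    CREATE TABLE orders (id INT, amount DECIMAL(10, 2));
    CREATE VIEW totals AS SELECT SUM(amount) AS total FROM orders;
    """

    pipeline = Pipeline("example", sql, program_config={}, runtime_config={})
    client.create_or_update_pipeline(pipeline)  # returns once compilation succeeds
    client.start_pipeline("example")            # returns once status is "Running"

    # Two inserts packed into a JSON array; booleans become "true"/"false" internally
    client.push_to_pipeline(
        "example",
        "orders",
        format="json",
        array=True,
        update_format="insert_delete",
        data=[{"insert": {"id": 1, "amount": 10.5}},
              {"insert": {"id": 2, "amount": 4.5}}],
    )

    # Stream view updates for at most 5 seconds
    for chunk in client.listen_to_pipeline("example", "totals", format="json", timeout=5):
        print(chunk)

    client.shutdown_pipeline("example")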
feldera/rest/pipeline.py ADDED
@@ -0,0 +1,69 @@
+ from typing import Mapping, Any, Optional
+ from feldera.rest.sql_table import SQLTable
+ from feldera.rest.sql_view import SQLView
+
+
+ class Pipeline:
+     """
+     Represents a Feldera pipeline
+     """
+
+     def __init__(
+         self,
+         name: str,
+         sql: str,
+         program_config: Mapping[str, Any],
+         runtime_config: Mapping[str, Any],
+         description: Optional[str] = None,
+     ):
+         """
+         Initializes a new pipeline
+
+         :param name: The name of the pipeline
+         :param sql: The SQL code of the pipeline
+         :param program_config: The program config of the pipeline
+         :param runtime_config: The runtime configuration of the pipeline
+         :param description: Optional. The description of the pipeline
+         """
+
+         self.name: str = name
+         self.program_code: str = sql.strip()
+         self.description: Optional[str] = description
+         self.program_config: Mapping[str, Any] = program_config
+         self.runtime_config: Mapping[str, Any] = runtime_config
+         self.id: Optional[str] = None
+         self.tables: list[SQLTable] = []
+         self.views: list[SQLView] = []
+         self.deployment_status: Optional[str] = None
+         self.deployment_status_since: Optional[str] = None
+         self.created_at: Optional[str] = None
+         self.version: Optional[int] = None
+         self.program_version: Optional[int] = None
+         self.deployment_config: Optional[dict] = None
+         self.deployment_desired_status: Optional[str] = None
+         self.deployment_error: Optional[dict] = None
+         self.deployment_location: Optional[str] = None
+         self.program_binary_url: Optional[str] = None
+         self.program_info: Optional[dict] = None  # info about input & output connectors and the schema
+         self.program_status: Optional[str] = None
+         self.program_status_since: Optional[str] = None
+
+     @classmethod
+     def from_dict(cls, d: Mapping[str, Any]):
+         pipeline = cls("", "", {}, {})
+         pipeline.__dict__ = d
+         pipeline.tables = []
+         pipeline.views = []
+
+         info = d.get("program_info")
+
+         if info is not None:
+             for i in info["schema"]["inputs"]:
+                 tbl = SQLTable.from_dict(i)
+                 pipeline.tables.append(tbl)
+
+             for output in info["schema"]["outputs"]:
+                 v = SQLView.from_dict(output)
+                 pipeline.views.append(v)
+
+         return pipeline
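Note that from_dict replaces the instance's __dict__ wholesale with the server's response and then rebuilds tables and views from program_info, so only compiled pipelines carry schema objects. A small sketch against a truncated, illustrative response (the "columntype" field shape is an assumption):

    d = {
        "name": "example",
        "program_code": "CREATE TABLE t (x INT);",
        "program_status": "Success",
        "deployment_status": "Shutdown",
        "program_info": {
            "schema": {
                "inputs": [{"name": "t",
                            "fields": [{"name": "x", "columntype": {"type": "INT"}}],
                            "case_sensitive": False,
                            "materialized": False}],
                "outputs": [],
            }
        },
    }

    p = Pipeline.from_dict(d)
    print(p.name, p.program_status)    # example Success
    print([t.name for t in p.tables])  # ['t']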
feldera/rest/sql_table.py ADDED
@@ -0,0 +1,17 @@
+ class SQLTable:
+     """
+     Represents a SQL table in Feldera
+     """
+
+     def __init__(self, name: str, fields: list[dict], case_sensitive: bool = False, materialized: bool = False):
+         self.name = name
+         self.case_sensitive = case_sensitive
+         self.materialized = materialized
+         self.fields: list[dict] = fields
+
+     @classmethod
+     def from_dict(cls, table_dict: dict):
+         tbl = cls(name=table_dict["name"], fields=table_dict["fields"])
+         tbl.case_sensitive = table_dict["case_sensitive"]
+         tbl.materialized = table_dict["materialized"]
+         return tbl
feldera/rest/sql_view.py ADDED
@@ -0,0 +1,17 @@
+ class SQLView:
+     """
+     Represents a SQL view in Feldera
+     """
+
+     def __init__(self, name: str, fields: list[dict], case_sensitive: bool = False, materialized: bool = False):
+         self.name = name
+         self.case_sensitive = case_sensitive
+         self.materialized = materialized
+         self.fields: list[dict] = fields
+
+     @classmethod
+     def from_dict(cls, view_dict: dict):
+         view = cls(name=view_dict["name"], fields=view_dict["fields"])
+         view.case_sensitive = view_dict["case_sensitive"]
+         view.materialized = view_dict["materialized"]
+         return view
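SQLTable and SQLView are structurally identical record types; both from_dict constructors expect exactly the four keys used above. For instance (field metadata illustrative):

    view = SQLView.from_dict({
        "name": "totals",
        "fields": [{"name": "total", "columntype": {"type": "DECIMAL"}}],  # shape illustrative
        "case_sensitive": False,
        "materialized": True,
    })
    print(view.name, view.materialized)  # totals True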
feldera/runtime_config.py ADDED
@@ -0,0 +1,79 @@
+ from typing import Optional, Any, Mapping
+
+
+ class Resources:
+     """
+     Class used to specify the resource configuration for a pipeline.
+
+     :param config: A dictionary containing all the configuration values.
+     :param cpu_cores_max: The maximum number of CPU cores to reserve for an instance of the pipeline.
+     :param cpu_cores_min: The minimum number of CPU cores to reserve for an instance of the pipeline.
+     :param memory_mb_max: The maximum memory in megabytes to reserve for an instance of the pipeline.
+     :param memory_mb_min: The minimum memory in megabytes to reserve for an instance of the pipeline.
+     :param storage_class: The storage class to use for the pipeline. The class determines storage performance such
+         as IOPS and throughput.
+     :param storage_mb_max: The maximum storage in megabytes to reserve for an instance of the pipeline.
+     """
+
+     def __init__(
+         self,
+         config: Optional[Mapping[str, Any]] = None,
+         cpu_cores_max: Optional[int] = None,
+         cpu_cores_min: Optional[int] = None,
+         memory_mb_max: Optional[int] = None,
+         memory_mb_min: Optional[int] = None,
+         storage_class: Optional[str] = None,
+         storage_mb_max: Optional[int] = None,
+     ):
+
+         config = config or {}
+
+         self.cpu_cores_max = cpu_cores_max
+         self.cpu_cores_min = cpu_cores_min
+         self.memory_mb_max = memory_mb_max
+         self.memory_mb_min = memory_mb_min
+         self.storage_class = storage_class
+         self.storage_mb_max = storage_mb_max
+
+         self.__dict__.update(config)
+
+
+ class RuntimeConfig:
+     """
+     Runtime configuration class to define the configuration for a pipeline.
+     """
+
+     def __init__(
+         self,
+         workers: Optional[int] = None,
+         storage: Optional[bool] = False,
+         tracing: Optional[bool] = False,
+         tracing_endpoint_jaeger: Optional[str] = "",
+         cpu_profiler: bool = True,
+         max_buffering_delay_usecs: int = 0,
+         min_batch_size_records: int = 0,
+         min_storage_bytes: Optional[int] = None,
+         clock_resolution_usecs: Optional[int] = None,
+         resources: Optional[Resources] = None,
+     ):
+         self.workers = workers
+         self.storage = storage
+         self.tracing = tracing
+         self.tracing_endpoint_jaeger = tracing_endpoint_jaeger
+         self.cpu_profiler = cpu_profiler
+         self.max_buffering_delay_usecs = max_buffering_delay_usecs
+         self.min_batch_size_records = min_batch_size_records
+         self.min_storage_bytes = min_storage_bytes
+         self.clock_resolution_usecs = clock_resolution_usecs
+         if resources is not None:
+             self.resources = resources.__dict__
+
+     @classmethod
+     def from_dict(cls, d: Mapping[str, Any]):
+         """
+         Create a RuntimeConfig object from a dictionary.
+         """
+
+         conf = cls()
+         conf.__dict__ = d
+         return conf
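Resources accepts either keyword arguments or a raw config mapping (merged in via __dict__.update), and RuntimeConfig flattens a Resources instance into a plain dict so the whole object serializes cleanly as a pipeline's runtime_config. A sketch with illustrative values — passing rc.__dict__ to Pipeline is an assumption, since this diff never shows the two wired together:

    from feldera.rest.pipeline import Pipeline

    res = Resources(cpu_cores_max=4, memory_mb_max=8192, storage_mb_max=10000)
    rc = RuntimeConfig(workers=8, storage=True, resources=res)

    # Pipeline expects a plain mapping, so hand it the flattened dict
    pipeline = Pipeline(
        "example",
        "CREATE TABLE t (x INT);",
        program_config={},
        runtime_config=rc.__dict__,
    )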