feldera 0.34.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

feldera/pipeline.py ADDED
@@ -0,0 +1,809 @@
+ import logging
+ import time
+ from datetime import datetime
+
+ import pandas
+
+ from typing import List, Dict, Callable, Optional, Generator, Mapping, Any
+ from collections import deque
+ from queue import Queue
+
+ from feldera.rest.errors import FelderaAPIError
+ from feldera.enums import PipelineStatus, ProgramStatus
+ from feldera.rest.pipeline import Pipeline as InnerPipeline
+ from feldera.rest.feldera_client import FelderaClient
+ from feldera._callback_runner import _CallbackRunnerInstruction, CallbackRunner
+ from feldera.output_handler import OutputHandler
+ from feldera._helpers import ensure_dataframe_has_columns, chunk_dataframe
+ from feldera.rest.sql_table import SQLTable
+ from feldera.rest.sql_view import SQLView
+
+
+ class Pipeline:
+     def __init__(self, client: FelderaClient):
+         self.client: FelderaClient = client
+         self._inner: InnerPipeline | None = None
+         self.views_tx: List[Dict[str, Queue]] = []
+
+     @staticmethod
+     def _from_inner(inner: InnerPipeline, client: FelderaClient) -> "Pipeline":
+         pipeline = Pipeline(client)
+         pipeline._inner = inner
+         return pipeline
+
+     def __setup_output_listeners(self):
+         """
+         Internal function used to set up the output listeners.
+
+         :meta private:
+         """
+
+         for view_queue in self.views_tx:
+             for view_name, queue in view_queue.items():
+                 # send a message to the callback runner to start listening
+                 queue.put(_CallbackRunnerInstruction.PipelineStarted)
+                 # block until the callback runner is ready
+                 queue.join()
+
+     def refresh(self):
+         """
+         Calls the backend to fetch the latest version of the pipeline.
+
+         :raises FelderaConnectionError: If there is an issue connecting to the backend.
+         """
+
+         self._inner = self.client.get_pipeline(self.name)
+
+     def status(self) -> PipelineStatus:
+         """
+         Return the current status of the pipeline.
+         """
+
+         try:
+             self.refresh()
+             return PipelineStatus.from_str(self._inner.deployment_status)
+
+         except FelderaAPIError as err:
+             if err.status_code == 404:
+                 return PipelineStatus.NOT_FOUND
+             else:
+                 raise err
+
+     def input_pandas(self, table_name: str, df: pandas.DataFrame, force: bool = False):
+         """
+         Push all rows in a pandas DataFrame to the pipeline.
+
+         The pipeline must be in the RUNNING or PAUSED state to push data.
+         An error is raised if the pipeline is in any other state.
+
+         The DataFrame must have the same columns as the table in the pipeline.
+
+         :param table_name: The name of the table to insert data into.
+         :param df: The pandas DataFrame to be pushed to the pipeline.
+         :param force: `True` to push data even if the pipeline is paused. `False` by default.
+
+         :raises ValueError: If the table does not exist in the pipeline.
+         :raises RuntimeError: If the pipeline is not in a valid state to push data.
+         :raises RuntimeError: If the pipeline is paused and `force` is not set to `True`.
+         """
+
+         status = self.status()
+         if status not in [
+             PipelineStatus.RUNNING,
+             PipelineStatus.PAUSED,
+         ]:
+             raise RuntimeError("Pipeline must be running or paused to push data")
+
+         if not force and status == PipelineStatus.PAUSED:
+             raise RuntimeError("Pipeline is paused, set force=True to push data")
+
+         ensure_dataframe_has_columns(df)
+
+         pipeline = self.client.get_pipeline(self.name)
+         if table_name.lower() != "now" and table_name.lower() not in [
+             tbl.name.lower() for tbl in pipeline.tables
+         ]:
+             raise ValueError(
+                 f"Cannot push to table '{table_name}': table with this name does not exist in the '{self.name}' pipeline"
+             )
+         else:
+             # consider validating the schema here
+             for datum in chunk_dataframe(df):
+                 self.client.push_to_pipeline(
+                     self.name,
+                     table_name,
+                     "json",
+                     datum.to_json(orient="records", date_format="epoch"),
+                     json_flavor="pandas",
+                     array=True,
+                     serialize=False,
+                     force=force,
+                 )
+         return
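+     # A minimal usage sketch for `input_pandas`, assuming a pipeline object
+     # `p` for a program with a table `items(id INT, name VARCHAR)`; the
+     # names here are illustrative only:
+     #
+     #     import pandas as pd
+     #     df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
+     #     p.input_pandas("items", df)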
+
+     def input_json(
+         self,
+         table_name: str,
+         data: Dict | list,
+         update_format: str = "raw",
+         force: bool = False,
+     ):
+         """
+         Push this JSON data to the specified table of the pipeline.
+
+         The pipeline must be in the RUNNING or PAUSED state to push data.
+         An error is raised if the pipeline is in any other state.
+
+         :param table_name: The name of the table to push data into.
+         :param data: The JSON encoded data to be pushed to the pipeline. The data should be in the form:
+             `{'col1': 'val1', 'col2': 'val2'}` or `[{'col1': 'val1', 'col2': 'val2'}, {'col1': 'val1', 'col2': 'val2'}]`
+         :param update_format: The update format of the JSON data to be pushed to the pipeline. Must be one of:
+             "raw", "insert_delete". <https://docs.feldera.com/formats/json#the-insertdelete-format>
+         :param force: `True` to push data even if the pipeline is paused. `False` by default.
+
+         :raises ValueError: If the update format is invalid.
+         :raises FelderaAPIError: If the pipeline is not in a valid state to push data.
+         :raises RuntimeError: If the pipeline is paused and `force` is not set to `True`.
+         """
+
+         status = self.status()
+         if not force and status == PipelineStatus.PAUSED:
+             raise RuntimeError("Pipeline is paused, set force=True to push data")
+
+         if update_format not in ["raw", "insert_delete"]:
+             raise ValueError("update_format must be one of 'raw' or 'insert_delete'")
+
+         array = isinstance(data, list)
+         self.client.push_to_pipeline(
+             self.name,
+             table_name,
+             "json",
+             data,
+             update_format=update_format,
+             array=array,
+             force=force,
+         )
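+     # A minimal usage sketch for `input_json`, assuming a pipeline object
+     # `p` with an illustrative table `items`. In the linked insert_delete
+     # format, each row is wrapped in an action:
+     #
+     #     p.input_json("items", {"id": 1, "name": "a"})
+     #     p.input_json(
+     #         "items",
+     #         [{"insert": {"id": 2, "name": "b"}}, {"delete": {"id": 1, "name": "a"}}],
+     #         update_format="insert_delete",
+     #     )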
+
+     def pause_connector(self, table_name: str, connector_name: str):
+         """
+         Pause the specified input connector.
+
+         Connectors allow Feldera to fetch data from a source or write data to a sink.
+         This method allows users to **PAUSE** a specific **INPUT** connector.
+         All connectors are RUNNING by default.
+
+         Refer to the connector documentation for more information:
+         <https://docs.feldera.com/connectors/#input-connector-orchestration>
+
+         :param table_name: The name of the table that the connector is attached to.
+         :param connector_name: The name of the connector to pause.
+
+         :raises FelderaAPIError: If the connector is not found, or if the pipeline is not running.
+         """
+
+         self.client.pause_connector(self.name, table_name, connector_name)
+
+     def resume_connector(self, table_name: str, connector_name: str):
+         """
+         Resume the specified input connector.
+
+         Connectors allow Feldera to fetch data from a source or write data to a sink.
+         This method allows users to **RESUME / START** a specific **INPUT** connector.
+         All connectors are RUNNING by default.
+
+         Refer to the connector documentation for more information:
+         <https://docs.feldera.com/connectors/#input-connector-orchestration>
+
+         :param table_name: The name of the table that the connector is attached to.
+         :param connector_name: The name of the connector to resume.
+
+         :raises FelderaAPIError: If the connector is not found, or if the pipeline is not running.
+         """
+
+         self.client.resume_connector(self.name, table_name, connector_name)
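+     # Connector orchestration sketch, assuming a pipeline `p` whose table
+     # `events` was declared with a named connector `kafka_in` (illustrative
+     # names):
+     #
+     #     p.pause_connector("events", "kafka_in")   # stop ingesting from the source
+     #     p.resume_connector("events", "kafka_in")  # start ingesting again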
+
+     def listen(self, view_name: str) -> OutputHandler:
+         """
+         Follow the change stream (i.e., the output) of the provided view.
+         Returns an output handler to read the changes.
+
+         When the pipeline is shut down, these listeners are dropped.
+
+         You must call this method before starting the pipeline to get the entire output of the view.
+         If this method is called once the pipeline has started, you will only get the output from that point onwards.
+
+         :param view_name: The name of the view to listen to.
+         """
+
+         queue: Optional[Queue] = None
+
+         if self.status() not in [PipelineStatus.PAUSED, PipelineStatus.RUNNING]:
+             queue = Queue(maxsize=1)
+             self.views_tx.append({view_name: queue})
+
+         handler = OutputHandler(self.client, self.name, view_name, queue)
+         handler.start()
+
+         return handler
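+     # Listening sketch: register the listener before `start()` to capture the
+     # view's output from the beginning. `to_pandas()` is assumed here to be
+     # the OutputHandler accessor for the buffered output:
+     #
+     #     out = p.listen("my_view")
+     #     p.start()
+     #     p.wait_for_completion()
+     #     df = out.to_pandas()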
+
+     def foreach_chunk(
+         self, view_name: str, callback: Callable[[pandas.DataFrame, int], None]
+     ):
+         """
+         Run the given callback on each chunk of the output of the specified view.
+
+         You must call this method before starting the pipeline to operate on the entire output.
+         You can call this method after the pipeline has started, but you will only get the output from that point onwards.
+
+         :param view_name: The name of the view.
+         :param callback: The callback to run on each chunk. The callback should take two arguments:
+
+             - **chunk** -> The chunk as a pandas DataFrame
+             - **seq_no** -> The sequence number. The sequence number is a monotonically increasing integer that
+               starts from 0. Note that the sequence number is unique for each chunk, but not necessarily contiguous.
+
+         .. note::
+             - The callback must be thread-safe, as it is run in a separate thread.
+             - The callback should not block for a long time: backpressure is enabled by default,
+               so a slow callback will block the pipeline.
+         """
+
+         queue: Optional[Queue] = None
+
+         if self.status() not in [PipelineStatus.RUNNING, PipelineStatus.PAUSED]:
+             queue = Queue(maxsize=1)
+             self.views_tx.append({view_name: queue})
+
+         handler = CallbackRunner(self.client, self.name, view_name, callback, queue)
+         handler.start()
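+     # Callback sketch: print each output chunk of an illustrative view
+     # `my_view` as it arrives (register before `p.start()` to see all output):
+     #
+     #     def on_chunk(chunk: pandas.DataFrame, seq_no: int) -> None:
+     #         print(f"chunk {seq_no}: {len(chunk)} rows")
+     #
+     #     p.foreach_chunk("my_view", on_chunk)
+     #     p.start()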
+
+     def wait_for_completion(
+         self, shutdown: bool = False, timeout_s: Optional[float] = None
+     ):
+         """
+         Block until the pipeline has completed processing all input records.
+
+         This method blocks until (1) all input connectors attached to the pipeline
+         have finished reading their input data sources and issued end-of-input
+         notifications to the pipeline, and (2) all inputs received from these
+         connectors have been fully processed and the corresponding outputs have been
+         sent out through the output connectors.
+
+         This method blocks indefinitely if at least one of the input
+         connectors attached to the pipeline is a streaming connector, such as
+         Kafka, that does not issue an end-of-input notification.
+
+         :param shutdown: If True, the pipeline is shut down after completion. False by default.
+         :param timeout_s: Optional. The maximum time (in seconds) to wait for the pipeline to complete.
+             The default is None, which means wait indefinitely.
+
+         :raises RuntimeError: If the pipeline returns unknown metrics.
+         :raises TimeoutError: If the timeout is reached before the pipeline completes.
+         """
+
+         if self.status() not in [
+             PipelineStatus.RUNNING,
+             PipelineStatus.INITIALIZING,
+             PipelineStatus.PROVISIONING,
+         ]:
+             raise RuntimeError("Pipeline must be running to wait for completion")
+
+         start_time = time.monotonic()
+
+         while True:
+             if timeout_s is not None:
+                 elapsed = time.monotonic() - start_time
+                 if elapsed > timeout_s:
+                     raise TimeoutError(
+                         f"timeout ({timeout_s}s) reached while waiting for pipeline '{self.name}' to complete"
+                     )
+                 logging.debug(
+                     f"waiting for pipeline {self.name} to complete: elapsed time {elapsed}s, timeout: {timeout_s}s"
+                 )
+
+             metrics: dict = self.client.get_pipeline_stats(self.name).get(
+                 "global_metrics"
+             )
+             pipeline_complete: bool = metrics.get("pipeline_complete")
+
+             if pipeline_complete is None:
+                 raise RuntimeError(
+                     "received unknown metrics from the pipeline, pipeline_complete is None"
+                 )
+
+             if pipeline_complete:
+                 break
+
+             time.sleep(1)
+
+         if shutdown:
+             self.shutdown()
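+     # Batch-style run sketch: for a pipeline whose inputs all have bounded
+     # sources (e.g. files), start it, wait until every record is processed,
+     # then shut it down in one call:
+     #
+     #     p.start()
+     #     p.wait_for_completion(shutdown=True, timeout_s=600)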
+
+     def __failed_check(self, next):
+         """
+         Checks if the pipeline is in the FAILED state and raises an error if it is.
+
+         :meta private:
+         """
+         status = self.status()
+         if status == PipelineStatus.FAILED:
+             deployment_error = self.client.get_pipeline(self.name).deployment_error
+             error_msg = deployment_error.get("message", "")
+             raise RuntimeError(
+                 f"""Cannot {next} pipeline '{self.name}' in FAILED state.
+ The pipeline must be in SHUTDOWN state before it can be started, but it is currently in FAILED state.
+ Use the `Pipeline.shutdown()` method to shut down the pipeline.
+ Error Message:
+ {error_msg}"""
+             )
+
+     def start(self, timeout_s: Optional[float] = None):
+         """
+         .. _start:
+
+         Starts this pipeline.
+
+         The pipeline must be in the SHUTDOWN state to start.
+         If the pipeline is in any other state, an error is raised.
+         If the pipeline is in the PAUSED state, use :meth:`.resume` instead.
+         If the pipeline is in the FAILED state, it must be shut down before starting it again.
+
+         :param timeout_s: The maximum time (in seconds) to wait for the pipeline to start.
+
+         :raises RuntimeError: If the pipeline is not in SHUTDOWN state.
+         """
+
+         self.__failed_check("start")
+         status = self.status()
+         if status != PipelineStatus.SHUTDOWN:
+             raise RuntimeError(
+                 f"""Cannot start pipeline '{self.name}' in state '{str(status.name)}'.
+ The pipeline must be in SHUTDOWN state before it can be started.
+ You can either shut down the pipeline using the `Pipeline.shutdown()` method or use `Pipeline.resume()` to \
+ resume a paused pipeline."""
+             )
+
+         # bring the pipeline up in the paused state so that output listeners
+         # can attach before any data flows, then resume it
+         self.client.pause_pipeline(
+             self.name, "Unable to START the pipeline.", timeout_s
+         )
+         self.__setup_output_listeners()
+         self.resume(timeout_s)
+
+     def restart(self, timeout_s: Optional[float] = None):
+         """
+         Restarts the pipeline.
+
+         This method **SHUTS DOWN** the pipeline regardless of its current state and then starts it again.
+
+         :param timeout_s: The maximum time (in seconds) to wait for the pipeline to restart.
+         """
+
+         self.shutdown(timeout_s)
+         self.start(timeout_s)
+
+     def wait_for_idle(
+         self,
+         idle_interval_s: float = 5.0,
+         timeout_s: float = 600.0,
+         poll_interval_s: float = 0.2,
+     ):
+         """
+         Wait for the pipeline to become idle, then return.
+
+         Idle is defined as a sufficiently long interval in which the number of
+         input and processed records reported by the pipeline does not change, and
+         the two counts equal each other (thus, all input records present at the
+         pipeline have been processed).
+
+         :param idle_interval_s: Idle interval duration (default is 5.0 seconds).
+         :param timeout_s: Timeout waiting for idle (default is 600.0 seconds).
+         :param poll_interval_s: Polling interval, should be set substantially
+             smaller than the idle interval (default is 0.2 seconds).
+         :raises ValueError: If the idle interval is larger than the timeout, the poll
+             interval is larger than the timeout, or the poll interval is larger than
+             the idle interval.
+         :raises RuntimeError: If the metrics are missing or the timeout was
+             reached.
+         """
+         if idle_interval_s > timeout_s:
+             raise ValueError(
+                 f"idle interval ({idle_interval_s}s) cannot be larger than timeout ({timeout_s}s)"
+             )
+         if poll_interval_s > timeout_s:
+             raise ValueError(
+                 f"poll interval ({poll_interval_s}s) cannot be larger than timeout ({timeout_s}s)"
+             )
+         if poll_interval_s > idle_interval_s:
+             raise ValueError(
+                 f"poll interval ({poll_interval_s}s) cannot be larger "
+                 f"than idle interval ({idle_interval_s}s)"
+             )
+
+         start_time_s = time.monotonic()
+         idle_started_s = None
+         prev = (0, 0)
+         while True:
+             now_s = time.monotonic()
+
+             # Metrics retrieval
+             metrics: dict = self.client.get_pipeline_stats(self.name).get(
+                 "global_metrics"
+             )
+             total_input_records: int | None = metrics.get("total_input_records")
+             total_processed_records: int | None = metrics.get(
+                 "total_processed_records"
+             )
+             if total_input_records is None:
+                 raise RuntimeError(
+                     "total_input_records is missing from the pipeline metrics"
+                 )
+             if total_processed_records is None:
+                 raise RuntimeError(
+                     "total_processed_records is missing from the pipeline metrics"
+                 )
+
+             # Idle check: the counters are unchanged since the previous poll
+             # and all received input has been processed
+             unchanged = (
+                 prev[0] == total_input_records and prev[1] == total_processed_records
+             )
+             equal = total_input_records == total_processed_records
+             prev = (total_input_records, total_processed_records)
+             if unchanged and equal:
+                 if idle_started_s is None:
+                     idle_started_s = now_s
+             else:
+                 idle_started_s = None
+             if idle_started_s is not None and now_s - idle_started_s >= idle_interval_s:
+                 return
+
+             # Timeout
+             if now_s - start_time_s >= timeout_s:
+                 raise RuntimeError(f"waiting for idle reached timeout ({timeout_s}s)")
+             time.sleep(poll_interval_s)
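+     # Idle-wait sketch for streaming sources that never issue end-of-input
+     # (e.g. Kafka): treat ten quiet seconds as "caught up":
+     #
+     #     p.start()
+     #     p.wait_for_idle(idle_interval_s=10.0, timeout_s=300.0)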
+
+     def pause(self, timeout_s: Optional[float] = None):
+         """
+         Pause the pipeline.
+
+         The pipeline can only transition to the PAUSED state from the RUNNING state.
+         If the pipeline is already paused, it remains in the PAUSED state.
+
+         :param timeout_s: The maximum time (in seconds) to wait for the pipeline to pause.
+
+         :raises FelderaAPIError: If the pipeline is in FAILED state.
+         """
+
+         self.__failed_check("pause")
+         self.client.pause_pipeline(self.name, timeout_s=timeout_s)
+
+     def shutdown(self, timeout_s: Optional[float] = None):
+         """
+         Shut down the pipeline, regardless of its current state.
+
+         :param timeout_s: The maximum time (in seconds) to wait for the pipeline to shut down.
+         """
+
+         # drain all registered listeners, not just the most recent one
+         while self.views_tx:
+             for _, queue in self.views_tx.pop().items():
+                 # send a message to the callback runner to stop listening
+                 queue.put(_CallbackRunnerInstruction.RanToCompletion)
+                 # block until the callback runner has stopped
+                 queue.join()
+
+         self.client.shutdown_pipeline(self.name, timeout_s=timeout_s)
+
+     def resume(self, timeout_s: Optional[float] = None):
+         """
+         Resumes the pipeline from the PAUSED state. If the pipeline is already running, it remains in the RUNNING state.
+
+         :param timeout_s: The maximum time (in seconds) to wait for the pipeline to resume.
+
+         :raises FelderaAPIError: If the pipeline is in FAILED state.
+         """
+
+         self.__failed_check("resume")
+         self.client.start_pipeline(self.name, timeout_s=timeout_s)
+
+     def delete(self):
+         """
+         Deletes the pipeline.
+
+         The pipeline must be shut down before it can be deleted.
+
+         :raises FelderaAPIError: If the pipeline is not in SHUTDOWN state.
+         """
+
+         self.client.delete_pipeline(self.name)
+
+     @staticmethod
+     def get(name: str, client: FelderaClient) -> "Pipeline":
+         """
+         Get the pipeline if it exists.
+
+         :param name: The name of the pipeline.
+         :param client: The FelderaClient instance.
+
+         :raises RuntimeError: If the pipeline does not exist.
+         """
+
+         try:
+             inner = client.get_pipeline(name)
+             return Pipeline._from_inner(inner, client)
+         except FelderaAPIError as err:
+             if err.status_code == 404:
+                 raise RuntimeError(f"Pipeline with name {name} not found")
+             raise
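+     # Connection sketch: look up an existing pipeline by name and run it.
+     # The base URL and pipeline name are illustrative, and the FelderaClient
+     # constructor is assumed to accept the server URL:
+     #
+     #     client = FelderaClient("http://localhost:8080")
+     #     p = Pipeline.get("my-pipeline", client)
+     #     p.start()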
+
+     def checkpoint(self):
+         """
+         Checkpoints this pipeline, if fault tolerance is enabled.
+         Fault tolerance in Feldera: <https://docs.feldera.com/fault-tolerance/>
+
+         :raises FelderaAPIError: If checkpointing is not enabled.
+         """
+
+         self.client.checkpoint_pipeline(self.name)
+
+     def query(self, query: str) -> Generator[Mapping[str, Any], None, None]:
+         """
+         Executes an ad-hoc SQL query on this pipeline and returns a generator that yields the rows of the result as Python dictionaries.
+         For ``INSERT`` and ``DELETE`` queries, consider using :meth:`.execute` instead.
+         All floating-point numbers are deserialized as Decimal objects to avoid precision loss.
+
+         Note:
+             You can only ``SELECT`` from materialized tables and views.
+
+         Important:
+             This method is lazy. It returns a generator and is not evaluated until you consume the result.
+
+         :param query: The SQL query to be executed.
+         :return: A generator that yields the rows of the result as Python dictionaries.
+
+         :raises FelderaAPIError: If the pipeline is not in a RUNNING or PAUSED state.
+         :raises FelderaAPIError: If querying a non-materialized table or view.
+         :raises FelderaAPIError: If the query is invalid.
+         """
+
+         return self.client.query_as_json(self.name, query)
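+     # Ad-hoc query sketch: the generator is lazy, so iterate (or collect it
+     # into a list) to actually run the query. `items` and its columns are
+     # illustrative names for a materialized table:
+     #
+     #     for row in p.query("SELECT * FROM items"):
+     #         print(row["id"], row["name"])
+     #
+     #     rows = list(p.query("SELECT COUNT(*) AS n FROM items"))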
+
+     def query_parquet(self, query: str, path: str):
+         """
+         Executes an ad-hoc SQL query on this pipeline and saves the result to the specified path as a parquet file.
+         If `path` does not end in the `parquet` extension, it is appended automatically.
+
+         Note:
+             You can only ``SELECT`` from materialized tables and views.
+
+         :param query: The SQL query to be executed.
+         :param path: The path of the parquet file.
+
+         :raises FelderaAPIError: If the pipeline is not in a RUNNING or PAUSED state.
+         :raises FelderaAPIError: If querying a non-materialized table or view.
+         :raises FelderaAPIError: If the query is invalid.
+         """
+
+         self.client.query_as_parquet(self.name, query, path)
+
+     def query_tabular(self, query: str) -> Generator[str, None, None]:
+         """
+         Executes a SQL query on this pipeline and returns the result as a formatted string.
+
+         Note:
+             You can only ``SELECT`` from materialized tables and views.
+
+         Important:
+             This method is lazy. It returns a generator and is not evaluated until you consume the result.
+
+         :param query: The SQL query to be executed.
+         :return: A generator that yields strings representing the query result in a human-readable, tabular format.
+
+         :raises FelderaAPIError: If the pipeline is not in a RUNNING or PAUSED state.
+         :raises FelderaAPIError: If querying a non-materialized table or view.
+         :raises FelderaAPIError: If the query is invalid.
+         """
+
+         return self.client.query_as_text(self.name, query)
+
+     def execute(self, query: str):
+         """
+         Executes an ad-hoc SQL query on the current pipeline, discarding its result.
+         Unlike the :meth:`.query` method, which returns a generator for retrieving query results lazily,
+         this method processes the query eagerly and fully before returning.
+
+         This method is suitable for SQL operations like ``INSERT`` and ``DELETE``, where the user needs
+         confirmation of successful query execution but does not require the query result.
+         If the query fails, an exception is raised.
+
+         Important:
+             If you try to ``INSERT`` or ``DELETE`` data from a table while the pipeline is paused,
+             it will block until the pipeline is resumed.
+
+         :param query: The SQL query to be executed.
+
+         :raises FelderaAPIError: If the pipeline is not in a RUNNING state.
+         :raises FelderaAPIError: If the query is invalid.
+         """
+
+         # drain the generator to force eager, full evaluation of the query
+         gen = self.query_tabular(query)
+         deque(gen, maxlen=0)
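+     # Write-path sketch: `execute` runs eagerly and raises on failure, which
+     # makes it a fit for DML against an (illustrative) table `items`:
+     #
+     #     p.execute("INSERT INTO items VALUES (3, 'c')")
+     #     p.execute("DELETE FROM items WHERE id = 3")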
+
+     @property
+     def name(self) -> str:
+         """
+         Return the name of the pipeline.
+         """
+
+         return self._inner.name
+
+     def program_code(self) -> str:
+         """
+         Return the program SQL code of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.program_code
+
+     def program_status(self) -> ProgramStatus:
+         """
+         Return the program status of the pipeline.
+
+         Program status is the compilation status of this SQL program.
+         The SQL program is first compiled to Rust code, and the Rust code is then compiled to a binary.
+         """
+
+         self.refresh()
+         return ProgramStatus.from_value(self._inner.program_status)
+
+     def program_status_since(self) -> datetime:
+         """
+         Return the timestamp when the current program status was set.
+         """
+
+         self.refresh()
+         return datetime.fromisoformat(self._inner.program_status_since)
+
+     def udf_rust(self) -> str:
+         """
+         Return the Rust code for UDFs.
+         """
+
+         self.refresh()
+         return self._inner.udf_rust
+
+     def udf_toml(self) -> str:
+         """
+         Return the Rust dependencies required by UDFs (in TOML format).
+         """
+
+         self.refresh()
+         return self._inner.udf_toml
+
+     def program_config(self) -> Mapping[str, Any]:
+         """
+         Return the program config of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.program_config
+
+     def runtime_config(self) -> Mapping[str, Any]:
+         """
+         Return the runtime config of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.runtime_config
+
+     def id(self) -> str:
+         """
+         Return the ID of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.id
+
+     def description(self) -> str:
+         """
+         Return the description of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.description
+
+     def tables(self) -> List[SQLTable]:
+         """
+         Return the tables of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.tables
+
+     def views(self) -> List[SQLView]:
+         """
+         Return the views of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.views
+
+     def created_at(self) -> datetime:
+         """
+         Return the creation time of the pipeline.
+         """
+
+         self.refresh()
+         return datetime.fromisoformat(self._inner.created_at)
+
+     def version(self) -> int:
+         """
+         Return the version of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.version
+
+     def program_version(self) -> int:
+         """
+         Return the program version of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.program_version
+
+     def deployment_status_since(self) -> datetime:
+         """
+         Return the timestamp when the current deployment status of the pipeline was set.
+         """
+
+         self.refresh()
+         return datetime.fromisoformat(self._inner.deployment_status_since)
+
+     def deployment_config(self) -> Mapping[str, Any]:
+         """
+         Return the deployment config of the pipeline.
+         """
+
+         self.refresh()
+         return self._inner.deployment_config
+
+     def deployment_desired_status(self) -> PipelineStatus:
+         """
+         Return the desired deployment status of the pipeline.
+         This is the next state that the pipeline should transition to.
+         """
+
+         self.refresh()
+         return PipelineStatus.from_str(self._inner.deployment_desired_status)
+
+     def deployment_error(self) -> Mapping[str, Any]:
+         """
+         Return the deployment error of the pipeline.
+         Returns an empty value if there is no error.
+         """
+
+         self.refresh()
+         return self._inner.deployment_error
+
+     def deployment_location(self) -> str:
+         """
+         Return the deployment location of the pipeline.
+         The deployment location is where the pipeline can be reached at runtime (a TCP port number or a URI).
+         """
+
+         self.refresh()
+         return self._inner.deployment_location
+
+     def program_binary_url(self) -> str:
+         """
+         Return the program binary URL of the pipeline.
+         This is the URL from which the compiled program binary can be downloaded.
+         """
+
+         self.refresh()
+         return self._inner.program_binary_url
+
+     def program_info(self) -> Mapping[str, Any]:
+         """
+         Return the program info of the pipeline.
+         This is the output returned by the SQL compiler, including: the list of input and output connectors,
+         the generated Rust code for the pipeline, and the SQL program schema.
+         """
+
+         self.refresh()
+         return self._inner.program_info