feldera-0.34.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,605 @@
+ import pathlib
+ from typing import Optional
+ import logging
+ import time
+ import json
+ from decimal import Decimal
+ from typing import Generator
+
+ from feldera.rest.config import Config
+ from feldera.rest.errors import FelderaTimeoutError
+ from feldera.rest.pipeline import Pipeline
+ from feldera.rest._httprequests import HttpRequests
+
+
+ def _prepare_boolean_input(value: bool) -> str:
+     return "true" if value else "false"
+
+
+ class FelderaClient:
+     """
+     A client for the Feldera HTTP API
+
+     A client instance is needed for every Feldera API method to know the location of
+     Feldera and its permissions.
+     """
+
+     def __init__(
+         self,
+         url: str,
+         api_key: Optional[str] = None,
+         timeout: Optional[float] = None,
+     ) -> None:
+         """
+         :param url: The URL of the Feldera API (e.g., https://try.feldera.com)
+         :param api_key: The optional API key for Feldera
+         :param timeout: (optional) The amount of time in seconds that the client will wait
+             for a response before timing out.
+         """
+
+         self.config = Config(url, api_key, timeout=timeout)
+         self.http = HttpRequests(self.config)
+
+         try:
+             self.pipelines()
+         except Exception as e:
+             logging.error(f"Failed to connect to Feldera API: {e}")
+             raise e
+
+     @staticmethod
+     def localhost(port: int = 8080) -> "FelderaClient":
+         """
+         Create a FelderaClient that connects to the local Feldera instance
+
+         :param port: The port the local Feldera instance listens on. 8080 by default.
+         """
+
+         return FelderaClient(f"http://localhost:{port}")
+
+     def get_pipeline(self, pipeline_name: str) -> Pipeline:
+         """
+         Get a pipeline by name
+
+         :param pipeline_name: The name of the pipeline
+         """
+
+         resp = self.http.get(f"/pipelines/{pipeline_name}")
+
+         return Pipeline.from_dict(resp)
+
+     def get_runtime_config(self, pipeline_name: str) -> dict:
+         """
+         Get the runtime config of a pipeline by name
+
+         :param pipeline_name: The name of the pipeline
+         """
+
+         resp: dict = self.http.get(f"/pipelines/{pipeline_name}")
+
+         return resp.get("runtime_config")
+
+     def pipelines(self) -> list[Pipeline]:
+         """
+         Get all pipelines
+         """
+
+         resp = self.http.get(
+             path="/pipelines",
+         )
+
+         return [Pipeline.from_dict(pipeline) for pipeline in resp]
+
+     def __wait_for_compilation(self, name: str):
+         wait = ["Pending", "CompilingSql", "SqlCompiled", "CompilingRust"]
+
+         while True:
+             p = self.get_pipeline(name)
+             status = p.program_status
+
+             if status == "Success":
+                 return p
+             elif status not in wait:
+                 # error handling for SQL compilation errors
+                 if isinstance(status, dict):
+                     sql_errors = status.get("SqlError")
+                     if sql_errors:
+                         err_msg = f"Pipeline {name} failed to compile:\n"
+                         for sql_error in sql_errors:
+                             err_msg += (
+                                 f"{sql_error['error_type']}\n{sql_error['message']}\n"
+                             )
+                             err_msg += f"Code snippet:\n{sql_error['snippet']}"
+                         raise RuntimeError(err_msg)
+
+                 raise RuntimeError(f"The program failed to compile: {status}")
+
+             logging.debug("still compiling %s, waiting for 100 more milliseconds", name)
+             time.sleep(0.1)
+
+     def create_pipeline(self, pipeline: Pipeline) -> Pipeline:
+         """
+         Create a pipeline if it doesn't exist and wait for it to compile
+
+         :param pipeline: The pipeline to create
+         """
+
+         body = {
+             "name": pipeline.name,
+             "program_code": pipeline.program_code,
+             "udf_rust": pipeline.udf_rust,
+             "udf_toml": pipeline.udf_toml,
+             "program_config": pipeline.program_config,
+             "runtime_config": pipeline.runtime_config,
+             "description": pipeline.description or "",
+         }
+
+         self.http.post(
+             path="/pipelines",
+             body=body,
+         )
+
+         return self.__wait_for_compilation(pipeline.name)
+
+     def create_or_update_pipeline(self, pipeline: Pipeline) -> Pipeline:
+         """
+         Create a pipeline if it doesn't exist, or update the existing one, and wait for it to compile
+
+         :param pipeline: The pipeline to create or update
+         """
+
+         body = {
+             "name": pipeline.name,
+             "program_code": pipeline.program_code,
+             "udf_rust": pipeline.udf_rust,
+             "udf_toml": pipeline.udf_toml,
+             "program_config": pipeline.program_config,
+             "runtime_config": pipeline.runtime_config,
+             "description": pipeline.description or "",
+         }
+
+         self.http.put(
+             path=f"/pipelines/{pipeline.name}",
+             body=body,
+         )
+
+         return self.__wait_for_compilation(pipeline.name)
+
+     def patch_pipeline(self, name: str, sql: str):
+         """
+         Update the SQL program of a pipeline
+
+         :param name: The name of the pipeline
+         :param sql: The new SQL code. Replaces the existing SQL program with this one.
+         """
+
+         self.http.patch(
+             path=f"/pipelines/{name}",
+             body={"program_code": sql},
+         )
+
+     def delete_pipeline(self, name: str):
+         """
+         Deletes a pipeline by name
+
+         :param name: The name of the pipeline
+         """
+         self.http.delete(
+             path=f"/pipelines/{name}",
+         )
+
+     def get_pipeline_stats(self, name: str) -> dict:
+         """
+         Get the pipeline metrics and performance counters
+
+         :param name: The name of the pipeline
+         """
+
+         resp = self.http.get(
+             path=f"/pipelines/{name}/stats",
+         )
+
+         return resp
+
+     def start_pipeline(self, pipeline_name: str, timeout_s: Optional[float] = 300):
+         """
+         Start a pipeline and wait for it to reach the Running state
+
+         :param pipeline_name: The name of the pipeline to start
+         :param timeout_s: The amount of time in seconds to wait for the pipeline to start. 300 seconds by default.
+         """
+
+         if timeout_s is None:
+             timeout_s = 300
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/start",
+         )
+
+         start_time = time.monotonic()
+
+         while True:
+             if timeout_s is not None:
+                 elapsed = time.monotonic() - start_time
+                 if elapsed > timeout_s:
+                     raise TimeoutError(
+                         f"Timed out waiting for pipeline {pipeline_name} to start"
+                     )
+
+             resp = self.get_pipeline(pipeline_name)
+             status = resp.deployment_status
+
+             if status == "Running":
+                 break
+             elif status == "Failed":
+                 raise RuntimeError(
+                     f"""Unable to START the pipeline.
+                     Reason: The pipeline is in a FAILED state due to the following error:
+                     {resp.deployment_error.get("message", "")}"""
+                 )
+
+             logging.debug(
+                 "still starting %s, waiting for 100 more milliseconds", pipeline_name
+             )
+             time.sleep(0.1)
+
+     def pause_pipeline(
+         self,
+         pipeline_name: str,
+         error_message: Optional[str] = None,
+         timeout_s: Optional[float] = 300,
+     ):
+         """
+         Pause a pipeline
+
+         :param pipeline_name: The name of the pipeline to pause
+         :param error_message: The error message to show if the pipeline is in a FAILED state
+         :param timeout_s: The amount of time in seconds to wait for the pipeline to pause. 300 seconds by default.
+         """
+
+         if timeout_s is None:
+             timeout_s = 300
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/pause",
+         )
+
+         if error_message is None:
+             error_message = "Unable to PAUSE the pipeline.\n"
+
+         start_time = time.monotonic()
+
+         while True:
+             if timeout_s is not None:
+                 elapsed = time.monotonic() - start_time
+                 if elapsed > timeout_s:
+                     raise TimeoutError(
+                         f"Timed out waiting for pipeline {pipeline_name} to pause"
+                     )
+
+             resp = self.get_pipeline(pipeline_name)
+             status = resp.deployment_status
+
+             if status == "Paused":
+                 break
+             elif status == "Failed":
+                 raise RuntimeError(
+                     error_message
+                     + f"""Reason: The pipeline is in a FAILED state due to the following error:
+                     {resp.deployment_error.get("message", "")}"""
+                 )
+
+             logging.debug(
+                 "still pausing %s, waiting for 100 more milliseconds", pipeline_name
+             )
+             time.sleep(0.1)
+
+     def shutdown_pipeline(self, pipeline_name: str, timeout_s: Optional[float] = 300):
+         """
+         Shut down a pipeline
+
+         :param pipeline_name: The name of the pipeline to shut down
+         :param timeout_s: The amount of time in seconds to wait for the pipeline to shut down. 300 seconds by default.
+         """
+
+         if timeout_s is None:
+             timeout_s = 300
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/shutdown",
+         )
+
+         start = time.monotonic()
+
+         while time.monotonic() - start < timeout_s:
+             status = self.get_pipeline(pipeline_name).deployment_status
+
+             if status == "Shutdown":
+                 return
+
+             logging.debug(
+                 "still shutting down %s, waiting for 100 more milliseconds",
+                 pipeline_name,
+             )
+             time.sleep(0.1)
+
+         raise FelderaTimeoutError(
+             f"timeout error: pipeline '{pipeline_name}' did not shut down in {timeout_s} seconds"
+         )
+
+     def checkpoint_pipeline(self, pipeline_name: str):
+         """
+         Checkpoint a fault-tolerant pipeline
+
+         :param pipeline_name: The name of the pipeline to checkpoint
+         """
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/checkpoint",
+         )
+
+     def push_to_pipeline(
+         self,
+         pipeline_name: str,
+         table_name: str,
+         format: str,
+         data: list[list | str | dict],
+         array: bool = False,
+         force: bool = False,
+         update_format: str = "raw",
+         json_flavor: Optional[str] = None,
+         serialize: bool = True,
+     ):
+         """
+         Insert data into a pipeline
+
+         :param pipeline_name: The name of the pipeline
+         :param table_name: The name of the table
+         :param format: The format of the data, either "json" or "csv"
+         :param data: The data to insert
+         :param array: True if updates in this stream are packed into JSON arrays, used in conjunction with the
+             "json" format
+         :param force: If True, the data will be inserted even if the pipeline is paused
+         :param update_format: JSON data change event format, used in conjunction with the "json" format,
+             the default value is "raw", other supported formats: "insert_delete", "weighted", "debezium", "snowflake"
+         :param json_flavor: JSON encoding used for individual table records, the default value is "default",
+             other supported encodings: "debezium_mysql", "snowflake", "kafka_connect_json_converter", "pandas"
+         :param serialize: If True, the data will be serialized to JSON. True by default
+         """
+
+         if format not in ["json", "csv"]:
+             raise ValueError("format must be either 'json' or 'csv'")
+
+         if update_format not in [
+             "insert_delete",
+             "weighted",
+             "debezium",
+             "snowflake",
+             "raw",
+         ]:
+             raise ValueError(
+                 "update_format must be one of 'insert_delete', 'weighted', 'debezium', 'snowflake', 'raw'"
+             )
+
+         if json_flavor is not None and json_flavor not in [
+             "default",
+             "debezium_mysql",
+             "snowflake",
+             "kafka_connect_json_converter",
+             "pandas",
+         ]:
+             raise ValueError(
+                 "json_flavor must be one of 'default', 'debezium_mysql', 'snowflake', 'kafka_connect_json_converter', 'pandas'"
+             )
+
+         # Python sends `True`, which isn't accepted by the backend; convert to "true"/"false"
+         array = _prepare_boolean_input(array)
+         force = _prepare_boolean_input(force)
+
+         params = {
+             "force": force,
+             "format": format,
+         }
+
+         if format == "json":
+             params["array"] = array
+             params["update_format"] = update_format
+
+         if json_flavor is not None:
+             params["json_flavor"] = json_flavor
+
+         content_type = "application/json"
+
+         if format == "csv":
+             content_type = "text/csv"
+             data = bytes(str(data), "utf-8")
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/ingress/{table_name}",
+             params=params,
+             content_type=content_type,
+             body=data,
+             serialize=serialize,
+         )
+
+     def listen_to_pipeline(
+         self,
+         pipeline_name: str,
+         table_name: str,
+         format: str,
+         backpressure: bool = True,
+         array: bool = False,
+         timeout: Optional[float] = None,
+     ):
+         """
+         Listen for updates to views of a pipeline; yields chunks of data as they arrive
+
+         :param pipeline_name: The name of the pipeline
+         :param table_name: The name of the table to listen to
+         :param format: The format of the data, either "json" or "csv"
+         :param backpressure: When the flag is True (the default), this method waits for the consumer to receive each
+             chunk and blocks the pipeline if the consumer cannot keep up. When this flag is False, the pipeline drops
+             data chunks if the consumer is not keeping up with its output. This prevents a slow consumer from slowing
+             down the entire pipeline.
+         :param array: Set to True to group updates in this stream into JSON arrays, used in conjunction with the
+             "json" format. The default value is False.
+         :param timeout: The amount of time in seconds to listen to the stream for
+         """
+
+         params = {
+             "format": format,
+             "backpressure": _prepare_boolean_input(backpressure),
+         }
+
+         if format == "json":
+             params["array"] = _prepare_boolean_input(array)
+
+         resp = self.http.post(
+             path=f"/pipelines/{pipeline_name}/egress/{table_name}",
+             params=params,
+             stream=True,
+         )
+
+         end = time.monotonic() + timeout if timeout else None
+
+         # Using the default chunk size below makes `iter_lines` extremely
+         # inefficient when dealing with long lines.
+         for chunk in resp.iter_lines(chunk_size=50000000):
+             if end and time.monotonic() > end:
+                 break
+             if chunk:
+                 yield json.loads(chunk, parse_float=Decimal)
+
+     def query_as_text(
+         self, pipeline_name: str, query: str
+     ) -> Generator[str, None, None]:
+         """
+         Executes an ad-hoc query on the specified pipeline and returns a generator that yields lines of the table.
+
+         :param pipeline_name: The name of the pipeline to query.
+         :param query: The SQL query to be executed.
+         :return: A generator yielding the query result in tabular format, one line at a time.
+         """
+         params = {
+             "pipeline_name": pipeline_name,
+             "sql": query,
+             "format": "text",
+         }
+
+         resp = self.http.get(
+             path=f"/pipelines/{pipeline_name}/query",
+             params=params,
+             stream=True,
+         )
+
+         chunk: bytes
+         for chunk in resp.iter_lines(chunk_size=50000000):
+             if chunk:
+                 yield chunk.decode("utf-8")
+
+     def query_as_parquet(self, pipeline_name: str, query: str, path: str):
+         """
+         Executes an ad-hoc query on the specified pipeline and saves the result to a parquet file.
+         If the extension of `path` isn't `.parquet`, it is set automatically.
+
+         :param pipeline_name: The name of the pipeline to query.
+         :param query: The SQL query to be executed.
+         :param path: The path, including the file name, to save the resulting parquet file to.
+         """
+
+         params = {
+             "pipeline_name": pipeline_name,
+             "sql": query,
+             "format": "parquet",
+         }
+
+         resp = self.http.get(
+             path=f"/pipelines/{pipeline_name}/query",
+             params=params,
+             stream=True,
+         )
+
+         path: pathlib.Path = pathlib.Path(path)
+
+         ext = ".parquet"
+         if path.suffix != ext:
+             path = path.with_suffix(ext)
+
+         # Stream the response body to the file; the context manager closes it even on error.
+         with open(path, "wb") as file:
+             chunk: bytes
+             for chunk in resp.iter_content(chunk_size=1024):
+                 if chunk:
+                     file.write(chunk)
+
+     def query_as_json(
+         self, pipeline_name: str, query: str
+     ) -> Generator[dict, None, None]:
+         """
+         Executes an ad-hoc query on the specified pipeline and returns a generator that yields
+         rows of the query as Python dictionaries.
+         All floating-point numbers are deserialized as Decimal objects to avoid precision loss.
+
+         :param pipeline_name: The name of the pipeline to query.
+         :param query: The SQL query to be executed.
+         :return: A generator that yields each row of the result as a Python dictionary, deserialized from JSON.
+         """
+         params = {
+             "pipeline_name": pipeline_name,
+             "sql": query,
+             "format": "json",
+         }
+
+         resp = self.http.get(
+             path=f"/pipelines/{pipeline_name}/query",
+             params=params,
+             stream=True,
+         )
+
+         for chunk in resp.iter_lines(chunk_size=50000000):
+             if chunk:
+                 yield json.loads(chunk, parse_float=Decimal)
+
+     def pause_connector(self, pipeline_name: str, table_name: str, connector_name: str):
+         """
+         Pause the specified input connector.
+
+         Connectors allow Feldera to fetch data from a source or write data to a sink.
+         This method allows users to **PAUSE** a specific **INPUT** connector.
+         All connectors are RUNNING by default.
+
+         Refer to the connector documentation for more information:
+         <https://docs.feldera.com/connectors/#input-connector-orchestration>
+
+         :param pipeline_name: The name of the pipeline.
+         :param table_name: The name of the table associated with this connector.
+         :param connector_name: The name of the connector.
+
+         :raises FelderaAPIError: If the connector cannot be found, or if the pipeline is not running.
+         """
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/tables/{table_name}/connectors/{connector_name}/pause",
+         )
+
+     def resume_connector(
+         self, pipeline_name: str, table_name: str, connector_name: str
+     ):
+         """
+         Resume the specified input connector.
+
+         Connectors allow Feldera to fetch data from a source or write data to a sink.
+         This method allows users to **RESUME / START** a specific **INPUT** connector.
+         All connectors are RUNNING by default.
+
+         Refer to the connector documentation for more information:
+         <https://docs.feldera.com/connectors/#input-connector-orchestration>
+
+         :param pipeline_name: The name of the pipeline.
+         :param table_name: The name of the table associated with this connector.
+         :param connector_name: The name of the connector.
+
+         :raises FelderaAPIError: If the connector cannot be found, or if the pipeline is not running.
+         """
+
+         self.http.post(
+             path=f"/pipelines/{pipeline_name}/tables/{table_name}/connectors/{connector_name}/start",
+         )
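
For orientation, here is a minimal usage sketch of the client above. It is illustrative only: the URL, pipeline name, SQL, table and view names are made-up placeholders, and the FelderaClient import path is assumed from the package layout rather than shown in this diff.

from feldera.rest.feldera_client import FelderaClient  # assumed module path
from feldera.rest.pipeline import Pipeline

client = FelderaClient.localhost()  # or FelderaClient("https://try.feldera.com", api_key="...")

pipeline = Pipeline(
    name="demo",
    sql="CREATE TABLE numbers (n INT); CREATE VIEW doubled AS SELECT n * 2 AS n FROM numbers;",
    udf_rust="",
    udf_toml="",
    program_config={},
    runtime_config={},
)

client.create_or_update_pipeline(pipeline)  # blocks until the program compiles
client.start_pipeline("demo")               # blocks until the pipeline is Running

# Push two rows using the JSON "insert_delete" change-event format.
client.push_to_pipeline(
    "demo",
    "numbers",
    format="json",
    data=[{"insert": {"n": 1}}, {"insert": {"n": 2}}],
    array=True,
    update_format="insert_delete",
)

# Ad-hoc query; rows arrive as dicts with floats parsed as Decimal.
for row in client.query_as_json("demo", "SELECT * FROM doubled"):
    print(row)

client.shutdown_pipeline("demo")
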
@@ -0,0 +1,77 @@
+ from typing import Mapping, Any, Optional
+ from feldera.rest.sql_table import SQLTable
+ from feldera.rest.sql_view import SQLView
+
+
+ class Pipeline:
+     """
+     Represents a Feldera pipeline
+     """
+
+     def __init__(
+         self,
+         name: str,
+         sql: str,
+         udf_rust: str,
+         udf_toml: str,
+         program_config: Mapping[str, Any],
+         runtime_config: Mapping[str, Any],
+         description: Optional[str] = None,
+     ):
+         """
+         Initializes a new pipeline
+
+         :param name: The name of the pipeline
+         :param sql: The SQL code of the pipeline
+         :param udf_rust: Rust code for UDFs
+         :param udf_toml: Rust dependencies required by UDFs (in TOML format)
+         :param program_config: The program config of the pipeline
+         :param runtime_config: The runtime config of the pipeline
+         :param description: Optional. The description of the pipeline
+         """
+
+         self.name: str = name
+         self.program_code: str = sql.strip()
+         self.udf_rust: str = udf_rust
+         self.udf_toml: str = udf_toml
+         self.description: Optional[str] = description
+         self.program_config: Mapping[str, Any] = program_config
+         self.runtime_config: Mapping[str, Any] = runtime_config
+         self.id: Optional[str] = None
+         self.tables: list[SQLTable] = []
+         self.views: list[SQLView] = []
+         self.deployment_status: Optional[str] = None
+         self.deployment_status_since: Optional[str] = None
+         self.created_at: Optional[str] = None
+         self.version: Optional[int] = None
+         self.program_version: Optional[int] = None
+         self.deployment_config: Optional[dict] = None
+         self.deployment_desired_status: Optional[str] = None
+         self.deployment_error: Optional[dict] = None
+         self.deployment_location: Optional[str] = None
+         self.program_binary_url: Optional[str] = None
+         # info about input & output connectors and the schema
+         self.program_info: Optional[dict] = None
+         self.program_status: Optional[str] = None
+         self.program_status_since: Optional[str] = None
+
+ @classmethod
60
+ def from_dict(cls, d: Mapping[str, Any]):
61
+ pipeline = cls("", "", "", "", {}, {})
62
+ pipeline.__dict__ = d
63
+ pipeline.tables = []
64
+ pipeline.views = []
65
+
66
+ info = d.get("program_info")
67
+
68
+ if info is not None:
69
+ for i in info["schema"]["inputs"]:
70
+ tbl = SQLTable.from_dict(i)
71
+ pipeline.tables.append(tbl)
72
+
73
+ for output in info["schema"]["outputs"]:
74
+ v = SQLView.from_dict(output)
75
+ pipeline.views.append(v)
76
+
77
+ return pipeline
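
As a quick illustration of how `from_dict` is wired to API responses, a sketch with a hypothetical, trimmed response body (the field values are made up):

# Hypothetical, trimmed body of GET /pipelines/{name}; not real output.
resp = {
    "name": "demo",
    "program_code": "CREATE TABLE numbers (n INT);",
    "deployment_status": "Running",
    "program_status": "Success",
    "program_info": None,  # schema is only present once the program is compiled
}

p = Pipeline.from_dict(resp)
print(p.name, p.deployment_status)  # demo Running
print(p.tables, p.views)            # [] [] -- populated only when program_info carries a schema
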
@@ -0,0 +1,23 @@
+ class SQLTable:
+     """
+     Represents a SQL table in Feldera
+     """
+
+     def __init__(
+         self,
+         name: str,
+         fields: list[dict],
+         case_sensitive: bool = False,
+         materialized: bool = False,
+     ):
+         self.name = name
+         self.case_sensitive = case_sensitive
+         self.materialized = materialized
+         self.fields: list[dict] = fields
+
+     @classmethod
+     def from_dict(cls, table_dict: dict):
+         tbl = cls(name=table_dict["name"], fields=table_dict["fields"])
+         tbl.case_sensitive = table_dict["case_sensitive"]
+         tbl.materialized = table_dict["materialized"]
+         return tbl
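
A small sketch of `SQLTable.from_dict`; the exact shape of the field dictionaries comes from the compiled program's `program_info` and is assumed here for illustration:

table = SQLTable.from_dict(
    {
        "name": "numbers",
        "case_sensitive": False,
        "materialized": True,
        "fields": [{"name": "n", "columntype": {"type": "INTEGER"}}],  # illustrative field layout
    }
)
print(table.name, table.materialized)  # numbers True
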