sqlmesh 0.217.1.dev1__py3-none-any.whl → 0.227.2.dev4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqlmesh/__init__.py +12 -2
- sqlmesh/_version.py +2 -2
- sqlmesh/cli/project_init.py +10 -2
- sqlmesh/core/_typing.py +1 -0
- sqlmesh/core/audit/definition.py +8 -2
- sqlmesh/core/config/__init__.py +1 -1
- sqlmesh/core/config/connection.py +17 -5
- sqlmesh/core/config/dbt.py +13 -0
- sqlmesh/core/config/janitor.py +12 -0
- sqlmesh/core/config/loader.py +7 -0
- sqlmesh/core/config/model.py +2 -0
- sqlmesh/core/config/root.py +3 -0
- sqlmesh/core/console.py +80 -2
- sqlmesh/core/constants.py +1 -1
- sqlmesh/core/context.py +61 -25
- sqlmesh/core/dialect.py +3 -0
- sqlmesh/core/engine_adapter/_typing.py +2 -0
- sqlmesh/core/engine_adapter/base.py +322 -22
- sqlmesh/core/engine_adapter/base_postgres.py +17 -1
- sqlmesh/core/engine_adapter/bigquery.py +146 -7
- sqlmesh/core/engine_adapter/clickhouse.py +17 -13
- sqlmesh/core/engine_adapter/databricks.py +33 -2
- sqlmesh/core/engine_adapter/fabric.py +1 -29
- sqlmesh/core/engine_adapter/mixins.py +142 -48
- sqlmesh/core/engine_adapter/mssql.py +15 -4
- sqlmesh/core/engine_adapter/mysql.py +2 -2
- sqlmesh/core/engine_adapter/postgres.py +9 -3
- sqlmesh/core/engine_adapter/redshift.py +4 -0
- sqlmesh/core/engine_adapter/risingwave.py +1 -0
- sqlmesh/core/engine_adapter/shared.py +6 -0
- sqlmesh/core/engine_adapter/snowflake.py +82 -11
- sqlmesh/core/engine_adapter/spark.py +14 -10
- sqlmesh/core/engine_adapter/trino.py +4 -2
- sqlmesh/core/janitor.py +181 -0
- sqlmesh/core/lineage.py +1 -0
- sqlmesh/core/macros.py +35 -13
- sqlmesh/core/model/common.py +2 -0
- sqlmesh/core/model/definition.py +65 -4
- sqlmesh/core/model/kind.py +66 -2
- sqlmesh/core/model/meta.py +107 -2
- sqlmesh/core/node.py +101 -2
- sqlmesh/core/plan/builder.py +15 -10
- sqlmesh/core/plan/common.py +196 -2
- sqlmesh/core/plan/definition.py +21 -6
- sqlmesh/core/plan/evaluator.py +72 -113
- sqlmesh/core/plan/explainer.py +90 -8
- sqlmesh/core/plan/stages.py +42 -21
- sqlmesh/core/renderer.py +26 -18
- sqlmesh/core/scheduler.py +60 -19
- sqlmesh/core/selector.py +137 -9
- sqlmesh/core/signal.py +64 -1
- sqlmesh/core/snapshot/__init__.py +1 -0
- sqlmesh/core/snapshot/definition.py +109 -25
- sqlmesh/core/snapshot/evaluator.py +610 -50
- sqlmesh/core/state_sync/__init__.py +0 -1
- sqlmesh/core/state_sync/base.py +31 -27
- sqlmesh/core/state_sync/cache.py +12 -4
- sqlmesh/core/state_sync/common.py +216 -111
- sqlmesh/core/state_sync/db/facade.py +30 -15
- sqlmesh/core/state_sync/db/interval.py +27 -7
- sqlmesh/core/state_sync/db/migrator.py +14 -8
- sqlmesh/core/state_sync/db/snapshot.py +119 -87
- sqlmesh/core/table_diff.py +2 -2
- sqlmesh/core/test/definition.py +14 -9
- sqlmesh/dbt/adapter.py +20 -11
- sqlmesh/dbt/basemodel.py +52 -41
- sqlmesh/dbt/builtin.py +27 -11
- sqlmesh/dbt/column.py +17 -5
- sqlmesh/dbt/common.py +4 -2
- sqlmesh/dbt/context.py +14 -1
- sqlmesh/dbt/loader.py +60 -8
- sqlmesh/dbt/manifest.py +136 -8
- sqlmesh/dbt/model.py +105 -25
- sqlmesh/dbt/package.py +16 -1
- sqlmesh/dbt/profile.py +3 -3
- sqlmesh/dbt/project.py +12 -7
- sqlmesh/dbt/seed.py +1 -1
- sqlmesh/dbt/source.py +6 -1
- sqlmesh/dbt/target.py +25 -6
- sqlmesh/dbt/test.py +31 -1
- sqlmesh/migrations/v0000_baseline.py +3 -6
- sqlmesh/migrations/v0061_mysql_fix_blob_text_type.py +2 -5
- sqlmesh/migrations/v0062_add_model_gateway.py +2 -2
- sqlmesh/migrations/v0063_change_signals.py +2 -4
- sqlmesh/migrations/v0064_join_when_matched_strings.py +2 -4
- sqlmesh/migrations/v0065_add_model_optimize.py +2 -2
- sqlmesh/migrations/v0066_add_auto_restatements.py +2 -6
- sqlmesh/migrations/v0067_add_tsql_date_full_precision.py +2 -2
- sqlmesh/migrations/v0068_include_unrendered_query_in_metadata_hash.py +2 -2
- sqlmesh/migrations/v0069_update_dev_table_suffix.py +2 -4
- sqlmesh/migrations/v0070_include_grains_in_metadata_hash.py +2 -2
- sqlmesh/migrations/v0071_add_dev_version_to_intervals.py +2 -6
- sqlmesh/migrations/v0072_add_environment_statements.py +2 -4
- sqlmesh/migrations/v0073_remove_symbolic_disable_restatement.py +2 -4
- sqlmesh/migrations/v0074_add_partition_by_time_column_property.py +2 -2
- sqlmesh/migrations/v0075_remove_validate_query.py +2 -4
- sqlmesh/migrations/v0076_add_cron_tz.py +2 -2
- sqlmesh/migrations/v0077_fix_column_type_hash_calculation.py +2 -2
- sqlmesh/migrations/v0078_warn_if_non_migratable_python_env.py +2 -4
- sqlmesh/migrations/v0079_add_gateway_managed_property.py +7 -9
- sqlmesh/migrations/v0080_add_batch_size_to_scd_type_2_models.py +2 -2
- sqlmesh/migrations/v0081_update_partitioned_by.py +2 -4
- sqlmesh/migrations/v0082_warn_if_incorrectly_duplicated_statements.py +2 -4
- sqlmesh/migrations/v0083_use_sql_for_scd_time_data_type_data_hash.py +2 -2
- sqlmesh/migrations/v0084_normalize_quote_when_matched_and_merge_filter.py +2 -2
- sqlmesh/migrations/v0085_deterministic_repr.py +2 -4
- sqlmesh/migrations/v0086_check_deterministic_bug.py +2 -4
- sqlmesh/migrations/v0087_normalize_blueprint_variables.py +2 -4
- sqlmesh/migrations/v0088_warn_about_variable_python_env_diffs.py +2 -4
- sqlmesh/migrations/v0089_add_virtual_environment_mode.py +2 -2
- sqlmesh/migrations/v0090_add_forward_only_column.py +2 -6
- sqlmesh/migrations/v0091_on_additive_change.py +2 -2
- sqlmesh/migrations/v0092_warn_about_dbt_data_type_diff.py +2 -4
- sqlmesh/migrations/v0093_use_raw_sql_in_fingerprint.py +2 -2
- sqlmesh/migrations/v0094_add_dev_version_and_fingerprint_columns.py +2 -6
- sqlmesh/migrations/v0095_warn_about_dbt_raw_sql_diff.py +2 -4
- sqlmesh/migrations/v0096_remove_plan_dags_table.py +2 -4
- sqlmesh/migrations/v0097_add_dbt_name_in_node.py +2 -2
- sqlmesh/migrations/v0098_add_dbt_node_info_in_node.py +103 -0
- sqlmesh/migrations/v0099_add_last_altered_to_intervals.py +25 -0
- sqlmesh/migrations/v0100_add_grants_and_grants_target_layer.py +9 -0
- sqlmesh/utils/__init__.py +8 -1
- sqlmesh/utils/cache.py +5 -1
- sqlmesh/utils/date.py +1 -1
- sqlmesh/utils/errors.py +4 -0
- sqlmesh/utils/jinja.py +25 -2
- sqlmesh/utils/pydantic.py +6 -6
- sqlmesh/utils/windows.py +13 -3
- {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/METADATA +5 -5
- {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/RECORD +181 -176
- sqlmesh_dbt/cli.py +70 -7
- sqlmesh_dbt/console.py +14 -6
- sqlmesh_dbt/operations.py +103 -24
- sqlmesh_dbt/selectors.py +39 -1
- web/client/dist/assets/{Audits-Ucsx1GzF.js → Audits-CBiYyyx-.js} +1 -1
- web/client/dist/assets/{Banner-BWDzvavM.js → Banner-DSRbUlO5.js} +1 -1
- web/client/dist/assets/{ChevronDownIcon-D2VL13Ah.js → ChevronDownIcon-MK_nrjD_.js} +1 -1
- web/client/dist/assets/{ChevronRightIcon-DWGYbf1l.js → ChevronRightIcon-CLWtT22Q.js} +1 -1
- web/client/dist/assets/{Content-DdHDZM3I.js → Content-BNuGZN5l.js} +1 -1
- web/client/dist/assets/{Content-Bikfy8fh.js → Content-CSHJyW0n.js} +1 -1
- web/client/dist/assets/{Data-CzAJH7rW.js → Data-C1oRDbLx.js} +1 -1
- web/client/dist/assets/{DataCatalog-BJF11g8f.js → DataCatalog-HXyX2-_j.js} +1 -1
- web/client/dist/assets/{Editor-s0SBpV2y.js → Editor-BDyfpUuw.js} +1 -1
- web/client/dist/assets/{Editor-DgLhgKnm.js → Editor-D0jNItwC.js} +1 -1
- web/client/dist/assets/{Errors-D0m0O1d3.js → Errors-BfuFLcPi.js} +1 -1
- web/client/dist/assets/{FileExplorer-CEv0vXkt.js → FileExplorer-BR9IE3he.js} +1 -1
- web/client/dist/assets/{Footer-BwzXn8Ew.js → Footer-CgBEtiAh.js} +1 -1
- web/client/dist/assets/{Header-6heDkEqG.js → Header-DSqR6nSO.js} +1 -1
- web/client/dist/assets/{Input-obuJsD6k.js → Input-B-oZ6fGO.js} +1 -1
- web/client/dist/assets/Lineage-DYQVwDbD.js +1 -0
- web/client/dist/assets/{ListboxShow-HM9_qyrt.js → ListboxShow-BE5-xevs.js} +1 -1
- web/client/dist/assets/{ModelLineage-zWdKo0U2.js → ModelLineage-DkIFAYo4.js} +1 -1
- web/client/dist/assets/{Models-Bcu66SRz.js → Models-D5dWr8RB.js} +1 -1
- web/client/dist/assets/{Page-BWEEQfIt.js → Page-C-XfU5BR.js} +1 -1
- web/client/dist/assets/{Plan-C4gXCqlf.js → Plan-ZEuTINBq.js} +1 -1
- web/client/dist/assets/{PlusCircleIcon-CVDO651q.js → PlusCircleIcon-DVXAHG8_.js} +1 -1
- web/client/dist/assets/{ReportErrors-BT6xFwAr.js → ReportErrors-B7FEPzMB.js} +1 -1
- web/client/dist/assets/{Root-ryJoBK4h.js → Root-8aZyhPxF.js} +1 -1
- web/client/dist/assets/{SearchList-DB04sPb9.js → SearchList-W_iT2G82.js} +1 -1
- web/client/dist/assets/{SelectEnvironment-CUYcXUu6.js → SelectEnvironment-C65jALmO.js} +1 -1
- web/client/dist/assets/{SourceList-Doo_9ZGp.js → SourceList-DSLO6nVJ.js} +1 -1
- web/client/dist/assets/{SourceListItem-D5Mj7Dly.js → SourceListItem-BHt8d9-I.js} +1 -1
- web/client/dist/assets/{SplitPane-qHmkD1qy.js → SplitPane-CViaZmw6.js} +1 -1
- web/client/dist/assets/{Tests-DH1Z74ML.js → Tests-DhaVt5t1.js} +1 -1
- web/client/dist/assets/{Welcome-DqUJUNMF.js → Welcome-DvpjH-_4.js} +1 -1
- web/client/dist/assets/context-BctCsyGb.js +71 -0
- web/client/dist/assets/{context-Dr54UHLi.js → context-DFNeGsFF.js} +1 -1
- web/client/dist/assets/{editor-DYIP1yQ4.js → editor-CcO28cqd.js} +1 -1
- web/client/dist/assets/{file-DarlIDVi.js → file-CvJN3aZO.js} +1 -1
- web/client/dist/assets/{floating-ui.react-dom-BH3TFvkM.js → floating-ui.react-dom-CjE-JNW1.js} +1 -1
- web/client/dist/assets/{help-Bl8wqaQc.js → help-DuPhjipa.js} +1 -1
- web/client/dist/assets/{index-D1sR7wpN.js → index-C-dJH7yZ.js} +1 -1
- web/client/dist/assets/{index-O3mjYpnE.js → index-Dj0i1-CA.js} +2 -2
- web/client/dist/assets/{plan-CehRrJUG.js → plan-BTRSbjKn.js} +1 -1
- web/client/dist/assets/{popover-CqgMRE0G.js → popover-_Sf0yvOI.js} +1 -1
- web/client/dist/assets/{project-6gxepOhm.js → project-BvSOI8MY.js} +1 -1
- web/client/dist/index.html +1 -1
- web/client/dist/assets/Lineage-D0Hgdz2v.js +0 -1
- web/client/dist/assets/context-DgX0fp2E.js +0 -68
- {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/WHEEL +0 -0
- {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/entry_points.txt +0 -0
- {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/licenses/LICENSE +0 -0
- {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/top_level.txt +0 -0
sqlmesh/core/dialect.py
CHANGED
@@ -174,6 +174,7 @@ def _parse_id_var(
     while (
         identifier
+        and not identifier.args.get("quoted")
         and self._is_connected()
         and (
             self._match_texts(("{", SQLMESH_MACRO_PREFIX))
@@ -349,6 +350,7 @@ def _parse_select(
     parse_subquery_alias: bool = True,
     parse_set_operation: bool = True,
     consume_pipe: bool = True,
+    from_: t.Optional[exp.From] = None,
 ) -> t.Optional[exp.Expression]:
     select = self.__parse_select(  # type: ignore
         nested=nested,
@@ -356,6 +358,7 @@ def _parse_select(
         parse_subquery_alias=parse_subquery_alias,
         parse_set_operation=parse_set_operation,
         consume_pipe=consume_pipe,
+        from_=from_,
     )

     if (
sqlmesh/core/engine_adapter/base.py
CHANGED
@@ -18,7 +18,7 @@ from functools import cached_property, partial

 from sqlglot import Dialect, exp
 from sqlglot.errors import ErrorLevel
-from sqlglot.helper import ensure_list
+from sqlglot.helper import ensure_list, seq_get
 from sqlglot.optimizer.qualify_columns import quote_identifiers

 from sqlmesh.core.dialect import (
@@ -63,6 +63,7 @@ if t.TYPE_CHECKING:
     from sqlmesh.core.engine_adapter._typing import (
         DF,
         BigframeSession,
+        GrantsConfig,
         PySparkDataFrame,
         PySparkSession,
         Query,
@@ -114,11 +115,13 @@ class EngineAdapter:
     SUPPORTS_TUPLE_IN = True
     HAS_VIEW_BINDING = False
     SUPPORTS_REPLACE_TABLE = True
+    SUPPORTS_GRANTS = False
     DEFAULT_CATALOG_TYPE = DIALECT
     QUOTE_IDENTIFIERS_IN_VIEWS = True
     MAX_IDENTIFIER_LENGTH: t.Optional[int] = None
     ATTACH_CORRELATION_ID = True
     SUPPORTS_QUERY_EXECUTION_TRACKING = False
+    SUPPORTS_METADATA_TABLE_LAST_MODIFIED_TS = False

     def __init__(
         self,
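For context: SUPPORTS_GRANTS and SUPPORTS_METADATA_TABLE_LAST_MODIFIED_TS are class-level capability flags in the style of the existing SUPPORTS_* attributes. A hedged sketch of how an engine-specific adapter would opt in (the subclass name is illustrative, not from this diff):

    from sqlmesh.core.engine_adapter.base import EngineAdapter

    class GrantsCapableAdapter(EngineAdapter):
        # Engines that leave these False get NotImplementedError from the
        # grants and last-modified-timestamp entry points added later in
        # this diff.
        SUPPORTS_GRANTS = True
        SUPPORTS_METADATA_TABLE_LAST_MODIFIED_TS = True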
@@ -160,6 +163,7 @@ class EngineAdapter:
         self.correlation_id = correlation_id
         self._schema_differ_overrides = schema_differ_overrides
         self._query_execution_tracker = query_execution_tracker
+        self._data_object_cache: t.Dict[str, t.Optional[DataObject]] = {}

     def with_settings(self, **kwargs: t.Any) -> EngineAdapter:
         extra_kwargs = {
@@ -223,6 +227,10 @@ class EngineAdapter:
             }
         )

+    @property
+    def _catalog_type_overrides(self) -> t.Dict[str, str]:
+        return self._extra_config.get("catalog_type_overrides") or {}
+
     @classmethod
     def _casted_columns(
         cls,
@@ -430,7 +438,11 @@ class EngineAdapter:
             raise UnsupportedCatalogOperationError(
                 f"{self.dialect} does not support catalogs and a catalog was provided: {catalog}"
             )
-        return self.DEFAULT_CATALOG_TYPE
+        return (
+            self._catalog_type_overrides.get(catalog, self.DEFAULT_CATALOG_TYPE)
+            if catalog
+            else self.DEFAULT_CATALOG_TYPE
+        )

     def get_catalog_type_from_table(self, table: TableName) -> str:
         """Get the catalog type from a table name if it has a catalog specified, otherwise return the current catalog type"""
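The new return path consults per-catalog overrides before falling back to the adapter-wide default. A standalone sketch of the lookup semantics (the override mapping and catalog type names are illustrative):

    from typing import Dict, Optional

    DEFAULT_CATALOG_TYPE = "hive"  # stand-in for the adapter's class attribute

    def get_catalog_type(catalog: Optional[str], overrides: Dict[str, str]) -> str:
        # Catalogs without an override, and the no-catalog case, use the default.
        return overrides.get(catalog, DEFAULT_CATALOG_TYPE) if catalog else DEFAULT_CATALOG_TYPE

    assert get_catalog_type("lake", {"lake": "iceberg"}) == "iceberg"
    assert get_catalog_type("other", {"lake": "iceberg"}) == "hive"
    assert get_catalog_type(None, {"lake": "iceberg"}) == "hive"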
@@ -539,11 +551,13 @@ class EngineAdapter:
                 target_table,
                 source_queries,
                 target_columns_to_types,
+                **kwargs,
             )
         return self._insert_overwrite_by_condition(
             target_table,
             source_queries,
             target_columns_to_types,
+            **kwargs,
         )

     def create_index(
@@ -974,6 +988,13 @@ class EngineAdapter:
             ),
             track_rows_processed=track_rows_processed,
         )
+        # Extract table name to clear cache
+        table_name = (
+            table_name_or_schema.this
+            if isinstance(table_name_or_schema, exp.Schema)
+            else table_name_or_schema
+        )
+        self._clear_data_object_cache(table_name)

     def _build_create_table_exp(
         self,
@@ -1029,13 +1050,15 @@ class EngineAdapter:
             target_table_name: The name of the table to create. Can be fully qualified or just table name.
             source_table_name: The name of the table to base the new table on.
         """
-        self.create_table(target_table_name, self.columns(source_table_name), exists=exists)
+        self._create_table_like(target_table_name, source_table_name, exists=exists, **kwargs)
+        self._clear_data_object_cache(target_table_name)

     def clone_table(
         self,
         target_table_name: TableName,
         source_table_name: TableName,
         replace: bool = False,
+        exists: bool = True,
         clone_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
         **kwargs: t.Any,
     ) -> None:
@@ -1045,6 +1068,7 @@ class EngineAdapter:
             target_table_name: The name of the table that should be created.
             source_table_name: The name of the source table that should be cloned.
             replace: Whether or not to replace an existing table.
+            exists: Indicates whether to include the IF NOT EXISTS check.
         """
         if not self.SUPPORTS_CLONING:
             raise NotImplementedError(f"Engine does not support cloning: {type(self)}")
@@ -1055,6 +1079,7 @@ class EngineAdapter:
                 this=exp.to_table(target_table_name),
                 kind="TABLE",
                 replace=replace,
+                exists=exists,
                 clone=exp.Clone(
                     this=exp.to_table(source_table_name),
                     **(clone_kwargs or {}),
@@ -1062,6 +1087,7 @@ class EngineAdapter:
                 **kwargs,
             )
         )
+        self._clear_data_object_cache(target_table_name)

     def drop_data_object(self, data_object: DataObject, ignore_if_not_exists: bool = True) -> None:
         """Drops a data object of arbitrary type.
@@ -1127,6 +1153,7 @@ class EngineAdapter:
             drop_args["cascade"] = cascade

         self.execute(exp.Drop(this=exp.to_table(name), kind=kind, exists=exists, **drop_args))
+        self._clear_data_object_cache(name)

     def get_alter_operations(
         self,
@@ -1317,6 +1344,8 @@ class EngineAdapter:
             quote_identifiers=self.QUOTE_IDENTIFIERS_IN_VIEWS,
         )

+        self._clear_data_object_cache(view_name)
+
         # Register table comment with commands if the engine doesn't support doing it in CREATE
         if (
             table_description
@@ -1446,8 +1475,14 @@ class EngineAdapter:
         }

     def table_exists(self, table_name: TableName) -> bool:
+        table = exp.to_table(table_name)
+        data_object_cache_key = _get_data_object_cache_key(table.catalog, table.db, table.name)
+        if data_object_cache_key in self._data_object_cache:
+            logger.debug("Table existence cache hit: %s", data_object_cache_key)
+            return self._data_object_cache[data_object_cache_key] is not None
+
         try:
-            self.execute(exp.Describe(this=exp.to_table(table_name), kind="TABLE"))
+            self.execute(exp.Describe(this=table, kind="TABLE"))
             return True
         except Exception:
             return False
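Note the tri-state cache contract: an absent key was never looked up, while a key mapped to None records a lookup that found nothing, so table_exists can return False without another round trip. A minimal standalone sketch of that contract:

    from typing import Dict, Optional

    cache: Dict[str, Optional[object]] = {
        "db.main.present": object(),  # previously found
        "db.main.missing": None,      # previously looked for, not found
    }

    def cached_exists(key: str) -> Optional[bool]:
        if key in cache:
            return cache[key] is not None
        return None  # true cache miss: the caller must query the engine

    assert cached_exists("db.main.present") is True
    assert cached_exists("db.main.missing") is False
    assert cached_exists("db.main.unknown") is None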
@@ -1581,7 +1616,7 @@ class EngineAdapter:
         **kwargs: t.Any,
     ) -> None:
         return self._insert_overwrite_by_condition(
-            table_name, source_queries, target_columns_to_types, where
+            table_name, source_queries, target_columns_to_types, where, **kwargs
         )

     def _values_to_sql(
@@ -1633,6 +1668,30 @@ class EngineAdapter:
                 target_columns_to_types=target_columns_to_types,
                 order_projections=False,
             )
+        elif insert_overwrite_strategy.is_merge:
+            columns = [exp.column(col) for col in target_columns_to_types]
+            when_not_matched_by_source = exp.When(
+                matched=False,
+                source=True,
+                condition=where,
+                then=exp.Delete(),
+            )
+            when_not_matched_by_target = exp.When(
+                matched=False,
+                source=False,
+                then=exp.Insert(
+                    this=exp.Tuple(expressions=columns),
+                    expression=exp.Tuple(expressions=columns),
+                ),
+            )
+            self._merge(
+                target_table=table_name,
+                query=query,
+                on=exp.false(),
+                whens=exp.Whens(
+                    expressions=[when_not_matched_by_source, when_not_matched_by_target]
+                ),
+            )
         else:
             insert_exp = exp.insert(
                 query,
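With ON FALSE no row can ever match, so the two WHEN NOT MATCHED branches reduce to: delete the target rows that satisfy the overwrite condition, then insert every source row. A sketch of the resulting statement shape, parsed with sqlglot (the table and column names are illustrative, not from this diff):

    from sqlglot import exp, parse_one

    merge = parse_one(
        """
        MERGE INTO tgt USING (SELECT id, v FROM src) AS s ON FALSE
        WHEN NOT MATCHED BY SOURCE AND ds BETWEEN '2024-01-01' AND '2024-01-31' THEN DELETE
        WHEN NOT MATCHED THEN INSERT (id, v) VALUES (id, v)
        """
    )
    assert isinstance(merge, exp.Merge)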
@@ -1715,7 +1774,7 @@ class EngineAdapter:
         valid_from_col: exp.Column,
         valid_to_col: exp.Column,
         execution_time: t.Union[TimeLike, exp.Column],
-        check_columns: t.Union[exp.Star, t.Sequence[exp.Column]],
+        check_columns: t.Union[exp.Star, t.Sequence[exp.Expression]],
         invalidate_hard_deletes: bool = True,
         execution_time_as_valid_from: bool = False,
         target_columns_to_types: t.Optional[t.Dict[str, exp.DataType]] = None,
@@ -1753,7 +1812,7 @@ class EngineAdapter:
         execution_time: t.Union[TimeLike, exp.Column],
         invalidate_hard_deletes: bool = True,
         updated_at_col: t.Optional[exp.Column] = None,
-        check_columns: t.Optional[t.Union[exp.Star, t.Sequence[exp.Column]]] = None,
+        check_columns: t.Optional[t.Union[exp.Star, t.Sequence[exp.Expression]]] = None,
         updated_at_as_valid_from: bool = False,
         execution_time_as_valid_from: bool = False,
         target_columns_to_types: t.Optional[t.Dict[str, exp.DataType]] = None,
@@ -1828,8 +1887,10 @@ class EngineAdapter:
         # they are equal or not, the extra check is not a problem and we gain simplified logic here.
         # If we want to change this, then we just need to check the expressions in unique_key and pull out the
         # column names and then remove them from the unmanaged_columns
-        if check_columns and isinstance(check_columns, exp.Star):
-            check_columns = [exp.column(col) for col in unmanaged_columns_to_types]
+        if check_columns:
+            # Handle both Star directly and [Star()] (which can happen during serialization/deserialization)
+            if isinstance(seq_get(ensure_list(check_columns), 0), exp.Star):
+                check_columns = [exp.column(col) for col in unmanaged_columns_to_types]
         execution_ts = (
             exp.cast(execution_time, time_data_type, dialect=self.dialect)
             if isinstance(execution_time, exp.Column)
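The seq_get(ensure_list(...)) combination normalizes the two shapes check_columns can arrive in. A short demonstration using the same sqlglot helpers the diff imports:

    from sqlglot import exp
    from sqlglot.helper import ensure_list, seq_get

    # A bare Star and a single-element [Star()] (seen after round-tripping
    # through serialization) are detected identically before being expanded
    # into the concrete column list.
    for value in (exp.Star(), [exp.Star()]):
        assert isinstance(seq_get(ensure_list(value), 0), exp.Star)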
@@ -1866,7 +1927,8 @@ class EngineAdapter:
             col_qualified.set("table", exp.to_identifier("joined"))

             t_col = col_qualified.copy()
-            t_col.this.set("this", f"t_{t_col.name}")
+            for column in t_col.find_all(exp.Column):
+                column.this.set("this", f"t_{column.name}")

             row_check_conditions.extend(
                 [
@@ -2217,24 +2279,34 @@ class EngineAdapter:
                 "Tried to rename table across catalogs which is not supported"
             )
         self._rename_table(old_table_name, new_table_name)
+        self._clear_data_object_cache(old_table_name)
+        self._clear_data_object_cache(new_table_name)

-    def get_data_object(self, target_name: TableName) -> t.Optional[DataObject]:
+    def get_data_object(
+        self, target_name: TableName, safe_to_cache: bool = False
+    ) -> t.Optional[DataObject]:
         target_table = exp.to_table(target_name)
         existing_data_objects = self.get_data_objects(
-            schema_(target_table.db, target_table.catalog), {target_table.name}
+            schema_(target_table.db, target_table.catalog),
+            {target_table.name},
+            safe_to_cache=safe_to_cache,
         )
         if existing_data_objects:
             return existing_data_objects[0]
         return None

     def get_data_objects(
-        self, schema_name: SchemaName, object_names: t.Optional[t.Set[str]] = None
+        self,
+        schema_name: SchemaName,
+        object_names: t.Optional[t.Set[str]] = None,
+        safe_to_cache: bool = False,
     ) -> t.List[DataObject]:
         """Lists all data objects in the target schema.

         Args:
             schema_name: The name of the schema to list data objects from.
             object_names: If provided, only return data objects with these names.
+            safe_to_cache: Whether it is safe to cache the results of this call.

         Returns:
             A list of data objects in the target schema.
@@ -2242,15 +2314,64 @@ class EngineAdapter:
         if object_names is not None:
             if not object_names:
                 return []
-            object_names_list = list(object_names)
-            batches = [
-                object_names_list[i : i + self.DATA_OBJECT_FILTER_BATCH_SIZE]
-                for i in range(0, len(object_names_list), self.DATA_OBJECT_FILTER_BATCH_SIZE)
-            ]
-            return [
-                obj for batch in batches for obj in self._get_data_objects(schema_name, set(batch))
-            ]
-        return self._get_data_objects(schema_name)
+
+            # Check cache for each object name
+            target_schema = to_schema(schema_name)
+            cached_objects = []
+            missing_names = set()
+
+            for name in object_names:
+                cache_key = _get_data_object_cache_key(
+                    target_schema.catalog, target_schema.db, name
+                )
+                if cache_key in self._data_object_cache:
+                    logger.debug("Data object cache hit: %s", cache_key)
+                    data_object = self._data_object_cache[cache_key]
+                    # If the object is None, then the table was previously looked for but not found
+                    if data_object:
+                        cached_objects.append(data_object)
+                else:
+                    logger.debug("Data object cache miss: %s", cache_key)
+                    missing_names.add(name)
+
+            # Fetch missing objects from database
+            if missing_names:
+                object_names_list = list(missing_names)
+                batches = [
+                    object_names_list[i : i + self.DATA_OBJECT_FILTER_BATCH_SIZE]
+                    for i in range(0, len(object_names_list), self.DATA_OBJECT_FILTER_BATCH_SIZE)
+                ]
+
+                fetched_objects = []
+                fetched_object_names = set()
+                for batch in batches:
+                    objects = self._get_data_objects(schema_name, set(batch))
+                    for obj in objects:
+                        if safe_to_cache:
+                            cache_key = _get_data_object_cache_key(
+                                obj.catalog, obj.schema_name, obj.name
+                            )
+                            self._data_object_cache[cache_key] = obj
+                        fetched_objects.append(obj)
+                        fetched_object_names.add(obj.name)
+
+                if safe_to_cache:
+                    for missing_name in missing_names - fetched_object_names:
+                        cache_key = _get_data_object_cache_key(
+                            target_schema.catalog, target_schema.db, missing_name
+                        )
+                        self._data_object_cache[cache_key] = None
+
+                return cached_objects + fetched_objects
+
+            return cached_objects
+
+        fetched_objects = self._get_data_objects(schema_name)
+        if safe_to_cache:
+            for obj in fetched_objects:
+                cache_key = _get_data_object_cache_key(obj.catalog, obj.schema_name, obj.name)
+                self._data_object_cache[cache_key] = obj
+        return fetched_objects

     def fetchone(
         self,
@@ -2322,6 +2443,11 @@ class EngineAdapter:
         """Fetches a PySpark DataFrame from the cursor"""
         raise NotImplementedError(f"Engine does not support PySpark DataFrames: {type(self)}")

+    @property
+    def wap_enabled(self) -> bool:
+        """Returns whether WAP is enabled for this engine."""
+        return self._extra_config.get("wap_enabled", False)
+
     def wap_supported(self, table_name: TableName) -> bool:
         """Returns whether WAP for the target table is supported."""
         return False
@@ -2359,6 +2485,33 @@ class EngineAdapter:
         """
         raise NotImplementedError(f"Engine does not support WAP: {type(self)}")

+    def sync_grants_config(
+        self,
+        table: exp.Table,
+        grants_config: GrantsConfig,
+        table_type: DataObjectType = DataObjectType.TABLE,
+    ) -> None:
+        """Applies the grants_config to a table authoritatively.
+
+        It first compares the specified grants against the current grants, and then
+        applies the diffs to the table by revoking and granting privileges as needed.
+
+        Args:
+            table: The table/view to apply grants to.
+            grants_config: Dictionary mapping privileges to lists of grantees.
+            table_type: The type of database object (TABLE, VIEW, MATERIALIZED_VIEW).
+        """
+        if not self.SUPPORTS_GRANTS:
+            raise NotImplementedError(f"Engine does not support grants: {type(self)}")
+
+        current_grants = self._get_current_grants_config(table)
+        new_grants, revoked_grants = self._diff_grants_configs(grants_config, current_grants)
+        revoke_exprs = self._revoke_grants_config_expr(table, revoked_grants, table_type)
+        grant_exprs = self._apply_grants_config_expr(table, new_grants, table_type)
+        dcl_exprs = revoke_exprs + grant_exprs
+
+        if dcl_exprs:
+            self.execute(dcl_exprs)
+
     @contextlib.contextmanager
     def transaction(
         self,
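Hypothetical usage of the new entry point on an adapter whose engine sets SUPPORTS_GRANTS = True (the privilege names, roles, and table are illustrative, not from this diff):

    from sqlglot import exp

    grants_config = {
        "SELECT": ["reporting_role", "analyst_role"],
        "INSERT": ["etl_role"],
    }
    # adapter.sync_grants_config(exp.to_table("db.main.orders"), grants_config)
    # Revokes privileges present on the table but absent from grants_config,
    # grants the reverse, and executes the resulting DCL statements in order.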
@@ -2652,6 +2805,17 @@ class EngineAdapter:

         return expression.sql(**sql_gen_kwargs, copy=False)  # type: ignore

+    def _clear_data_object_cache(self, table_name: t.Optional[TableName] = None) -> None:
+        """Clears the cache entry for the given table name, or clears the entire cache if table_name is None."""
+        if table_name is None:
+            logger.debug("Clearing entire data object cache")
+            self._data_object_cache.clear()
+        else:
+            table = exp.to_table(table_name)
+            cache_key = _get_data_object_cache_key(table.catalog, table.db, table.name)
+            logger.debug("Clearing data object cache key: %s", cache_key)
+            self._data_object_cache.pop(cache_key, None)
+
     def _get_data_objects(
         self, schema_name: SchemaName, object_names: t.Optional[t.Set[str]] = None
     ) -> t.List[DataObject]:
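Sketch of the two clearing modes on a plain dict standing in for the adapter cache: DDL against one table pops just its key, while table_name=None wipes everything:

    cache = {"main.orders": object(), "main.customers": None}

    cache.pop("main.orders", None)  # targeted clear, e.g. after DROP TABLE
    assert "main.orders" not in cache

    cache.clear()  # full clear
    assert not cache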
@@ -2837,6 +3001,15 @@ class EngineAdapter:
                 exc_info=True,
             )

+    def _create_table_like(
+        self,
+        target_table_name: TableName,
+        source_table_name: TableName,
+        exists: bool,
+        **kwargs: t.Any,
+    ) -> None:
+        self.create_table(target_table_name, self.columns(source_table_name), exists=exists)
+
     def _rename_table(
         self,
         old_table_name: TableName,
@@ -2887,6 +3060,127 @@ class EngineAdapter:
                 f"Identifier name '{name}' (length {name_length}) exceeds {self.dialect.capitalize()}'s max identifier limit of {self.MAX_IDENTIFIER_LENGTH} characters"
             )

+    def get_table_last_modified_ts(self, table_names: t.List[TableName]) -> t.List[int]:
+        raise NotImplementedError()
+
+    @classmethod
+    def _diff_grants_configs(
+        cls, new_config: GrantsConfig, old_config: GrantsConfig
+    ) -> t.Tuple[GrantsConfig, GrantsConfig]:
+        """Compute additions and removals between two grants configurations.
+
+        This method compares new (desired) and old (current) GrantsConfigs case-insensitively
+        for both privilege keys and grantees, while preserving original casing
+        in the output GrantsConfigs.
+
+        Args:
+            new_config: Desired grants configuration (specified by the user).
+            old_config: Current grants configuration (returned by the database).
+
+        Returns:
+            A tuple of (additions, removals) GrantsConfig where:
+            - additions contains privileges/grantees present in new_config but not in old_config
+            - additions uses keys and grantee strings from new_config (user-specified casing)
+            - removals contains privileges/grantees present in old_config but not in new_config
+            - removals uses keys and grantee strings from old_config (database-returned casing)
+
+        Notes:
+            - Comparison is case-insensitive using casefold(); original casing is preserved in results.
+            - Overlapping grantees (case-insensitive) are excluded from the results.
+        """
+
+        def _diffs(config1: GrantsConfig, config2: GrantsConfig) -> GrantsConfig:
+            diffs: GrantsConfig = {}
+            cf_config2 = {k.casefold(): {g.casefold() for g in v} for k, v in config2.items()}
+            for key, grantees in config1.items():
+                cf_key = key.casefold()
+
+                # Missing key (add all grantees)
+                if cf_key not in cf_config2:
+                    diffs[key] = grantees.copy()
+                    continue
+
+                # Include only grantees not in config2
+                cf_grantees2 = cf_config2[cf_key]
+                diff_grantees = []
+                for grantee in grantees:
+                    if grantee.casefold() not in cf_grantees2:
+                        diff_grantees.append(grantee)
+                if diff_grantees:
+                    diffs[key] = diff_grantees
+            return diffs
+
+        return _diffs(new_config, old_config), _diffs(old_config, new_config)
+
+    def _get_current_grants_config(self, table: exp.Table) -> GrantsConfig:
+        """Returns current grants for a table as a dictionary.
+
+        This method queries the database and returns the current grants/permissions
+        for the given table, parsed into a dictionary format. The result is used for
+        case-insensitive comparison between these current grants and the desired
+        grants from model configuration.
+
+        Args:
+            table: The table/view to query grants for.
+
+        Returns:
+            Dictionary mapping permissions to lists of grantees. Permission names
+            should be returned as the database provides them (typically uppercase
+            for standard SQL permissions, but engine-specific roles may vary).
+
+        Raises:
+            NotImplementedError: If the engine does not support grants.
+        """
+        if not self.SUPPORTS_GRANTS:
+            raise NotImplementedError(f"Engine does not support grants: {type(self)}")
+        raise NotImplementedError("Subclass must implement get_current_grants")
+
+    def _apply_grants_config_expr(
+        self,
+        table: exp.Table,
+        grants_config: GrantsConfig,
+        table_type: DataObjectType = DataObjectType.TABLE,
+    ) -> t.List[exp.Expression]:
+        """Returns SQLGlot Grant expressions to apply grants to a table.
+
+        Args:
+            table: The table/view to grant permissions on.
+            grants_config: Dictionary mapping permissions to lists of grantees.
+            table_type: The type of database object (TABLE, VIEW, MATERIALIZED_VIEW).
+
+        Returns:
+            List of SQLGlot expressions for grant operations.
+
+        Raises:
+            NotImplementedError: If the engine does not support grants.
+        """
+        if not self.SUPPORTS_GRANTS:
+            raise NotImplementedError(f"Engine does not support grants: {type(self)}")
+        raise NotImplementedError("Subclass must implement _apply_grants_config_expr")
+
+    def _revoke_grants_config_expr(
+        self,
+        table: exp.Table,
+        grants_config: GrantsConfig,
+        table_type: DataObjectType = DataObjectType.TABLE,
+    ) -> t.List[exp.Expression]:
+        """Returns SQLGlot expressions to revoke grants from a table.
+
+        Args:
+            table: The table/view to revoke permissions from.
+            grants_config: Dictionary mapping permissions to lists of grantees.
+            table_type: The type of database object (TABLE, VIEW, MATERIALIZED_VIEW).
+
+        Returns:
+            List of SQLGlot expressions for revoke operations.
+
+        Raises:
+            NotImplementedError: If the engine does not support grants.
+        """
+        if not self.SUPPORTS_GRANTS:
+            raise NotImplementedError(f"Engine does not support grants: {type(self)}")
+        raise NotImplementedError("Subclass must implement _revoke_grants_config_expr")
+

 class EngineAdapterWithIndexSupport(EngineAdapter):
     SUPPORTS_INDEXES = True
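A worked example of the diff semantics documented in _diff_grants_configs, using a standalone mirror of its helper (not an import from sqlmesh):

    from typing import Dict, List, Tuple

    GrantsConfig = Dict[str, List[str]]

    def diff_grants_configs(new: GrantsConfig, old: GrantsConfig) -> Tuple[GrantsConfig, GrantsConfig]:
        def _diffs(c1: GrantsConfig, c2: GrantsConfig) -> GrantsConfig:
            cf2 = {k.casefold(): {g.casefold() for g in v} for k, v in c2.items()}
            out: GrantsConfig = {}
            for key, grantees in c1.items():
                if key.casefold() not in cf2:
                    out[key] = grantees.copy()
                    continue
                kept = [g for g in grantees if g.casefold() not in cf2[key.casefold()]]
                if kept:
                    out[key] = kept
            return out

        return _diffs(new, old), _diffs(old, new)

    additions, removals = diff_grants_configs(
        {"SELECT": ["Analyst", "etl"], "INSERT": ["etl"]},  # desired
        {"select": ["ANALYST"], "DELETE": ["admin"]},       # current
    )
    # "Analyst" matches "ANALYST" case-insensitively, so only true deltas remain,
    # each keeping the casing of the side it came from.
    assert additions == {"SELECT": ["etl"], "INSERT": ["etl"]}
    assert removals == {"DELETE": ["admin"]}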
@@ -2896,3 +3190,9 @@ def _decoded_str(value: t.Union[str, bytes]) -> str:
     if isinstance(value, bytes):
         return value.decode("utf-8")
     return value
+
+
+def _get_data_object_cache_key(catalog: t.Optional[str], schema_name: str, object_name: str) -> str:
+    """Returns a cache key for a data object based on its fully qualified name."""
+    catalog = f"{catalog}." if catalog else ""
+    return f"{catalog}{schema_name}.{object_name}"
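The cache key is just the dot-joined qualified name with the catalog segment omitted when absent; a standalone copy for illustration:

    from typing import Optional

    def cache_key(catalog: Optional[str], schema_name: str, object_name: str) -> str:
        prefix = f"{catalog}." if catalog else ""
        return f"{prefix}{schema_name}.{object_name}"

    assert cache_key("analytics", "main", "orders") == "analytics.main.orders"
    assert cache_key(None, "main", "orders") == "main.orders"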
sqlmesh/core/engine_adapter/base_postgres.py
CHANGED
@@ -1,11 +1,12 @@
 from __future__ import annotations

 import typing as t
+import logging

 from sqlglot import exp

 from sqlmesh.core.dialect import to_schema
-from sqlmesh.core.engine_adapter import EngineAdapter
+from sqlmesh.core.engine_adapter.base import EngineAdapter, _get_data_object_cache_key
 from sqlmesh.core.engine_adapter.shared import (
     CatalogSupport,
     CommentCreationTable,
@@ -20,6 +21,9 @@ if t.TYPE_CHECKING:
     from sqlmesh.core.engine_adapter._typing import QueryOrDF


+logger = logging.getLogger(__name__)
+
+
 class BasePostgresEngineAdapter(EngineAdapter):
     DEFAULT_BATCH_SIZE = 400
     COMMENT_CREATION_TABLE = CommentCreationTable.COMMENT_COMMAND_ONLY
@@ -58,6 +62,7 @@ class BasePostgresEngineAdapter(EngineAdapter):
             raise SQLMeshError(
                 f"Could not get columns for table '{table.sql(dialect=self.dialect)}'. Table not found."
             )
+
         return {
             column_name: exp.DataType.build(data_type, dialect=self.dialect, udt=True)
             for column_name, data_type in resp
@@ -75,6 +80,10 @@ class BasePostgresEngineAdapter(EngineAdapter):
         Reference: https://github.com/aws/amazon-redshift-python-driver/blob/master/redshift_connector/cursor.py#L528-L553
         """
         table = exp.to_table(table_name)
+        data_object_cache_key = _get_data_object_cache_key(table.catalog, table.db, table.name)
+        if data_object_cache_key in self._data_object_cache:
+            logger.debug("Table existence cache hit: %s", data_object_cache_key)
+            return self._data_object_cache[data_object_cache_key] is not None

         sql = (
             exp.select("1")
@@ -188,3 +197,10 @@ class BasePostgresEngineAdapter(EngineAdapter):
             )
             for row in df.itertuples()
         ]
+
+    def _get_current_schema(self) -> str:
+        """Returns the current default schema for the connection."""
+        result = self.fetchone(exp.select(exp.func("current_schema")))
+        if result and result[0]:
+            return result[0]
+        return "public"