sqlmesh 0.217.1.dev1__py3-none-any.whl → 0.227.2.dev4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (183)
  1. sqlmesh/__init__.py +12 -2
  2. sqlmesh/_version.py +2 -2
  3. sqlmesh/cli/project_init.py +10 -2
  4. sqlmesh/core/_typing.py +1 -0
  5. sqlmesh/core/audit/definition.py +8 -2
  6. sqlmesh/core/config/__init__.py +1 -1
  7. sqlmesh/core/config/connection.py +17 -5
  8. sqlmesh/core/config/dbt.py +13 -0
  9. sqlmesh/core/config/janitor.py +12 -0
  10. sqlmesh/core/config/loader.py +7 -0
  11. sqlmesh/core/config/model.py +2 -0
  12. sqlmesh/core/config/root.py +3 -0
  13. sqlmesh/core/console.py +80 -2
  14. sqlmesh/core/constants.py +1 -1
  15. sqlmesh/core/context.py +61 -25
  16. sqlmesh/core/dialect.py +3 -0
  17. sqlmesh/core/engine_adapter/_typing.py +2 -0
  18. sqlmesh/core/engine_adapter/base.py +322 -22
  19. sqlmesh/core/engine_adapter/base_postgres.py +17 -1
  20. sqlmesh/core/engine_adapter/bigquery.py +146 -7
  21. sqlmesh/core/engine_adapter/clickhouse.py +17 -13
  22. sqlmesh/core/engine_adapter/databricks.py +33 -2
  23. sqlmesh/core/engine_adapter/fabric.py +1 -29
  24. sqlmesh/core/engine_adapter/mixins.py +142 -48
  25. sqlmesh/core/engine_adapter/mssql.py +15 -4
  26. sqlmesh/core/engine_adapter/mysql.py +2 -2
  27. sqlmesh/core/engine_adapter/postgres.py +9 -3
  28. sqlmesh/core/engine_adapter/redshift.py +4 -0
  29. sqlmesh/core/engine_adapter/risingwave.py +1 -0
  30. sqlmesh/core/engine_adapter/shared.py +6 -0
  31. sqlmesh/core/engine_adapter/snowflake.py +82 -11
  32. sqlmesh/core/engine_adapter/spark.py +14 -10
  33. sqlmesh/core/engine_adapter/trino.py +4 -2
  34. sqlmesh/core/janitor.py +181 -0
  35. sqlmesh/core/lineage.py +1 -0
  36. sqlmesh/core/macros.py +35 -13
  37. sqlmesh/core/model/common.py +2 -0
  38. sqlmesh/core/model/definition.py +65 -4
  39. sqlmesh/core/model/kind.py +66 -2
  40. sqlmesh/core/model/meta.py +107 -2
  41. sqlmesh/core/node.py +101 -2
  42. sqlmesh/core/plan/builder.py +15 -10
  43. sqlmesh/core/plan/common.py +196 -2
  44. sqlmesh/core/plan/definition.py +21 -6
  45. sqlmesh/core/plan/evaluator.py +72 -113
  46. sqlmesh/core/plan/explainer.py +90 -8
  47. sqlmesh/core/plan/stages.py +42 -21
  48. sqlmesh/core/renderer.py +26 -18
  49. sqlmesh/core/scheduler.py +60 -19
  50. sqlmesh/core/selector.py +137 -9
  51. sqlmesh/core/signal.py +64 -1
  52. sqlmesh/core/snapshot/__init__.py +1 -0
  53. sqlmesh/core/snapshot/definition.py +109 -25
  54. sqlmesh/core/snapshot/evaluator.py +610 -50
  55. sqlmesh/core/state_sync/__init__.py +0 -1
  56. sqlmesh/core/state_sync/base.py +31 -27
  57. sqlmesh/core/state_sync/cache.py +12 -4
  58. sqlmesh/core/state_sync/common.py +216 -111
  59. sqlmesh/core/state_sync/db/facade.py +30 -15
  60. sqlmesh/core/state_sync/db/interval.py +27 -7
  61. sqlmesh/core/state_sync/db/migrator.py +14 -8
  62. sqlmesh/core/state_sync/db/snapshot.py +119 -87
  63. sqlmesh/core/table_diff.py +2 -2
  64. sqlmesh/core/test/definition.py +14 -9
  65. sqlmesh/dbt/adapter.py +20 -11
  66. sqlmesh/dbt/basemodel.py +52 -41
  67. sqlmesh/dbt/builtin.py +27 -11
  68. sqlmesh/dbt/column.py +17 -5
  69. sqlmesh/dbt/common.py +4 -2
  70. sqlmesh/dbt/context.py +14 -1
  71. sqlmesh/dbt/loader.py +60 -8
  72. sqlmesh/dbt/manifest.py +136 -8
  73. sqlmesh/dbt/model.py +105 -25
  74. sqlmesh/dbt/package.py +16 -1
  75. sqlmesh/dbt/profile.py +3 -3
  76. sqlmesh/dbt/project.py +12 -7
  77. sqlmesh/dbt/seed.py +1 -1
  78. sqlmesh/dbt/source.py +6 -1
  79. sqlmesh/dbt/target.py +25 -6
  80. sqlmesh/dbt/test.py +31 -1
  81. sqlmesh/migrations/v0000_baseline.py +3 -6
  82. sqlmesh/migrations/v0061_mysql_fix_blob_text_type.py +2 -5
  83. sqlmesh/migrations/v0062_add_model_gateway.py +2 -2
  84. sqlmesh/migrations/v0063_change_signals.py +2 -4
  85. sqlmesh/migrations/v0064_join_when_matched_strings.py +2 -4
  86. sqlmesh/migrations/v0065_add_model_optimize.py +2 -2
  87. sqlmesh/migrations/v0066_add_auto_restatements.py +2 -6
  88. sqlmesh/migrations/v0067_add_tsql_date_full_precision.py +2 -2
  89. sqlmesh/migrations/v0068_include_unrendered_query_in_metadata_hash.py +2 -2
  90. sqlmesh/migrations/v0069_update_dev_table_suffix.py +2 -4
  91. sqlmesh/migrations/v0070_include_grains_in_metadata_hash.py +2 -2
  92. sqlmesh/migrations/v0071_add_dev_version_to_intervals.py +2 -6
  93. sqlmesh/migrations/v0072_add_environment_statements.py +2 -4
  94. sqlmesh/migrations/v0073_remove_symbolic_disable_restatement.py +2 -4
  95. sqlmesh/migrations/v0074_add_partition_by_time_column_property.py +2 -2
  96. sqlmesh/migrations/v0075_remove_validate_query.py +2 -4
  97. sqlmesh/migrations/v0076_add_cron_tz.py +2 -2
  98. sqlmesh/migrations/v0077_fix_column_type_hash_calculation.py +2 -2
  99. sqlmesh/migrations/v0078_warn_if_non_migratable_python_env.py +2 -4
  100. sqlmesh/migrations/v0079_add_gateway_managed_property.py +7 -9
  101. sqlmesh/migrations/v0080_add_batch_size_to_scd_type_2_models.py +2 -2
  102. sqlmesh/migrations/v0081_update_partitioned_by.py +2 -4
  103. sqlmesh/migrations/v0082_warn_if_incorrectly_duplicated_statements.py +2 -4
  104. sqlmesh/migrations/v0083_use_sql_for_scd_time_data_type_data_hash.py +2 -2
  105. sqlmesh/migrations/v0084_normalize_quote_when_matched_and_merge_filter.py +2 -2
  106. sqlmesh/migrations/v0085_deterministic_repr.py +2 -4
  107. sqlmesh/migrations/v0086_check_deterministic_bug.py +2 -4
  108. sqlmesh/migrations/v0087_normalize_blueprint_variables.py +2 -4
  109. sqlmesh/migrations/v0088_warn_about_variable_python_env_diffs.py +2 -4
  110. sqlmesh/migrations/v0089_add_virtual_environment_mode.py +2 -2
  111. sqlmesh/migrations/v0090_add_forward_only_column.py +2 -6
  112. sqlmesh/migrations/v0091_on_additive_change.py +2 -2
  113. sqlmesh/migrations/v0092_warn_about_dbt_data_type_diff.py +2 -4
  114. sqlmesh/migrations/v0093_use_raw_sql_in_fingerprint.py +2 -2
  115. sqlmesh/migrations/v0094_add_dev_version_and_fingerprint_columns.py +2 -6
  116. sqlmesh/migrations/v0095_warn_about_dbt_raw_sql_diff.py +2 -4
  117. sqlmesh/migrations/v0096_remove_plan_dags_table.py +2 -4
  118. sqlmesh/migrations/v0097_add_dbt_name_in_node.py +2 -2
  119. sqlmesh/migrations/v0098_add_dbt_node_info_in_node.py +103 -0
  120. sqlmesh/migrations/v0099_add_last_altered_to_intervals.py +25 -0
  121. sqlmesh/migrations/v0100_add_grants_and_grants_target_layer.py +9 -0
  122. sqlmesh/utils/__init__.py +8 -1
  123. sqlmesh/utils/cache.py +5 -1
  124. sqlmesh/utils/date.py +1 -1
  125. sqlmesh/utils/errors.py +4 -0
  126. sqlmesh/utils/jinja.py +25 -2
  127. sqlmesh/utils/pydantic.py +6 -6
  128. sqlmesh/utils/windows.py +13 -3
  129. {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/METADATA +5 -5
  130. {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/RECORD +181 -176
  131. sqlmesh_dbt/cli.py +70 -7
  132. sqlmesh_dbt/console.py +14 -6
  133. sqlmesh_dbt/operations.py +103 -24
  134. sqlmesh_dbt/selectors.py +39 -1
  135. web/client/dist/assets/{Audits-Ucsx1GzF.js → Audits-CBiYyyx-.js} +1 -1
  136. web/client/dist/assets/{Banner-BWDzvavM.js → Banner-DSRbUlO5.js} +1 -1
  137. web/client/dist/assets/{ChevronDownIcon-D2VL13Ah.js → ChevronDownIcon-MK_nrjD_.js} +1 -1
  138. web/client/dist/assets/{ChevronRightIcon-DWGYbf1l.js → ChevronRightIcon-CLWtT22Q.js} +1 -1
  139. web/client/dist/assets/{Content-DdHDZM3I.js → Content-BNuGZN5l.js} +1 -1
  140. web/client/dist/assets/{Content-Bikfy8fh.js → Content-CSHJyW0n.js} +1 -1
  141. web/client/dist/assets/{Data-CzAJH7rW.js → Data-C1oRDbLx.js} +1 -1
  142. web/client/dist/assets/{DataCatalog-BJF11g8f.js → DataCatalog-HXyX2-_j.js} +1 -1
  143. web/client/dist/assets/{Editor-s0SBpV2y.js → Editor-BDyfpUuw.js} +1 -1
  144. web/client/dist/assets/{Editor-DgLhgKnm.js → Editor-D0jNItwC.js} +1 -1
  145. web/client/dist/assets/{Errors-D0m0O1d3.js → Errors-BfuFLcPi.js} +1 -1
  146. web/client/dist/assets/{FileExplorer-CEv0vXkt.js → FileExplorer-BR9IE3he.js} +1 -1
  147. web/client/dist/assets/{Footer-BwzXn8Ew.js → Footer-CgBEtiAh.js} +1 -1
  148. web/client/dist/assets/{Header-6heDkEqG.js → Header-DSqR6nSO.js} +1 -1
  149. web/client/dist/assets/{Input-obuJsD6k.js → Input-B-oZ6fGO.js} +1 -1
  150. web/client/dist/assets/Lineage-DYQVwDbD.js +1 -0
  151. web/client/dist/assets/{ListboxShow-HM9_qyrt.js → ListboxShow-BE5-xevs.js} +1 -1
  152. web/client/dist/assets/{ModelLineage-zWdKo0U2.js → ModelLineage-DkIFAYo4.js} +1 -1
  153. web/client/dist/assets/{Models-Bcu66SRz.js → Models-D5dWr8RB.js} +1 -1
  154. web/client/dist/assets/{Page-BWEEQfIt.js → Page-C-XfU5BR.js} +1 -1
  155. web/client/dist/assets/{Plan-C4gXCqlf.js → Plan-ZEuTINBq.js} +1 -1
  156. web/client/dist/assets/{PlusCircleIcon-CVDO651q.js → PlusCircleIcon-DVXAHG8_.js} +1 -1
  157. web/client/dist/assets/{ReportErrors-BT6xFwAr.js → ReportErrors-B7FEPzMB.js} +1 -1
  158. web/client/dist/assets/{Root-ryJoBK4h.js → Root-8aZyhPxF.js} +1 -1
  159. web/client/dist/assets/{SearchList-DB04sPb9.js → SearchList-W_iT2G82.js} +1 -1
  160. web/client/dist/assets/{SelectEnvironment-CUYcXUu6.js → SelectEnvironment-C65jALmO.js} +1 -1
  161. web/client/dist/assets/{SourceList-Doo_9ZGp.js → SourceList-DSLO6nVJ.js} +1 -1
  162. web/client/dist/assets/{SourceListItem-D5Mj7Dly.js → SourceListItem-BHt8d9-I.js} +1 -1
  163. web/client/dist/assets/{SplitPane-qHmkD1qy.js → SplitPane-CViaZmw6.js} +1 -1
  164. web/client/dist/assets/{Tests-DH1Z74ML.js → Tests-DhaVt5t1.js} +1 -1
  165. web/client/dist/assets/{Welcome-DqUJUNMF.js → Welcome-DvpjH-_4.js} +1 -1
  166. web/client/dist/assets/context-BctCsyGb.js +71 -0
  167. web/client/dist/assets/{context-Dr54UHLi.js → context-DFNeGsFF.js} +1 -1
  168. web/client/dist/assets/{editor-DYIP1yQ4.js → editor-CcO28cqd.js} +1 -1
  169. web/client/dist/assets/{file-DarlIDVi.js → file-CvJN3aZO.js} +1 -1
  170. web/client/dist/assets/{floating-ui.react-dom-BH3TFvkM.js → floating-ui.react-dom-CjE-JNW1.js} +1 -1
  171. web/client/dist/assets/{help-Bl8wqaQc.js → help-DuPhjipa.js} +1 -1
  172. web/client/dist/assets/{index-D1sR7wpN.js → index-C-dJH7yZ.js} +1 -1
  173. web/client/dist/assets/{index-O3mjYpnE.js → index-Dj0i1-CA.js} +2 -2
  174. web/client/dist/assets/{plan-CehRrJUG.js → plan-BTRSbjKn.js} +1 -1
  175. web/client/dist/assets/{popover-CqgMRE0G.js → popover-_Sf0yvOI.js} +1 -1
  176. web/client/dist/assets/{project-6gxepOhm.js → project-BvSOI8MY.js} +1 -1
  177. web/client/dist/index.html +1 -1
  178. web/client/dist/assets/Lineage-D0Hgdz2v.js +0 -1
  179. web/client/dist/assets/context-DgX0fp2E.js +0 -68
  180. {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/WHEEL +0 -0
  181. {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/entry_points.txt +0 -0
  182. {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/licenses/LICENSE +0 -0
  183. {sqlmesh-0.217.1.dev1.dist-info → sqlmesh-0.227.2.dev4.dist-info}/top_level.txt +0 -0
sqlmesh/core/engine_adapter/mssql.py CHANGED
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import typing as t
+import logging
 
 from sqlglot import exp
 
@@ -13,10 +14,10 @@ from sqlmesh.core.engine_adapter.base import (
     InsertOverwriteStrategy,
     MERGE_SOURCE_ALIAS,
     MERGE_TARGET_ALIAS,
+    _get_data_object_cache_key,
 )
 from sqlmesh.core.engine_adapter.mixins import (
     GetCurrentCatalogFromFunctionMixin,
-    InsertOverwriteWithMergeMixin,
     PandasNativeFetchDFSupportMixin,
     VarcharSizeWorkaroundMixin,
     RowDiffMixin,
@@ -37,11 +38,13 @@ if t.TYPE_CHECKING:
     from sqlmesh.core.engine_adapter._typing import DF, Query, QueryOrDF
 
 
+logger = logging.getLogger(__name__)
+
+
 @set_catalog()
 class MSSQLEngineAdapter(
     EngineAdapterWithIndexSupport,
     PandasNativeFetchDFSupportMixin,
-    InsertOverwriteWithMergeMixin,
     GetCurrentCatalogFromFunctionMixin,
     VarcharSizeWorkaroundMixin,
     RowDiffMixin,
@@ -53,6 +56,7 @@ class MSSQLEngineAdapter(
     COMMENT_CREATION_TABLE = CommentCreationTable.UNSUPPORTED
     COMMENT_CREATION_VIEW = CommentCreationView.UNSUPPORTED
     SUPPORTS_REPLACE_TABLE = False
+    MAX_IDENTIFIER_LENGTH = 128
     SUPPORTS_QUERY_EXECUTION_TRACKING = True
     SCHEMA_DIFFER_KWARGS = {
         "parameterized_type_defaults": {
@@ -74,6 +78,7 @@ class MSSQLEngineAdapter(
         },
     }
     VARIABLE_LENGTH_DATA_TYPES = {"binary", "varbinary", "char", "varchar", "nchar", "nvarchar"}
+    INSERT_OVERWRITE_STRATEGY = InsertOverwriteStrategy.MERGE
 
     @property
     def catalog_support(self) -> CatalogSupport:
@@ -145,6 +150,10 @@ class MSSQLEngineAdapter(
     def table_exists(self, table_name: TableName) -> bool:
         """MsSql doesn't support describe so we query information_schema."""
         table = exp.to_table(table_name)
+        data_object_cache_key = _get_data_object_cache_key(table.catalog, table.db, table.name)
+        if data_object_cache_key in self._data_object_cache:
+            logger.debug("Table existence cache hit: %s", data_object_cache_key)
+            return self._data_object_cache[data_object_cache_key] is not None
 
         sql = (
             exp.select("1")
@@ -414,7 +423,9 @@
         insert_overwrite_strategy_override: t.Optional[InsertOverwriteStrategy] = None,
         **kwargs: t.Any,
     ) -> None:
-        if not where or where == exp.true():
+        # note that this is passed as table_properties here rather than physical_properties
+        use_merge_strategy = kwargs.get("table_properties", {}).get("mssql_merge_exists")
+        if (not where or where == exp.true()) and not use_merge_strategy:
             # this is a full table replacement, call the base strategy to do DELETE+INSERT
             # which will result in TRUNCATE+INSERT due to how we have overridden self.delete_from()
             return EngineAdapter._insert_overwrite_by_condition(
@@ -427,7 +438,7 @@
                 **kwargs,
             )
 
-        # For actual conditional overwrites, use MERGE from InsertOverwriteWithMergeMixin
+        # For conditional overwrites or when mssql_merge_exists is set use MERGE
         return super()._insert_overwrite_by_condition(
             table_name=table_name,
             source_queries=source_queries,
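
Note on the MSSQL changes above: the adapter now declares INSERT_OVERWRITE_STRATEGY = InsertOverwriteStrategy.MERGE instead of inheriting InsertOverwriteWithMergeMixin, and the mssql_merge_exists table property can force even a full-table overwrite onto the MERGE path. A minimal sketch of that dispatch decision (choose_strategy is illustrative, not a SQLMesh function):

from sqlglot import exp

def choose_strategy(where, table_properties):
    # A full-table replacement normally takes the DELETE+INSERT path
    # (TRUNCATE+INSERT via the overridden delete_from()), unless the
    # assumed mssql_merge_exists property forces MERGE.
    use_merge = table_properties.get("mssql_merge_exists")
    if (not where or where == exp.true()) and not use_merge:
        return "delete_insert"
    return "merge"

assert choose_strategy(None, {}) == "delete_insert"
assert choose_strategy(None, {"mssql_merge_exists": True}) == "merge"
assert choose_strategy(exp.condition("ds >= '2024-01-01'"), {}) == "merge"
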
sqlmesh/core/engine_adapter/mysql.py CHANGED
@@ -164,11 +164,11 @@ class MySQLEngineAdapter(
             exc_info=True,
         )
 
-    def create_table_like(
+    def _create_table_like(
         self,
         target_table_name: TableName,
        source_table_name: TableName,
-        exists: bool = True,
+        exists: bool,
         **kwargs: t.Any,
     ) -> None:
         self.execute(
sqlmesh/core/engine_adapter/postgres.py CHANGED
@@ -12,6 +12,7 @@ from sqlmesh.core.engine_adapter.mixins import (
     PandasNativeFetchDFSupportMixin,
     RowDiffMixin,
     logical_merge,
+    GrantsFromInfoSchemaMixin,
 )
 from sqlmesh.core.engine_adapter.shared import set_catalog
 
@@ -28,14 +29,19 @@ class PostgresEngineAdapter(
     PandasNativeFetchDFSupportMixin,
     GetCurrentCatalogFromFunctionMixin,
     RowDiffMixin,
+    GrantsFromInfoSchemaMixin,
 ):
     DIALECT = "postgres"
+    SUPPORTS_GRANTS = True
     SUPPORTS_INDEXES = True
     HAS_VIEW_BINDING = True
     CURRENT_CATALOG_EXPRESSION = exp.column("current_catalog")
     SUPPORTS_REPLACE_TABLE = False
-    MAX_IDENTIFIER_LENGTH = 63
+    MAX_IDENTIFIER_LENGTH: t.Optional[int] = 63
     SUPPORTS_QUERY_EXECUTION_TRACKING = True
+    GRANT_INFORMATION_SCHEMA_TABLE_NAME = "role_table_grants"
+    CURRENT_USER_OR_ROLE_EXPRESSION: exp.Expression = exp.column("current_role")
+    SUPPORTS_MULTIPLE_GRANT_PRINCIPALS = True
     SCHEMA_DIFFER_KWARGS = {
         "parameterized_type_defaults": {
             # DECIMAL without precision is "up to 131072 digits before the decimal point; up to 16383 digits after the decimal point"
@@ -79,11 +85,11 @@ class PostgresEngineAdapter(
         self._connection_pool.commit()
         return df
 
-    def create_table_like(
+    def _create_table_like(
         self,
         target_table_name: TableName,
         source_table_name: TableName,
-        exists: bool = True,
+        exists: bool,
         **kwargs: t.Any,
     ) -> None:
         self.execute(
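
The new class attributes wire up GrantsFromInfoSchemaMixin for Postgres: grants are read from information_schema.role_table_grants and scoped to current_role. A hedged sketch of the kind of query those settings imply; the mixin's real method names and filters are not visible in this diff:

from sqlglot import exp

def grants_query(schema: str, table: str) -> str:
    # Assumed shape: select grantees and privileges for one table from the
    # information schema table named by GRANT_INFORMATION_SCHEMA_TABLE_NAME.
    return (
        exp.select("grantee", "privilege_type")
        .from_("information_schema.role_table_grants")
        .where(f"table_schema = '{schema}' AND table_name = '{table}'")
        .sql(dialect="postgres")
    )

print(grants_query("analytics", "orders"))
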
sqlmesh/core/engine_adapter/redshift.py CHANGED
@@ -14,6 +14,7 @@ from sqlmesh.core.engine_adapter.mixins import (
     VarcharSizeWorkaroundMixin,
     RowDiffMixin,
     logical_merge,
+    GrantsFromInfoSchemaMixin,
 )
 from sqlmesh.core.engine_adapter.shared import (
     CommentCreationView,
@@ -40,12 +41,15 @@ class RedshiftEngineAdapter(
     NonTransactionalTruncateMixin,
     VarcharSizeWorkaroundMixin,
     RowDiffMixin,
+    GrantsFromInfoSchemaMixin,
 ):
     DIALECT = "redshift"
     CURRENT_CATALOG_EXPRESSION = exp.func("current_database")
     # Redshift doesn't support comments for VIEWs WITH NO SCHEMA BINDING (which we always use)
     COMMENT_CREATION_VIEW = CommentCreationView.UNSUPPORTED
     SUPPORTS_REPLACE_TABLE = False
+    SUPPORTS_GRANTS = True
+    SUPPORTS_MULTIPLE_GRANT_PRINCIPALS = True
 
     SCHEMA_DIFFER_KWARGS = {
         "parameterized_type_defaults": {
sqlmesh/core/engine_adapter/risingwave.py CHANGED
@@ -32,6 +32,7 @@ class RisingwaveEngineAdapter(PostgresEngineAdapter):
     SUPPORTS_MATERIALIZED_VIEWS = True
     SUPPORTS_TRANSACTIONS = False
     MAX_IDENTIFIER_LENGTH = None
+    SUPPORTS_GRANTS = False
 
     def columns(
         self, table_name: TableName, include_pseudo_columns: bool = False
sqlmesh/core/engine_adapter/shared.py CHANGED
@@ -243,6 +243,8 @@ class InsertOverwriteStrategy(Enum):
     # Issue a single INSERT query to replace a data range. The assumption is that the query engine will transparently match partition bounds
     # and replace data rather than append to it. Trino is an example of this when `hive.insert-existing-partitions-behavior=OVERWRITE` is configured
     INTO_IS_OVERWRITE = 4
+    # Do the INSERT OVERWRITE using merge since the engine doesn't support it natively
+    MERGE = 5
 
     @property
     def is_delete_insert(self) -> bool:
@@ -260,6 +262,10 @@ class InsertOverwriteStrategy(Enum):
     def is_into_is_overwrite(self) -> bool:
         return self == InsertOverwriteStrategy.INTO_IS_OVERWRITE
 
+    @property
+    def is_merge(self) -> bool:
+        return self == InsertOverwriteStrategy.MERGE
+
 
 class SourceQuery:
     def __init__(
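
For reference, a sketch of the enum after this change; the names of members 1 through 3 are inferred from the is_delete_insert/is_into_is_overwrite properties rather than shown in this hunk:

from enum import Enum

class InsertOverwriteStrategy(Enum):
    DELETE_INSERT = 1      # inferred; not shown in this hunk
    INSERT_OVERWRITE = 2   # inferred; not shown in this hunk
    REPLACE_WHERE = 3      # inferred; not shown in this hunk
    INTO_IS_OVERWRITE = 4
    MERGE = 5  # new: emulate INSERT OVERWRITE with a MERGE statement

    @property
    def is_merge(self) -> bool:
        return self == InsertOverwriteStrategy.MERGE

assert InsertOverwriteStrategy.MERGE.is_merge
assert not InsertOverwriteStrategy.INTO_IS_OVERWRITE.is_merge
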
sqlmesh/core/engine_adapter/snowflake.py CHANGED
@@ -15,6 +15,7 @@ from sqlmesh.core.engine_adapter.mixins import (
     GetCurrentCatalogFromFunctionMixin,
     ClusteredByMixin,
     RowDiffMixin,
+    GrantsFromInfoSchemaMixin,
 )
 from sqlmesh.core.engine_adapter.shared import (
     CatalogSupport,
@@ -34,7 +35,12 @@ if t.TYPE_CHECKING:
     import pandas as pd
 
     from sqlmesh.core._typing import SchemaName, SessionProperties, TableName
-    from sqlmesh.core.engine_adapter._typing import DF, Query, QueryOrDF, SnowparkSession
+    from sqlmesh.core.engine_adapter._typing import (
+        DF,
+        Query,
+        QueryOrDF,
+        SnowparkSession,
+    )
     from sqlmesh.core.node import IntervalUnit
 
 
@@ -46,7 +52,9 @@ if t.TYPE_CHECKING:
         "drop_catalog": CatalogSupport.REQUIRES_SET_CATALOG,  # needs a catalog to issue a query to information_schema.databases even though the result is global
     }
 )
-class SnowflakeEngineAdapter(GetCurrentCatalogFromFunctionMixin, ClusteredByMixin, RowDiffMixin):
+class SnowflakeEngineAdapter(
+    GetCurrentCatalogFromFunctionMixin, ClusteredByMixin, RowDiffMixin, GrantsFromInfoSchemaMixin
+):
     DIALECT = "snowflake"
     SUPPORTS_MATERIALIZED_VIEWS = True
     SUPPORTS_MATERIALIZED_VIEW_SCHEMA = True
@@ -54,6 +62,7 @@ class SnowflakeEngineAdapter(
     SUPPORTS_MANAGED_MODELS = True
     CURRENT_CATALOG_EXPRESSION = exp.func("current_database")
     SUPPORTS_CREATE_DROP_CATALOG = True
+    SUPPORTS_METADATA_TABLE_LAST_MODIFIED_TS = True
     SUPPORTED_DROP_CASCADE_OBJECT_KINDS = ["DATABASE", "SCHEMA", "TABLE"]
     SCHEMA_DIFFER_KWARGS = {
         "parameterized_type_defaults": {
@@ -73,6 +82,9 @@ class SnowflakeEngineAdapter(
     MANAGED_TABLE_KIND = "DYNAMIC TABLE"
     SNOWPARK = "snowpark"
     SUPPORTS_QUERY_EXECUTION_TRACKING = True
+    SUPPORTS_GRANTS = True
+    CURRENT_USER_OR_ROLE_EXPRESSION: exp.Expression = exp.func("CURRENT_ROLE")
+    USE_CATALOG_IN_GRANTS = True
 
     @contextlib.contextmanager
     def session(self, properties: SessionProperties) -> t.Iterator[None]:
@@ -127,6 +139,23 @@ class SnowflakeEngineAdapter(
     def catalog_support(self) -> CatalogSupport:
         return CatalogSupport.FULL_SUPPORT
 
+    @staticmethod
+    def _grant_object_kind(table_type: DataObjectType) -> str:
+        if table_type == DataObjectType.VIEW:
+            return "VIEW"
+        if table_type == DataObjectType.MATERIALIZED_VIEW:
+            return "MATERIALIZED VIEW"
+        if table_type == DataObjectType.MANAGED_TABLE:
+            return "DYNAMIC TABLE"
+        return "TABLE"
+
+    def _get_current_schema(self) -> str:
+        """Returns the current default schema for the connection."""
+        result = self.fetchone("SELECT CURRENT_SCHEMA()")
+        if not result or not result[0]:
+            raise SQLMeshError("Unable to determine current schema")
+        return str(result[0])
+
     def _create_catalog(self, catalog_name: exp.Identifier) -> None:
         props = exp.Properties(
             expressions=[exp.SchemaCommentProperty(this=exp.Literal.string(c.SQLMESH_MANAGED))]
@@ -378,6 +407,8 @@ class SnowflakeEngineAdapter(
         elif isinstance(df, pd.DataFrame):
             from snowflake.connector.pandas_tools import write_pandas
 
+            ordered_df = df[list(source_columns_to_types)]
+
             # Workaround for https://github.com/snowflakedb/snowflake-connector-python/issues/1034
             # The above issue has already been fixed upstream, but we keep the following
             # line anyway in order to support a wider range of Snowflake versions.
@@ -388,16 +419,16 @@ class SnowflakeEngineAdapter(
 
             # See: https://stackoverflow.com/a/75627721
             for column, kind in source_columns_to_types.items():
-                if is_datetime64_any_dtype(df.dtypes[column]):
+                if is_datetime64_any_dtype(ordered_df.dtypes[column]):
                     if kind.is_type("date"):  # type: ignore
-                        df[column] = pd.to_datetime(df[column]).dt.date  # type: ignore
-                    elif getattr(df.dtypes[column], "tz", None) is not None:  # type: ignore
-                        df[column] = pd.to_datetime(df[column]).dt.strftime(
+                        ordered_df[column] = pd.to_datetime(ordered_df[column]).dt.date  # type: ignore
+                    elif getattr(ordered_df.dtypes[column], "tz", None) is not None:  # type: ignore
+                        ordered_df[column] = pd.to_datetime(ordered_df[column]).dt.strftime(
                             "%Y-%m-%d %H:%M:%S.%f%z"
                         )  # type: ignore
                     # https://github.com/snowflakedb/snowflake-connector-python/issues/1677
                     else:  # type: ignore
-                        df[column] = pd.to_datetime(df[column]).dt.strftime(
+                        ordered_df[column] = pd.to_datetime(ordered_df[column]).dt.strftime(
                             "%Y-%m-%d %H:%M:%S.%f"
                         )  # type: ignore
 
@@ -407,7 +438,7 @@ class SnowflakeEngineAdapter(
 
             write_pandas(
                 self._connection_pool.get(),
-                df,
+                ordered_df,
                 temp_table.name,
                 schema=temp_table.db or None,
                 database=database.sql(dialect=self.dialect) if database else None,
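
The ordered_df change above reindexes the DataFrame to the declared column order before the write_pandas load, and the dtype coercions then operate on that copy instead of mutating the caller's frame. A self-contained illustration of the reindexing step:

import pandas as pd

source_columns_to_types = {"id": "int", "ds": "date"}  # declared order
df = pd.DataFrame({"ds": ["2024-01-01"], "id": [1]})   # arrives in a different order

ordered_df = df[list(source_columns_to_types)]
assert list(ordered_df.columns) == ["id", "ds"]
assert list(df.columns) == ["ds", "id"]  # the original frame is left untouched
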
@@ -526,16 +557,36 @@ class SnowflakeEngineAdapter(
                 type=DataObjectType.from_str(row.type),  # type: ignore
                 clustering_key=row.clustering_key,  # type: ignore
             )
-            for row in df.itertuples()
+            # lowercase the column names for cases where Snowflake might return uppercase column names for certain catalogs
+            for row in df.rename(columns={col: col.lower() for col in df.columns}).itertuples()
         ]
 
+    def _get_grant_expression(self, table: exp.Table) -> exp.Expression:
+        # Upon execute the catalog in table expressions are properly normalized to handle the case where a user provides
+        # the default catalog in their connection config. This doesn't though update catalogs in strings like when querying
+        # the information schema. So we need to manually replace those here.
+        expression = super()._get_grant_expression(table)
+        for col_exp in expression.find_all(exp.Column):
+            if col_exp.this.name == "table_catalog":
+                and_exp = col_exp.parent
+                assert and_exp is not None, "Expected column expression to have a parent"
+                assert and_exp.expression, "Expected AND expression to have an expression"
+                normalized_catalog = self._normalize_catalog(
+                    exp.table_("placeholder", db="placeholder", catalog=and_exp.expression.this)
+                )
+                and_exp.set(
+                    "expression",
+                    exp.Literal.string(normalized_catalog.args["catalog"].alias_or_name),
+                )
+        return expression
+
     def set_current_catalog(self, catalog: str) -> None:
         self.execute(exp.Use(this=exp.to_identifier(catalog)))
 
     def set_current_schema(self, schema: str) -> None:
         self.execute(exp.Use(kind="SCHEMA", this=to_schema(schema)))
 
-    def _to_sql(self, expression: exp.Expression, quote: bool = True, **kwargs: t.Any) -> str:
+    def _normalize_catalog(self, expression: exp.Expression) -> exp.Expression:
         # note: important to use self._default_catalog instead of the self.default_catalog property
         # otherwise we get RecursionError: maximum recursion depth exceeded
         # because it calls get_current_catalog(), which executes a query, which needs the default catalog, which calls get_current_catalog()... etc
@@ -568,8 +619,12 @@ class SnowflakeEngineAdapter(
         # Snowflake connection config. This is because the catalog present on the model gets normalized and quoted to match
         # the source dialect, which isnt always compatible with Snowflake
         expression = expression.transform(catalog_rewriter)
+        return expression
 
-        return super()._to_sql(expression=expression, quote=quote, **kwargs)
+    def _to_sql(self, expression: exp.Expression, quote: bool = True, **kwargs: t.Any) -> str:
+        return super()._to_sql(
+            expression=self._normalize_catalog(expression), quote=quote, **kwargs
+        )
 
     def _create_column_comments(
         self,
@@ -610,6 +665,7 @@ class SnowflakeEngineAdapter(
         target_table_name: TableName,
         source_table_name: TableName,
         replace: bool = False,
+        exists: bool = True,
         clone_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
         **kwargs: t.Any,
     ) -> None:
@@ -665,3 +721,18 @@ class SnowflakeEngineAdapter(
             self._connection_pool.set_attribute(self.SNOWPARK, None)
 
         return super().close()
+
+    def get_table_last_modified_ts(self, table_names: t.List[TableName]) -> t.List[int]:
+        from sqlmesh.utils.date import to_timestamp
+
+        num_tables = len(table_names)
+
+        query = "SELECT LAST_ALTERED FROM INFORMATION_SCHEMA.TABLES WHERE"
+        for i, table_name in enumerate(table_names):
+            table = exp.to_table(table_name)
+            query += f"""(TABLE_NAME = '{table.name}' AND TABLE_SCHEMA = '{table.db}' AND TABLE_CATALOG = '{table.catalog}')"""
+            if i < num_tables - 1:
+                query += " OR "
+
+        result = self.fetchall(query)
+        return [to_timestamp(row[0]) for row in result]
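
For illustration, the WHERE clause that get_table_last_modified_ts builds for two tables (hypothetical names): each table contributes a (TABLE_NAME, TABLE_SCHEMA, TABLE_CATALOG) predicate, joined with OR:

from sqlglot import exp

tables = ["analytics.public.orders", "analytics.public.customers"]  # hypothetical
predicates = []
for name in tables:
    table = exp.to_table(name)
    predicates.append(
        f"(TABLE_NAME = '{table.name}' AND TABLE_SCHEMA = '{table.db}' AND TABLE_CATALOG = '{table.catalog}')"
    )
query = "SELECT LAST_ALTERED FROM INFORMATION_SCHEMA.TABLES WHERE " + " OR ".join(predicates)
print(query)
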
sqlmesh/core/engine_adapter/spark.py CHANGED
@@ -397,19 +397,21 @@ class SparkEngineAdapter(
     def set_current_catalog(self, catalog_name: str) -> None:
         self.connection.set_current_catalog(catalog_name)
 
-    def get_current_database(self) -> str:
+    def _get_current_schema(self) -> str:
         if self._use_spark_session:
             return self.spark.catalog.currentDatabase()
         return self.fetchone(exp.select(exp.func("current_database")))[0]  # type: ignore
 
-    def get_data_object(self, target_name: TableName) -> t.Optional[DataObject]:
+    def get_data_object(
+        self, target_name: TableName, safe_to_cache: bool = False
+    ) -> t.Optional[DataObject]:
         target_table = exp.to_table(target_name)
         if isinstance(target_table.this, exp.Dot) and target_table.this.expression.name.startswith(
             f"{self.BRANCH_PREFIX}{self.WAP_PREFIX}"
         ):
             # Exclude the branch name
             target_table.set("this", target_table.this.this)
-        return super().get_data_object(target_table)
+        return super().get_data_object(target_table, safe_to_cache=safe_to_cache)
 
     def create_state_table(
         self,
@@ -457,12 +459,14 @@ class SparkEngineAdapter(
         if wap_id.startswith(f"{self.BRANCH_PREFIX}{self.WAP_PREFIX}"):
             table_name.set("this", table_name.this.this)
 
-        wap_supported = (
-            kwargs.get("storage_format") or ""
-        ).lower() == "iceberg" or self.wap_supported(table_name)
-        do_dummy_insert = (
-            False if not wap_supported or not exists else not self.table_exists(table_name)
-        )
+        do_dummy_insert = False
+        if self.wap_enabled:
+            wap_supported = (
+                kwargs.get("storage_format") or ""
+            ).lower() == "iceberg" or self.wap_supported(table_name)
+            do_dummy_insert = (
+                False if not wap_supported or not exists else not self.table_exists(table_name)
+            )
         super()._create_table(
             table_name_or_schema,
             expression,
@@ -535,7 +539,7 @@ class SparkEngineAdapter(
         if not table.catalog:
             table.set("catalog", self.get_current_catalog())
         if not table.db:
-            table.set("db", self.get_current_database())
+            table.set("db", self._get_current_schema())
         return table
 
     def _build_create_comment_column_exp(
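
The _create_table rework above gates the write-audit-publish (WAP) probe: when WAP is disabled, the adapter no longer calls wap_supported() or table_exists() just to decide whether a dummy insert is needed. A sketch of the gating logic with the adapter calls stubbed out as parameters:

def should_dummy_insert(wap_enabled, storage_format, wap_supported, exists, table_exists):
    # Mirrors the new control flow: nothing is probed unless WAP is enabled.
    if not wap_enabled:
        return False
    supported = (storage_format or "").lower() == "iceberg" or wap_supported
    return False if not supported or not exists else not table_exists

assert should_dummy_insert(False, "iceberg", True, True, False) is False
assert should_dummy_insert(True, "iceberg", False, True, False) is True
assert should_dummy_insert(True, "", False, True, False) is False
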
sqlmesh/core/engine_adapter/trino.py CHANGED
@@ -71,7 +71,7 @@ class TrinoEngineAdapter(
     MAX_TIMESTAMP_PRECISION = 3
 
     @property
-    def schema_location_mapping(self) -> t.Optional[dict[re.Pattern, str]]:
+    def schema_location_mapping(self) -> t.Optional[t.Dict[re.Pattern, str]]:
         return self._extra_config.get("schema_location_mapping")
 
     @property
@@ -86,6 +86,8 @@ class TrinoEngineAdapter(
     def get_catalog_type(self, catalog: t.Optional[str]) -> str:
         row: t.Tuple = tuple()
         if catalog:
+            if catalog_type_override := self._catalog_type_overrides.get(catalog):
+                return catalog_type_override
             row = (
                 self.fetchone(
                     f"select connector_name from system.metadata.catalogs where catalog_name='{catalog}'"
@@ -300,7 +302,7 @@ class TrinoEngineAdapter(
         execution_time: t.Union[TimeLike, exp.Column],
         invalidate_hard_deletes: bool = True,
         updated_at_col: t.Optional[exp.Column] = None,
-        check_columns: t.Optional[t.Union[exp.Star, t.Sequence[exp.Column]]] = None,
+        check_columns: t.Optional[t.Union[exp.Star, t.Sequence[exp.Expression]]] = None,
         updated_at_as_valid_from: bool = False,
         execution_time_as_valid_from: bool = False,
         target_columns_to_types: t.Optional[t.Dict[str, exp.DataType]] = None,
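
get_catalog_type now consults the configured catalog_type_overrides mapping before falling back to the system.metadata.catalogs query. The same pattern reduced to a sketch, with the metadata lookup passed in as a callable:

def get_catalog_type(catalog, overrides, fetch_connector_name):
    if override := overrides.get(catalog):
        return override  # configured override wins, no query issued
    return fetch_connector_name(catalog)

assert get_catalog_type("lake", {"lake": "iceberg"}, lambda c: "hive") == "iceberg"
assert get_catalog_type("lake", {}, lambda c: "hive") == "hive"
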
sqlmesh/core/janitor.py ADDED
@@ -0,0 +1,181 @@
+from __future__ import annotations
+
+import typing as t
+
+from sqlglot import exp
+
+from sqlmesh.core.engine_adapter import EngineAdapter
+from sqlmesh.core.console import Console
+from sqlmesh.core.dialect import schema_
+from sqlmesh.core.environment import Environment
+from sqlmesh.core.snapshot import SnapshotEvaluator
+from sqlmesh.core.state_sync import StateSync
+from sqlmesh.core.state_sync.common import (
+    logger,
+    iter_expired_snapshot_batches,
+    RowBoundary,
+    ExpiredBatchRange,
+)
+from sqlmesh.utils.errors import SQLMeshError
+
+
+def cleanup_expired_views(
+    default_adapter: EngineAdapter,
+    engine_adapters: t.Dict[str, EngineAdapter],
+    environments: t.List[Environment],
+    warn_on_delete_failure: bool = False,
+    console: t.Optional[Console] = None,
+) -> None:
+    expired_schema_or_catalog_environments = [
+        environment
+        for environment in environments
+        if environment.suffix_target.is_schema or environment.suffix_target.is_catalog
+    ]
+    expired_table_environments = [
+        environment for environment in environments if environment.suffix_target.is_table
+    ]
+
+    # We have to use the corresponding adapter if the virtual layer is gateway managed
+    def get_adapter(gateway_managed: bool, gateway: t.Optional[str] = None) -> EngineAdapter:
+        if gateway_managed and gateway:
+            return engine_adapters.get(gateway, default_adapter)
+        return default_adapter
+
+    catalogs_to_drop: t.Set[t.Tuple[EngineAdapter, str]] = set()
+    schemas_to_drop: t.Set[t.Tuple[EngineAdapter, exp.Table]] = set()
+
+    # Collect schemas and catalogs to drop
+    for engine_adapter, expired_catalog, expired_schema, suffix_target in {
+        (
+            (engine_adapter := get_adapter(environment.gateway_managed, snapshot.model_gateway)),
+            snapshot.qualified_view_name.catalog_for_environment(
+                environment.naming_info, dialect=engine_adapter.dialect
+            ),
+            snapshot.qualified_view_name.schema_for_environment(
+                environment.naming_info, dialect=engine_adapter.dialect
+            ),
+            environment.suffix_target,
+        )
+        for environment in expired_schema_or_catalog_environments
+        for snapshot in environment.snapshots
+        if snapshot.is_model and not snapshot.is_symbolic
+    }:
+        if suffix_target.is_catalog:
+            if expired_catalog:
+                catalogs_to_drop.add((engine_adapter, expired_catalog))
+        else:
+            schema = schema_(expired_schema, expired_catalog)
+            schemas_to_drop.add((engine_adapter, schema))
+
+    # Drop the views for the expired environments
+    for engine_adapter, expired_view in {
+        (
+            (engine_adapter := get_adapter(environment.gateway_managed, snapshot.model_gateway)),
+            snapshot.qualified_view_name.for_environment(
+                environment.naming_info, dialect=engine_adapter.dialect
+            ),
+        )
+        for environment in expired_table_environments
+        for snapshot in environment.snapshots
+        if snapshot.is_model and not snapshot.is_symbolic
+    }:
+        try:
+            engine_adapter.drop_view(expired_view, ignore_if_not_exists=True)
+            if console:
+                console.update_cleanup_progress(expired_view)
+        except Exception as e:
+            message = f"Failed to drop the expired environment view '{expired_view}': {e}"
+            if warn_on_delete_failure:
+                logger.warning(message)
+            else:
+                raise SQLMeshError(message) from e
+
+    # Drop the schemas for the expired environments
+    for engine_adapter, schema in schemas_to_drop:
+        try:
+            engine_adapter.drop_schema(
+                schema,
+                ignore_if_not_exists=True,
+                cascade=True,
+            )
+            if console:
+                console.update_cleanup_progress(schema.sql(dialect=engine_adapter.dialect))
+        except Exception as e:
+            message = f"Failed to drop the expired environment schema '{schema}': {e}"
+            if warn_on_delete_failure:
+                logger.warning(message)
+            else:
+                raise SQLMeshError(message) from e
+
+    # Drop any catalogs that were associated with a snapshot where the engine adapter supports dropping catalogs
+    # catalogs_to_drop is only populated when environment_suffix_target is set to 'catalog'
+    for engine_adapter, catalog in catalogs_to_drop:
+        if engine_adapter.SUPPORTS_CREATE_DROP_CATALOG:
+            try:
+                engine_adapter.drop_catalog(catalog)
+                if console:
+                    console.update_cleanup_progress(catalog)
+            except Exception as e:
+                message = f"Failed to drop the expired environment catalog '{catalog}': {e}"
+                if warn_on_delete_failure:
+                    logger.warning(message)
+                else:
+                    raise SQLMeshError(message) from e
+
+
+
126
+ def delete_expired_snapshots(
127
+ state_sync: StateSync,
128
+ snapshot_evaluator: SnapshotEvaluator,
129
+ *,
130
+ current_ts: int,
131
+ ignore_ttl: bool = False,
132
+ batch_size: t.Optional[int] = None,
133
+ console: t.Optional[Console] = None,
134
+ ) -> None:
135
+ """Delete all expired snapshots in batches.
136
+
137
+ This helper function encapsulates the logic for deleting expired snapshots in batches,
138
+ eliminating code duplication across different use cases.
139
+
140
+ Args:
141
+ state_sync: StateSync instance to query and delete expired snapshots from.
142
+ snapshot_evaluator: SnapshotEvaluator instance to clean up tables associated with snapshots.
143
+ current_ts: Timestamp used to evaluate expiration.
144
+ ignore_ttl: If True, include snapshots regardless of TTL (only checks if unreferenced).
145
+ batch_size: Maximum number of snapshots to fetch per batch.
146
+ console: Optional console for reporting progress.
147
+
148
+ Returns:
149
+ The total number of deleted expired snapshots.
150
+ """
151
+ num_expired_snapshots = 0
152
+ for batch in iter_expired_snapshot_batches(
153
+ state_reader=state_sync,
154
+ current_ts=current_ts,
155
+ ignore_ttl=ignore_ttl,
156
+ batch_size=batch_size,
157
+ ):
158
+ end_info = (
159
+ f"updated_ts={batch.batch_range.end.updated_ts}"
160
+ if isinstance(batch.batch_range.end, RowBoundary)
161
+ else f"limit={batch.batch_range.end.batch_size}"
162
+ )
163
+ logger.info(
164
+ "Processing batch of size %s with end %s",
165
+ len(batch.expired_snapshot_ids),
166
+ end_info,
167
+ )
168
+ snapshot_evaluator.cleanup(
169
+ target_snapshots=batch.cleanup_tasks,
170
+ on_complete=console.update_cleanup_progress if console else None,
171
+ )
172
+ state_sync.delete_expired_snapshots(
173
+ batch_range=ExpiredBatchRange(
174
+ start=RowBoundary.lowest_boundary(),
175
+ end=batch.batch_range.end,
176
+ ),
177
+ ignore_ttl=ignore_ttl,
178
+ )
179
+ logger.info("Cleaned up expired snapshots batch")
180
+ num_expired_snapshots += len(batch.expired_snapshot_ids)
181
+ logger.info("Cleaned up %s expired snapshots", num_expired_snapshots)
sqlmesh/core/lineage.py CHANGED
@@ -66,6 +66,7 @@ def lineage(
         scope=scope,
         trim_selects=trim_selects,
         dialect=model.dialect,
+        copy=False,
     )
 