sqlglot 26.28.1.tar.gz → 26.30.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (219)
  1. {sqlglot-26.28.1 → sqlglot-26.30.0}/CHANGELOG.md +55 -0
  2. {sqlglot-26.28.1 → sqlglot-26.30.0}/PKG-INFO +1 -1
  3. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/_version.py +2 -2
  4. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/__init__.py +1 -0
  5. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/athena.py +1 -0
  6. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/bigquery.py +1 -0
  7. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/clickhouse.py +2 -0
  8. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/dialect.py +16 -5
  9. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/duckdb.py +8 -0
  10. sqlglot-26.30.0/sqlglot/dialects/fabric.py +88 -0
  11. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/hive.py +1 -0
  12. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/oracle.py +15 -0
  13. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/presto.py +1 -0
  14. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/prql.py +5 -1
  15. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/redshift.py +1 -0
  16. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/snowflake.py +39 -1
  17. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/spark.py +17 -0
  18. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/sqlite.py +4 -3
  19. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/tsql.py +2 -2
  20. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/expressions.py +9 -1
  21. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/generator.py +20 -3
  22. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/annotate_types.py +44 -1
  23. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/qualify_columns.py +7 -0
  24. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/scope.py +14 -1
  25. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/parser.py +191 -87
  26. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot.egg-info/PKG-INFO +1 -1
  27. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot.egg-info/SOURCES.txt +2 -0
  28. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_athena.py +4 -0
  29. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_bigquery.py +20 -16
  30. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_clickhouse.py +5 -1
  31. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_databricks.py +4 -0
  32. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_dialect.py +2 -2
  33. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_duckdb.py +22 -1
  34. sqlglot-26.30.0/tests/dialects/test_fabric.py +64 -0
  35. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_hive.py +4 -0
  36. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_oracle.py +14 -0
  37. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_pipe_syntax.py +103 -48
  38. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_presto.py +7 -3
  39. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_snowflake.py +85 -27
  40. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_spark.py +24 -0
  41. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_sqlite.py +4 -0
  42. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_trino.py +16 -0
  43. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_tsql.py +4 -0
  44. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/qualify_columns.sql +8 -3
  45. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/qualify_tables.sql +3 -3
  46. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_optimizer.py +37 -0
  47. {sqlglot-26.28.1 → sqlglot-26.30.0}/.gitignore +0 -0
  48. {sqlglot-26.28.1 → sqlglot-26.30.0}/.gitpod.yml +0 -0
  49. {sqlglot-26.28.1 → sqlglot-26.30.0}/.pre-commit-config.yaml +0 -0
  50. {sqlglot-26.28.1 → sqlglot-26.30.0}/CONTRIBUTING.md +0 -0
  51. {sqlglot-26.28.1 → sqlglot-26.30.0}/LICENSE +0 -0
  52. {sqlglot-26.28.1 → sqlglot-26.30.0}/MANIFEST.in +0 -0
  53. {sqlglot-26.28.1 → sqlglot-26.30.0}/Makefile +0 -0
  54. {sqlglot-26.28.1 → sqlglot-26.30.0}/README.md +0 -0
  55. {sqlglot-26.28.1 → sqlglot-26.30.0}/pyproject.toml +0 -0
  56. {sqlglot-26.28.1 → sqlglot-26.30.0}/setup.cfg +0 -0
  57. {sqlglot-26.28.1 → sqlglot-26.30.0}/setup.py +0 -0
  58. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/__init__.py +0 -0
  59. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/__main__.py +0 -0
  60. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/_typing.py +0 -0
  61. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/databricks.py +0 -0
  62. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/doris.py +0 -0
  63. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/drill.py +0 -0
  64. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/druid.py +0 -0
  65. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/dune.py +0 -0
  66. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/materialize.py +0 -0
  67. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/mysql.py +0 -0
  68. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/postgres.py +0 -0
  69. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/risingwave.py +0 -0
  70. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/spark2.py +0 -0
  71. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/starrocks.py +0 -0
  72. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/tableau.py +0 -0
  73. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/teradata.py +0 -0
  74. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/dialects/trino.py +0 -0
  75. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/diff.py +0 -0
  76. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/errors.py +0 -0
  77. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/executor/__init__.py +0 -0
  78. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/executor/context.py +0 -0
  79. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/executor/env.py +0 -0
  80. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/executor/python.py +0 -0
  81. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/executor/table.py +0 -0
  82. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/helper.py +0 -0
  83. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/jsonpath.py +0 -0
  84. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/lineage.py +0 -0
  85. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/__init__.py +0 -0
  86. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/canonicalize.py +0 -0
  87. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/eliminate_ctes.py +0 -0
  88. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/eliminate_joins.py +0 -0
  89. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/eliminate_subqueries.py +0 -0
  90. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/isolate_table_selects.py +0 -0
  91. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/merge_subqueries.py +0 -0
  92. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/normalize.py +0 -0
  93. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/normalize_identifiers.py +0 -0
  94. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/optimize_joins.py +0 -0
  95. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/optimizer.py +0 -0
  96. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/pushdown_predicates.py +0 -0
  97. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/pushdown_projections.py +0 -0
  98. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/qualify.py +0 -0
  99. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/qualify_tables.py +0 -0
  100. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/simplify.py +0 -0
  101. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/optimizer/unnest_subqueries.py +0 -0
  102. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/planner.py +0 -0
  103. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/py.typed +0 -0
  104. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/schema.py +0 -0
  105. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/serde.py +0 -0
  106. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/time.py +0 -0
  107. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/tokens.py +0 -0
  108. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/transforms.py +0 -0
  109. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot/trie.py +0 -0
  110. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot.egg-info/dependency_links.txt +0 -0
  111. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot.egg-info/requires.txt +0 -0
  112. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot.egg-info/top_level.txt +0 -0
  113. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglot.png +0 -0
  114. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/Cargo.lock +0 -0
  115. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/Cargo.toml +0 -0
  116. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/benches/dialect_settings.json +0 -0
  117. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/benches/long.rs +0 -0
  118. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/benches/token_type_settings.json +0 -0
  119. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/benches/tokenizer_dialect_settings.json +0 -0
  120. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/benches/tokenizer_settings.json +0 -0
  121. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/pyproject.toml +0 -0
  122. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/src/lib.rs +0 -0
  123. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/src/settings.rs +0 -0
  124. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/src/token.rs +0 -0
  125. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/src/tokenizer.rs +0 -0
  126. {sqlglot-26.28.1 → sqlglot-26.30.0}/sqlglotrs/src/trie.rs +0 -0
  127. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/__init__.py +0 -0
  128. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/__init__.py +0 -0
  129. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_doris.py +0 -0
  130. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_drill.py +0 -0
  131. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_druid.py +0 -0
  132. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_dune.py +0 -0
  133. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_materialize.py +0 -0
  134. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_mysql.py +0 -0
  135. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_postgres.py +0 -0
  136. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_prql.py +0 -0
  137. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_redshift.py +0 -0
  138. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_risingwave.py +0 -0
  139. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_starrocks.py +0 -0
  140. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_tableau.py +0 -0
  141. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/dialects/test_teradata.py +0 -0
  142. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/identity.sql +0 -0
  143. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/jsonpath/LICENSE +0 -0
  144. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/jsonpath/cts.json +0 -0
  145. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/annotate_functions.sql +0 -0
  146. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/annotate_types.sql +0 -0
  147. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/canonicalize.sql +0 -0
  148. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/eliminate_ctes.sql +0 -0
  149. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/eliminate_joins.sql +0 -0
  150. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/eliminate_subqueries.sql +0 -0
  151. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/isolate_table_selects.sql +0 -0
  152. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/merge_subqueries.sql +0 -0
  153. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/normalize.sql +0 -0
  154. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/normalize_identifiers.sql +0 -0
  155. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/optimize_joins.sql +0 -0
  156. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/optimizer.sql +0 -0
  157. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/pushdown_cte_alias_columns.sql +0 -0
  158. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/pushdown_predicates.sql +0 -0
  159. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/pushdown_projections.sql +0 -0
  160. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/qualify_columns__invalid.sql +0 -0
  161. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/qualify_columns__with_invisible.sql +0 -0
  162. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/qualify_columns_ddl.sql +0 -0
  163. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/quote_identifiers.sql +0 -0
  164. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/simplify.sql +0 -0
  165. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/call_center.csv.gz +0 -0
  166. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/catalog_page.csv.gz +0 -0
  167. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/catalog_returns.csv.gz +0 -0
  168. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/catalog_sales.csv.gz +0 -0
  169. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/customer.csv.gz +0 -0
  170. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/customer_address.csv.gz +0 -0
  171. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/customer_demographics.csv.gz +0 -0
  172. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/date_dim.csv.gz +0 -0
  173. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/household_demographics.csv.gz +0 -0
  174. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/income_band.csv.gz +0 -0
  175. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/inventory.csv.gz +0 -0
  176. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/item.csv.gz +0 -0
  177. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/promotion.csv.gz +0 -0
  178. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/reason.csv.gz +0 -0
  179. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/ship_mode.csv.gz +0 -0
  180. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/store.csv.gz +0 -0
  181. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/store_returns.csv.gz +0 -0
  182. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/store_sales.csv.gz +0 -0
  183. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/time_dim.csv.gz +0 -0
  184. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql +0 -0
  185. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/warehouse.csv.gz +0 -0
  186. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/web_page.csv.gz +0 -0
  187. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/web_returns.csv.gz +0 -0
  188. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/web_sales.csv.gz +0 -0
  189. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-ds/web_site.csv.gz +0 -0
  190. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/customer.csv.gz +0 -0
  191. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/lineitem.csv.gz +0 -0
  192. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/nation.csv.gz +0 -0
  193. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/orders.csv.gz +0 -0
  194. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/part.csv.gz +0 -0
  195. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/partsupp.csv.gz +0 -0
  196. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/region.csv.gz +0 -0
  197. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/supplier.csv.gz +0 -0
  198. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/tpc-h/tpc-h.sql +0 -0
  199. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/optimizer/unnest_subqueries.sql +0 -0
  200. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/partial.sql +0 -0
  201. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/fixtures/pretty.sql +0 -0
  202. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/gen_fixtures.py +0 -0
  203. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/helpers.py +0 -0
  204. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_build.py +0 -0
  205. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_diff.py +0 -0
  206. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_docs.py +0 -0
  207. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_executor.py +0 -0
  208. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_expressions.py +0 -0
  209. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_generator.py +0 -0
  210. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_helper.py +0 -0
  211. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_jsonpath.py +0 -0
  212. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_lineage.py +0 -0
  213. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_parser.py +0 -0
  214. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_schema.py +0 -0
  215. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_serde.py +0 -0
  216. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_time.py +0 -0
  217. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_tokens.py +0 -0
  218. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_transforms.py +0 -0
  219. {sqlglot-26.28.1 → sqlglot-26.30.0}/tests/test_transpile.py +0 -0
--- sqlglot-26.28.1/CHANGELOG.md
+++ sqlglot-26.30.0/CHANGELOG.md
@@ -1,6 +1,59 @@
  Changelog
  =========

+ ## [v26.29.0] - 2025-06-17
+ ### :boom: BREAKING CHANGES
+ - due to [`4f42d95`](https://github.com/tobymao/sqlglot/commit/4f42d951363f8c43a4c414dc21d0505d9c8e48bf) - Normalize date parts in `exp.Extract` generation *(PR [#5229](https://github.com/tobymao/sqlglot/pull/5229) by [@VaggelisD](https://github.com/VaggelisD))*:
+
+ Normalize date parts in `exp.Extract` generation (#5229)
+
+ - due to [`e7e38fe`](https://github.com/tobymao/sqlglot/commit/e7e38fe0e09f9affbff4ffa7023d0161e3a1ee49) - resolve table "columns" in bigquery that produce structs *(PR [#5230](https://github.com/tobymao/sqlglot/pull/5230) by [@georgesittas](https://github.com/georgesittas))*:
+
+ resolve table "columns" in bigquery that produce structs (#5230)
+
+
+ ### :sparkles: New Features
+ - [`97f5822`](https://github.com/tobymao/sqlglot/commit/97f58226fc8815b23787b7b8699ea71f58268560) - **parser**: AS pipe syntax *(PR [#5224](https://github.com/tobymao/sqlglot/pull/5224) by [@geooo109](https://github.com/geooo109))*
+ - [`a7e7fee`](https://github.com/tobymao/sqlglot/commit/a7e7feef02a77fe8606f3f482bad91230fa637f4) - **parser**: EXTEND pipe syntax *(PR [#5225](https://github.com/tobymao/sqlglot/pull/5225) by [@geooo109](https://github.com/geooo109))*
+ - [`c1cb9f8`](https://github.com/tobymao/sqlglot/commit/c1cb9f8f682080f7a06c387219d79c6d068b6dbe) - **snowflake**: add autoincrement order clause support *(PR [#5223](https://github.com/tobymao/sqlglot/pull/5223) by [@dmaresma](https://github.com/dmaresma))*
+ - [`91afe4c`](https://github.com/tobymao/sqlglot/commit/91afe4cfd7b3f427e4c0b298075e867b8a1bbe55) - **parser**: TABLESAMPLE pipe syntax *(PR [#5231](https://github.com/tobymao/sqlglot/pull/5231) by [@geooo109](https://github.com/geooo109))*
+ - [`62da84a`](https://github.com/tobymao/sqlglot/commit/62da84acce7f44802dca26a9357a16115e21fabf) - **snowflake**: improve transpilation of unnested object lookup *(PR [#5234](https://github.com/tobymao/sqlglot/pull/5234) by [@georgesittas](https://github.com/georgesittas))*
+ - [`2c60453`](https://github.com/tobymao/sqlglot/commit/2c604537ba83dee74e9ced7e216673ecc70fe487) - **parser**: DROP pipe syntax *(PR [#5226](https://github.com/tobymao/sqlglot/pull/5226) by [@geooo109](https://github.com/geooo109))*
+ - [`9885729`](https://github.com/tobymao/sqlglot/commit/988572954135c68dc021b992c815024ce3debaff) - **parser**: SET pipe syntax *(PR [#5236](https://github.com/tobymao/sqlglot/pull/5236) by [@geooo109](https://github.com/geooo109))*
+
+ ### :bug: Bug Fixes
+ - [`df73a79`](https://github.com/tobymao/sqlglot/commit/df73a79a2ca3ba859b8aba5e3d0f6ed269874a63) - **tsql**: Retain limit clause in subquery expression. *(PR [#5227](https://github.com/tobymao/sqlglot/pull/5227) by [@MarcusRisanger](https://github.com/MarcusRisanger))*
+ - [`4f42d95`](https://github.com/tobymao/sqlglot/commit/4f42d951363f8c43a4c414dc21d0505d9c8e48bf) - **duckdb**: Normalize date parts in `exp.Extract` generation *(PR [#5229](https://github.com/tobymao/sqlglot/pull/5229) by [@VaggelisD](https://github.com/VaggelisD))*
+ - :arrow_lower_right: *fixes issue [#5228](https://github.com/tobymao/sqlglot/issues/5228) opened by [@greybeam-bot](https://github.com/greybeam-bot)*
+ - [`1b4c083`](https://github.com/tobymao/sqlglot/commit/1b4c083fff8d7c44bf1dbba28c1225fa1e28c4d2) - **athena**: include Hive string escapes in the tokenizer *(PR [#5233](https://github.com/tobymao/sqlglot/pull/5233) by [@georgesittas](https://github.com/georgesittas))*
+ - :arrow_lower_right: *fixes issue [#5232](https://github.com/tobymao/sqlglot/issues/5232) opened by [@ligfx](https://github.com/ligfx)*
+ - [`e7e38fe`](https://github.com/tobymao/sqlglot/commit/e7e38fe0e09f9affbff4ffa7023d0161e3a1ee49) - **optimizer**: resolve table "columns" in bigquery that produce structs *(PR [#5230](https://github.com/tobymao/sqlglot/pull/5230) by [@georgesittas](https://github.com/georgesittas))*
+ - :arrow_lower_right: *fixes issue [#5207](https://github.com/tobymao/sqlglot/issues/5207) opened by [@Bladieblah](https://github.com/Bladieblah)*
+ - [`781539d`](https://github.com/tobymao/sqlglot/commit/781539d5cbe58142ed6688f1522fc4ed31da0a56) - **duckdb**: Generate correct DETACH syntax if IF EXISTS is set *(PR [#5235](https://github.com/tobymao/sqlglot/pull/5235) by [@erindru](https://github.com/erindru))*
+
+ ### :wrench: Chores
+ - [`7dfb578`](https://github.com/tobymao/sqlglot/commit/7dfb5780fb242c82744dc1538077776ac624081e) - Refactor DETACH generation *(PR [#5237](https://github.com/tobymao/sqlglot/pull/5237) by [@VaggelisD](https://github.com/VaggelisD))*
+
+
+ ## [v26.28.1] - 2025-06-13
+ ### :boom: BREAKING CHANGES
+ - due to [`44297f1`](https://github.com/tobymao/sqlglot/commit/44297f1c5c8c2cb16fe77c318312f417b4281708) - JOIN pipe syntax, Set Operators as CTEs *(PR [#5215](https://github.com/tobymao/sqlglot/pull/5215) by [@geooo109](https://github.com/geooo109))*:
+
+ JOIN pipe syntax, Set Operators as CTEs (#5215)
+
+
+ ### :sparkles: New Features
+ - [`44297f1`](https://github.com/tobymao/sqlglot/commit/44297f1c5c8c2cb16fe77c318312f417b4281708) - **parser**: JOIN pipe syntax, Set Operators as CTEs *(PR [#5215](https://github.com/tobymao/sqlglot/pull/5215) by [@geooo109](https://github.com/geooo109))*
+ - [`21cd3eb`](https://github.com/tobymao/sqlglot/commit/21cd3ebf5d0b57f5b102c5aadc3b24a598ebe918) - **parser**: PIVOT/UNPIVOT pipe syntax *(PR [#5222](https://github.com/tobymao/sqlglot/pull/5222) by [@geooo109](https://github.com/geooo109))*
+
+ ### :bug: Bug Fixes
+ - [`28fed58`](https://github.com/tobymao/sqlglot/commit/28fed586a39df83aade4792217743a1a859fd039) - **optimizer**: UnboundLocalError in scope module *(commit by [@georgesittas](https://github.com/georgesittas))*
+ - [`809e05a`](https://github.com/tobymao/sqlglot/commit/809e05a743d5a2904a1d6f6813f24ca7549ac7ef) - **snowflake**: preserve STRTOK_TO_ARRAY roundtrip *(commit by [@georgesittas](https://github.com/georgesittas))*
+
+ ### :recycle: Refactors
+ - [`aac70aa`](https://github.com/tobymao/sqlglot/commit/aac70aaaa8d840c267129e2307ccb65058cef0c9) - **parser**: simpler _parse_pipe_syntax_select *(commit by [@geooo109](https://github.com/geooo109))*
+
+
  ## [v26.27.0] - 2025-06-12
  ### :boom: BREAKING CHANGES
  - due to [`ac6555b`](https://github.com/tobymao/sqlglot/commit/ac6555b4d6c162ef7b14b63307d01fd560138ea0) - preserve DIV binary operator, fixes [#5198](https://github.com/tobymao/sqlglot/pull/5198) *(PR [#5199](https://github.com/tobymao/sqlglot/pull/5199) by [@georgesittas](https://github.com/georgesittas))*:
@@ -4882,3 +4935,5 @@ Changelog
  [v26.25.3]: https://github.com/tobymao/sqlglot/compare/v26.25.2...v26.25.3
  [v26.26.0]: https://github.com/tobymao/sqlglot/compare/v26.25.3...v26.26.0
  [v26.27.0]: https://github.com/tobymao/sqlglot/compare/v26.26.0...v26.27.0
+ [v26.28.1]: https://github.com/tobymao/sqlglot/compare/v26.27.1...v26.28.1
+ [v26.29.0]: https://github.com/tobymao/sqlglot/compare/v26.28.1...v26.29.0
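Much of v26.28.1 and v26.29.0 extends BigQuery pipe syntax coverage (AS, EXTEND, DROP, SET, TABLESAMPLE, JOIN, PIVOT/UNPIVOT operators). A minimal sketch of what that enables; the table and columns are made up and the output shown is approximate:

    import sqlglot

    # BigQuery pipe-syntax query (hypothetical table/columns).
    sql = "FROM orders |> WHERE amount > 10 |> SELECT customer_id, amount"

    # Rewritten as a conventional SELECT for DuckDB; exact formatting may differ.
    print(sqlglot.transpile(sql, read="bigquery", write="duckdb")[0])
    # e.g. SELECT customer_id, amount FROM orders WHERE amount > 10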
--- sqlglot-26.28.1/PKG-INFO
+++ sqlglot-26.30.0/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sqlglot
- Version: 26.28.1
+ Version: 26.30.0
  Summary: An easily customizable SQL parser and transpiler
  Author-email: Toby Mao <toby.mao@gmail.com>
  License: MIT License
--- sqlglot-26.28.1/sqlglot/_version.py
+++ sqlglot-26.30.0/sqlglot/_version.py
@@ -17,5 +17,5 @@ __version__: str
  __version_tuple__: VERSION_TUPLE
  version_tuple: VERSION_TUPLE

- __version__ = version = '26.28.1'
- __version_tuple__ = version_tuple = (26, 28, 1)
+ __version__ = version = '26.30.0'
+ __version_tuple__ = version_tuple = (26, 30, 0)
--- sqlglot-26.28.1/sqlglot/dialects/__init__.py
+++ sqlglot-26.30.0/sqlglot/dialects/__init__.py
@@ -74,6 +74,7 @@ DIALECTS = [
  "Druid",
  "DuckDB",
  "Dune",
+ "Fabric",
  "Hive",
  "Materialize",
  "MySQL",
--- sqlglot-26.28.1/sqlglot/dialects/athena.py
+++ sqlglot-26.30.0/sqlglot/dialects/athena.py
@@ -108,6 +108,7 @@ class Athena(Trino):
  """

  IDENTIFIERS = ['"', "`"]
+ STRING_ESCAPES = ["'", "\\"]
  KEYWORDS = {
  **Hive.Tokenizer.KEYWORDS,
  **Trino.Tokenizer.KEYWORDS,
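The tokenizer change above lets Athena accept Hive-style backslash escapes inside string literals (issue #5232). A small sketch; the literal is hypothetical and the generated quoting style may differ:

    import sqlglot

    # A backslash-escaped quote now tokenizes under the Athena dialect, matching Hive/Spark.
    expr = sqlglot.parse_one(r"SELECT 'It\'s raining'", read="athena")
    print(expr.sql(dialect="athena"))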
--- sqlglot-26.28.1/sqlglot/dialects/bigquery.py
+++ sqlglot-26.30.0/sqlglot/dialects/bigquery.py
@@ -524,6 +524,7 @@ class BigQuery(Dialect):
  PREFIXED_PIVOT_COLUMNS = True
  LOG_DEFAULTS_TO_LN = True
  SUPPORTS_IMPLICIT_UNNEST = True
+ JOINS_HAVE_EQUAL_PRECEDENCE = True

  # BigQuery does not allow ASC/DESC to be used as an identifier
  ID_VAR_TOKENS = parser.Parser.ID_VAR_TOKENS - {TokenType.ASC, TokenType.DESC}
--- sqlglot-26.28.1/sqlglot/dialects/clickhouse.py
+++ sqlglot-26.30.0/sqlglot/dialects/clickhouse.py
@@ -297,6 +297,7 @@ class ClickHouse(Dialect):
  MODIFIERS_ATTACHED_TO_SET_OP = False
  INTERVAL_SPANS = False
  OPTIONAL_ALIAS_TOKEN_CTE = False
+ JOINS_HAVE_EQUAL_PRECEDENCE = True

  FUNCTIONS = {
  **parser.Parser.FUNCTIONS,
@@ -691,6 +692,7 @@ class ClickHouse(Dialect):
  parse_bracket: bool = False,
  is_db_reference: bool = False,
  parse_partition: bool = False,
+ consume_pipe: bool = False,
  ) -> t.Optional[exp.Expression]:
  this = super()._parse_table(
  schema=schema,
--- sqlglot-26.28.1/sqlglot/dialects/dialect.py
+++ sqlglot-26.30.0/sqlglot/dialects/dialect.py
@@ -77,6 +77,7 @@ class Dialects(str, Enum):
  DRUID = "druid"
  DUCKDB = "duckdb"
  DUNE = "dune"
+ FABRIC = "fabric"
  HIVE = "hive"
  MATERIALIZE = "materialize"
  MYSQL = "mysql"
@@ -1621,7 +1622,10 @@ def map_date_part(part, dialect: DialectType = Dialect):
  mapped = (
  Dialect.get_or_raise(dialect).DATE_PART_MAPPING.get(part.name.upper()) if part else None
  )
- return exp.var(mapped) if mapped else part
+ if mapped:
+ return exp.Literal.string(mapped) if part.is_string else exp.var(mapped)
+
+ return part


  def no_last_day_sql(self: Generator, expression: exp.LastDay) -> str:
@@ -1903,12 +1907,19 @@ def groupconcat_sql(

  def build_timetostr_or_tochar(args: t.List, dialect: Dialect) -> exp.TimeToStr | exp.ToChar:
  this = seq_get(args, 0)
+ format = seq_get(args, 1)

- if this and not this.type:
- from sqlglot.optimizer.annotate_types import annotate_types
+ if this:
+ if not this.type:
+ from sqlglot.optimizer.annotate_types import annotate_types
+
+ annotate_types(this, dialect=dialect)

- annotate_types(this, dialect=dialect)
- if this.is_type(*exp.DataType.TEMPORAL_TYPES):
+ from sqlglot.dialects import Snowflake
+
+ if this.is_type(*exp.DataType.TEMPORAL_TYPES) or (
+ isinstance(format, exp.Literal) and format.name in Snowflake.TIME_MAPPING
+ ):
  dialect_name = dialect.__class__.__name__.lower()
  return build_formatted_time(exp.TimeToStr, dialect_name, default=True)(args)

--- sqlglot-26.28.1/sqlglot/dialects/duckdb.py
+++ sqlglot-26.30.0/sqlglot/dialects/duckdb.py
@@ -290,6 +290,12 @@ class DuckDB(Dialect):
  # https://duckdb.org/docs/sql/introduction.html#creating-a-new-table
  NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

+ DATE_PART_MAPPING = {
+ **Dialect.DATE_PART_MAPPING,
+ "DAYOFWEEKISO": "ISODOW",
+ }
+ DATE_PART_MAPPING.pop("WEEKDAY")
+
  def to_json_path(self, path: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
  if isinstance(path, exp.Literal):
  # DuckDB also supports the JSON pointer syntax, where every path starts with a `/`.
@@ -502,6 +508,7 @@
  parse_bracket: bool = False,
  is_db_reference: bool = False,
  parse_partition: bool = False,
+ consume_pipe: bool = False,
  ) -> t.Optional[exp.Expression]:
  # DuckDB supports prefix aliases, e.g. FROM foo: bar
  if self._next and self._next.token_type == TokenType.COLON:
@@ -620,6 +627,7 @@
  PAD_FILL_PATTERN_IS_REQUIRED = True
  ARRAY_CONCAT_IS_VAR_LEN = False
  ARRAY_SIZE_DIM_REQUIRED = False
+ NORMALIZE_EXTRACT_DATE_PARTS = True

  TRANSFORMS = {
  **generator.Generator.TRANSFORMS,
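With the new DuckDB `DATE_PART_MAPPING` entry and `NORMALIZE_EXTRACT_DATE_PARTS = True`, date parts inside `EXTRACT` are rewritten to DuckDB's names at generation time. A sketch with a hypothetical column; the output shown is the expected shape:

    import sqlglot

    # Snowflake's ISO day-of-week part should come out as DuckDB's ISODOW.
    sql = "SELECT EXTRACT(DAYOFWEEKISO FROM created_at) FROM events"
    print(sqlglot.transpile(sql, read="snowflake", write="duckdb")[0])
    # expected: SELECT EXTRACT(ISODOW FROM created_at) FROM events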
--- /dev/null
+++ sqlglot-26.30.0/sqlglot/dialects/fabric.py
@@ -0,0 +1,88 @@
+ from __future__ import annotations
+
+ from sqlglot import exp
+ from sqlglot.dialects.dialect import NormalizationStrategy
+ from sqlglot.dialects.tsql import TSQL
+
+
+ class Fabric(TSQL):
+ """
+ Microsoft Fabric Data Warehouse dialect that inherits from T-SQL.
+
+ Microsoft Fabric is a cloud-based analytics platform that provides a unified
+ data warehouse experience. While it shares much of T-SQL's syntax, it has
+ specific differences and limitations that this dialect addresses.
+
+ Key differences from T-SQL:
+ - Case-sensitive identifiers (unlike T-SQL which is case-insensitive)
+ - Limited data type support with mappings to supported alternatives
+ - Temporal types (DATETIME2, DATETIMEOFFSET, TIME) limited to 6 digits precision
+ - Certain legacy types (MONEY, SMALLMONEY, etc.) are not supported
+ - Unicode types (NCHAR, NVARCHAR) are mapped to non-unicode equivalents
+
+ References:
+ - Data Types: https://learn.microsoft.com/en-us/fabric/data-warehouse/data-types
+ - T-SQL Surface Area: https://learn.microsoft.com/en-us/fabric/data-warehouse/tsql-surface-area
+ """
+
+ # Fabric is case-sensitive unlike T-SQL which is case-insensitive
+ NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_SENSITIVE
+
+ class Generator(TSQL.Generator):
+ # Fabric-specific type mappings - override T-SQL types that aren't supported
+ # Reference: https://learn.microsoft.com/en-us/fabric/data-warehouse/data-types
+ TYPE_MAPPING = {
+ **TSQL.Generator.TYPE_MAPPING,
+ # Fabric doesn't support these types, map to alternatives
+ exp.DataType.Type.MONEY: "DECIMAL",
+ exp.DataType.Type.SMALLMONEY: "DECIMAL",
+ exp.DataType.Type.DATETIME: "DATETIME2(6)",
+ exp.DataType.Type.SMALLDATETIME: "DATETIME2(6)",
+ exp.DataType.Type.NCHAR: "CHAR",
+ exp.DataType.Type.NVARCHAR: "VARCHAR",
+ exp.DataType.Type.TEXT: "VARCHAR(MAX)",
+ exp.DataType.Type.IMAGE: "VARBINARY",
+ exp.DataType.Type.TINYINT: "SMALLINT",
+ exp.DataType.Type.UTINYINT: "SMALLINT", # T-SQL parses TINYINT as UTINYINT
+ exp.DataType.Type.JSON: "VARCHAR",
+ exp.DataType.Type.XML: "VARCHAR",
+ exp.DataType.Type.UUID: "VARBINARY(MAX)", # UNIQUEIDENTIFIER has limitations in Fabric
+ # Override T-SQL mappings that use different names in Fabric
+ exp.DataType.Type.DECIMAL: "DECIMAL", # T-SQL uses NUMERIC
+ exp.DataType.Type.DOUBLE: "FLOAT",
+ exp.DataType.Type.INT: "INT", # T-SQL uses INTEGER
+ }
+
+ def datatype_sql(self, expression: exp.DataType) -> str:
+ """
+ Override datatype generation to handle Fabric-specific precision limitations.
+
+ Fabric limits temporal types (TIME, DATETIME2, DATETIMEOFFSET) to max 6 digits precision.
+ When no precision is specified, we default to 6 digits.
+ """
+ if expression.is_type(
+ exp.DataType.Type.TIME,
+ exp.DataType.Type.DATETIME2,
+ exp.DataType.Type.TIMESTAMPTZ, # DATETIMEOFFSET in Fabric
+ ):
+ # Get the current precision (first expression if it exists)
+ precision = expression.find(exp.DataTypeParam)
+
+ # Determine the target precision
+ if precision is None:
+ # No precision specified, default to 6
+ target_precision = 6
+ elif precision.this.is_int:
+ # Cap precision at 6
+ current_precision = precision.this.to_py()
+ target_precision = min(current_precision, 6)
+
+ # Create a new expression with the target precision
+ new_expression = exp.DataType(
+ this=expression.this,
+ expressions=[exp.DataTypeParam(this=exp.Literal.number(target_precision))],
+ )
+
+ return super().datatype_sql(new_expression)
+
+ return super().datatype_sql(expression)
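A quick sketch of what the new Fabric dialect does in practice. The table and columns are hypothetical and the output is the expected shape rather than a verified result:

    import sqlglot

    # Types Fabric does not support are rewritten, and temporal precision is capped at 6.
    ddl = "CREATE TABLE dbo.t (a DATETIME, b NVARCHAR(100), c TINYINT, d DATETIME2(7))"
    print(sqlglot.transpile(ddl, read="tsql", write="fabric")[0])
    # expected roughly: CREATE TABLE dbo.t (a DATETIME2(6), b VARCHAR(100), c SMALLINT, d DATETIME2(6))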
--- sqlglot-26.28.1/sqlglot/dialects/hive.py
+++ sqlglot-26.30.0/sqlglot/dialects/hive.py
@@ -305,6 +305,7 @@ class Hive(Dialect):
  LOG_DEFAULTS_TO_LN = True
  STRICT_CAST = False
  VALUES_FOLLOWED_BY_PAREN = False
+ JOINS_HAVE_EQUAL_PRECEDENCE = True

  FUNCTIONS = {
  **parser.Parser.FUNCTIONS,
--- sqlglot-26.28.1/sqlglot/dialects/oracle.py
+++ sqlglot-26.30.0/sqlglot/dialects/oracle.py
@@ -128,6 +128,7 @@ class Oracle(Dialect):
  "NEXT": lambda self: self._parse_next_value_for(),
  "PRIOR": lambda self: self.expression(exp.Prior, this=self._parse_bitwise()),
  "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
+ "DBMS_RANDOM": lambda self: self._parse_dbms_random(),
  }

  FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
@@ -177,6 +178,19 @@
  ),
  }

+ def _parse_dbms_random(self) -> t.Optional[exp.Expression]:
+ if self._match_text_seq(".", "VALUE"):
+ lower, upper = None, None
+ if self._match(TokenType.L_PAREN, advance=False):
+ lower_upper = self._parse_wrapped_csv(self._parse_bitwise)
+ if len(lower_upper) == 2:
+ lower, upper = lower_upper
+
+ return exp.Rand(lower=lower, upper=upper)
+
+ self._retreat(self._index - 1)
+ return None
+
  def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
  return self.expression(
  expr_type,
@@ -299,6 +313,7 @@
  exp.LogicalOr: rename_func("MAX"),
  exp.LogicalAnd: rename_func("MIN"),
  exp.Mod: rename_func("MOD"),
+ exp.Rand: rename_func("DBMS_RANDOM.VALUE"),
  exp.Select: transforms.preprocess(
  [
  transforms.eliminate_distinct_on,
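The Oracle hunks above parse `DBMS_RANDOM.VALUE` into `exp.Rand` and generate it back via `rename_func`. A minimal sketch of the expected round trip (the bounds are arbitrary):

    import sqlglot
    from sqlglot import exp

    ast = sqlglot.parse_one("SELECT DBMS_RANDOM.VALUE(1, 100) FROM DUAL", read="oracle")
    print(isinstance(ast.selects[0], exp.Rand))  # expected: True
    print(ast.sql(dialect="oracle"))             # expected: SELECT DBMS_RANDOM.VALUE(1, 100) FROM DUAL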
--- sqlglot-26.28.1/sqlglot/dialects/presto.py
+++ sqlglot-26.30.0/sqlglot/dialects/presto.py
@@ -315,6 +315,7 @@ class Presto(Dialect):

  class Parser(parser.Parser):
  VALUES_FOLLOWED_BY_PAREN = False
+ ZONE_AWARE_TIMESTAMP_CONSTRUCTOR = True

  FUNCTIONS = {
  **parser.Parser.FUNCTIONS,
--- sqlglot-26.28.1/sqlglot/dialects/prql.py
+++ sqlglot-26.30.0/sqlglot/dialects/prql.py
@@ -189,11 +189,15 @@ class PRQL(Dialect):
  parse_bracket: bool = False,
  is_db_reference: bool = False,
  parse_partition: bool = False,
+ consume_pipe: bool = False,
  ) -> t.Optional[exp.Expression]:
  return self._parse_table_parts()

  def _parse_from(
- self, joins: bool = False, skip_from_token: bool = False
+ self,
+ joins: bool = False,
+ skip_from_token: bool = False,
+ consume_pipe: bool = False,
  ) -> t.Optional[exp.From]:
  if not skip_from_token and not self._match(TokenType.FROM):
  return None
--- sqlglot-26.28.1/sqlglot/dialects/redshift.py
+++ sqlglot-26.30.0/sqlglot/dialects/redshift.py
@@ -90,6 +90,7 @@ class Redshift(Postgres):
  parse_bracket: bool = False,
  is_db_reference: bool = False,
  parse_partition: bool = False,
+ consume_pipe: bool = False,
  ) -> t.Optional[exp.Expression]:
  # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
  unpivot = self._match(TokenType.UNPIVOT)
--- sqlglot-26.28.1/sqlglot/dialects/snowflake.py
+++ sqlglot-26.30.0/sqlglot/dialects/snowflake.py
@@ -31,6 +31,7 @@ from sqlglot.dialects.dialect import (
  )
  from sqlglot.generator import unsupported_args
  from sqlglot.helper import flatten, is_float, is_int, seq_get
+ from sqlglot.optimizer.scope import find_all_in_scope
  from sqlglot.tokens import TokenType

  if t.TYPE_CHECKING:
@@ -333,6 +334,34 @@ def _json_extract_value_array_sql(
  return self.func("TRANSFORM", json_extract, transform_lambda)


+ def _eliminate_dot_variant_lookup(expression: exp.Expression) -> exp.Expression:
+ if isinstance(expression, exp.Select):
+ # This transformation is used to facilitate transpilation of BigQuery `UNNEST` operations
+ # to Snowflake. It should not affect roundtrip because `Unnest` nodes cannot be produced
+ # by Snowflake's parser.
+ #
+ # Additionally, at the time of writing this, BigQuery is the only dialect that produces a
+ # `TableAlias` node that only fills `columns` and not `this`, due to `UNNEST_COLUMN_ONLY`.
+ unnest_aliases = set()
+ for unnest in find_all_in_scope(expression, exp.Unnest):
+ unnest_alias = unnest.args.get("alias")
+ if (
+ isinstance(unnest_alias, exp.TableAlias)
+ and not unnest_alias.this
+ and len(unnest_alias.columns) == 1
+ ):
+ unnest_aliases.add(unnest_alias.columns[0].name)
+
+ if unnest_aliases:
+ for c in find_all_in_scope(expression, exp.Column):
+ if c.table in unnest_aliases:
+ bracket_lhs = c.args["table"]
+ bracket_rhs = exp.Literal.string(c.name)
+ c.replace(exp.Bracket(this=bracket_lhs, expressions=[bracket_rhs]))
+
+ return expression
+
+
  class Snowflake(Dialect):
  # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
  NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
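The helper above only applies when transpiling BigQuery-style column-only `UNNEST` aliases into Snowflake: a dot lookup against such an alias becomes a bracket (variant) lookup, e.g. `item.name` turning into `item['name']`. A rough sketch with a hypothetical query; Snowflake's full output also involves FLATTEN and is not reproduced here:

    import sqlglot

    sql = "SELECT item.name FROM t, UNNEST(t.items) AS item"
    print(sqlglot.transpile(sql, read="bigquery", write="snowflake")[0])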
@@ -770,6 +799,7 @@
  parse_bracket: bool = False,
  is_db_reference: bool = False,
  parse_partition: bool = False,
+ consume_pipe: bool = False,
  ) -> t.Optional[exp.Expression]:
  table = super()._parse_table(
  schema=schema,
@@ -1096,6 +1126,7 @@
  transforms.explode_projection_to_unnest(),
  transforms.eliminate_semi_and_anti_joins,
  _transform_generate_date_array,
+ _eliminate_dot_variant_lookup,
  ]
  ),
  exp.SHA: rename_func("SHA1"),
@@ -1314,7 +1345,14 @@
  start = f" START {start}" if start else ""
  increment = expression.args.get("increment")
  increment = f" INCREMENT {increment}" if increment else ""
- return f"AUTOINCREMENT{start}{increment}"
+
+ order = expression.args.get("order")
+ if order is not None:
+ order_clause = " ORDER" if order else " NOORDER"
+ else:
+ order_clause = ""
+
+ return f"AUTOINCREMENT{start}{increment}{order_clause}"

  def cluster_sql(self, expression: exp.Cluster) -> str:
  return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
--- sqlglot-26.28.1/sqlglot/dialects/spark.py
+++ sqlglot-26.30.0/sqlglot/dialects/spark.py
@@ -7,6 +7,7 @@ from sqlglot.dialects.dialect import rename_func, unit_to_var, timestampdiff_sql
  from sqlglot.dialects.hive import _build_with_ignore_nulls
  from sqlglot.dialects.spark2 import Spark2, temporary_storage_provider, _build_as_cast
  from sqlglot.helper import ensure_list, seq_get
+ from sqlglot.tokens import TokenType
  from sqlglot.transforms import (
  ctas_with_tmp_tables_to_create_tmp_view,
  remove_unique_constraints,
@@ -121,6 +122,16 @@ class Spark(Spark2):
  ),
  }

+ PLACEHOLDER_PARSERS = {
+ **Spark2.Parser.PLACEHOLDER_PARSERS,
+ TokenType.L_BRACE: lambda self: self._parse_query_parameter(),
+ }
+
+ def _parse_query_parameter(self) -> t.Optional[exp.Expression]:
+ this = self._parse_id_var()
+ self._match(TokenType.R_BRACE)
+ return self.expression(exp.Placeholder, this=this, widget=True)
+
  def _parse_generated_as_identity(
  self,
  ) -> (
@@ -200,3 +211,9 @@
  return self.func("DATEDIFF", unit_to_var(expression), start, end)

  return self.func("DATEDIFF", end, start)
+
+ def placeholder_sql(self, expression: exp.Placeholder) -> str:
+ if not expression.args.get("widget"):
+ return super().placeholder_sql(expression)
+
+ return f"{{{expression.name}}}"
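The Spark changes above parse Databricks-style `{param}` widget parameters (see the databricks.com link added in expressions.py below) into `exp.Placeholder(widget=True)` and generate them back with braces. A small sketch with a hypothetical parameter name:

    import sqlglot

    sql = "SELECT * FROM sales WHERE region = {region_param}"
    ast = sqlglot.parse_one(sql, read="databricks")
    print(ast.sql(dialect="databricks"))
    # expected: SELECT * FROM sales WHERE region = {region_param}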
--- sqlglot-26.28.1/sqlglot/dialects/sqlite.py
+++ sqlglot-26.30.0/sqlglot/dialects/sqlite.py
@@ -102,6 +102,10 @@ class SQLite(Dialect):
  COMMANDS = {*tokens.Tokenizer.COMMANDS, TokenType.REPLACE}

  class Parser(parser.Parser):
+ STRING_ALIASES = True
+ ALTER_RENAME_REQUIRES_COLUMN = False
+ JOINS_HAVE_EQUAL_PRECEDENCE = True
+
  FUNCTIONS = {
  **parser.Parser.FUNCTIONS,
  "EDITDIST3": exp.Levenshtein.from_arg_list,
@@ -110,9 +114,6 @@
  "TIME": lambda args: exp.Anonymous(this="TIME", expressions=args),
  }

- STRING_ALIASES = True
- ALTER_RENAME_REQUIRES_COLUMN = False
-
  def _parse_unique(self) -> exp.UniqueColumnConstraint:
  # Do not consume more tokens if UNIQUE is used as a standalone constraint, e.g:
  # CREATE TABLE foo (bar TEXT UNIQUE REFERENCES baz ...)
--- sqlglot-26.28.1/sqlglot/dialects/tsql.py
+++ sqlglot-26.30.0/sqlglot/dialects/tsql.py
@@ -1224,8 +1224,6 @@
  # to amend the AST by moving the CTEs to the CREATE VIEW statement's query.
  ctas_expression.set("with", with_.pop())

- sql = super().create_sql(expression)
-
  table = expression.find(exp.Table)

  # Convert CTAS statement to SELECT .. INTO ..
@@ -1243,6 +1241,8 @@
  select_into.limit(0, copy=False)

  sql = self.sql(select_into)
+ else:
+ sql = super().create_sql(expression)

  if exists:
  identifier = self.sql(exp.Literal.string(exp.table_name(table) if table else ""))
--- sqlglot-26.28.1/sqlglot/expressions.py
+++ sqlglot-26.30.0/sqlglot/expressions.py
@@ -1947,6 +1947,7 @@ class GeneratedAsIdentityColumnConstraint(ColumnConstraintKind):
  "minvalue": False,
  "maxvalue": False,
  "cycle": False,
+ "order": False,
  }


@@ -4452,8 +4453,9 @@ class SessionParameter(Condition):
  arg_types = {"this": True, "kind": False}


+ # https://www.databricks.com/blog/parameterized-queries-pyspark
  class Placeholder(Condition):
- arg_types = {"this": False, "kind": False}
+ arg_types = {"this": False, "kind": False, "widget": False}

  @property
  def name(self) -> str:
@@ -7044,6 +7046,12 @@ class Semicolon(Expression):
  arg_types = {}


+ # BigQuery allows SELECT t FROM t and treats the projection as a struct value. This expression
+ # type is intended to be constructed by qualify so that we can properly annotate its type later
+ class TableColumn(Expression):
+ pass
+
+
  def _norm_arg(arg):
  return arg.lower() if type(arg) is str else arg

--- sqlglot-26.28.1/sqlglot/generator.py
+++ sqlglot-26.30.0/sqlglot/generator.py
@@ -201,6 +201,7 @@ class Generator(metaclass=_Generator):
  exp.StreamingTableProperty: lambda *_: "STREAMING",
  exp.StrictProperty: lambda *_: "STRICT",
  exp.SwapTable: lambda self, e: f"SWAP WITH {self.sql(e, 'this')}",
+ exp.TableColumn: lambda self, e: self.sql(e.this),
  exp.Tags: lambda self, e: f"TAG ({self.expressions(e, flat=True)})",
  exp.TemporaryProperty: lambda *_: "TEMPORARY",
  exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}",
@@ -463,6 +464,11 @@
  # Whether to wrap <props> in `AlterSet`, e.g., ALTER ... SET (<props>)
  ALTER_SET_WRAPPED = False

+ # Whether to normalize the date parts in EXTRACT(<date_part> FROM <expr>) into a common representation
+ # For instance, to extract the day of week in ISO semantics, one can use ISODOW, DAYOFWEEKISO etc depending on the dialect.
+ # TODO: The normalization should be done by default once we've tested it across all dialects.
+ NORMALIZE_EXTRACT_DATE_PARTS = False
+
  # The name to generate for the JSONPath expression. If `None`, only `this` will be generated
  PARSE_JSON_NAME: t.Optional[str] = "PARSE_JSON"

@@ -2909,9 +2915,17 @@
  return f"NEXT VALUE FOR {self.sql(expression, 'this')}{order}"

  def extract_sql(self, expression: exp.Extract) -> str:
- this = self.sql(expression, "this") if self.EXTRACT_ALLOWS_QUOTES else expression.this.name
+ from sqlglot.dialects.dialect import map_date_part
+
+ this = (
+ map_date_part(expression.this, self.dialect)
+ if self.NORMALIZE_EXTRACT_DATE_PARTS
+ else expression.this
+ )
+ this_sql = self.sql(this) if self.EXTRACT_ALLOWS_QUOTES else this.name
  expression_sql = self.sql(expression, "expression")
- return f"EXTRACT({this} FROM {expression_sql})"
+
+ return f"EXTRACT({this_sql} FROM {expression_sql})"

  def trim_sql(self, expression: exp.Trim) -> str:
  trim_type = self.sql(expression, "position")
@@ -4766,7 +4780,10 @@

  def detach_sql(self, expression: exp.Detach) -> str:
  this = self.sql(expression, "this")
- exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
+ # the DATABASE keyword is required if IF EXISTS is set
+ # without it, DuckDB throws an error: Parser Error: syntax error at or near "exists" (Line Number: 1)
+ # ref: https://duckdb.org/docs/stable/sql/statements/attach.html#detach-syntax
+ exists_sql = " DATABASE IF EXISTS" if expression.args.get("exists") else ""

  return f"DETACH{exists_sql} {this}"
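Per the DuckDB documentation cited in the new comment, `DETACH ... IF EXISTS` requires the DATABASE keyword, so the generator now always emits it when `exists` is set. A minimal sketch:

    import sqlglot

    print(sqlglot.transpile("DETACH IF EXISTS db1", read="duckdb", write="duckdb")[0])
    # expected: DETACH DATABASE IF EXISTS db1
    print(sqlglot.transpile("DETACH db1", read="duckdb", write="duckdb")[0])
    # expected: DETACH db1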