sqlglot 27.20.0__tar.gz → 27.21.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sqlglot-27.20.0 → sqlglot-27.21.0}/CHANGELOG.md +57 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/PKG-INFO +2 -2
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/_version.py +3 -3
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/bigquery.py +2 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/dialect.py +4 -2
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/duckdb.py +3 -4
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/hive.py +0 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/oracle.py +0 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/presto.py +0 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/risingwave.py +14 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/snowflake.py +84 -6
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/spark.py +0 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/expressions.py +46 -3
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/canonicalize.py +1 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/merge_subqueries.py +2 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/parser.py +25 -26
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/tokens.py +5 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/transforms.py +0 -33
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot.egg-info/PKG-INFO +2 -2
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot.egg-info/requires.txt +1 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/Cargo.lock +1 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/Cargo.toml +1 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/src/tokenizer.rs +6 -15
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_bigquery.py +20 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_databricks.py +2 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_dialect.py +19 -12
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_duckdb.py +20 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_hive.py +1 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_postgres.py +6 -4
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_redshift.py +2 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_snowflake.py +98 -1
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_tsql.py +1 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/identity.sql +1 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/annotate_functions.sql +172 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/merge_subqueries.sql +28 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/optimizer.sql +12 -7
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql +22 -12
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_transforms.py +0 -33
- {sqlglot-27.20.0 → sqlglot-27.21.0}/.gitignore +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/.gitpod.yml +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/.pre-commit-config.yaml +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/CONTRIBUTING.md +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/LICENSE +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/MANIFEST.in +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/Makefile +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/README.md +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/pyproject.toml +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/setup.cfg +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/setup.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/__init__.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/__main__.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/_typing.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/__init__.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/athena.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/clickhouse.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/databricks.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/doris.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/dremio.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/drill.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/druid.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/dune.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/exasol.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/fabric.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/materialize.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/mysql.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/postgres.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/prql.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/redshift.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/singlestore.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/solr.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/spark2.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/sqlite.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/starrocks.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/tableau.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/teradata.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/trino.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/dialects/tsql.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/diff.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/errors.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/executor/__init__.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/executor/context.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/executor/env.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/executor/python.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/executor/table.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/generator.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/helper.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/jsonpath.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/lineage.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/__init__.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/annotate_types.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/eliminate_ctes.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/eliminate_joins.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/eliminate_subqueries.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/isolate_table_selects.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/normalize.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/normalize_identifiers.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/optimize_joins.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/optimizer.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/pushdown_predicates.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/pushdown_projections.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/qualify.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/qualify_columns.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/qualify_tables.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/scope.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/simplify.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/optimizer/unnest_subqueries.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/planner.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/py.typed +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/schema.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/serde.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/time.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot/trie.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot.egg-info/SOURCES.txt +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot.egg-info/dependency_links.txt +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot.egg-info/top_level.txt +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglot.png +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/benches/dialect_settings.json +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/benches/long.rs +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/benches/token_type_settings.json +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/benches/tokenizer_dialect_settings.json +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/benches/tokenizer_settings.json +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/pyproject.toml +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/src/lib.rs +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/src/settings.rs +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/src/token.rs +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/sqlglotrs/src/trie.rs +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/__init__.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/__init__.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_athena.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_clickhouse.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_doris.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_dremio.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_drill.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_druid.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_dune.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_exasol.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_fabric.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_materialize.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_mysql.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_oracle.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_pipe_syntax.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_presto.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_prql.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_risingwave.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_singlestore.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_solr.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_spark.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_sqlite.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_starrocks.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_tableau.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_teradata.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/dialects/test_trino.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/jsonpath/LICENSE +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/jsonpath/cts.json +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/annotate_types.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/canonicalize.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/eliminate_ctes.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/eliminate_joins.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/eliminate_subqueries.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/isolate_table_selects.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/normalize.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/normalize_identifiers.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/optimize_joins.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/pushdown_cte_alias_columns.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/pushdown_predicates.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/pushdown_projections.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/qualify_columns.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/qualify_columns__invalid.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/qualify_columns__with_invisible.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/qualify_columns_ddl.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/qualify_tables.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/quote_identifiers.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/simplify.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/call_center.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/catalog_page.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/catalog_returns.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/catalog_sales.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/customer.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/customer_address.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/customer_demographics.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/date_dim.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/household_demographics.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/income_band.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/inventory.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/item.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/promotion.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/reason.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/ship_mode.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/store.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/store_returns.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/store_sales.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/time_dim.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/warehouse.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/web_page.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/web_returns.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/web_sales.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-ds/web_site.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/customer.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/lineitem.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/nation.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/orders.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/part.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/partsupp.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/region.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/supplier.csv.gz +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/tpc-h/tpc-h.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/optimizer/unnest_subqueries.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/partial.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/fixtures/pretty.sql +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/gen_fixtures.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/helpers.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_build.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_dialect_imports.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_diff.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_docs.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_executor.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_expressions.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_generator.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_helper.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_jsonpath.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_lineage.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_optimizer.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_parser.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_schema.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_serde.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_time.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_tokens.py +0 -0
- {sqlglot-27.20.0 → sqlglot-27.21.0}/tests/test_transpile.py +0 -0

--- sqlglot-27.20.0/CHANGELOG.md
+++ sqlglot-27.21.0/CHANGELOG.md
@@ -1,6 +1,62 @@
 Changelog
 =========
 
+## [v27.20.0] - 2025-09-30
+### :boom: BREAKING CHANGES
+- due to [`13a30df`](https://github.com/tobymao/sqlglot/commit/13a30dfa37096df5bfc2c31538325c40a49f7917) - Annotate type for snowflake TRY_BASE64_DECODE_BINARY function *(PR [#5972](https://github.com/tobymao/sqlglot/pull/5972) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*:
+
+  Annotate type for snowflake TRY_BASE64_DECODE_BINARY function (#5972)
+
+- due to [`1f5fdd7`](https://github.com/tobymao/sqlglot/commit/1f5fdd799c047de167a4572f7ac26b7ad92167f2) - Annotate type for snowflake TRY_BASE64_DECODE_STRING function *(PR [#5974](https://github.com/tobymao/sqlglot/pull/5974) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*:
+
+  Annotate type for snowflake TRY_BASE64_DECODE_STRING function (#5974)
+
+- due to [`324e82f`](https://github.com/tobymao/sqlglot/commit/324e82fe1fb11722f91341010602a743b151e055) - Annotate type for snowflake TRY_HEX_DECODE_BINARY function *(PR [#5975](https://github.com/tobymao/sqlglot/pull/5975) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*:
+
+  Annotate type for snowflake TRY_HEX_DECODE_BINARY function (#5975)
+
+- due to [`6caf99d`](https://github.com/tobymao/sqlglot/commit/6caf99d556a3357ffaa6c294a9babcd30dd5fac5) - Annotate type for snowflake TRY_HEX_DECODE_STRING function *(PR [#5976](https://github.com/tobymao/sqlglot/pull/5976) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*:
+
+  Annotate type for snowflake TRY_HEX_DECODE_STRING function (#5976)
+
+- due to [`73186a8`](https://github.com/tobymao/sqlglot/commit/73186a812ce422c108ee81b3de11da6ee9a9e902) - annotate type for Snowflake REGEXP_COUNT function *(PR [#5963](https://github.com/tobymao/sqlglot/pull/5963) by [@fivetran-BradfordPaskewitz](https://github.com/fivetran-BradfordPaskewitz))*:
+
+  annotate type for Snowflake REGEXP_COUNT function (#5963)
+
+- due to [`c3bdb3c`](https://github.com/tobymao/sqlglot/commit/c3bdb3cd1af1809ed82be0ae40744d9fffc8ce18) - array start index is 1, support array_flatten, fixes [#5983](https://github.com/tobymao/sqlglot/pull/5983) *(commit by [@georgesittas](https://github.com/georgesittas))*:
+
+  array start index is 1, support array_flatten, fixes #5983
+
+- due to [`244fb48`](https://github.com/tobymao/sqlglot/commit/244fb48fc9c4776f427c08b825d139b1c172fd26) - annotate type for Snowflake SPLIT_PART function *(PR [#5988](https://github.com/tobymao/sqlglot/pull/5988) by [@fivetran-BradfordPaskewitz](https://github.com/fivetran-BradfordPaskewitz))*:
+
+  annotate type for Snowflake SPLIT_PART function (#5988)
+
+- due to [`0d772e0`](https://github.com/tobymao/sqlglot/commit/0d772e0b9d687b24d49203c05d7a90cc1dce02d5) - add ast node for `DIRECTORY` source *(PR [#5990](https://github.com/tobymao/sqlglot/pull/5990) by [@georgesittas](https://github.com/georgesittas))*:
+
+  add ast node for `DIRECTORY` source (#5990)
+
+
+### :sparkles: New Features
+- [`13a30df`](https://github.com/tobymao/sqlglot/commit/13a30dfa37096df5bfc2c31538325c40a49f7917) - **optimizer**: Annotate type for snowflake TRY_BASE64_DECODE_BINARY function *(PR [#5972](https://github.com/tobymao/sqlglot/pull/5972) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*
+- [`1f5fdd7`](https://github.com/tobymao/sqlglot/commit/1f5fdd799c047de167a4572f7ac26b7ad92167f2) - **optimizer**: Annotate type for snowflake TRY_BASE64_DECODE_STRING function *(PR [#5974](https://github.com/tobymao/sqlglot/pull/5974) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*
+- [`324e82f`](https://github.com/tobymao/sqlglot/commit/324e82fe1fb11722f91341010602a743b151e055) - **optimizer**: Annotate type for snowflake TRY_HEX_DECODE_BINARY function *(PR [#5975](https://github.com/tobymao/sqlglot/pull/5975) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*
+- [`6caf99d`](https://github.com/tobymao/sqlglot/commit/6caf99d556a3357ffaa6c294a9babcd30dd5fac5) - **optimizer**: Annotate type for snowflake TRY_HEX_DECODE_STRING function *(PR [#5976](https://github.com/tobymao/sqlglot/pull/5976) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*
+- [`73186a8`](https://github.com/tobymao/sqlglot/commit/73186a812ce422c108ee81b3de11da6ee9a9e902) - **optimizer**: annotate type for Snowflake REGEXP_COUNT function *(PR [#5963](https://github.com/tobymao/sqlglot/pull/5963) by [@fivetran-BradfordPaskewitz](https://github.com/fivetran-BradfordPaskewitz))*
+- [`6124de7`](https://github.com/tobymao/sqlglot/commit/6124de76fa6d6725e844cd37e09ebfe99469b0ec) - **optimizer**: Annotate type for snowflake SOUNDEX function *(PR [#5986](https://github.com/tobymao/sqlglot/pull/5986) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*
+- [`244fb48`](https://github.com/tobymao/sqlglot/commit/244fb48fc9c4776f427c08b825d139b1c172fd26) - **optimizer**: annotate type for Snowflake SPLIT_PART function *(PR [#5988](https://github.com/tobymao/sqlglot/pull/5988) by [@fivetran-BradfordPaskewitz](https://github.com/fivetran-BradfordPaskewitz))*
+- [`0d772e0`](https://github.com/tobymao/sqlglot/commit/0d772e0b9d687b24d49203c05d7a90cc1dce02d5) - **snowflake**: add ast node for `DIRECTORY` source *(PR [#5990](https://github.com/tobymao/sqlglot/pull/5990) by [@georgesittas](https://github.com/georgesittas))*
+
+### :bug: Bug Fixes
+- [`7a3744f`](https://github.com/tobymao/sqlglot/commit/7a3744f203b93211e5dd97e6730b6bf59d6d96e0) - **sqlite**: support `RANGE CURRENT ROW` in window spec *(commit by [@georgesittas](https://github.com/georgesittas))*
+- [`c3bdb3c`](https://github.com/tobymao/sqlglot/commit/c3bdb3cd1af1809ed82be0ae40744d9fffc8ce18) - **starrocks**: array start index is 1, support array_flatten, fixes [#5983](https://github.com/tobymao/sqlglot/pull/5983) *(commit by [@georgesittas](https://github.com/georgesittas))*
+
+### :recycle: Refactors
+- [`d425ba2`](https://github.com/tobymao/sqlglot/commit/d425ba26b96b368801f8f486fa375cd75105993d) - make hash and eq non recursive *(PR [#5966](https://github.com/tobymao/sqlglot/pull/5966) by [@tobymao](https://github.com/tobymao))*
+
+### :wrench: Chores
+- [`345c6a1`](https://github.com/tobymao/sqlglot/commit/345c6a153481a22d6df1b12ef1863e2133688fdf) - add uv support to Makefile *(PR [#5973](https://github.com/tobymao/sqlglot/pull/5973) by [@eakmanrq](https://github.com/eakmanrq))*
+
+
 ## [v27.19.0] - 2025-09-26
 ### :boom: BREAKING CHANGES
 - due to [`68473ac`](https://github.com/tobymao/sqlglot/commit/68473ac3ec8dc76512dc76819892a1b0324c7ddc) - Annotate type for snowflake PARSE_URL function *(PR [#5962](https://github.com/tobymao/sqlglot/pull/5962) by [@fivetran-amrutabhimsenayachit](https://github.com/fivetran-amrutabhimsenayachit))*:
@@ -7561,3 +7617,4 @@ Changelog
 [v27.17.0]: https://github.com/tobymao/sqlglot/compare/v27.16.3...v27.17.0
 [v27.18.0]: https://github.com/tobymao/sqlglot/compare/v27.17.0...v27.18.0
 [v27.19.0]: https://github.com/tobymao/sqlglot/compare/v27.18.0...v27.19.0
+[v27.20.0]: https://github.com/tobymao/sqlglot/compare/v27.19.0...v27.20.0
--- sqlglot-27.20.0/PKG-INFO
+++ sqlglot-27.21.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sqlglot
-Version: 27.20.0
+Version: 27.21.0
 Summary: An easily customizable SQL parser and transpiler
 Author-email: Toby Mao <toby.mao@gmail.com>
 License-Expression: MIT
@@ -33,7 +33,7 @@ Requires-Dist: typing_extensions; extra == "dev"
 Requires-Dist: maturin<2.0,>=1.4; extra == "dev"
 Requires-Dist: pyperf; extra == "dev"
 Provides-Extra: rs
-Requires-Dist: sqlglotrs==0.
+Requires-Dist: sqlglotrs==0.7.0; extra == "rs"
 Dynamic: license-file
 Dynamic: provides-extra
 
--- sqlglot-27.20.0/sqlglot/_version.py
+++ sqlglot-27.21.0/sqlglot/_version.py
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '27.20.0'
-__version_tuple__ = version_tuple = (27, 20, 0)
+__version__ = version = '27.21.0'
+__version_tuple__ = version_tuple = (27, 21, 0)
 
-__commit_id__ = commit_id = '
+__commit_id__ = commit_id = 'g5dd2ed3c6'
--- sqlglot-27.20.0/sqlglot/dialects/bigquery.py
+++ sqlglot-27.21.0/sqlglot/dialects/bigquery.py
@@ -867,6 +867,8 @@ class BigQuery(Dialect):
             "FROM_HEX": exp.Unhex.from_arg_list,
             "WEEK": lambda args: exp.WeekStart(this=exp.var(seq_get(args, 0))),
         }
+        # Remove SEARCH to avoid parameter routing issues - let it fall back to Anonymous function
+        FUNCTIONS.pop("SEARCH")
 
         FUNCTION_PARSERS = {
             **parser.Parser.FUNCTION_PARSERS,
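
The BigQuery hunk above drops `SEARCH` from the dialect's `FUNCTIONS` map so the call keeps parsing as an anonymous function rather than the new `exp.Search` node. A minimal sketch of that behaviour through sqlglot's public API, assuming sqlglot >= 27.21.0 (the column name and search string are illustrative):

```python
import sqlglot
from sqlglot import exp

# With "SEARCH" removed from BigQuery's FUNCTIONS map, the call is kept as an
# anonymous function instead of being routed into exp.Search.
node = sqlglot.parse_one("SELECT SEARCH(payload, 'foo')", read="bigquery")
anon = node.find(exp.Anonymous)
print(anon and anon.sql("bigquery"))  # expected to render the SEARCH(...) call unchanged
```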
--- sqlglot-27.20.0/sqlglot/dialects/dialect.py
+++ sqlglot-27.21.0/sqlglot/dialects/dialect.py
@@ -1715,7 +1715,7 @@ def unit_to_str(expression: exp.Expression, default: str = "DAY") -> t.Optional[
 def unit_to_var(expression: exp.Expression, default: str = "DAY") -> t.Optional[exp.Expression]:
     unit = expression.args.get("unit")
 
-    if isinstance(unit, (exp.Var, exp.Placeholder, exp.WeekStart)):
+    if isinstance(unit, (exp.Var, exp.Placeholder, exp.WeekStart, exp.Column)):
         return unit
 
     value = unit.name if unit else default
@@ -1736,7 +1736,9 @@ def map_date_part(
 
 def map_date_part(part, dialect: DialectType = Dialect):
     mapped = (
-        Dialect.get_or_raise(dialect).DATE_PART_MAPPING.get(part.name.upper())
+        Dialect.get_or_raise(dialect).DATE_PART_MAPPING.get(part.name.upper())
+        if part and not (isinstance(part, exp.Column) and len(part.parts) != 1)
+        else None
     )
     if mapped:
         return exp.Literal.string(mapped) if part.is_string else exp.var(mapped)
--- sqlglot-27.20.0/sqlglot/dialects/duckdb.py
+++ sqlglot-27.21.0/sqlglot/dialects/duckdb.py
@@ -311,6 +311,7 @@ class DuckDB(Dialect):
             "PIVOT_WIDER": TokenType.PIVOT,
             "POSITIONAL": TokenType.POSITIONAL,
             "RESET": TokenType.COMMAND,
+            "ROW": TokenType.STRUCT,
             "SIGNED": TokenType.INT,
             "STRING": TokenType.TEXT,
             "SUMMARIZE": TokenType.SUMMARIZE,
@@ -337,16 +338,14 @@ class DuckDB(Dialect):
     class Parser(parser.Parser):
         MAP_KEYS_ARE_ARBITRARY_EXPRESSIONS = True
 
-        BITWISE = {
-            **parser.Parser.BITWISE,
-            TokenType.TILDA: exp.RegexpLike,
-        }
+        BITWISE = parser.Parser.BITWISE.copy()
         BITWISE.pop(TokenType.CARET)
 
         RANGE_PARSERS = {
             **parser.Parser.RANGE_PARSERS,
             TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
             TokenType.CARET_AT: binary_range_parser(exp.StartsWith),
+            TokenType.TILDA: binary_range_parser(exp.RegexpFullMatch),
         }
 
         EXPONENT = {
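
The DuckDB hunks retarget the `~` operator from a bitwise `exp.RegexpLike` to the new range-level `exp.RegexpFullMatch` node and tokenize `ROW` as a STRUCT keyword. A minimal sketch of the parser-side effect, assuming sqlglot >= 27.21.0 (output strings are indicative, not verbatim):

```python
import sqlglot
from sqlglot import exp

# `a ~ 'pattern'` now goes through RANGE_PARSERS and produces exp.RegexpFullMatch
# instead of being handled in BITWISE as exp.RegexpLike.
expr = sqlglot.parse_one("SELECT 'abc' ~ 'a.*'", read="duckdb")
print(type(expr.find(exp.RegexpFullMatch)).__name__)  # expected: RegexpFullMatch
print(expr.sql("duckdb"))  # re-rendered by the DuckDB generator; exact SQL may differ
```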
--- sqlglot-27.20.0/sqlglot/dialects/hive.py
+++ sqlglot-27.21.0/sqlglot/dialects/hive.py
@@ -531,7 +531,6 @@ class Hive(Dialect):
 
         TRANSFORMS = {
             **generator.Generator.TRANSFORMS,
-            exp.Group: transforms.preprocess([transforms.unalias_group]),
             exp.Property: property_sql,
             exp.AnyValue: rename_func("FIRST"),
             exp.ApproxDistinct: approx_count_distinct_sql,
--- sqlglot-27.20.0/sqlglot/dialects/oracle.py
+++ sqlglot-27.21.0/sqlglot/dialects/oracle.py
@@ -307,7 +307,6 @@ class Oracle(Dialect):
             ),
             exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.unit),
             exp.EuclideanDistance: rename_func("L2_DISTANCE"),
-            exp.Group: transforms.preprocess([transforms.unalias_group]),
             exp.ILike: no_ilike_sql,
             exp.LogicalOr: rename_func("MAX"),
             exp.LogicalAnd: rename_func("MIN"),
--- sqlglot-27.20.0/sqlglot/dialects/presto.py
+++ sqlglot-27.21.0/sqlglot/dialects/presto.py
@@ -475,7 +475,6 @@ class Presto(Dialect):
             e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'",
             exp.GenerateSeries: sequence_sql,
             exp.GenerateDateArray: sequence_sql,
-            exp.Group: transforms.preprocess([transforms.unalias_group]),
             exp.If: if_sql(),
             exp.ILike: no_ilike_sql,
             exp.Initcap: _initcap_sql,
--- sqlglot-27.20.0/sqlglot/dialects/risingwave.py
+++ sqlglot-27.21.0/sqlglot/dialects/risingwave.py
@@ -25,6 +25,20 @@ class RisingWave(Postgres):
             "KEY": lambda self: self._parse_encode_property(key=True),
         }
 
+        CONSTRAINT_PARSERS = {
+            **Postgres.Parser.CONSTRAINT_PARSERS,
+            "WATERMARK": lambda self: self.expression(
+                exp.WatermarkColumnConstraint,
+                this=self._match(TokenType.FOR) and self._parse_column(),
+                expression=self._match(TokenType.ALIAS) and self._parse_disjunction(),
+            ),
+        }
+
+        SCHEMA_UNNAMED_CONSTRAINTS = {
+            *Postgres.Parser.SCHEMA_UNNAMED_CONSTRAINTS,
+            "WATERMARK",
+        }
+
         def _parse_table_hints(self) -> t.Optional[t.List[exp.Expression]]:
             # There is no hint in risingwave.
             # Do nothing here to avoid WITH keywords conflict in CREATE SINK statement.
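
Together with the parser.py hunks further down (which remove the generic `WATERMARK` handling), this scopes `WATERMARK` column constraints to the RisingWave dialect. A minimal sketch, assuming sqlglot >= 27.21.0; the DDL below is illustrative and the rendered output may differ slightly:

```python
import sqlglot

# WATERMARK FOR ... AS ... is now recognized through RisingWave's own
# CONSTRAINT_PARSERS / SCHEMA_UNNAMED_CONSTRAINTS rather than the base parser.
ddl = """
CREATE TABLE events (
    ts TIMESTAMP,
    WATERMARK FOR ts AS ts - INTERVAL '5 SECOND'
)
"""
print(sqlglot.parse_one(ddl, read="risingwave").sql("risingwave"))
```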
--- sqlglot-27.20.0/sqlglot/dialects/snowflake.py
+++ sqlglot-27.21.0/sqlglot/dialects/snowflake.py
@@ -41,7 +41,18 @@ if t.TYPE_CHECKING:
     from sqlglot._typing import E, B
 
 
-
+def _build_strtok(args: t.List) -> exp.SplitPart:
+    # Add default delimiter (space) if missing - per Snowflake docs
+    if len(args) == 1:
+        args.append(exp.Literal.string(" "))
+
+    # Add default part_index (1) if missing
+    if len(args) == 2:
+        args.append(exp.Literal.number(1))
+
+    return exp.SplitPart.from_arg_list(args)
+
+
 def _build_datetime(
     name: str, kind: exp.DataType.Type, safe: bool = False
 ) -> t.Callable[[t.List], exp.Func]:
@@ -137,12 +148,35 @@ def _build_if_from_div0(args: t.List) -> exp.If:
     return exp.If(this=cond, true=true, false=false)
 
 
+# https://docs.snowflake.com/en/sql-reference/functions/div0null
+def _build_if_from_div0null(args: t.List) -> exp.If:
+    lhs = exp._wrap(seq_get(args, 0), exp.Binary)
+    rhs = exp._wrap(seq_get(args, 1), exp.Binary)
+
+    # Returns 0 when divisor is 0 OR NULL
+    cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).or_(
+        exp.Is(this=rhs, expression=exp.null())
+    )
+    true = exp.Literal.number(0)
+    false = exp.Div(this=lhs, expression=rhs)
+    return exp.If(this=cond, true=true, false=false)
+
+
 # https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 def _build_if_from_zeroifnull(args: t.List) -> exp.If:
     cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
     return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 
 
+def _build_search(args: t.List) -> exp.Search:
+    kwargs = {
+        "this": seq_get(args, 0),
+        "expression": seq_get(args, 1),
+        **{arg.name.lower(): arg for arg in args[2:] if isinstance(arg, exp.Kwarg)},
+    }
+    return exp.Search(**kwargs)
+
+
 # https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 def _build_if_from_nullifzero(args: t.List) -> exp.If:
     cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
@@ -529,6 +563,16 @@ class Snowflake(Dialect):
 
     TYPE_TO_EXPRESSIONS = {
         **Dialect.TYPE_TO_EXPRESSIONS,
+        exp.DataType.Type.DOUBLE: {
+            *Dialect.TYPE_TO_EXPRESSIONS[exp.DataType.Type.DOUBLE],
+            exp.Cos,
+            exp.Cosh,
+            exp.Cot,
+            exp.Degrees,
+            exp.Exp,
+            exp.Sin,
+            exp.Tan,
+        },
         exp.DataType.Type.INT: {
             *Dialect.TYPE_TO_EXPRESSIONS[exp.DataType.Type.INT],
             exp.Ascii,
@@ -539,6 +583,7 @@ class Snowflake(Dialect):
            exp.Levenshtein,
            exp.JarowinklerSimilarity,
            exp.StrPosition,
+           exp.Unicode,
         },
         exp.DataType.Type.VARCHAR: {
             *Dialect.TYPE_TO_EXPRESSIONS[exp.DataType.Type.VARCHAR],
@@ -564,8 +609,10 @@
             exp.SHA,
             exp.SHA2,
             exp.Soundex,
+            exp.SoundexP123,
             exp.Space,
             exp.SplitPart,
+            exp.Translate,
             exp.Uuid,
         },
         exp.DataType.Type.BINARY: {
@@ -587,6 +634,8 @@
         },
         exp.DataType.Type.ARRAY: {
             exp.Split,
+            exp.RegexpExtractAll,
+            exp.StringToArray,
         },
         exp.DataType.Type.OBJECT: {
             exp.ParseUrl,
@@ -595,6 +644,10 @@
         exp.DataType.Type.DECIMAL: {
             exp.RegexpCount,
         },
+        exp.DataType.Type.BOOLEAN: {
+            *Dialect.TYPE_TO_EXPRESSIONS[exp.DataType.Type.BOOLEAN],
+            exp.Search,
+        },
     }
 
     ANNOTATORS = {
@@ -614,11 +667,17 @@
                 exp.Substring,
             )
         },
+        **{
+            expr_type: lambda self, e: self._annotate_with_type(
+                e, exp.DataType.build("NUMBER", dialect="snowflake")
+            )
+            for expr_type in (
+                exp.RegexpCount,
+                exp.RegexpInstr,
+            )
+        },
         exp.ConcatWs: lambda self, e: self._annotate_by_args(e, "expressions"),
         exp.Reverse: _annotate_reverse,
-        exp.RegexpCount: lambda self, e: self._annotate_with_type(
-            e, exp.DataType.build("NUMBER", dialect="snowflake")
-        ),
     }
 
     TIME_MAPPING = {
@@ -691,7 +750,7 @@
             "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
             "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
             "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
-                this=seq_get(args, 1), expression=seq_get(args, 0)
+                this=seq_get(args, 1), expression=seq_get(args, 0), ensure_variant=False
             ),
             "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                 # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive
@@ -727,6 +786,7 @@
             "DATEDIFF": _build_datediff,
             "DAYOFWEEKISO": exp.DayOfWeekIso.from_arg_list,
             "DIV0": _build_if_from_div0,
+            "DIV0NULL": _build_if_from_div0null,
             "EDITDISTANCE": lambda args: exp.Levenshtein(
                 this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
             ),
@@ -765,6 +825,7 @@
             "SHA2_BINARY": exp.SHA2Digest.from_arg_list,
             "SHA2_HEX": exp.SHA2.from_arg_list,
             "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
+            "STRTOK": _build_strtok,
             "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
             "TIMEADD": _build_date_time_add(exp.TimeAdd),
             "TIMEDIFF": _build_datediff,
@@ -799,6 +860,7 @@
             "ZEROIFNULL": _build_if_from_zeroifnull,
             "LIKE": _build_like(exp.Like),
             "ILIKE": _build_like(exp.ILike),
+            "SEARCH": _build_search,
         }
         FUNCTIONS.pop("PREDICT")
 
@@ -1364,7 +1426,13 @@
             exp.ArgMax: rename_func("MAX_BY"),
             exp.ArgMin: rename_func("MIN_BY"),
             exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
-            exp.ArrayContains: lambda self, e: self.func(
+            exp.ArrayContains: lambda self, e: self.func(
+                "ARRAY_CONTAINS",
+                e.expression
+                if e.args.get("ensure_variant") is False
+                else exp.cast(e.expression, exp.DataType.Type.VARIANT, copy=False),
+                e.this,
+            ),
             exp.ArrayIntersect: rename_func("ARRAY_INTERSECTION"),
             exp.AtTimeZone: lambda self, e: self.func(
                 "CONVERT_TIMEZONE", e.args.get("zone"), e.this
@@ -1894,3 +1962,13 @@
                 return self.func("TO_CHAR", expression.expressions[0])
 
             return self.function_fallback_sql(expression)
+
+        def splitpart_sql(self, expression: exp.SplitPart) -> str:
+            # Set part_index to 1 if missing
+            if not expression.args.get("delimiter"):
+                expression.set("delimiter", exp.Literal.string(" "))
+
+            if not expression.args.get("part_index"):
+                expression.set("part_index", exp.Literal.number(1))
+
+            return rename_func("SPLIT_PART")(self, expression)
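
Taken together, the Snowflake hunks add a parse-time lowering for `DIV0NULL`, documented defaults for `STRTOK`/`SPLIT_PART`, a builder for `SEARCH`, and an `ensure_variant` flag on `ARRAY_CONTAINS`. A minimal sketch of the user-visible effect, assuming sqlglot >= 27.21.0 (outputs are indicative, not verbatim):

```python
import sqlglot

# DIV0NULL(a, b) is rewritten at parse time into IF(b = 0 OR b IS NULL, 0, a / b),
# so it can be transpiled to dialects that lack the function.
print(sqlglot.transpile("SELECT DIV0NULL(a, b)", read="snowflake", write="duckdb")[0])

# STRTOK picks up its documented defaults (delimiter ' ', part_index 1) and is
# normalized onto exp.SplitPart, which the generator renders as SPLIT_PART.
print(sqlglot.transpile("SELECT STRTOK(s)", read="snowflake", write="snowflake")[0])

# With ensure_variant=False, ARRAY_CONTAINS round-trips without an extra
# CAST(... AS VARIANT) being injected around the first argument.
print(sqlglot.transpile("SELECT ARRAY_CONTAINS(x, arr)", read="snowflake", write="snowflake")[0])
```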
--- sqlglot-27.20.0/sqlglot/expressions.py
+++ sqlglot-27.21.0/sqlglot/expressions.py
@@ -5385,7 +5385,7 @@ class TimeUnit(Expression):
 
     def __init__(self, **args):
         unit = args.get("unit")
-        if type(unit) in self.VAR_LIKE:
+        if type(unit) in self.VAR_LIKE and not (isinstance(unit, Column) and len(unit.parts) != 1):
             args["unit"] = Var(
                 this=(self.UNABBREVIATED_UNIT_NAME.get(unit.name) or unit.name).upper()
             )
@@ -5525,6 +5525,10 @@ class Coth(Func):
     pass
 
 
+class Cos(Func):
+    pass
+
+
 class Csc(Func):
     pass
 
@@ -5549,6 +5553,18 @@ class Sinh(Func):
     pass
 
 
+class Tan(Func):
+    pass
+
+
+class Degrees(Func):
+    pass
+
+
+class Cosh(Func):
+    pass
+
+
 class CosineDistance(Func):
     arg_types = {"this": True, "expression": True}
 
@@ -5840,6 +5856,7 @@ class ArrayConstructCompact(Func):
 
 
 class ArrayContains(Binary, Func):
+    arg_types = {"this": True, "expression": True, "ensure_variant": False}
     _sql_names = ["ARRAY_CONTAINS", "ARRAY_HAS"]
 
 
@@ -6172,7 +6189,9 @@ class DateTrunc(Func):
         unabbreviate = args.pop("unabbreviate", True)
 
         unit = args.get("unit")
-        if isinstance(unit, TimeUnit.VAR_LIKE):
+        if isinstance(unit, TimeUnit.VAR_LIKE) and not (
+            isinstance(unit, Column) and len(unit.parts) != 1
+        ):
             unit_name = unit.name.upper()
             if unabbreviate and unit_name in TimeUnit.UNABBREVIATED_UNIT_NAME:
                 unit_name = TimeUnit.UNABBREVIATED_UNIT_NAME[unit_name]
@@ -7279,6 +7298,10 @@ class RegexpILike(Binary, Func):
     arg_types = {"this": True, "expression": True, "flag": False}
 
 
+class RegexpFullMatch(Binary, Func):
+    arg_types = {"this": True, "expression": True, "options": False}
+
+
 class RegexpInstr(Func):
     arg_types = {
         "this": True,
@@ -7380,13 +7403,20 @@ class Soundex(Func):
     pass
 
 
+# https://docs.snowflake.com/en/sql-reference/functions/soundex_p123
+class SoundexP123(Func):
+    pass
+
+
 class Split(Func):
     arg_types = {"this": True, "expression": True, "limit": False}
 
 
 # https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.split_part.html
+# https://docs.snowflake.com/en/sql-reference/functions/split_part
+# https://docs.snowflake.com/en/sql-reference/functions/strtok
 class SplitPart(Func):
-    arg_types = {"this": True, "delimiter":
+    arg_types = {"this": True, "delimiter": False, "part_index": False}
 
 
 # Start may be omitted in the case of postgres
@@ -7430,6 +7460,19 @@ class StrPosition(Func):
     }
 
 
+# Snowflake: https://docs.snowflake.com/en/sql-reference/functions/search
+# BigQuery: https://cloud.google.com/bigquery/docs/reference/standard-sql/search_functions#search
+class Search(Func):
+    arg_types = {
+        "this": True,  # data_to_search / search_data
+        "expression": True,  # search_query / search_string
+        "json_scope": False,  # BigQuery: JSON_VALUES | JSON_KEYS | JSON_KEYS_AND_VALUES
+        "analyzer": False,  # Both: analyzer / ANALYZER
+        "analyzer_options": False,  # BigQuery: analyzer_options_values
+        "search_mode": False,  # Snowflake: OR | AND
+    }
+
+
 class StrToDate(Func):
     arg_types = {"this": True, "format": False, "safe": False}
 
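
The expressions.py hunks add several plain `Func` nodes (`Cos`, `Cosh`, `Tan`, `Degrees`, `RegexpFullMatch`, `SoundexP123`, `Search`) and make `SplitPart`'s `delimiter`/`part_index` optional. A minimal sketch of constructing the new nodes directly, assuming sqlglot >= 27.21.0 (rendered SQL is indicative):

```python
from sqlglot import exp

# exp.Search covers both the Snowflake and BigQuery SEARCH signatures; only
# this/expression are required, the remaining kwargs are optional.
search = exp.Search(this=exp.column("haystack"), expression=exp.Literal.string("needle"))
print(search.sql("snowflake"))  # e.g. SEARCH(haystack, 'needle')

# SplitPart no longer requires delimiter/part_index; the Snowflake generator
# fills in the documented defaults (' ' and 1) when they are missing.
part = exp.SplitPart(this=exp.column("s"))
print(part.sql("snowflake"))
```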
--- sqlglot-27.20.0/sqlglot/optimizer/canonicalize.py
+++ sqlglot-27.21.0/sqlglot/optimizer/canonicalize.py
@@ -77,7 +77,7 @@ def coerce_type(node: exp.Expression, promote_to_inferred_datetime_type: bool) -
         _coerce_date(node.left, node.right, promote_to_inferred_datetime_type)
     elif isinstance(node, exp.Between):
         _coerce_date(node.this, node.args["low"], promote_to_inferred_datetime_type)
-    elif isinstance(node, exp.Extract) and not node.expression.
+    elif isinstance(node, exp.Extract) and not node.expression.is_type(
         *exp.DataType.TEMPORAL_TYPES
     ):
         _replace_cast(node.expression, exp.DataType.Type.DATETIME)
--- sqlglot-27.20.0/sqlglot/optimizer/merge_subqueries.py
+++ sqlglot-27.21.0/sqlglot/optimizer/merge_subqueries.py
@@ -201,6 +201,7 @@ def _mergeable(
         and not outer_scope.pivots
         and not any(e.find(exp.AggFunc, exp.Select, exp.Explode) for e in inner_select.expressions)
         and not (leave_tables_isolated and len(outer_scope.selected_sources) > 1)
+        and not (isinstance(from_or_join, exp.Join) and inner_select.args.get("joins"))
         and not (
             isinstance(from_or_join, exp.Join)
             and inner_select.args.get("where")
@@ -282,6 +283,7 @@ def _merge_joins(outer_scope: Scope, inner_scope: Scope, from_or_join: FromOrJoin
     new_joins = []
 
     joins = inner_scope.expression.args.get("joins") or []
+
     for join in joins:
         new_joins.append(join)
         outer_scope.add_source(join.alias_or_name, inner_scope.sources[join.alias_or_name])
--- sqlglot-27.20.0/sqlglot/parser.py
+++ sqlglot-27.21.0/sqlglot/parser.py
@@ -1141,11 +1141,6 @@ class Parser(metaclass=_Parser):
         "TTL": lambda self: self.expression(exp.MergeTreeTTL, expressions=[self._parse_bitwise()]),
         "UNIQUE": lambda self: self._parse_unique(),
         "UPPERCASE": lambda self: self.expression(exp.UppercaseColumnConstraint),
-        "WATERMARK": lambda self: self.expression(
-            exp.WatermarkColumnConstraint,
-            this=self._match(TokenType.FOR) and self._parse_column(),
-            expression=self._match(TokenType.ALIAS) and self._parse_disjunction(),
-        ),
         "WITH": lambda self: self.expression(
             exp.Properties, expressions=self._parse_wrapped_properties()
         ),
@@ -1211,7 +1206,6 @@
         "PERIOD",
         "PRIMARY KEY",
         "UNIQUE",
-        "WATERMARK",
         "BUCKET",
         "TRUNCATE",
     }
@@ -4592,14 +4586,10 @@
             before_with_index = self._index
             with_prefix = self._match(TokenType.WITH)
 
-            if self._match(TokenType.ROLLUP):
-                elements["rollup"].append(
-                    self._parse_cube_or_rollup(exp.Rollup, with_prefix=with_prefix)
-                )
-            elif self._match(TokenType.CUBE):
-                elements["cube"].append(
-                    self._parse_cube_or_rollup(exp.Cube, with_prefix=with_prefix)
-                )
+            cube_or_rollup = self._parse_cube_or_rollup(with_prefix=with_prefix)
+            if cube_or_rollup:
+                key = "rollup" if isinstance(cube_or_rollup, exp.Rollup) else "cube"
+                elements[key].append(cube_or_rollup)
             elif self._match(TokenType.GROUPING_SETS):
                 elements["grouping_sets"].append(
                     self.expression(
@@ -4619,18 +4609,20 @@
 
         return self.expression(exp.Group, comments=comments, **elements)  # type: ignore
 
-    def _parse_cube_or_rollup(self,
+    def _parse_cube_or_rollup(self, with_prefix: bool = False) -> t.Optional[exp.Cube | exp.Rollup]:
+        if self._match(TokenType.CUBE):
+            kind: t.Type[exp.Cube | exp.Rollup] = exp.Cube
+        elif self._match(TokenType.ROLLUP):
+            kind = exp.Rollup
+        else:
+            return None
+
         return self.expression(
             kind, expressions=[] if with_prefix else self._parse_wrapped_csv(self._parse_column)
         )
 
     def _parse_grouping_set(self) -> t.Optional[exp.Expression]:
-        if self._match(TokenType.L_PAREN):
-            grouping_set = self._parse_csv(self._parse_bitwise)
-            self._match_r_paren()
-            return self.expression(exp.Tuple, expressions=grouping_set)
-
-        return self._parse_column()
+        return self._parse_cube_or_rollup() or self._parse_bitwise()
 
     def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Having]:
         if not skip_having_token and not self._match(TokenType.HAVING):
@@ -4749,11 +4741,15 @@
             exp.Ordered, this=this, desc=desc, nulls_first=nulls_first, with_fill=with_fill
         )
 
-    def _parse_limit_options(self) -> exp.LimitOptions:
-        percent = self.
+    def _parse_limit_options(self) -> t.Optional[exp.LimitOptions]:
+        percent = self._match_set((TokenType.PERCENT, TokenType.MOD))
         rows = self._match_set((TokenType.ROW, TokenType.ROWS))
         self._match_text_seq("ONLY")
         with_ties = self._match_text_seq("WITH", "TIES")
+
+        if not (percent or rows or with_ties):
+            return None
+
         return self.expression(exp.LimitOptions, percent=percent, rows=rows, with_ties=with_ties)
 
     def _parse_limit(
@@ -4771,10 +4767,13 @@
                 if limit_paren:
                     self._match_r_paren()
 
-                limit_options = self._parse_limit_options()
             else:
-
-
+                # Parsing LIMIT x% (i.e x PERCENT) as a term leads to an error, since
+                # we try to build an exp.Mod expr. For that matter, we backtrack and instead
+                # consume the factor plus parse the percentage separately
+                expression = self._try_parse(self._parse_term) or self._parse_factor()
+
+            limit_options = self._parse_limit_options()
 
             if self._match(TokenType.COMMA):
                 offset = expression
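
The parser hunks consolidate CUBE/ROLLUP detection into `_parse_cube_or_rollup`, which also lets `GROUPING SETS` elements be ROLLUP/CUBE specs, and make `_parse_limit_options` return `None` when no option is present. A minimal sketch, assuming sqlglot >= 27.21.0 (output formatting is indicative):

```python
import sqlglot

# GROUPING SETS entries may now themselves be ROLLUP/CUBE specs, handled by the
# shared _parse_cube_or_rollup helper instead of an ad-hoc tuple parser.
sql = "SELECT a, b, SUM(c) FROM t GROUP BY GROUPING SETS (ROLLUP (a, b), CUBE (b))"
print(sqlglot.parse_one(sql).sql())
```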
--- sqlglot-27.20.0/sqlglot/tokens.py
+++ sqlglot-27.21.0/sqlglot/tokens.py
@@ -1421,7 +1421,11 @@ class Tokenizer(metaclass=_Tokenizer):
                 raise_unmatched=not self.HEREDOC_TAG_IS_IDENTIFIER,
             )
 
-            if
+            if (
+                tag
+                and self.HEREDOC_TAG_IS_IDENTIFIER
+                and (self._end or tag.isdigit() or any(c.isspace() for c in tag))
+            ):
                 if not self._end:
                     self._advance(-1)
 