sqlframe 3.39.1__tar.gz → 3.39.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sqlframe-3.39.1 → sqlframe-3.39.3}/PKG-INFO +1 -1
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/bigquery.md +1 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/duckdb.md +1 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/postgres.md +1 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/snowflake.md +1 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/setup.py +2 -1
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/_version.py +3 -3
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/dataframe.py +53 -24
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/functions.py +12 -4
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/operations.py +4 -3
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/session.py +6 -1
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/util.py +15 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe.egg-info/PKG-INFO +1 -1
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe.egg-info/requires.txt +2 -1
- sqlframe-3.39.3/tests/integration/engines/duck/test_duckdb_session.py +38 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/test_int_functions.py +11 -1
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/test_int_dataframe.py +95 -4
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_session.py +0 -21
- sqlframe-3.39.1/tests/integration/engines/duck/test_duckdb_session.py +0 -14
- {sqlframe-3.39.1 → sqlframe-3.39.3}/.github/CODEOWNERS +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/.github/workflows/main.workflow.yaml +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/.github/workflows/publish.workflow.yaml +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/.gitignore +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/.pre-commit-config.yaml +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/.readthedocs.yaml +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/LICENSE +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/Makefile +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/README.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/add_chatgpt_support.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/adding_ai_to_meal.jpeg +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/hype_train.gif +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/marvin_paranoid_robot.gif +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/nonsense_sql.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/openai_full_rewrite.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/openai_replacing_cte_names.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/sqlglot_optimized_code.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/sunny_shake_head_no.gif +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/but_wait_theres_more.gif +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/cake.gif +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/you_get_pyspark_api.gif +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/sqlframe_universal_dataframe_api.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/configuration.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/databricks.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/docs/bigquery.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/docs/duckdb.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/docs/images/SF.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/docs/images/favicon.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/docs/images/sqlframe_logo.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/docs/postgres.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/images/SF.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/images/favicon.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/images/sqlframe_logo.png +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/index.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/redshift.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/requirements.txt +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/spark.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/standalone.md +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/docs/stylesheets/extra.css +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/mkdocs.yml +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/pytest.ini +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/renovate.json +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/setup.cfg +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/LICENSE +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/_typing.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/decorators.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/exceptions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/function_alternatives.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/mixins/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/mixins/catalog_mixins.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/mixins/dataframe_mixins.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/mixins/readwriter_mixins.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/mixins/table_mixins.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/normalize.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/readerwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/transforms.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/base/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/functions.pyi +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/readwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/bigquery/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/functions.pyi +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/readwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/databricks/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/functions.pyi +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/readwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/duckdb/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/functions.pyi +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/readwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/postgres/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/py.typed +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/readwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/redshift/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/functions.pyi +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/readwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/snowflake/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/functions.pyi +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/readwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/spark/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/group.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/readwriter.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/standalone/window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/testing/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe/testing/utils.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe.egg-info/SOURCES.txt +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe.egg-info/dependency_links.txt +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/sqlframe.egg-info/top_level.txt +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/common_fixtures.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/conftest.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee.csv +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee.json +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee.parquet +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/.part-00000-e5965c7b-e58f-4d3c-ad56-002876814e3a-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/.part-00002-3fed7f18-370f-4b16-b232-504d6194eb52-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/.part-00004-143c5da1-d5ab-4706-8e84-0d2a324c6894-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/.part-00006-64f07e25-c30e-4075-acc6-b3c69c4ce80b-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/.part-00008-89ccad8d-df73-4ad5-8850-82ef3884db60-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/.part-00010-812b3382-8c7f-4c4e-9bcd-09ce8664f6e0-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/_delta_log/.00000000000000000000.json.crc +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/_delta_log/00000000000000000000.json +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/part-00000-e5965c7b-e58f-4d3c-ad56-002876814e3a-c000.snappy.parquet +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/part-00002-3fed7f18-370f-4b16-b232-504d6194eb52-c000.snappy.parquet +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/part-00004-143c5da1-d5ab-4706-8e84-0d2a324c6894-c000.snappy.parquet +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/part-00006-64f07e25-c30e-4075-acc6-b3c69c4ce80b-c000.snappy.parquet +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/part-00008-89ccad8d-df73-4ad5-8850-82ef3884db60-c000.snappy.parquet +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_delta/part-00010-812b3382-8c7f-4c4e-9bcd-09ce8664f6e0-c000.snappy.parquet +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/employee_extra_line.csv +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/issue_219.csv +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds1.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds10.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds11.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds12.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds13.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds14.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds15.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds16.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds17.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds18.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds19.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds2.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds20.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds21.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds22.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds23.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds24.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds25.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds26.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds27.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds28.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds29.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds3.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds30.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds31.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds32.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds33.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds34.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds35.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds36.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds37.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds38.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds39.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds4.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds40.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds41.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds42.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds43.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds44.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds45.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds46.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds47.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds48.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds49.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds5.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds50.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds51.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds52.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds53.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds54.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds55.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds56.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds57.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds58.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds59.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds6.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds60.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds61.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds62.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds63.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds64.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds65.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds66.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds67.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds68.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds69.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds7.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds70.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds71.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds72.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds73.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds74.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds75.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds76.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds77.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds78.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds79.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds8.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds80.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds81.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds82.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds83.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds84.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds85.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds86.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds87.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds88.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds89.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds9.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds90.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds91.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds92.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds93.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds94.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds95.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds96.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds97.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds98.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/fixtures/tpcds/tpcds99.sql +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/bigquery/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/bigquery/test_bigquery_catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/bigquery/test_bigquery_dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/bigquery/test_bigquery_session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/databricks/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/databricks/test_databricks_catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/databricks/test_databricks_dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/databricks/test_databricks_session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/duck/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/duck/test_duckdb_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/duck/test_duckdb_catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/duck/test_duckdb_dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/duck/test_duckdb_reader.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/duck/test_duckdb_udf.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/duck/test_tpcds.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/postgres/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/postgres/test_postgres_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/postgres/test_postgres_catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/postgres/test_postgres_dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/postgres/test_postgres_session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/redshift/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/redshift/test_redshift_catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/redshift/test_redshift_session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/snowflake/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/snowflake/test_snowflake_catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/snowflake/test_snowflake_dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/snowflake/test_snowflake_session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/spark/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/spark/test_spark_catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/spark/test_spark_dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/test_engine_column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/test_engine_dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/test_engine_reader.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/test_engine_session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/test_engine_table.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/test_engine_writer.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/engines/test_int_testing.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/fixtures.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/test_int_dataframe_stats.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/test_int_grouped_data.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/integration/test_int_session.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/bigquery/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/bigquery/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/conftest.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/databricks/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/databricks/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/duck/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/duck/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/duck/test_reader_options.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/postgres/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/postgres/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/redshift/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/redshift/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/snowflake/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/snowflake/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/spark/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/spark/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/spark/test_reader_options.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/__init__.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/fixtures.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_column.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_dataframe.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_dataframe_writer.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_functions.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_session_case_sensitivity.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_types.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/standalone/test_window.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/test_activate.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/test_base_reader_options.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/test_catalog.py +0 -0
- {sqlframe-3.39.1 → sqlframe-3.39.3}/tests/unit/test_util.py +0 -0
@@ -413,6 +413,7 @@ See something that you would like to see supported? [Open an issue](https://gith
|
|
413
413
|
* [get_json_object](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.get_json_object.html)
|
414
414
|
* Values are returned quoted while Spark strips the quotes
|
415
415
|
* [greatest](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.greatest.html)
|
416
|
+
* [grouping](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.grouping.html)
|
416
417
|
* [hash](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.hash.html)
|
417
418
|
* Uses a different hash algorithm than Spark
|
418
419
|
* [hex](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.hex.html)
|
@@ -374,6 +374,7 @@ See something that you would like to see supported? [Open an issue](https://gith
|
|
374
374
|
* [get_json_object](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.get_json_object.html)
|
375
375
|
* Values are returned quoted while Spark strips the quotes
|
376
376
|
* [greatest](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.greatest.html)
|
377
|
+
* [grouping](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.grouping.html)
|
377
378
|
* [grouping_id](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.grouping_id.html)
|
378
379
|
* [hash](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.hash.html)
|
379
380
|
* Uses a different hash algorithm than Spark
|
@@ -377,6 +377,7 @@ See something that you would like to see supported? [Open an issue](https://gith
|
|
377
377
|
* [from_unixtime](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.from_unixtime.html)
|
378
378
|
* [get_json_object](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.get_json_object.html)
|
379
379
|
* [greatest](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.greatest.html)
|
380
|
+
* [grouping](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.grouping.html)
|
380
381
|
* [hour](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.hour.html)
|
381
382
|
* [initcap](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.initcap.html)
|
382
383
|
* [input_file_name](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.input_file_name.html)
|
@@ -402,6 +402,7 @@ See something that you would like to see supported? [Open an issue](https://gith
|
|
402
402
|
* [format_number](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.format_number.html)
|
403
403
|
* [from_unixtime](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.from_unixtime.html)
|
404
404
|
* [greatest](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.greatest.html)
|
405
|
+
* [grouping](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.grouping.html)
|
405
406
|
* [grouping_id](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.grouping_id.html)
|
406
407
|
* [hash](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.hash.html)
|
407
408
|
* The hash is calculated differently
|
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
|
|
28
28
|
commit_id: COMMIT_ID
|
29
29
|
__commit_id__: COMMIT_ID
|
30
30
|
|
31
|
-
__version__ = version = '3.39.
|
32
|
-
__version_tuple__ = version_tuple = (3, 39,
|
31
|
+
__version__ = version = '3.39.3'
|
32
|
+
__version_tuple__ = version_tuple = (3, 39, 3)
|
33
33
|
|
34
|
-
__commit_id__ = commit_id = '
|
34
|
+
__commit_id__ = commit_id = 'g9d915cb1e'
|
@@ -16,6 +16,7 @@ from dataclasses import dataclass
|
|
16
16
|
from uuid import uuid4
|
17
17
|
|
18
18
|
import sqlglot
|
19
|
+
from more_itertools import partition
|
19
20
|
from prettytable import PrettyTable
|
20
21
|
from sqlglot import Dialect, maybe_parse
|
21
22
|
from sqlglot import expressions as exp
|
@@ -31,6 +32,7 @@ from sqlframe.base.util import (
|
|
31
32
|
get_func_from_session,
|
32
33
|
get_tables_from_expression_with_join,
|
33
34
|
normalize_string,
|
35
|
+
partition_to,
|
34
36
|
quote_preserving_alias_or_name,
|
35
37
|
sqlglot_to_spark,
|
36
38
|
verify_openai_installed,
|
@@ -540,16 +542,23 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
|
|
540
542
|
expression.set("with", exp.With(expressions=existing_ctes))
|
541
543
|
return expression
|
542
544
|
|
545
|
+
@classmethod
|
546
|
+
def _get_outer_select_expressions(
|
547
|
+
cls, item: exp.Expression
|
548
|
+
) -> t.List[t.Union[exp.Column, exp.Alias]]:
|
549
|
+
outer_select = item.find(exp.Select)
|
550
|
+
if outer_select:
|
551
|
+
return outer_select.expressions
|
552
|
+
return []
|
553
|
+
|
543
554
|
@classmethod
|
544
555
|
def _get_outer_select_columns(cls, item: exp.Expression) -> t.List[Column]:
|
545
556
|
from sqlframe.base.session import _BaseSession
|
546
557
|
|
547
558
|
col = get_func_from_session("col", _BaseSession())
|
548
559
|
|
549
|
-
|
550
|
-
|
551
|
-
return [col(quote_preserving_alias_or_name(x)) for x in outer_select.expressions]
|
552
|
-
return []
|
560
|
+
outer_expressions = cls._get_outer_select_expressions(item)
|
561
|
+
return [col(quote_preserving_alias_or_name(x)) for x in outer_expressions]
|
553
562
|
|
554
563
|
def _create_hash_from_expression(self, expression: exp.Expression) -> str:
|
555
564
|
from sqlframe.base.session import _BaseSession
|
@@ -1503,20 +1512,23 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
|
|
1503
1512
|
"""
|
1504
1513
|
return func(self, *args, **kwargs) # type: ignore
|
1505
1514
|
|
1506
|
-
@operation(Operation.
|
1515
|
+
@operation(Operation.SELECT_CONSTRAINED)
|
1507
1516
|
def withColumn(self, colName: str, col: Column) -> Self:
|
1508
1517
|
return self.withColumns.__wrapped__(self, {colName: col}) # type: ignore
|
1509
1518
|
|
1510
|
-
@operation(Operation.
|
1519
|
+
@operation(Operation.SELECT_CONSTRAINED)
|
1511
1520
|
def withColumnRenamed(self, existing: str, new: str) -> Self:
|
1521
|
+
col_func = get_func_from_session("col", self.session)
|
1512
1522
|
expression = self.expression.copy()
|
1513
1523
|
existing = self.session._normalize_string(existing)
|
1514
|
-
|
1524
|
+
outer_expressions = self._get_outer_select_expressions(expression)
|
1515
1525
|
results = []
|
1516
1526
|
found_match = False
|
1517
|
-
for
|
1518
|
-
|
1519
|
-
|
1527
|
+
for expr in outer_expressions:
|
1528
|
+
column = col_func(expr.copy())
|
1529
|
+
if existing == quote_preserving_alias_or_name(expr):
|
1530
|
+
if isinstance(column.expression, exp.Alias):
|
1531
|
+
column.expression.set("alias", exp.to_identifier(new))
|
1520
1532
|
self._update_display_name_mapping([column], [new])
|
1521
1533
|
found_match = True
|
1522
1534
|
results.append(column)
|
@@ -1524,7 +1536,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
|
|
1524
1536
|
raise ValueError("Tried to rename a column that doesn't exist")
|
1525
1537
|
return self.select.__wrapped__(self, *results, skip_update_display_name_mapping=True) # type: ignore
|
1526
1538
|
|
1527
|
-
@operation(Operation.
|
1539
|
+
@operation(Operation.SELECT_CONSTRAINED)
|
1528
1540
|
def withColumnsRenamed(self, colsMap: t.Dict[str, str]) -> Self:
|
1529
1541
|
"""
|
1530
1542
|
Returns a new :class:`DataFrame` by renaming multiple columns. If a non-existing column is
|
@@ -1570,7 +1582,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
|
|
1570
1582
|
|
1571
1583
|
return self.select.__wrapped__(self, *results, skip_update_display_name_mapping=True) # type: ignore
|
1572
1584
|
|
1573
|
-
@operation(Operation.
|
1585
|
+
@operation(Operation.SELECT_CONSTRAINED)
|
1574
1586
|
def withColumns(self, *colsMap: t.Dict[str, Column]) -> Self:
|
1575
1587
|
"""
|
1576
1588
|
Returns a new :class:`DataFrame` by adding multiple columns or replacing the
|
@@ -1608,13 +1620,14 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
|
|
1608
1620
|
"""
|
1609
1621
|
if len(colsMap) != 1:
|
1610
1622
|
raise ValueError("Only a single map is supported")
|
1623
|
+
col_func = get_func_from_session("col")
|
1611
1624
|
col_map = {
|
1612
1625
|
self._ensure_and_normalize_col(k): (self._ensure_and_normalize_col(v), k)
|
1613
1626
|
for k, v in colsMap[0].items()
|
1614
1627
|
}
|
1615
|
-
|
1616
|
-
existing_col_names = [x.alias_or_name for x in
|
1617
|
-
select_columns =
|
1628
|
+
existing_expr = self._get_outer_select_expressions(self.expression)
|
1629
|
+
existing_col_names = [x.alias_or_name for x in existing_expr]
|
1630
|
+
select_columns = [col_func(x) for x in existing_expr]
|
1618
1631
|
for col, (col_value, display_name) in col_map.items():
|
1619
1632
|
column_name = col.alias_or_name
|
1620
1633
|
existing_col_index = (
|
@@ -1631,16 +1644,32 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
|
|
1631
1644
|
)
|
1632
1645
|
return self.select.__wrapped__(self, *select_columns, skip_update_display_name_mapping=True) # type: ignore
|
1633
1646
|
|
1634
|
-
@operation(Operation.
|
1647
|
+
@operation(Operation.SELECT_CONSTRAINED)
|
1635
1648
|
def drop(self, *cols: t.Union[str, Column]) -> Self:
|
1636
|
-
|
1637
|
-
|
1638
|
-
|
1639
|
-
|
1640
|
-
|
1641
|
-
|
1642
|
-
|
1643
|
-
|
1649
|
+
# Separate string column names from Column objects for different handling
|
1650
|
+
column_objs, column_names = partition_to(lambda x: isinstance(x, str), cols, list, set)
|
1651
|
+
|
1652
|
+
# Normalize only the Column objects (strings will be handled as unqualified)
|
1653
|
+
drop_cols = self._ensure_and_normalize_cols(column_objs) if column_objs else []
|
1654
|
+
|
1655
|
+
# Work directly with the expression's select columns to preserve table qualifiers
|
1656
|
+
current_expressions = self.expression.expressions
|
1657
|
+
drop_sql = {drop_col.expression.sql() for drop_col in drop_cols}
|
1658
|
+
|
1659
|
+
# Create a more sophisticated matching function that considers table qualifiers
|
1660
|
+
def should_drop_expression(expr: exp.Expression) -> bool:
|
1661
|
+
# Check against fully qualified Column objects and
|
1662
|
+
# Check against unqualified string column names (drop ALL columns with this name)
|
1663
|
+
if expr.sql() in drop_sql or (
|
1664
|
+
isinstance(expr, exp.Column) and expr.alias_or_name in column_names
|
1665
|
+
):
|
1666
|
+
return True
|
1667
|
+
return False
|
1668
|
+
|
1669
|
+
new_expressions = [expr for expr in current_expressions if not should_drop_expression(expr)]
|
1670
|
+
return self.select.__wrapped__( # type: ignore
|
1671
|
+
self, *new_expressions, skip_update_display_name_mapping=True
|
1672
|
+
)
|
1644
1673
|
|
1645
1674
|
@operation(Operation.LIMIT)
|
1646
1675
|
def limit(self, num: int) -> Self:
|
@@ -1450,6 +1450,9 @@ def unix_timestamp(
|
|
1450
1450
|
|
1451
1451
|
session = _get_session()
|
1452
1452
|
|
1453
|
+
if session._is_duckdb or session._is_postgres or session._is_snowflake or session._is_bigquery:
|
1454
|
+
timestamp = Column.ensure_col(timestamp).cast("string")
|
1455
|
+
|
1453
1456
|
if session._is_bigquery:
|
1454
1457
|
return unix_timestamp_bgutil(timestamp, format)
|
1455
1458
|
|
@@ -1984,7 +1987,7 @@ def initcap(col: ColumnOrName) -> Column:
|
|
1984
1987
|
|
1985
1988
|
@meta()
|
1986
1989
|
def soundex(col: ColumnOrName) -> Column:
|
1987
|
-
return Column.
|
1990
|
+
return Column.invoke_expression_over_column(col, expression.Soundex)
|
1988
1991
|
|
1989
1992
|
|
1990
1993
|
@meta(unsupported_engines=["postgres", "snowflake"])
|
@@ -2053,7 +2056,11 @@ def bit_length(col: ColumnOrName) -> Column:
|
|
2053
2056
|
|
2054
2057
|
@meta()
|
2055
2058
|
def translate(srcCol: ColumnOrName, matching: str, replace: str) -> Column:
|
2056
|
-
return Column.
|
2059
|
+
return Column.invoke_expression_over_column(
|
2060
|
+
srcCol,
|
2061
|
+
expression.Translate,
|
2062
|
+
**{"from": lit(matching).column_expression, "to": lit(replace).column_expression},
|
2063
|
+
)
|
2057
2064
|
|
2058
2065
|
|
2059
2066
|
@meta()
|
@@ -3380,7 +3387,7 @@ def get_active_spark_context() -> SparkContext:
|
|
3380
3387
|
return session.spark_session.sparkContext
|
3381
3388
|
|
3382
3389
|
|
3383
|
-
@meta(
|
3390
|
+
@meta()
|
3384
3391
|
def grouping(col: ColumnOrName) -> Column:
|
3385
3392
|
"""
|
3386
3393
|
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
|
@@ -3413,7 +3420,7 @@ def grouping(col: ColumnOrName) -> Column:
|
|
3413
3420
|
| Bob| 0| 5|
|
3414
3421
|
+-----+--------------+--------+
|
3415
3422
|
"""
|
3416
|
-
return Column.
|
3423
|
+
return Column(expression.Grouping(expressions=[Column.ensure_col(col).column_expression]))
|
3417
3424
|
|
3418
3425
|
|
3419
3426
|
@meta(unsupported_engines="*")
|
@@ -6338,6 +6345,7 @@ def to_unix_timestamp(
|
|
6338
6345
|
|
6339
6346
|
if session._is_duckdb:
|
6340
6347
|
format = format or _BaseSession().default_time_format
|
6348
|
+
timestamp = Column.ensure_col(timestamp).cast("string")
|
6341
6349
|
|
6342
6350
|
if format is not None:
|
6343
6351
|
return Column.invoke_expression_over_column(
|
@@ -27,9 +27,10 @@ class Operation(IntEnum):
|
|
27
27
|
WHERE = 2
|
28
28
|
GROUP_BY = 3
|
29
29
|
HAVING = 4
|
30
|
-
|
31
|
-
|
32
|
-
|
30
|
+
SELECT_CONSTRAINED = 5
|
31
|
+
SELECT = 6
|
32
|
+
ORDER_BY = 7
|
33
|
+
LIMIT = 8
|
33
34
|
|
34
35
|
|
35
36
|
# We want to decorate a function (self: DF, *args, **kwargs) -> T
|
@@ -179,7 +179,7 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
|
|
179
179
|
return self._table(self, *args, **kwargs)
|
180
180
|
|
181
181
|
def __new__(cls, *args, **kwargs):
|
182
|
-
if _BaseSession._instance is None:
|
182
|
+
if _BaseSession._instance is None or not isinstance(_BaseSession._instance, cls):
|
183
183
|
_BaseSession._instance = super().__new__(cls)
|
184
184
|
return _BaseSession._instance
|
185
185
|
|
@@ -194,6 +194,11 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
|
|
194
194
|
def getActiveSession(self) -> Self:
|
195
195
|
return self
|
196
196
|
|
197
|
+
def stop(self) -> None:
|
198
|
+
if connection := getattr(self, "_connection", None):
|
199
|
+
connection.close()
|
200
|
+
_BaseSession._instance = None
|
201
|
+
|
197
202
|
def range(
|
198
203
|
self,
|
199
204
|
start: int,
|
@@ -6,6 +6,7 @@ import string
|
|
6
6
|
import typing as t
|
7
7
|
import unicodedata
|
8
8
|
|
9
|
+
from more_itertools import partition
|
9
10
|
from sqlglot import expressions as exp
|
10
11
|
from sqlglot import parse_one, to_table
|
11
12
|
from sqlglot.dialects import DuckDB
|
@@ -537,3 +538,17 @@ def is_relativedelta_like(value: t.Any) -> bool:
|
|
537
538
|
and hasattr(value, "weeks")
|
538
539
|
and hasattr(value, "leapdays")
|
539
540
|
)
|
541
|
+
|
542
|
+
|
543
|
+
T = t.TypeVar("T")
|
544
|
+
R1 = t.TypeVar("R1")
|
545
|
+
R2 = t.TypeVar("R2")
|
546
|
+
|
547
|
+
|
548
|
+
def partition_to(
|
549
|
+
pred: t.Callable[[T], bool],
|
550
|
+
iterable: t.Iterable[T],
|
551
|
+
result1: t.Type[R1],
|
552
|
+
result2: t.Type[R2],
|
553
|
+
) -> tuple[R1, R2]:
|
554
|
+
return (lambda x, y: (result1(x), result2(y)))(*partition(pred, iterable)) # type: ignore
|
@@ -0,0 +1,38 @@
|
|
1
|
+
import pytest
|
2
|
+
from duckdb.duckdb import ConnectionException
|
3
|
+
from sqlglot import exp
|
4
|
+
|
5
|
+
from sqlframe.base.types import Row
|
6
|
+
from sqlframe.duckdb.session import DuckDBSession
|
7
|
+
|
8
|
+
pytest_plugins = ["tests.common_fixtures"]
|
9
|
+
|
10
|
+
|
11
|
+
def test_session_from_config():
|
12
|
+
import duckdb
|
13
|
+
|
14
|
+
conn = duckdb.connect()
|
15
|
+
conn.execute("CREATE TABLE test_table (cola INT, colb STRING)")
|
16
|
+
session = DuckDBSession.builder.config("sqlframe.conn", conn).getOrCreate()
|
17
|
+
columns = session.catalog.get_columns("test_table")
|
18
|
+
assert columns == {"cola": exp.DataType.build("INT"), "colb": exp.DataType.build("TEXT")}
|
19
|
+
assert session.execution_dialect_name == "duckdb"
|
20
|
+
|
21
|
+
|
22
|
+
@pytest.mark.forked
|
23
|
+
def test_session_stop(duckdb_session: DuckDBSession):
|
24
|
+
assert duckdb_session.range(1, 2).collect() == [Row(id=1)]
|
25
|
+
duckdb_session.stop()
|
26
|
+
with pytest.raises(ConnectionException):
|
27
|
+
duckdb_session.range(1, 10).collect()
|
28
|
+
|
29
|
+
|
30
|
+
@pytest.mark.forked
|
31
|
+
def test_session_new_session(duckdb_session: DuckDBSession):
|
32
|
+
# Remove old session
|
33
|
+
assert duckdb_session.range(1, 2).collect() == [Row(id=1)]
|
34
|
+
duckdb_session.stop()
|
35
|
+
new_session = DuckDBSession.builder.getOrCreate()
|
36
|
+
assert new_session is not duckdb_session
|
37
|
+
assert isinstance(new_session, DuckDBSession)
|
38
|
+
assert new_session.range(1, 2).collect() == [Row(id=1)]
|
@@ -1502,11 +1502,15 @@ def test_from_unixtime(get_session_and_func):
|
|
1502
1502
|
assert df.select(from_unixtime("unix_time").alias("ts")).first()[0] == expected
|
1503
1503
|
|
1504
1504
|
|
1505
|
-
def test_unix_timestamp(get_session_and_func):
|
1505
|
+
def test_unix_timestamp(get_session_and_func, get_func):
|
1506
1506
|
session, unix_timestamp = get_session_and_func("unix_timestamp")
|
1507
1507
|
df = session.createDataFrame([("2015-04-08",)], ["dt"])
|
1508
1508
|
result = df.select(unix_timestamp("dt", "yyyy-MM-dd").alias("unix_time")).first()[0]
|
1509
1509
|
assert result == 1428451200
|
1510
|
+
ts_type = "TIMESTAMP" if isinstance(session, PySparkSession) else "TIMESTAMPNTZ"
|
1511
|
+
df = session.createDataFrame([(datetime.datetime(2015, 4, 8),)], schema=f"ts {ts_type}")
|
1512
|
+
result = df.select(unix_timestamp("ts").alias("unix_time")).first()[0]
|
1513
|
+
assert result == 1428451200
|
1510
1514
|
|
1511
1515
|
|
1512
1516
|
def test_from_utc_timestamp(get_session_and_func):
|
@@ -5003,6 +5007,12 @@ def test_to_unix_timestamp(get_session_and_func, get_func):
|
|
5003
5007
|
else:
|
5004
5008
|
df = session.createDataFrame([("2016-04-08",)], ["e"])
|
5005
5009
|
assert df.select(to_unix_timestamp(df.e).alias("r")).collect() == [Row(r=None)]
|
5010
|
+
ts_type = "TIMESTAMP" if isinstance(session, PySparkSession) else "TIMESTAMPNTZ"
|
5011
|
+
df = session.createDataFrame([(datetime.datetime(2015, 4, 8),)], schema=f"ts {ts_type}")
|
5012
|
+
result = df.select(
|
5013
|
+
to_unix_timestamp("ts", lit("yyyy-MM-dd HH:mm:ss")).alias("unix_time")
|
5014
|
+
).first()[0]
|
5015
|
+
assert result == 1428451200
|
5006
5016
|
|
5007
5017
|
|
5008
5018
|
def test_to_varchar(get_session_and_func, get_func):
|
@@ -1892,6 +1892,97 @@ def test_drop_column_reference_join(
|
|
1892
1892
|
compare_frames(df, dfs, sort=True)
|
1893
1893
|
|
1894
1894
|
|
1895
|
+
# TODO: This test exposes a bug in Spark where the column order is not preserved after a join
|
1896
|
+
# for some reason spark flips foo/bar to bar/foo
|
1897
|
+
# def test_left_join_column_order(
|
1898
|
+
# pyspark_employee: PySparkDataFrame,
|
1899
|
+
# get_df: t.Callable[[str], BaseDataFrame],
|
1900
|
+
# compare_frames: t.Callable,
|
1901
|
+
# ):
|
1902
|
+
# df1 = pyspark_employee.sparkSession.createDataFrame([{"foo": 0, "bar": "a"}, {"foo": 1, "bar": "b"}]).alias("df1")
|
1903
|
+
# df2 = pyspark_employee.sparkSession.createDataFrame([{"foo": 0, "baz": 1.5}]).alias("df2")
|
1904
|
+
# df_joined = df1.join(df2, on=F.col("df1.foo") == F.col("df2.foo"), how="left")
|
1905
|
+
#
|
1906
|
+
# employee = get_df("employee")
|
1907
|
+
# dfs1 = employee.sparkSession.createDataFrame([{"foo": 0, "bar": "a"}, {"foo": 1, "bar": "b"}]).alias("dfs1")
|
1908
|
+
# dfs2 = employee.sparkSession.createDataFrame([{"foo": 0, "baz": 1.5}]).alias("dfs2")
|
1909
|
+
# dfs_joined = dfs1.join(dfs2, on=SF.col("dfs1.foo") == SF.col("dfs2.foo"), how="left")
|
1910
|
+
#
|
1911
|
+
# compare_frames(df_joined, dfs_joined, sort=True)
|
1912
|
+
|
1913
|
+
|
1914
|
+
# def test_drop_column_join_column(
|
1915
|
+
# pyspark_employee: PySparkDataFrame,
|
1916
|
+
# get_df: t.Callable[[str], BaseDataFrame],
|
1917
|
+
# compare_frames: t.Callable,
|
1918
|
+
# ):
|
1919
|
+
# df1 = pyspark_employee.sparkSession.createDataFrame([{"foo": 0, "bar": "a"}, {"foo": 1, "bar": "b"}]).alias("df1")
|
1920
|
+
# df2 = pyspark_employee.sparkSession.createDataFrame([{"foo": 0, "baz": 1.5}]).alias("df2")
|
1921
|
+
# df_joined = df1.join(df2, on=F.col("df1.foo") == F.col("df2.foo"), how="left").drop(F.col("df1.foo"))
|
1922
|
+
#
|
1923
|
+
# employee = get_df("employee")
|
1924
|
+
# dfs1 = employee.sparkSession.createDataFrame([{"foo": 0, "bar": "a"}, {"foo": 1, "bar": "b"}]).alias("dfs1")
|
1925
|
+
# dfs2 = employee.sparkSession.createDataFrame([{"foo": 0, "baz": 1.5}]).alias("dfs2")
|
1926
|
+
# dfs_joined = dfs1.join(dfs2, on=SF.col("dfs1.foo") == SF.col("dfs2.foo"), how="left").drop(SF.col("dfs1.foo"))
|
1927
|
+
#
|
1928
|
+
# compare_frames(df_joined, dfs_joined, sort=True)
|
1929
|
+
|
1930
|
+
|
1931
|
+
def test_drop_column_join_column_df_reference(
|
1932
|
+
pyspark_employee: PySparkDataFrame,
|
1933
|
+
get_df: t.Callable[[str], BaseDataFrame],
|
1934
|
+
compare_frames: t.Callable,
|
1935
|
+
):
|
1936
|
+
df1 = pyspark_employee.sparkSession.createDataFrame( # type: ignore
|
1937
|
+
[{"foo": 0, "bar": "a"}, {"foo": 1, "bar": "b"}]
|
1938
|
+
).alias("df1")
|
1939
|
+
df2 = pyspark_employee.sparkSession.createDataFrame([{"foo": 0, "baz": 1.5}]).alias("df2") # type: ignore
|
1940
|
+
df_joined = (
|
1941
|
+
df1.join(df2, on=F.col("df1.foo") == F.col("df2.foo"), how="left")
|
1942
|
+
.drop(df1.foo)
|
1943
|
+
.select(df1.bar, df2.baz, df2.foo)
|
1944
|
+
)
|
1945
|
+
|
1946
|
+
employee = get_df("employee")
|
1947
|
+
dfs1 = employee.sparkSession.createDataFrame(
|
1948
|
+
[{"foo": 0, "bar": "a"}, {"foo": 1, "bar": "b"}]
|
1949
|
+
).alias("dfs1")
|
1950
|
+
dfs2 = employee.sparkSession.createDataFrame([{"foo": 0, "baz": 1.5}]).alias("dfs2")
|
1951
|
+
dfs_joined = (
|
1952
|
+
dfs1.join(dfs2, on=SF.col("dfs1.foo") == SF.col("dfs2.foo"), how="left")
|
1953
|
+
.drop(dfs1.foo)
|
1954
|
+
.select(dfs1.bar, dfs2.baz, dfs2.foo)
|
1955
|
+
)
|
1956
|
+
|
1957
|
+
compare_frames(df_joined, dfs_joined, sort=True, compare_schema=False)
|
1958
|
+
|
1959
|
+
|
1960
|
+
def test_drop_join_column_unqualified(
|
1961
|
+
pyspark_employee: PySparkDataFrame,
|
1962
|
+
get_df: t.Callable[[str], BaseDataFrame],
|
1963
|
+
compare_frames: t.Callable,
|
1964
|
+
):
|
1965
|
+
df1 = pyspark_employee.sparkSession.createDataFrame( # type: ignore
|
1966
|
+
[{"foo": 0, "bar": "a"}, {"foo": 1, "bar": "b"}]
|
1967
|
+
).alias("df1")
|
1968
|
+
df2 = pyspark_employee.sparkSession.createDataFrame([{"foo": 0, "baz": 1.5}]).alias("df2") # type: ignore
|
1969
|
+
df_joined = (
|
1970
|
+
df1.join(df2, on=F.col("df1.foo") == F.col("df2.foo"), how="left").drop("foo")
|
1971
|
+
# select the columns to work around column order bug
|
1972
|
+
)
|
1973
|
+
|
1974
|
+
employee = get_df("employee")
|
1975
|
+
dfs1 = employee.sparkSession.createDataFrame(
|
1976
|
+
[{"foo": 0, "bar": "a"}, {"foo": 1, "bar": "b"}]
|
1977
|
+
).alias("dfs1")
|
1978
|
+
dfs2 = employee.sparkSession.createDataFrame([{"foo": 0, "baz": 1.5}]).alias("dfs2")
|
1979
|
+
dfs_joined = dfs1.join(dfs2, on=SF.col("dfs1.foo") == SF.col("dfs2.foo"), how="left").drop(
|
1980
|
+
"foo"
|
1981
|
+
)
|
1982
|
+
|
1983
|
+
compare_frames(df_joined, dfs_joined, sort=True, compare_schema=False)
|
1984
|
+
|
1985
|
+
|
1895
1986
|
def test_limit(
|
1896
1987
|
pyspark_employee: PySparkDataFrame,
|
1897
1988
|
get_df: t.Callable[[str], BaseDataFrame],
|
@@ -2167,10 +2258,10 @@ def test_transform(
|
|
2167
2258
|
get_df: t.Callable[[str], BaseDataFrame],
|
2168
2259
|
compare_frames: t.Callable,
|
2169
2260
|
):
|
2170
|
-
def
|
2261
|
+
def cast_all_to_string_pyspark(input_df):
|
2171
2262
|
return input_df.select([F.col(col_name).cast("string") for col_name in input_df.columns])
|
2172
2263
|
|
2173
|
-
def
|
2264
|
+
def cast_all_to_string_sqlframe(input_df):
|
2174
2265
|
return input_df.select([SF.col(col_name).cast("string") for col_name in input_df.columns])
|
2175
2266
|
|
2176
2267
|
def sort_columns_asc(input_df):
|
@@ -2178,8 +2269,8 @@ def test_transform(
|
|
2178
2269
|
|
2179
2270
|
employee = get_df("employee")
|
2180
2271
|
|
2181
|
-
df = pyspark_employee.transform(
|
2182
|
-
dfs = employee.transform(
|
2272
|
+
df = pyspark_employee.transform(cast_all_to_string_pyspark).transform(sort_columns_asc)
|
2273
|
+
dfs = employee.transform(cast_all_to_string_sqlframe).transform(sort_columns_asc)
|
2183
2274
|
compare_frames(df, dfs)
|
2184
2275
|
|
2185
2276
|
|
@@ -136,24 +136,3 @@ def test_sql_insert(standalone_session: StandaloneSession, compare_sql: t.Callab
|
|
136
136
|
|
137
137
|
def test_session_create_builder_patterns():
|
138
138
|
assert StandaloneSession.builder.appName("abc").getOrCreate() == StandaloneSession()
|
139
|
-
|
140
|
-
|
141
|
-
# @pytest.mark.parametrize(
|
142
|
-
# "input, expected",
|
143
|
-
# [
|
144
|
-
# (
|
145
|
-
# StandaloneSession._to_row(["a"], [1]),
|
146
|
-
# types.Row(a=1),
|
147
|
-
# ),
|
148
|
-
# (
|
149
|
-
# StandaloneSession._to_row(["a", "b"], [1, 2]),
|
150
|
-
# types.Row(a=1, b=2),
|
151
|
-
# ),
|
152
|
-
# (
|
153
|
-
# StandaloneSession._to_row(["a", "a"], [1, 2]),
|
154
|
-
# types.Row(a=1, a=2),
|
155
|
-
# ),
|
156
|
-
# ],
|
157
|
-
# )
|
158
|
-
# def test_to_row(input, expected):
|
159
|
-
# assert input == expected
|
@@ -1,14 +0,0 @@
|
|
1
|
-
from sqlglot import exp
|
2
|
-
|
3
|
-
from sqlframe.duckdb.session import DuckDBSession
|
4
|
-
|
5
|
-
|
6
|
-
def test_session_from_config():
|
7
|
-
import duckdb
|
8
|
-
|
9
|
-
conn = duckdb.connect()
|
10
|
-
conn.execute("CREATE TABLE test_table (cola INT, colb STRING)")
|
11
|
-
session = DuckDBSession.builder.config("sqlframe.conn", conn).getOrCreate()
|
12
|
-
columns = session.catalog.get_columns("test_table")
|
13
|
-
assert columns == {"cola": exp.DataType.build("INT"), "colb": exp.DataType.build("TEXT")}
|
14
|
-
assert session.execution_dialect_name == "duckdb"
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
{sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/marvin_paranoid_robot.gif
RENAMED
File without changes
|
File without changes
|
{sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/openai_full_rewrite.png
RENAMED
File without changes
|
{sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/openai_replacing_cte_names.png
RENAMED
File without changes
|
{sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/sqlglot_optimized_code.png
RENAMED
File without changes
|
{sqlframe-3.39.1 → sqlframe-3.39.3}/blogs/images/add_chatgpt_support/sunny_shake_head_no.gif
RENAMED
File without changes
|
File without changes
|
File without changes
|
File without changes
|