sqlframe 3.25.0__tar.gz → 3.27.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sqlframe-3.25.0 → sqlframe-3.27.0}/PKG-INFO +1 -1
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/duckdb.md +1 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/_version.py +2 -2
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/function_alternatives.py +11 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/functions.py +17 -7
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/readerwriter.py +91 -3
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/session.py +15 -16
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/readwriter.py +7 -3
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/functions.pyi +2 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/readwriter.py +8 -5
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/readwriter.py +10 -4
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe.egg-info/PKG-INFO +1 -1
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe.egg-info/SOURCES.txt +3 -0
- sqlframe-3.27.0/tests/unit/duck/test_reader_options.py +116 -0
- sqlframe-3.27.0/tests/unit/spark/test_reader_options.py +152 -0
- sqlframe-3.27.0/tests/unit/test_base_reader_options.py +136 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/.github/CODEOWNERS +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/.github/workflows/main.workflow.yaml +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/.github/workflows/publish.workflow.yaml +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/.gitignore +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/.pre-commit-config.yaml +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/.readthedocs.yaml +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/LICENSE +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/Makefile +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/README.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/add_chatgpt_support.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/add_chatgpt_support/adding_ai_to_meal.jpeg +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/add_chatgpt_support/hype_train.gif +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/add_chatgpt_support/marvin_paranoid_robot.gif +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/add_chatgpt_support/nonsense_sql.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/add_chatgpt_support/openai_full_rewrite.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/add_chatgpt_support/openai_replacing_cte_names.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/add_chatgpt_support/sqlglot_optimized_code.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/add_chatgpt_support/sunny_shake_head_no.gif +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/but_wait_theres_more.gif +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/cake.gif +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/images/you_get_pyspark_api.gif +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/blogs/sqlframe_universal_dataframe_api.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/bigquery.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/configuration.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/databricks.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/docs/bigquery.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/docs/duckdb.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/docs/images/SF.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/docs/images/favicon.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/docs/images/favicon_old.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/docs/images/sqlframe_diagram.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/docs/images/sqlframe_logo.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/docs/postgres.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/images/SF.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/images/favicon.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/images/favicon_old.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/images/sqlframe_diagram.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/images/sqlframe_logo.png +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/index.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/postgres.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/redshift.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/requirements.txt +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/snowflake.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/spark.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/standalone.md +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/docs/stylesheets/extra.css +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/mkdocs.yml +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/pytest.ini +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/renovate.json +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/setup.cfg +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/setup.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/LICENSE +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/_typing.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/decorators.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/exceptions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/mixins/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/mixins/catalog_mixins.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/mixins/dataframe_mixins.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/mixins/readwriter_mixins.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/mixins/table_mixins.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/normalize.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/operations.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/transforms.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/util.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/functions.pyi +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/readwriter.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/bigquery/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/functions.pyi +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/functions.pyi +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/readwriter.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/postgres/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/py.typed +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/readwriter.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/redshift/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/functions.pyi +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/readwriter.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/snowflake/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/functions.pyi +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/group.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/readwriter.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/standalone/window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/testing/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/testing/utils.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe.egg-info/dependency_links.txt +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe.egg-info/requires.txt +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe.egg-info/top_level.txt +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/common_fixtures.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/conftest.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee.csv +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee.json +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee.parquet +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/.part-00000-e5965c7b-e58f-4d3c-ad56-002876814e3a-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/.part-00002-3fed7f18-370f-4b16-b232-504d6194eb52-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/.part-00004-143c5da1-d5ab-4706-8e84-0d2a324c6894-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/.part-00006-64f07e25-c30e-4075-acc6-b3c69c4ce80b-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/.part-00008-89ccad8d-df73-4ad5-8850-82ef3884db60-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/.part-00010-812b3382-8c7f-4c4e-9bcd-09ce8664f6e0-c000.snappy.parquet.crc +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/_delta_log/.00000000000000000000.json.crc +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/_delta_log/00000000000000000000.json +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/part-00000-e5965c7b-e58f-4d3c-ad56-002876814e3a-c000.snappy.parquet +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/part-00002-3fed7f18-370f-4b16-b232-504d6194eb52-c000.snappy.parquet +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/part-00004-143c5da1-d5ab-4706-8e84-0d2a324c6894-c000.snappy.parquet +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/part-00006-64f07e25-c30e-4075-acc6-b3c69c4ce80b-c000.snappy.parquet +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/part-00008-89ccad8d-df73-4ad5-8850-82ef3884db60-c000.snappy.parquet +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_delta/part-00010-812b3382-8c7f-4c4e-9bcd-09ce8664f6e0-c000.snappy.parquet +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/employee_extra_line.csv +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/issue_219.csv +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds1.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds10.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds11.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds12.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds13.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds14.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds15.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds16.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds17.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds18.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds19.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds2.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds20.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds21.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds22.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds23.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds24.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds25.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds26.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds27.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds28.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds29.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds3.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds30.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds31.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds32.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds33.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds34.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds35.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds36.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds37.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds38.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds39.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds4.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds40.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds41.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds42.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds43.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds44.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds45.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds46.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds47.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds48.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds49.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds5.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds50.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds51.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds52.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds53.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds54.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds55.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds56.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds57.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds58.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds59.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds6.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds60.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds61.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds62.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds63.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds64.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds65.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds66.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds67.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds68.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds69.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds7.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds70.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds71.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds72.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds73.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds74.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds75.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds76.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds77.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds78.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds79.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds8.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds80.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds81.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds82.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds83.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds84.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds85.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds86.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds87.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds88.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds89.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds9.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds90.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds91.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds92.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds93.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds94.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds95.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds96.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds97.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds98.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/fixtures/tpcds/tpcds99.sql +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/bigquery/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/bigquery/test_bigquery_catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/bigquery/test_bigquery_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/bigquery/test_bigquery_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/databricks/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/databricks/test_databricks_catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/databricks/test_databricks_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/databricks/test_databricks_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/duck/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/duck/test_duckdb_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/duck/test_duckdb_catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/duck/test_duckdb_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/duck/test_duckdb_reader.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/duck/test_duckdb_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/duck/test_duckdb_udf.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/duck/test_tpcds.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/postgres/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/postgres/test_postgres_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/postgres/test_postgres_catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/postgres/test_postgres_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/postgres/test_postgres_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/redshift/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/redshift/test_redshift_catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/redshift/test_redshift_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/snowflake/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/snowflake/test_snowflake_catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/snowflake/test_snowflake_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/snowflake/test_snowflake_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/spark/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/spark/test_spark_catalog.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/spark/test_spark_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/test_engine_column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/test_engine_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/test_engine_reader.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/test_engine_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/test_engine_table.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/test_engine_writer.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/test_int_functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/engines/test_int_testing.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/fixtures.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/test_int_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/test_int_dataframe_stats.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/test_int_grouped_data.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/integration/test_int_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/bigquery/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/bigquery/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/conftest.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/databricks/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/databricks/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/duck/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/duck/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/postgres/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/postgres/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/redshift/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/redshift/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/snowflake/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/snowflake/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/spark/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/spark/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/__init__.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/fixtures.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_column.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_dataframe.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_dataframe_writer.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_functions.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_session.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_session_case_sensitivity.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_types.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/standalone/test_window.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/test_activate.py +0 -0
- {sqlframe-3.25.0 → sqlframe-3.27.0}/tests/unit/test_util.py +0 -0

{sqlframe-3.25.0 → sqlframe-3.27.0}/docs/duckdb.md

```diff
@@ -417,6 +417,7 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [percent_rank](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.percent_rank.html)
 * [percentile](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.percentile.html)
 * [percentile_approx](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.percentile_approx.html)
+* [product](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.product.html)
 * [position](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.position.html)
 * [pow](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.pow.html)
 * [quarter](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.quarter.html)
```
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/function_alternatives.py

```diff
@@ -551,6 +551,17 @@ def sha1_force_sha1_and_to_hex(col: ColumnOrName) -> Column:
     )


+def sha2_sha265(col: ColumnOrName) -> Column:
+    col_func = get_func_from_session("col")
+
+    return Column(
+        expression.Anonymous(
+            this="SHA256",
+            expressions=[col_func(col).column_expression],
+        )
+    )
+
+
 def hash_from_farm_fingerprint(*cols: ColumnOrName) -> Column:
     if len(cols) > 1:
         raise ValueError("This dialect only supports a single column for calculating hash")
```
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/functions.py

```diff
@@ -1492,7 +1492,7 @@ def md5(col: ColumnOrName) -> Column:
     return Column.invoke_expression_over_column(col, expression.MD5)


-@meta(unsupported_engines=["
+@meta(unsupported_engines=["postgres"])
 def sha1(col: ColumnOrName) -> Column:
     from sqlframe.base.function_alternatives import sha1_force_sha1_and_to_hex

```
```diff
@@ -1504,8 +1504,18 @@ def sha1(col: ColumnOrName) -> Column:
     return Column.invoke_expression_over_column(col, expression.SHA)


-@meta(unsupported_engines=["bigquery", "
+@meta(unsupported_engines=["bigquery", "postgres"])
 def sha2(col: ColumnOrName, numBits: int) -> Column:
+    from sqlframe.base.function_alternatives import sha2_sha265
+
+    session = _get_session()
+
+    if session._is_duckdb:
+        if numBits in [256, 0]:
+            return sha2_sha265(col)
+        else:
+            raise ValueError("This dialect only supports SHA-265 (numBits=256 or numBits=0)")
+
     return Column.invoke_expression_over_column(col, expression.SHA2, length=lit(numBits))

```
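For context, here is a minimal usage sketch of what the new `sha2` branch means for DuckDB callers (assuming a local `DuckDBSession`; this example is not taken from the package's docs): `numBits=256` or `0` is rewritten to the `SHA256(...)` alternative, and any other digest size now fails fast instead of emitting SQL that DuckDB cannot run.

```python
# Hedged usage sketch of the sha2 change above.
from sqlframe.duckdb import DuckDBSession
from sqlframe.duckdb import functions as F

session = DuckDBSession()
df = session.createDataFrame([("abc",)], ["value"])

# numBits=256 (or 0) now routes through sha2_sha265, i.e. a plain SHA256() call.
df.select(F.sha2(F.col("value"), 256).alias("digest")).show()

# Other digest sizes raise at column-build time.
try:
    df.select(F.sha2(F.col("value"), 512))
except ValueError as exc:
    print(exc)  # This dialect only supports SHA-265 (numBits=256 or numBits=0)
```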
```diff
@@ -2355,7 +2365,7 @@ def from_json(
         schema = schema.simpleString()
     schema = schema if isinstance(schema, Column) else lit(schema)
     if options is not None:
-        options_col = create_map([lit(x) for x in _flatten(options.items())])
+        options_col = create_map([lit(str(x)) for x in _flatten(options.items())])
         return Column.invoke_anonymous_function(col, "FROM_JSON", schema, options_col)
     return Column.invoke_anonymous_function(col, "FROM_JSON", schema)

```
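The `lit(x)` → `lit(str(x))` change (repeated in the next three hunks) is easiest to see with a non-string option value: `_flatten` interleaves keys and values, so a raw `True` previously became a boolean literal inside the generated options map. A standalone illustration in plain Python (not package code):

```python
# Why option values are now stringified before becoming map literals.
options = {"allowComments": True, "mode": "PERMISSIVE"}

# _flatten-style interleaving of keys and values:
flattened = [x for kv in options.items() for x in kv]
print(flattened)                    # ['allowComments', True, 'mode', 'PERMISSIVE']

# Before: lit(True) -> a BOOLEAN literal inside a map that should be
# map<string, string>. After: lit(str(True)) -> the string 'True'.
print([str(x) for x in flattened])  # ['allowComments', 'True', 'mode', 'PERMISSIVE']
```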
```diff
@@ -2389,7 +2399,7 @@ def schema_of_json(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = No
     if isinstance(col, str):
         col = lit(col)
     if options is not None:
-        options_col = create_map([lit(x) for x in _flatten(options.items())])
+        options_col = create_map([lit(str(x)) for x in _flatten(options.items())])
         return Column.invoke_anonymous_function(col, "SCHEMA_OF_JSON", options_col)
     return Column.invoke_anonymous_function(col, "SCHEMA_OF_JSON")

```
```diff
@@ -2399,7 +2409,7 @@ def schema_of_csv(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = Non
     if isinstance(col, str):
         col = lit(col)
     if options is not None:
-        options_col = create_map([lit(x) for x in _flatten(options.items())])
+        options_col = create_map([lit(str(x)) for x in _flatten(options.items())])
         return Column.invoke_anonymous_function(col, "SCHEMA_OF_CSV", options_col)
     return Column.invoke_anonymous_function(col, "SCHEMA_OF_CSV")

```
```diff
@@ -2407,7 +2417,7 @@ def schema_of_csv(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = Non
 @meta(unsupported_engines=["bigquery", "duckdb", "postgres", "snowflake"])
 def to_csv(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = None) -> Column:
     if options is not None:
-        options_col = create_map([lit(x) for x in _flatten(options.items())])
+        options_col = create_map([lit(str(x)) for x in _flatten(options.items())])
         return Column.invoke_anonymous_function(col, "TO_CSV", options_col)
     return Column.invoke_anonymous_function(col, "TO_CSV")

```
```diff
@@ -4934,7 +4944,7 @@ def printf(format: ColumnOrName, *cols: ColumnOrName) -> Column:
     return Column.invoke_anonymous_function(format, "printf", *cols)


-@meta(unsupported_engines=["
+@meta(unsupported_engines=["bigquery", "postgres", "redshift", "snowflake", "spark", "databricks"])
 def product(col: ColumnOrName) -> Column:
     """
     Aggregate function: returns the product of the values in a group.
```
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/readerwriter.py

```diff
@@ -38,6 +38,7 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
     def __init__(self, spark: SESSION):
         self._session = spark
         self.state_format_to_read: t.Optional[str] = None
+        self.state_options: t.Dict[str, OptionalPrimitiveType] = {}

     @property
     def session(self) -> SESSION:
```
```diff
@@ -107,6 +108,88 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
         self.state_format_to_read = source
         return self

+    def options(self, **options: OptionalPrimitiveType) -> "Self":
+        """Adds input options for the underlying data source.
+
+        .. versionadded:: 1.4.0
+
+        .. versionchanged:: 3.4.0
+            Supports Spark Connect.
+
+        Parameters
+        ----------
+        **options : dict
+            The dictionary of string keys and primitive-type values.
+
+        Examples
+        --------
+        >>> spark.read.options(inferSchema=True, header=True)
+        <...readwriter.DataFrameReader object ...>
+
+        Specify the option 'nullValue' and 'header' with reading a CSV file.
+
+        >>> import tempfile
+        >>> with tempfile.TemporaryDirectory() as d:
+        ...     # Write a DataFrame into a CSV file with a header.
+        ...     df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
+        ...     df.write.option("header", True).mode("overwrite").format("csv").save(d)
+        ...
+        ...     # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
+        ...     # and 'header' option set to `True`.
+        ...     spark.read.options(
+        ...         nullValue="Hyukjin Kwon",
+        ...         header=True
+        ...     ).format('csv').load(d).show()
+        +---+----+
+        |age|name|
+        +---+----+
+        |100|NULL|
+        +---+----+
+        """
+
+        self.state_options = {**self.state_options, **options}
+        return self
+
+    def option(self, key: str, value: OptionalPrimitiveType) -> "Self":
+        """Adds an input option for the underlying data source.
+
+        .. versionadded:: 1.4.0
+
+        .. versionchanged:: 3.4.0
+            Supports Spark Connect.
+
+        Parameters
+        ----------
+        key : str
+            The key of the option.
+        value :
+            The value of the option.
+
+        Examples
+        --------
+        >>> spark.read.option("inferSchema", True)
+        <...readwriter.DataFrameReader object ...>
+
+        Specify the option 'nullValue' and 'header' with reading a CSV file.
+
+        >>> import tempfile
+        >>> with tempfile.TemporaryDirectory() as d:
+        ...     # Write a DataFrame into a CSV file with a header.
+        ...     df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
+        ...     df.write.option("header", True).mode("overwrite").format("csv").save(d)
+        ...
+        ...     # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
+        ...     # and 'header' option set to `True`.
+        ...     spark.read.option("nullValue", "Hyukjin Kwon").option("header", True).format('csv').load(d).show()
+        +---+----+
+        |age|name|
+        +---+----+
+        |100|NULL|
+        +---+----+
+        """
+        self.state_options[key] = value
+        return self
+
     def load(
         self,
         path: t.Optional[PathOrPaths] = None,
```
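Taken together with the per-engine changes below, these two methods give every SQLFrame reader the familiar PySpark builder-style API. A short usage sketch (assuming a `DuckDBSession` and a local CSV file; not taken from the package's docs):

```python
from sqlframe.duckdb import DuckDBSession

session = DuckDBSession()

# option()/options() accumulate into reader.state_options and return self,
# so calls chain exactly like PySpark's DataFrameReader.
df = (
    session.read.format("csv")
    .option("header", True)
    .options(nullValue="NA", sep=",")
    .load("tests/fixtures/employee.csv")
)
```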
```diff
@@ -220,7 +303,9 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
             modifiedAfter=modifiedAfter,
             allowNonNumericNumbers=allowNonNumericNumbers,
         )
-        return self.load(path=path, format="json", schema=schema, **options)
+        # Apply previously set options, with method-specific options taking precedence
+        all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
+        return self.load(path=path, format="json", schema=schema, **all_options)

     def parquet(self, *paths: str, **options: OptionalPrimitiveType) -> DF:
         """
```
```diff
@@ -263,7 +348,8 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
         |100|Hyukjin Kwon|
         +---+------------+
         """
-        dfs = [self.load(path=path, format="parquet", **options) for path in paths]  # type: ignore
+        all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
+        dfs = [self.load(path=path, format="parquet", **all_options) for path in paths]  # type: ignore
         return reduce(lambda a, b: a.union(b), dfs)

     def csv(
```
```diff
@@ -384,7 +470,9 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
             modifiedAfter=modifiedAfter,
             unescapedQuoteHandling=unescapedQuoteHandling,
         )
-        return self.load(path=path, format="csv", schema=schema, **options)
+        # Apply previously set options, with method-specific options taking precedence
+        all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
+        return self.load(path=path, format="csv", schema=schema, **all_options)


 class _BaseDataFrameWriter(t.Generic[SESSION, DF]):
```
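The merge expression used in `json`, `parquet`, and `csv` above encodes the precedence rule the new unit tests assert: stored `state_options` are applied first, then non-`None` per-call keyword arguments overwrite them. A small sketch of the effective merge:

```python
# Precedence sketch: per-call kwargs beat stored reader state.
state_options = {"header": True, "delimiter": ","}  # set via .option()/.options()
call_kwargs = {"header": False, "schema": None}     # passed to .csv(...)

all_options = {**state_options, **{k: v for k, v in call_kwargs.items() if v is not None}}
print(all_options)  # {'header': False, 'delimiter': ','} -- header overridden, None dropped
```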
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/base/session.py

```diff
@@ -193,22 +193,21 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
     def getActiveSession(self) -> Self:
         return self

-    def range(
-
-
-
-
-
-
-
-
-        start
-
-
-
-
-
-    )
+    def range(
+        self,
+        start: int,
+        end: t.Optional[int] = None,
+        step: int = 1,
+        numPartitions: t.Optional[int] = None,
+    ):
+        # Ensure end is provided by either args or kwargs
+        if end is None:
+            if start:
+                end = start
+                start = 0
+            else:
+                raise ValueError("range() requires an 'end' value")
+
         if numPartitions is not None:
             logger.warning("numPartitions is not supported")
         return self.createDataFrame([[x] for x in range(start, end, step)], schema={"id": "long"})
```

(The removed side of this hunk is truncated in the registry's diff view; only the fragments shown above survive.)
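The rewritten `range` mirrors PySpark's single-argument form. A quick sketch of the resulting behavior (assuming any concrete session, e.g. DuckDB):

```python
from sqlframe.duckdb import DuckDBSession

session = DuckDBSession()

session.range(5).show()        # end omitted: interpreted as range(start=0, end=5)
session.range(1, 5, 2).show()  # ids 1 and 3

try:
    session.range(0)           # start is falsy and end is missing
except ValueError as exc:
    print(exc)                 # range() requires an 'end' value
```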
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/databricks/readwriter.py

```diff
@@ -94,11 +94,15 @@ class DatabricksDataFrameReader(
         """
         assert path is not None, "path is required"
         assert isinstance(path, str), "path must be a string"
+
+        # Merge state_options with provided options, with provided options taking precedence
+        merged_options = {**self.state_options, **options}
+
         format = format or self.state_format_to_read or _infer_format(path)
         fs_prefix, filepath = split_filepath(path)

         if fs_prefix == "":
-            return super().load(path, format, schema, **options)
+            return super().load(path, format, schema, **merged_options)

         if schema:
             column_mapping = ensure_column_mapping(schema)
```
```diff
@@ -116,7 +120,7 @@ class DatabricksDataFrameReader(
         paths = ",".join([f"{path}" for path in ensure_list(path)])

         format_options: dict[str, OptionalPrimitiveType] = {
-            k: v for k, v in options.items() if v is not None
+            k: v for k, v in merged_options.items() if v is not None
         }
         format_options["format"] = format
         format_options["schemaEvolutionMode"] = "none"
```
```diff
@@ -137,7 +141,7 @@ class DatabricksDataFrameReader(
             qualify=False,
         )
         if select_columns == [exp.Star()] and df.schema:
-            return self.load(path=path, format=format, schema=df.schema, **options)
+            return self.load(path=path, format=format, schema=df.schema, **merged_options)
         self.session._last_loaded_file = path  # type: ignore
         return df

```
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/functions.pyi

```diff
@@ -148,6 +148,7 @@ from sqlframe.base.functions import percentile_approx as percentile_approx
 from sqlframe.base.functions import position as position
 from sqlframe.base.functions import pow as pow
 from sqlframe.base.functions import power as power
+from sqlframe.base.functions import product as product
 from sqlframe.base.functions import quarter as quarter
 from sqlframe.base.functions import radians as radians
 from sqlframe.base.functions import rand as rand
```
```diff
@@ -166,6 +167,7 @@ from sqlframe.base.functions import rpad as rpad
 from sqlframe.base.functions import rtrim as rtrim
 from sqlframe.base.functions import second as second
 from sqlframe.base.functions import sequence as sequence
+from sqlframe.base.functions import sha1 as sha1
 from sqlframe.base.functions import shiftLeft as shiftLeft
 from sqlframe.base.functions import shiftleft as shiftleft
 from sqlframe.base.functions import shiftRight as shiftRight
```
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/duckdb/readwriter.py

```diff
@@ -75,31 +75,34 @@ class DuckDBDataFrameReader(
         |100|NULL|
         +---+----+
         """
+        # Merge state_options with provided options, with provided options taking precedence
+        merged_options = {**self.state_options, **options}
+
         format = format or self.state_format_to_read
         if schema:
             column_mapping = ensure_column_mapping(schema)
             select_column_mapping = column_mapping.copy()
-            if options.get("filename"):
+            if merged_options.get("filename"):
                 select_column_mapping["filename"] = "VARCHAR"
             select_columns = [x.expression for x in self._to_casted_columns(select_column_mapping)]
             if format == "csv":
                 duckdb_columns = ", ".join(
                     [f"'{column}': '{dtype}'" for column, dtype in column_mapping.items()]
                 )
-                options["columns"] = "{" + duckdb_columns + "}"
+                merged_options["columns"] = "{" + duckdb_columns + "}"
         else:
             select_columns = [exp.Star()]
         if format == "delta":
             from_clause = f"delta_scan('{path}')"
         elif format:
-            options.pop("inferSchema", None)
+            merged_options.pop("inferSchema", None)
             paths = ",".join([f"'{path}'" for path in ensure_list(path)])
-            from_clause = f"read_{format}([{paths}], {to_csv(options)})"
+            from_clause = f"read_{format}([{paths}], {to_csv(merged_options)})"
         else:
             from_clause = f"'{path}'"
         df = self.session.sql(exp.select(*select_columns).from_(from_clause), qualify=False)
         if select_columns == [exp.Star()]:
-            return self.load(path=path, format=format, schema=df.schema, **options)
+            return self.load(path=path, format=format, schema=df.schema, **merged_options)
         self.session._last_loaded_file = path  # type: ignore
         return df

```
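On DuckDB the merged options end up inside the `read_csv`/`read_json` table function itself. A rough sketch of the shape of the generated `FROM` clause (the real serialization is delegated to the `to_csv` helper referenced above, whose exact formatting is assumed here):

```python
# Approximation of the from_clause construction shown in the hunk above.
merged_options = {"header": True, "nullValue": "NA"}
fmt = "csv"
paths = ",".join(f"'{p}'" for p in ["employee.csv"])

# Assumed key=value rendering; the package delegates this to to_csv(merged_options).
rendered = ",".join(f"{k}={v}" for k, v in merged_options.items())
print(f"read_{fmt}([{paths}], {rendered})")
# read_csv(['employee.csv'], header=True,nullValue=NA)
```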
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe/spark/readwriter.py

```diff
@@ -78,6 +78,10 @@ class SparkDataFrameReader(
         """
         assert path is not None, "path is required"
         assert isinstance(path, str), "path must be a string"
+
+        # Merge state_options with provided options, with provided options taking precedence
+        merged_options = {**self.state_options, **options}
+
         format = format or self.state_format_to_read or _infer_format(path)
         if schema:
             column_mapping = ensure_column_mapping(schema)
```
```diff
@@ -93,11 +97,13 @@ class SparkDataFrameReader(
             from_clause = f"delta.`{path}`"
         elif format:
             paths = ",".join([f"{path}" for path in ensure_list(path)])
-            tmp_view_key = options.get("_tmp_view_key_", f"{generate_random_identifier()}_vw")
-            options["_tmp_view_key_"] = tmp_view_key
+            tmp_view_key = merged_options.get(
+                "_tmp_view_key_", f"{generate_random_identifier()}_vw"
+            )
+            merged_options["_tmp_view_key_"] = tmp_view_key

             format_options: dict[str, OptionalPrimitiveType] = {
-                k: v for k, v in options.items() if v is not None
+                k: v for k, v in merged_options.items() if v is not None
             }
             format_options.pop("_tmp_view_key_")
             format_options["path"] = paths
```
```diff
@@ -121,7 +127,7 @@ class SparkDataFrameReader(
             qualify=False,
         )
         if select_columns == [exp.Star()] and df.schema:
-            return self.load(path=path, format=format, schema=df.schema, **options)
+            return self.load(path=path, format=format, schema=df.schema, **merged_options)
         self.session._last_loaded_file = path  # type: ignore
         return df

```
{sqlframe-3.25.0 → sqlframe-3.27.0}/sqlframe.egg-info/SOURCES.txt

```diff
@@ -359,6 +359,7 @@ tests/integration/engines/spark/test_spark_dataframe.py
 tests/unit/__init__.py
 tests/unit/conftest.py
 tests/unit/test_activate.py
+tests/unit/test_base_reader_options.py
 tests/unit/test_util.py
 tests/unit/bigquery/__init__.py
 tests/unit/bigquery/test_activate.py
```
```diff
@@ -366,6 +367,7 @@ tests/unit/databricks/__init__.py
 tests/unit/databricks/test_activate.py
 tests/unit/duck/__init__.py
 tests/unit/duck/test_activate.py
+tests/unit/duck/test_reader_options.py
 tests/unit/postgres/__init__.py
 tests/unit/postgres/test_activate.py
 tests/unit/redshift/__init__.py
```
```diff
@@ -374,6 +376,7 @@ tests/unit/snowflake/__init__.py
 tests/unit/snowflake/test_activate.py
 tests/unit/spark/__init__.py
 tests/unit/spark/test_activate.py
+tests/unit/spark/test_reader_options.py
 tests/unit/standalone/__init__.py
 tests/unit/standalone/fixtures.py
 tests/unit/standalone/test_activate.py
```
sqlframe-3.27.0/tests/unit/duck/test_reader_options.py

```diff
@@ -0,0 +1,116 @@
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from sqlframe.base.readerwriter import _BaseDataFrameReader
+from sqlframe.duckdb import DuckDBDataFrameReader
+
+
+@pytest.fixture
+def mock_duckdb_session():
+    """Create a mock DuckDBSession for testing."""
+    session = MagicMock()
+    session.input_dialect = "duckdb"
+    session.sql = MagicMock()
+    session._last_loaded_file = None
+    return session
+
+
+@pytest.fixture
+def reader(mock_duckdb_session):
+    """Create a DataFrameReader instance for testing."""
+    return DuckDBDataFrameReader(mock_duckdb_session)
+
+
+def test_options_initialization(reader):
+    """Test that options are correctly initialized."""
+    assert reader.state_options == {}
+
+
+def test_options_method(reader):
+    """Test that the options method correctly stores options."""
+    reader.options(inferSchema=True, header=True)
+    assert reader.state_options == {"inferSchema": True, "header": True}
+
+
+def test_option_method(reader):
+    """Test that the option method correctly stores a single option."""
+    reader.option("inferSchema", True)
+    assert reader.state_options == {"inferSchema": True}
+
+
+def test_options_and_option_methods_together(reader):
+    """Test that option and options methods can be used together."""
+    reader.options(inferSchema=True, header=True)
+    reader.option("delimiter", ",")
+    assert reader.state_options == {"inferSchema": True, "header": True, "delimiter": ","}
+
+
+def test_options_override(reader):
+    """Test that options override previous values with the same key."""
+    reader.options(inferSchema=True, header=True)
+    reader.options(inferSchema=False, nullValue="NULL")
+    assert reader.state_options == {"inferSchema": False, "header": True, "nullValue": "NULL"}
+
+
+def test_option_override(reader):
+    """Test that option overrides previous values with the same key."""
+    reader.option("inferSchema", True)
+    reader.option("inferSchema", False)
+    assert reader.state_options == {"inferSchema": False}
+
+
+@patch.object(DuckDBDataFrameReader, "load")
+def test_csv_uses_options(mock_load, reader):
+    """Test that the csv method uses the stored options."""
+    # Setup
+    mock_load.return_value = MagicMock()
+    reader.options(inferSchema=True, header=True, delimiter=",")
+
+    # Execute
+    reader.csv("test.csv", sep="|")
+
+    # Assert
+    call_args = mock_load.call_args
+    assert call_args.kwargs["format"] == "csv"
+    assert call_args.kwargs["path"] == "test.csv"
+    assert call_args.kwargs["inferSchema"] is True
+    assert call_args.kwargs["header"] is True
+    assert call_args.kwargs["sep"] == "|"  # Method-specific overrides global
+    assert call_args.kwargs["delimiter"] == ","
+
+
+@patch.object(DuckDBDataFrameReader, "load")
+def test_parquet_uses_options(mock_load, reader):
+    """Test that the parquet method uses the stored options."""
+    # Setup
+    mock_load.return_value = MagicMock()
+    reader.options(compression="snappy", row_group_size=1000)
+
+    # Execute
+    reader.parquet("test.parquet")
+
+    # Assert
+    call_args = mock_load.call_args
+    assert call_args.kwargs["format"] == "parquet"
+    assert call_args.kwargs["path"] == "test.parquet"
+    assert call_args.kwargs["compression"] == "snappy"
+    assert call_args.kwargs["row_group_size"] == 1000
+
+
+@patch.object(DuckDBDataFrameReader, "load")
+def test_method_specific_options_override_global_options(mock_load, reader):
+    """Test that method-specific options override global options."""
+    # Setup
+    mock_load.return_value = MagicMock()
+    reader.options(header=True, delimiter=",")
+
+    # Execute
+    reader.csv("test.csv", header=False)
+
+    # Assert
+    call_args = mock_load.call_args
+    assert call_args.kwargs["header"] is False  # Method-specific overrides global
+    assert call_args.kwargs["delimiter"] == ","
+    assert call_args.kwargs["format"] == "csv"
+    assert call_args.kwargs["path"] == "test.csv"
```