sqlframe 3.15.1__tar.gz → 3.17.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (387)
  1. {sqlframe-3.15.1 → sqlframe-3.17.0}/Makefile +2 -11
  2. {sqlframe-3.15.1 → sqlframe-3.17.0}/PKG-INFO +1 -1
  3. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/bigquery.md +1 -0
  4. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/duckdb.md +1 -0
  5. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/postgres.md +1 -0
  6. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/snowflake.md +1 -0
  7. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/_version.py +2 -2
  8. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/column.py +1 -0
  9. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/dataframe.py +118 -66
  10. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/decorators.py +1 -1
  11. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/functions.py +17 -7
  12. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/session.py +7 -2
  13. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/session.py +12 -11
  14. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe.egg-info/PKG-INFO +1 -1
  15. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/test_engine_dataframe.py +36 -14
  16. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/test_int_functions.py +1 -1
  17. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/test_int_dataframe.py +34 -1
  18. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_dataframe.py +2 -2
  19. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_dataframe_writer.py +1 -1
  20. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_functions.py +4 -4
  21. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_session_case_sensitivity.py +2 -2
  22. {sqlframe-3.15.1 → sqlframe-3.17.0}/.github/CODEOWNERS +0 -0
  23. {sqlframe-3.15.1 → sqlframe-3.17.0}/.github/workflows/main.workflow.yaml +0 -0
  24. {sqlframe-3.15.1 → sqlframe-3.17.0}/.github/workflows/publish.workflow.yaml +0 -0
  25. {sqlframe-3.15.1 → sqlframe-3.17.0}/.gitignore +0 -0
  26. {sqlframe-3.15.1 → sqlframe-3.17.0}/.pre-commit-config.yaml +0 -0
  27. {sqlframe-3.15.1 → sqlframe-3.17.0}/.readthedocs.yaml +0 -0
  28. {sqlframe-3.15.1 → sqlframe-3.17.0}/LICENSE +0 -0
  29. {sqlframe-3.15.1 → sqlframe-3.17.0}/README.md +0 -0
  30. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/add_chatgpt_support.md +0 -0
  31. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/add_chatgpt_support/adding_ai_to_meal.jpeg +0 -0
  32. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/add_chatgpt_support/hype_train.gif +0 -0
  33. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/add_chatgpt_support/marvin_paranoid_robot.gif +0 -0
  34. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/add_chatgpt_support/nonsense_sql.png +0 -0
  35. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/add_chatgpt_support/openai_full_rewrite.png +0 -0
  36. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/add_chatgpt_support/openai_replacing_cte_names.png +0 -0
  37. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/add_chatgpt_support/sqlglot_optimized_code.png +0 -0
  38. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/add_chatgpt_support/sunny_shake_head_no.gif +0 -0
  39. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/but_wait_theres_more.gif +0 -0
  40. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/cake.gif +0 -0
  41. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/images/you_get_pyspark_api.gif +0 -0
  42. {sqlframe-3.15.1 → sqlframe-3.17.0}/blogs/sqlframe_universal_dataframe_api.md +0 -0
  43. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/configuration.md +0 -0
  44. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/databricks.md +0 -0
  45. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/docs/bigquery.md +0 -0
  46. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/docs/duckdb.md +0 -0
  47. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/docs/images/SF.png +0 -0
  48. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/docs/images/favicon.png +0 -0
  49. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/docs/images/favicon_old.png +0 -0
  50. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/docs/images/sqlframe_diagram.png +0 -0
  51. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/docs/images/sqlframe_logo.png +0 -0
  52. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/docs/postgres.md +0 -0
  53. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/images/SF.png +0 -0
  54. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/images/favicon.png +0 -0
  55. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/images/favicon_old.png +0 -0
  56. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/images/sqlframe_diagram.png +0 -0
  57. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/images/sqlframe_logo.png +0 -0
  58. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/index.md +0 -0
  59. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/redshift.md +0 -0
  60. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/requirements.txt +0 -0
  61. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/spark.md +0 -0
  62. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/standalone.md +0 -0
  63. {sqlframe-3.15.1 → sqlframe-3.17.0}/docs/stylesheets/extra.css +0 -0
  64. {sqlframe-3.15.1 → sqlframe-3.17.0}/mkdocs.yml +0 -0
  65. {sqlframe-3.15.1 → sqlframe-3.17.0}/pytest.ini +0 -0
  66. {sqlframe-3.15.1 → sqlframe-3.17.0}/renovate.json +0 -0
  67. {sqlframe-3.15.1 → sqlframe-3.17.0}/setup.cfg +0 -0
  68. {sqlframe-3.15.1 → sqlframe-3.17.0}/setup.py +0 -0
  69. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/LICENSE +0 -0
  70. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/__init__.py +0 -0
  71. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/__init__.py +0 -0
  72. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/_typing.py +0 -0
  73. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/catalog.py +0 -0
  74. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/exceptions.py +0 -0
  75. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/function_alternatives.py +0 -0
  76. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/group.py +0 -0
  77. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/mixins/__init__.py +0 -0
  78. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/mixins/catalog_mixins.py +0 -0
  79. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/mixins/dataframe_mixins.py +0 -0
  80. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/mixins/readwriter_mixins.py +0 -0
  81. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/mixins/table_mixins.py +0 -0
  82. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/normalize.py +0 -0
  83. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/operations.py +0 -0
  84. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/readerwriter.py +0 -0
  85. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/table.py +0 -0
  86. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/transforms.py +0 -0
  87. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/types.py +0 -0
  88. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/udf.py +0 -0
  89. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/util.py +0 -0
  90. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/base/window.py +0 -0
  91. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/__init__.py +0 -0
  92. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/catalog.py +0 -0
  93. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/column.py +0 -0
  94. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/dataframe.py +0 -0
  95. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/functions.py +0 -0
  96. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/functions.pyi +0 -0
  97. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/group.py +0 -0
  98. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/readwriter.py +0 -0
  99. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/session.py +0 -0
  100. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/table.py +0 -0
  101. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/types.py +0 -0
  102. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/udf.py +0 -0
  103. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/bigquery/window.py +0 -0
  104. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/__init__.py +0 -0
  105. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/catalog.py +0 -0
  106. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/column.py +0 -0
  107. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/dataframe.py +0 -0
  108. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/functions.py +0 -0
  109. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/functions.pyi +0 -0
  110. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/group.py +0 -0
  111. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/readwriter.py +0 -0
  112. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/session.py +0 -0
  113. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/table.py +0 -0
  114. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/types.py +0 -0
  115. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/udf.py +0 -0
  116. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/databricks/window.py +0 -0
  117. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/__init__.py +0 -0
  118. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/catalog.py +0 -0
  119. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/column.py +0 -0
  120. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/dataframe.py +0 -0
  121. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/functions.py +0 -0
  122. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/functions.pyi +0 -0
  123. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/group.py +0 -0
  124. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/readwriter.py +0 -0
  125. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/session.py +0 -0
  126. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/table.py +0 -0
  127. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/types.py +0 -0
  128. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/udf.py +0 -0
  129. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/duckdb/window.py +0 -0
  130. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/__init__.py +0 -0
  131. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/catalog.py +0 -0
  132. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/column.py +0 -0
  133. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/dataframe.py +0 -0
  134. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/functions.py +0 -0
  135. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/functions.pyi +0 -0
  136. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/group.py +0 -0
  137. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/readwriter.py +0 -0
  138. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/session.py +0 -0
  139. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/table.py +0 -0
  140. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/types.py +0 -0
  141. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/udf.py +0 -0
  142. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/postgres/window.py +0 -0
  143. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/__init__.py +0 -0
  144. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/catalog.py +0 -0
  145. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/column.py +0 -0
  146. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/dataframe.py +0 -0
  147. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/functions.py +0 -0
  148. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/group.py +0 -0
  149. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/readwriter.py +0 -0
  150. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/session.py +0 -0
  151. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/table.py +0 -0
  152. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/types.py +0 -0
  153. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/udf.py +0 -0
  154. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/redshift/window.py +0 -0
  155. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/__init__.py +0 -0
  156. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/catalog.py +0 -0
  157. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/column.py +0 -0
  158. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/dataframe.py +0 -0
  159. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/functions.py +0 -0
  160. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/functions.pyi +0 -0
  161. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/group.py +0 -0
  162. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/readwriter.py +0 -0
  163. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/session.py +0 -0
  164. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/table.py +0 -0
  165. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/types.py +0 -0
  166. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/udf.py +0 -0
  167. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/snowflake/window.py +0 -0
  168. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/__init__.py +0 -0
  169. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/catalog.py +0 -0
  170. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/column.py +0 -0
  171. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/dataframe.py +0 -0
  172. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/functions.py +0 -0
  173. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/functions.pyi +0 -0
  174. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/group.py +0 -0
  175. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/readwriter.py +0 -0
  176. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/table.py +0 -0
  177. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/types.py +0 -0
  178. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/udf.py +0 -0
  179. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/spark/window.py +0 -0
  180. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/__init__.py +0 -0
  181. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/catalog.py +0 -0
  182. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/column.py +0 -0
  183. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/dataframe.py +0 -0
  184. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/functions.py +0 -0
  185. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/group.py +0 -0
  186. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/readwriter.py +0 -0
  187. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/session.py +0 -0
  188. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/table.py +0 -0
  189. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/types.py +0 -0
  190. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/udf.py +0 -0
  191. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/standalone/window.py +0 -0
  192. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/testing/__init__.py +0 -0
  193. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe/testing/utils.py +0 -0
  194. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe.egg-info/SOURCES.txt +0 -0
  195. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe.egg-info/dependency_links.txt +0 -0
  196. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe.egg-info/requires.txt +0 -0
  197. {sqlframe-3.15.1 → sqlframe-3.17.0}/sqlframe.egg-info/top_level.txt +0 -0
  198. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/__init__.py +0 -0
  199. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/common_fixtures.py +0 -0
  200. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/conftest.py +0 -0
  201. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee.csv +0 -0
  202. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee.json +0 -0
  203. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee.parquet +0 -0
  204. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/.part-00000-e5965c7b-e58f-4d3c-ad56-002876814e3a-c000.snappy.parquet.crc +0 -0
  205. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/.part-00002-3fed7f18-370f-4b16-b232-504d6194eb52-c000.snappy.parquet.crc +0 -0
  206. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/.part-00004-143c5da1-d5ab-4706-8e84-0d2a324c6894-c000.snappy.parquet.crc +0 -0
  207. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/.part-00006-64f07e25-c30e-4075-acc6-b3c69c4ce80b-c000.snappy.parquet.crc +0 -0
  208. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/.part-00008-89ccad8d-df73-4ad5-8850-82ef3884db60-c000.snappy.parquet.crc +0 -0
  209. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/.part-00010-812b3382-8c7f-4c4e-9bcd-09ce8664f6e0-c000.snappy.parquet.crc +0 -0
  210. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/_delta_log/.00000000000000000000.json.crc +0 -0
  211. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/_delta_log/00000000000000000000.json +0 -0
  212. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/part-00000-e5965c7b-e58f-4d3c-ad56-002876814e3a-c000.snappy.parquet +0 -0
  213. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/part-00002-3fed7f18-370f-4b16-b232-504d6194eb52-c000.snappy.parquet +0 -0
  214. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/part-00004-143c5da1-d5ab-4706-8e84-0d2a324c6894-c000.snappy.parquet +0 -0
  215. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/part-00006-64f07e25-c30e-4075-acc6-b3c69c4ce80b-c000.snappy.parquet +0 -0
  216. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/part-00008-89ccad8d-df73-4ad5-8850-82ef3884db60-c000.snappy.parquet +0 -0
  217. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_delta/part-00010-812b3382-8c7f-4c4e-9bcd-09ce8664f6e0-c000.snappy.parquet +0 -0
  218. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/employee_extra_line.csv +0 -0
  219. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/issue_219.csv +0 -0
  220. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds1.sql +0 -0
  221. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds10.sql +0 -0
  222. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds11.sql +0 -0
  223. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds12.sql +0 -0
  224. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds13.sql +0 -0
  225. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds14.sql +0 -0
  226. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds15.sql +0 -0
  227. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds16.sql +0 -0
  228. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds17.sql +0 -0
  229. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds18.sql +0 -0
  230. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds19.sql +0 -0
  231. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds2.sql +0 -0
  232. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds20.sql +0 -0
  233. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds21.sql +0 -0
  234. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds22.sql +0 -0
  235. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds23.sql +0 -0
  236. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds24.sql +0 -0
  237. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds25.sql +0 -0
  238. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds26.sql +0 -0
  239. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds27.sql +0 -0
  240. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds28.sql +0 -0
  241. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds29.sql +0 -0
  242. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds3.sql +0 -0
  243. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds30.sql +0 -0
  244. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds31.sql +0 -0
  245. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds32.sql +0 -0
  246. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds33.sql +0 -0
  247. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds34.sql +0 -0
  248. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds35.sql +0 -0
  249. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds36.sql +0 -0
  250. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds37.sql +0 -0
  251. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds38.sql +0 -0
  252. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds39.sql +0 -0
  253. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds4.sql +0 -0
  254. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds40.sql +0 -0
  255. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds41.sql +0 -0
  256. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds42.sql +0 -0
  257. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds43.sql +0 -0
  258. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds44.sql +0 -0
  259. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds45.sql +0 -0
  260. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds46.sql +0 -0
  261. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds47.sql +0 -0
  262. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds48.sql +0 -0
  263. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds49.sql +0 -0
  264. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds5.sql +0 -0
  265. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds50.sql +0 -0
  266. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds51.sql +0 -0
  267. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds52.sql +0 -0
  268. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds53.sql +0 -0
  269. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds54.sql +0 -0
  270. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds55.sql +0 -0
  271. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds56.sql +0 -0
  272. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds57.sql +0 -0
  273. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds58.sql +0 -0
  274. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds59.sql +0 -0
  275. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds6.sql +0 -0
  276. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds60.sql +0 -0
  277. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds61.sql +0 -0
  278. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds62.sql +0 -0
  279. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds63.sql +0 -0
  280. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds64.sql +0 -0
  281. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds65.sql +0 -0
  282. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds66.sql +0 -0
  283. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds67.sql +0 -0
  284. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds68.sql +0 -0
  285. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds69.sql +0 -0
  286. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds7.sql +0 -0
  287. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds70.sql +0 -0
  288. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds71.sql +0 -0
  289. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds72.sql +0 -0
  290. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds73.sql +0 -0
  291. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds74.sql +0 -0
  292. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds75.sql +0 -0
  293. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds76.sql +0 -0
  294. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds77.sql +0 -0
  295. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds78.sql +0 -0
  296. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds79.sql +0 -0
  297. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds8.sql +0 -0
  298. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds80.sql +0 -0
  299. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds81.sql +0 -0
  300. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds82.sql +0 -0
  301. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds83.sql +0 -0
  302. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds84.sql +0 -0
  303. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds85.sql +0 -0
  304. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds86.sql +0 -0
  305. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds87.sql +0 -0
  306. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds88.sql +0 -0
  307. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds89.sql +0 -0
  308. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds9.sql +0 -0
  309. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds90.sql +0 -0
  310. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds91.sql +0 -0
  311. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds92.sql +0 -0
  312. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds93.sql +0 -0
  313. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds94.sql +0 -0
  314. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds95.sql +0 -0
  315. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds96.sql +0 -0
  316. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds97.sql +0 -0
  317. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds98.sql +0 -0
  318. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/fixtures/tpcds/tpcds99.sql +0 -0
  319. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/__init__.py +0 -0
  320. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/__init__.py +0 -0
  321. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/bigquery/__init__.py +0 -0
  322. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/bigquery/test_bigquery_catalog.py +0 -0
  323. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/bigquery/test_bigquery_dataframe.py +0 -0
  324. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/bigquery/test_bigquery_session.py +0 -0
  325. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/databricks/__init__.py +0 -0
  326. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/databricks/test_databricks_catalog.py +0 -0
  327. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/databricks/test_databricks_dataframe.py +0 -0
  328. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/databricks/test_databricks_session.py +0 -0
  329. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/duck/__init__.py +0 -0
  330. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/duck/test_duckdb_activate.py +0 -0
  331. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/duck/test_duckdb_catalog.py +0 -0
  332. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/duck/test_duckdb_dataframe.py +0 -0
  333. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/duck/test_duckdb_reader.py +0 -0
  334. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/duck/test_duckdb_session.py +0 -0
  335. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/duck/test_duckdb_udf.py +0 -0
  336. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/duck/test_tpcds.py +0 -0
  337. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/postgres/__init__.py +0 -0
  338. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/postgres/test_postgres_activate.py +0 -0
  339. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/postgres/test_postgres_catalog.py +0 -0
  340. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/postgres/test_postgres_dataframe.py +0 -0
  341. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/postgres/test_postgres_session.py +0 -0
  342. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/redshift/__init__.py +0 -0
  343. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/redshift/test_redshift_catalog.py +0 -0
  344. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/redshift/test_redshift_session.py +0 -0
  345. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/snowflake/__init__.py +0 -0
  346. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/snowflake/test_snowflake_catalog.py +0 -0
  347. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/snowflake/test_snowflake_dataframe.py +0 -0
  348. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/snowflake/test_snowflake_session.py +0 -0
  349. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/spark/__init__.py +0 -0
  350. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/spark/test_spark_catalog.py +0 -0
  351. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/spark/test_spark_dataframe.py +0 -0
  352. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/test_engine_column.py +0 -0
  353. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/test_engine_reader.py +0 -0
  354. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/test_engine_session.py +0 -0
  355. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/test_engine_table.py +0 -0
  356. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/test_engine_writer.py +0 -0
  357. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/engines/test_int_testing.py +0 -0
  358. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/fixtures.py +0 -0
  359. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/test_int_dataframe_stats.py +0 -0
  360. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/test_int_grouped_data.py +0 -0
  361. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/integration/test_int_session.py +0 -0
  362. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/types.py +0 -0
  363. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/__init__.py +0 -0
  364. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/bigquery/__init__.py +0 -0
  365. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/bigquery/test_activate.py +0 -0
  366. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/conftest.py +0 -0
  367. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/databricks/__init__.py +0 -0
  368. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/databricks/test_activate.py +0 -0
  369. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/duck/__init__.py +0 -0
  370. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/duck/test_activate.py +0 -0
  371. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/postgres/__init__.py +0 -0
  372. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/postgres/test_activate.py +0 -0
  373. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/redshift/__init__.py +0 -0
  374. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/redshift/test_activate.py +0 -0
  375. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/snowflake/__init__.py +0 -0
  376. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/snowflake/test_activate.py +0 -0
  377. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/spark/__init__.py +0 -0
  378. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/spark/test_activate.py +0 -0
  379. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/__init__.py +0 -0
  380. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/fixtures.py +0 -0
  381. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_activate.py +0 -0
  382. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_column.py +0 -0
  383. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_session.py +0 -0
  384. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_types.py +0 -0
  385. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/standalone/test_window.py +0 -0
  386. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/test_activate.py +0 -0
  387. {sqlframe-3.15.1 → sqlframe-3.17.0}/tests/unit/test_util.py +0 -0
@@ -13,17 +13,8 @@ fast-test:
13
13
  local-test:
14
14
  pytest -n auto -m "fast or local"
15
15
 
16
- bigquery-test:
17
- pytest -n auto -m "bigquery"
18
-
19
- duckdb-test:
20
- pytest -n auto -m "duckdb"
21
-
22
- snowflake-test:
23
- pytest -n auto -m "snowflake"
24
-
25
- databricks-test:
26
- pytest -n auto -m "databricks"
16
+ %-test:
17
+ pytest -n auto -m "${*}"
27
18
 
28
19
  style:
29
20
  pre-commit run --all-files
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sqlframe
3
- Version: 3.15.1
3
+ Version: 3.17.0
4
4
  Summary: Turning PySpark Into a Universal DataFrame API
5
5
  Home-page: https://github.com/eakmanrq/sqlframe
6
6
  Author: Ryan Eakman
@@ -334,6 +334,7 @@ See something that you would like to see supported? [Open an issue](https://gith
334
334
  * [bool_and](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bool_and.html)
335
335
  * [bool_or](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bool_or.html)
336
336
  * [bround](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bround.html)
337
+ * [btrim](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.btrim.html)
337
338
  * [call_function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.call_function.html)
338
339
  * [cbrt](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.cbrt.html)
339
340
  * [ceil](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.ceil.html)
@@ -296,6 +296,7 @@ See something that you would like to see supported? [Open an issue](https://gith
296
296
  * [bitwise_not](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bitwise_not.html)
297
297
  * [bool_and](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bool_and.html)
298
298
  * [bool_or](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bool_or.html)
299
+ * [btrim](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.btrim.html)
299
300
  * [call_function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.call_function.html)
300
301
  * [cbrt](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.cbrt.html)
301
302
  * [ceil](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.ceil.html)
@@ -306,6 +306,7 @@ See something that you would like to see supported? [Open an issue](https://gith
306
306
  * [bitwise_not](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bitwise_not.html)
307
307
  * [bool_and](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bool_and.html)
308
308
  * [bool_or](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bool_or.html)
309
+ * [btrim](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.btrim.html)
309
310
  * [call_function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.call_function.html)
310
311
  * [cbrt](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.cbrt.html)
311
312
  * [ceil](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.ceil.html)
@@ -332,6 +332,7 @@ See something that you would like to see supported? [Open an issue](https://gith
332
332
  * [bool_or](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bool_or.html)
333
333
  * [bround](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.bround.html)
334
334
  * [Input must be a fixed-point nnumber](https://docs.snowflake.com/en/sql-reference/data-types-numeric.html#label-data-types-for-fixed-point-numbers)
335
+ * [btrim](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.btrim.html)
335
336
  * [call_function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.call_function.html)
336
337
  * [cbrt](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.cbrt.html)
337
338
  * [ceil](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.ceil.html)
@@ -12,5 +12,5 @@ __version__: str
12
12
  __version_tuple__: VERSION_TUPLE
13
13
  version_tuple: VERSION_TUPLE
14
14
 
15
- __version__ = version = '3.15.1'
16
- __version_tuple__ = version_tuple = (3, 15, 1)
15
+ __version__ = version = '3.17.0'
16
+ __version_tuple__ = version_tuple = (3, 17, 0)
@@ -291,6 +291,7 @@ class Column:
291
291
  this=self.column_expression,
292
292
  alias=alias.this if isinstance(alias, exp.Column) else alias,
293
293
  )
294
+ new_expression._meta = {"display_name": name, **(new_expression._meta or {})}
294
295
  return Column(new_expression)
295
296
 
296
297
  def asc(self) -> Column:
@@ -233,6 +233,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
233
233
  last_op: Operation = Operation.INIT,
234
234
  pending_hints: t.Optional[t.List[exp.Expression]] = None,
235
235
  output_expression_container: t.Optional[OutputExpressionContainer] = None,
236
+ display_name_mapping: t.Optional[t.Dict[str, str]] = None,
236
237
  **kwargs,
237
238
  ):
238
239
  self.session = session
@@ -246,6 +247,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
246
247
  self.pending_hints = pending_hints or []
247
248
  self.output_expression_container = output_expression_container or exp.Select()
248
249
  self.temp_views: t.List[exp.Select] = []
250
+ self.display_name_mapping = display_name_mapping or {}
249
251
 
250
252
  def __getattr__(self, column_name: str) -> Column:
251
253
  return self[column_name]
@@ -385,13 +387,16 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
385
387
  return Column.ensure_cols(ensure_list(cols)) # type: ignore
386
388
 
387
389
  def _ensure_and_normalize_cols(
388
- self, cols, expression: t.Optional[exp.Select] = None
390
+ self, cols, expression: t.Optional[exp.Select] = None, skip_star_expansion: bool = False
389
391
  ) -> t.List[Column]:
390
392
  from sqlframe.base.normalize import normalize
391
393
 
392
394
  cols = self._ensure_list_of_columns(cols)
393
395
  normalize(self.session, expression or self.expression, cols)
394
- return list(flatten([self._expand_star(col) for col in cols]))
396
+ if not skip_star_expansion:
397
+ cols = list(flatten([self._expand_star(col) for col in cols]))
398
+ self._resolve_ambiguous_columns(cols)
399
+ return cols
395
400
 
396
401
  def _ensure_and_normalize_col(self, col):
397
402
  from sqlframe.base.column import Column
@@ -399,6 +404,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
399
404
 
400
405
  col = Column.ensure_col(col)
401
406
  normalize(self.session, self.expression, col)
407
+ self._resolve_ambiguous_columns(col)
402
408
  return col
403
409
 
404
410
  def _convert_leaf_to_cte(
@@ -589,6 +595,23 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
589
595
  )
590
596
  return [col]
591
597
 
598
+ def _update_display_name_mapping(
599
+ self, normalized_columns: t.List[Column], user_input: t.Iterable[ColumnOrName]
600
+ ) -> None:
601
+ from sqlframe.base.column import Column
602
+
603
+ normalized_aliases = [x.alias_or_name for x in normalized_columns]
604
+ user_display_names = [
605
+ x.expression.meta.get("display_name") if isinstance(x, Column) else x
606
+ for x in user_input
607
+ ]
608
+ zipped = {
609
+ k: v
610
+ for k, v in dict(zip(normalized_aliases, user_display_names)).items()
611
+ if v is not None
612
+ }
613
+ self.display_name_mapping.update(zipped)
614
+
592
615
  def _get_expressions(
593
616
  self,
594
617
  optimize: bool = True,
@@ -608,6 +631,16 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
608
631
  select_expression = select_expression.transform(
609
632
  replace_id_value, replacement_mapping
610
633
  ).assert_is(exp.Select)
634
+ for index, column in enumerate(select_expression.expressions):
635
+ column_name = quote_preserving_alias_or_name(column)
636
+ if column_name in self.display_name_mapping:
637
+ display_name_identifier = exp.to_identifier(
638
+ self.display_name_mapping[column_name], quoted=True
639
+ )
640
+ display_name_identifier._meta = {"case_sensitive": True, **(column._meta or {})}
641
+ select_expression.expressions[index] = exp.alias_(
642
+ column.unalias(), display_name_identifier, quoted=True
643
+ )
611
644
  if optimize:
612
645
  select_expression = t.cast(
613
646
  exp.Select,
@@ -745,59 +778,73 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
745
778
  kwargs["join_on_uuid"] = str(uuid4())
746
779
  return self.__class__(**object_to_dict(self, **kwargs))
747
780
 
781
+ def _resolve_ambiguous_columns(self, columns: t.Union[Column, t.List[Column]]) -> None:
782
+ if "joins" not in self.expression.args:
783
+ return
784
+
785
+ columns = ensure_list(columns)
786
+ ambiguous_cols: t.List[exp.Column] = list(
787
+ flatten(
788
+ [
789
+ sub_col
790
+ for col in columns
791
+ for sub_col in col.expression.find_all(exp.Column)
792
+ if not sub_col.table
793
+ ]
794
+ )
795
+ )
796
+ if ambiguous_cols:
797
+ join_table_identifiers = [
798
+ x.this for x in get_tables_from_expression_with_join(self.expression)
799
+ ]
800
+ cte_names_in_join = [x.this for x in join_table_identifiers]
801
+ # If we have columns that resolve to multiple CTE expressions then we want to use each CTE left-to-right
802
+ # (or right to left if a right join) and therefore we allow multiple columns with the same
803
+ # name in the result. This matches the behavior of Spark.
804
+ resolved_column_position: t.Dict[exp.Column, int] = {
805
+ col.copy(): -1 for col in ambiguous_cols
806
+ }
807
+ for ambiguous_col in ambiguous_cols:
808
+ ctes = (
809
+ list(reversed(self.expression.ctes))
810
+ if self.expression.args["joins"][0].args.get("side", "") == "right"
811
+ else self.expression.ctes
812
+ )
813
+ ctes_with_column = [
814
+ cte
815
+ for cte in ctes
816
+ if cte.alias_or_name in cte_names_in_join
817
+ and ambiguous_col.alias_or_name in cte.this.named_selects
818
+ ]
819
+ # Check if there is a CTE with this column that we haven't used before. If so, use it. Otherwise,
820
+ # use the same CTE we used before
821
+ cte = seq_get(ctes_with_column, resolved_column_position[ambiguous_col] + 1)
822
+ if cte:
823
+ resolved_column_position[ambiguous_col] += 1
824
+ else:
825
+ cte = ctes_with_column[resolved_column_position[ambiguous_col]]
826
+ ambiguous_col.set("table", exp.to_identifier(cte.alias_or_name))
827
+
748
828
  @operation(Operation.SELECT)
749
829
  def select(self, *cols, **kwargs) -> Self:
750
- from sqlframe.base.column import Column
751
-
752
830
  if not cols:
753
831
  return self
754
832
 
755
833
  if isinstance(cols[0], list):
756
834
  cols = cols[0] # type: ignore
757
835
  columns = self._ensure_and_normalize_cols(cols)
836
+ if "skip_update_display_name_mapping" not in kwargs:
837
+ unexpanded_columns = self._ensure_and_normalize_cols(cols, skip_star_expansion=True)
838
+ user_cols = list(cols)
839
+ star_columns = []
840
+ for index, user_col in enumerate(cols):
841
+ if "*" in (user_col if isinstance(user_col, str) else user_col.alias_or_name):
842
+ star_columns.append(index)
843
+ for index in star_columns:
844
+ unexpanded_columns.pop(index)
845
+ user_cols.pop(index)
846
+ self._update_display_name_mapping(unexpanded_columns, user_cols)
758
847
  kwargs["append"] = kwargs.get("append", False)
759
- if self.expression.args.get("joins"):
760
- ambiguous_cols: t.List[exp.Column] = list(
761
- flatten(
762
- [
763
- sub_col
764
- for col in columns
765
- for sub_col in col.expression.find_all(exp.Column)
766
- if not sub_col.table
767
- ]
768
- )
769
- )
770
- if ambiguous_cols:
771
- join_table_identifiers = [
772
- x.this for x in get_tables_from_expression_with_join(self.expression)
773
- ]
774
- cte_names_in_join = [x.this for x in join_table_identifiers]
775
- # If we have columns that resolve to multiple CTE expressions then we want to use each CTE left-to-right
776
- # (or right to left if a right join) and therefore we allow multiple columns with the same
777
- # name in the result. This matches the behavior of Spark.
778
- resolved_column_position: t.Dict[exp.Column, int] = {
779
- col.copy(): -1 for col in ambiguous_cols
780
- }
781
- for ambiguous_col in ambiguous_cols:
782
- ctes = (
783
- list(reversed(self.expression.ctes))
784
- if self.expression.args["joins"][0].args.get("side", "") == "right"
785
- else self.expression.ctes
786
- )
787
- ctes_with_column = [
788
- cte
789
- for cte in ctes
790
- if cte.alias_or_name in cte_names_in_join
791
- and ambiguous_col.alias_or_name in cte.this.named_selects
792
- ]
793
- # Check if there is a CTE with this column that we haven't used before. If so, use it. Otherwise,
794
- # use the same CTE we used before
795
- cte = seq_get(ctes_with_column, resolved_column_position[ambiguous_col] + 1)
796
- if cte:
797
- resolved_column_position[ambiguous_col] += 1
798
- else:
799
- cte = ctes_with_column[resolved_column_position[ambiguous_col]]
800
- ambiguous_col.set("table", exp.to_identifier(cte.alias_or_name))
801
848
  # If an expression is `CAST(x AS DATETYPE)` then we want to alias so that `x` is the result column name
802
849
  columns = [
803
850
  col.alias(col.expression.alias_or_name)
@@ -846,6 +893,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
846
893
  @operation(Operation.SELECT)
847
894
  def agg(self, *exprs, **kwargs) -> Self:
848
895
  cols = self._ensure_and_normalize_cols(exprs)
896
+ self._update_display_name_mapping(cols, exprs)
849
897
  return self.groupBy().agg(*cols)
850
898
 
851
899
  @operation(Operation.FROM)
@@ -1045,7 +1093,9 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
1045
1093
  new_df = self.copy(expression=join_expression)
1046
1094
  new_df.pending_join_hints.extend(self.pending_join_hints)
1047
1095
  new_df.pending_hints.extend(other_df.pending_hints)
1048
- new_df = new_df.select.__wrapped__(new_df, *select_column_names) # type: ignore
1096
+ new_df = new_df.select.__wrapped__( # type: ignore
1097
+ new_df, *select_column_names, skip_update_display_name_mapping=True
1098
+ )
1049
1099
  return new_df
1050
1100
 
1051
1101
  @operation(Operation.ORDER_BY)
@@ -1435,20 +1485,18 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
1435
1485
  def withColumnRenamed(self, existing: str, new: str) -> Self:
1436
1486
  expression = self.expression.copy()
1437
1487
  existing = self.session._normalize_string(existing)
1438
- new = self.session._normalize_string(new)
1439
- existing_columns = [
1440
- expression
1441
- for expression in expression.expressions
1442
- if expression.alias_or_name == existing
1443
- ]
1444
- if not existing_columns:
1488
+ columns = self._get_outer_select_columns(expression)
1489
+ results = []
1490
+ found_match = False
1491
+ for column in columns:
1492
+ if column.alias_or_name == existing:
1493
+ column = column.alias(new)
1494
+ self._update_display_name_mapping([column], [new])
1495
+ found_match = True
1496
+ results.append(column)
1497
+ if not found_match:
1445
1498
  raise ValueError("Tried to rename a column that doesn't exist")
1446
- for existing_column in existing_columns:
1447
- if isinstance(existing_column, exp.Column):
1448
- existing_column.replace(exp.alias_(existing_column, new))
1449
- else:
1450
- existing_column.set("alias", exp.to_identifier(new))
1451
- return self.copy(expression=expression)
1499
+ return self.select.__wrapped__(self, *results, skip_update_display_name_mapping=True) # type: ignore
1452
1500
 
1453
1501
  @operation(Operation.SELECT)
1454
1502
  def withColumns(self, *colsMap: t.Dict[str, Column]) -> Self:
@@ -1489,23 +1537,27 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
1489
1537
  if len(colsMap) != 1:
1490
1538
  raise ValueError("Only a single map is supported")
1491
1539
  col_map = {
1492
- self._ensure_and_normalize_col(k).alias_or_name: self._ensure_and_normalize_col(v)
1540
+ self._ensure_and_normalize_col(k): (self._ensure_and_normalize_col(v), k)
1493
1541
  for k, v in colsMap[0].items()
1494
1542
  }
1495
1543
  existing_cols = self._get_outer_select_columns(self.expression)
1496
1544
  existing_col_names = [x.alias_or_name for x in existing_cols]
1497
1545
  select_columns = existing_cols
1498
- for column_name, col_value in col_map.items():
1546
+ for col, (col_value, display_name) in col_map.items():
1547
+ column_name = col.alias_or_name
1499
1548
  existing_col_index = (
1500
1549
  existing_col_names.index(column_name) if column_name in existing_col_names else None
1501
1550
  )
1502
1551
  if existing_col_index is not None:
1503
1552
  select_columns[existing_col_index] = col_value.alias( # type: ignore
1504
- column_name
1505
- ).expression
1553
+ display_name
1554
+ )
1506
1555
  else:
1507
- select_columns.append(col_value.alias(column_name))
1508
- return self.select.__wrapped__(self, *select_columns) # type: ignore
1556
+ select_columns.append(col_value.alias(display_name))
1557
+ self._update_display_name_mapping(
1558
+ [col for col in col_map], [name for _, name in col_map.values()]
1559
+ )
1560
+ return self.select.__wrapped__(self, *select_columns, skip_update_display_name_mapping=True) # type: ignore
1509
1561
 
1510
1562
  @operation(Operation.SELECT)
1511
1563
  def drop(self, *cols: t.Union[str, Column]) -> Self:
@@ -43,7 +43,7 @@ def func_metadata(unsupported_engines: t.Optional[t.Union[str, t.List[str]]] = N
43
43
  col_name = col_name.this
44
44
  alias_name = f"{func.__name__}__{col_name or ''}__"
45
45
  # BigQuery has restrictions on alias names so we constrain it to alphanumeric characters and underscores
46
- return result.alias(re.sub("\W", "_", alias_name)) # type: ignore
46
+ return result.alias(re.sub(r"\W", "_", alias_name)) # type: ignore
47
47
  return result
48
48
 
49
49
  wrapper.unsupported_engines = ( # type: ignore
@@ -39,11 +39,19 @@ def col(column_name: t.Union[ColumnOrName, t.Any]) -> Column:
39
39
 
40
40
  dialect = _BaseSession().input_dialect
41
41
  if isinstance(column_name, str):
42
- return Column(
43
- expression.to_column(column_name, dialect=dialect).transform(
44
- dialect.normalize_identifier
45
- )
42
+ col_expression = expression.to_column(column_name, dialect=dialect).transform(
43
+ dialect.normalize_identifier
46
44
  )
45
+ case_sensitive_expression = expression.to_column(column_name, dialect=dialect)
46
+ if not isinstance(
47
+ case_sensitive_expression, (expression.Star, expression.Literal, expression.Null)
48
+ ):
49
+ col_expression._meta = {
50
+ "display_name": case_sensitive_expression.this.this,
51
+ **(col_expression._meta or {}),
52
+ }
53
+
54
+ return Column(col_expression)
47
55
  return Column(column_name)
48
56
 
49
57
 
@@ -2851,12 +2859,14 @@ def bool_or(col: ColumnOrName) -> Column:
2851
2859
  return Column.invoke_expression_over_column(col, expression.LogicalOr)
2852
2860
 
2853
2861
 
2854
- @meta(unsupported_engines="*")
2862
+ @meta()
2855
2863
  def btrim(str: ColumnOrName, trim: t.Optional[ColumnOrName] = None) -> Column:
2856
2864
  if trim is not None:
2857
- return Column.invoke_anonymous_function(str, "btrim", trim)
2865
+ return Column.invoke_expression_over_column(
2866
+ str, expression.Trim, expression=Column.ensure_col(trim).column_expression
2867
+ )
2858
2868
  else:
2859
- return Column.invoke_anonymous_function(str, "btrim")
2869
+ return Column.invoke_expression_over_column(str, expression.Trim)
2860
2870
 
2861
2871
 
2862
2872
  @meta(unsupported_engines="*")
@@ -507,9 +507,14 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
507
507
  result = self._cur.fetchall()
508
508
  if not self._cur.description:
509
509
  return []
510
+ case_sensitive_cols = []
511
+ for col in self._cur.description:
512
+ col_id = exp.parse_identifier(col[0], dialect=self.execution_dialect)
513
+ col_id._meta = {"case_sensitive": True, **(col_id._meta or {})}
514
+ case_sensitive_cols.append(col_id)
510
515
  columns = [
511
- normalize_string(x[0], from_dialect="execution", to_dialect="output", is_column=True)
512
- for x in self._cur.description
516
+ normalize_string(x, from_dialect="execution", to_dialect="output")
517
+ for x in case_sensitive_cols
513
518
  ]
514
519
  return [self._to_row(columns, row) for row in result]
515
520
 
@@ -79,17 +79,18 @@ class SparkSession(
79
79
  if skip_rows:
80
80
  return []
81
81
  assert self._last_df is not None
82
- return [
83
- Row(
84
- **{
85
- normalize_string(
86
- k, from_dialect="execution", to_dialect="output", is_column=True
87
- ): v
88
- for k, v in row.asDict().items()
89
- }
90
- )
91
- for row in self._last_df.collect()
92
- ]
82
+ results = []
83
+ for row in self._last_df.collect():
84
+ rows_normalized = {}
85
+ for k, v in row.asDict().items():
86
+ col_id = exp.parse_identifier(k, dialect=self.execution_dialect)
87
+ col_id._meta = {"case_sensitive": True, **(col_id._meta or {})}
88
+ col_name = normalize_string(
89
+ col_id, from_dialect="execution", to_dialect="output", is_column=True
90
+ )
91
+ rows_normalized[col_name] = v
92
+ results.append(Row(**rows_normalized))
93
+ return results
93
94
 
94
95
  def _execute(self, sql: str) -> None:
95
96
  self._last_df = self.spark_session.sql(sql)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sqlframe
3
- Version: 3.15.1
3
+ Version: 3.17.0
4
4
  Summary: Turning PySpark Into a Universal DataFrame API
5
5
  Home-page: https://github.com/eakmanrq/sqlframe
6
6
  Author: Ryan Eakman
@@ -3,6 +3,8 @@ from __future__ import annotations
3
3
  import typing as t
4
4
 
5
5
  from sqlframe.base.types import Row
6
+ from sqlframe.snowflake import SnowflakeSession
7
+ from sqlframe.spark import SparkSession
6
8
 
7
9
  if t.TYPE_CHECKING:
8
10
  from sqlframe.base.dataframe import BaseDataFrame
@@ -31,20 +33,30 @@ def test_show(
31
33
  ):
32
34
  employee = get_engine_df("employee")
33
35
  lit = get_func("lit", employee.session)
34
- employee = employee.select("*", lit(1).alias("one"))
36
+ col = get_func("col", employee.session)
37
+ employee = (
38
+ employee.select("EmPloyee_Id", "fname", "lnamE", "AGE", "stoRe_iD", lit(1).alias("One"))
39
+ .withColumnRenamed("sToRe_id", "SToRE_Id")
40
+ .withColumns(
41
+ {
42
+ "lNamE": col("lname"),
43
+ "tWo": lit(2),
44
+ }
45
+ )
46
+ )
35
47
  employee.show()
36
48
  captured = capsys.readouterr()
37
49
  assert (
38
50
  captured.out
39
- == """+-------------+--------+-----------+-----+----------+-----+
40
- | employee_id | fname | lname | age | store_id | one |
41
- +-------------+--------+-----------+-----+----------+-----+
42
- | 1 | Jack | Shephard | 37 | 1 | 1 |
43
- | 2 | John | Locke | 65 | 1 | 1 |
44
- | 3 | Kate | Austen | 37 | 2 | 1 |
45
- | 4 | Claire | Littleton | 27 | 2 | 1 |
46
- | 5 | Hugo | Reyes | 29 | 100 | 1 |
47
- +-------------+--------+-----------+-----+----------+-----+\n"""
51
+ == """+-------------+--------+-----------+-----+----------+-----+-----+
52
+ | EmPloyee_Id | fname | lNamE | AGE | SToRE_Id | One | tWo |
53
+ +-------------+--------+-----------+-----+----------+-----+-----+
54
+ | 1 | Jack | Shephard | 37 | 1 | 1 | 2 |
55
+ | 2 | John | Locke | 65 | 1 | 1 | 2 |
56
+ | 3 | Kate | Austen | 37 | 2 | 1 | 2 |
57
+ | 4 | Claire | Littleton | 27 | 2 | 1 | 2 |
58
+ | 5 | Hugo | Reyes | 29 | 100 | 1 | 2 |
59
+ +-------------+--------+-----------+-----+----------+-----+-----+\n"""
48
60
  )
49
61
  assert "Truncate is ignored so full results will be displayed" not in caplog.text
50
62
  employee.show(truncate=True)
@@ -58,11 +70,21 @@ def test_show_limit(
58
70
  employee = get_engine_df("employee")
59
71
  employee.show(1)
60
72
  captured = capsys.readouterr()
61
- assert (
62
- captured.out
63
- == """+-------------+-------+----------+-----+----------+
73
+ if isinstance(employee.session, SnowflakeSession):
74
+ assert (
75
+ captured.out
76
+ == """+-------------+-------+----------+-----+----------+
77
+ | EMPLOYEE_ID | FNAME | LNAME | AGE | STORE_ID |
78
+ +-------------+-------+----------+-----+----------+
79
+ | 1 | Jack | Shephard | 37 | 1 |
80
+ +-------------+-------+----------+-----+----------+\n"""
81
+ )
82
+ else:
83
+ assert (
84
+ captured.out
85
+ == """+-------------+-------+----------+-----+----------+
64
86
  | employee_id | fname | lname | age | store_id |
65
87
  +-------------+-------+----------+-----+----------+
66
88
  | 1 | Jack | Shephard | 37 | 1 |
67
89
  +-------------+-------+----------+-----+----------+\n"""
68
- )
90
+ )
@@ -241,7 +241,7 @@ def test_alias(get_session_and_func):
241
241
  DatabricksSession,
242
242
  ),
243
243
  ):
244
- assert space_result == "`a space in new name`"
244
+ assert space_result == "`A Space In New Name`"
245
245
  else:
246
246
  assert space_result == "A Space In New Name"
247
247
 
@@ -9,7 +9,7 @@ from pyspark.sql import functions as F
9
9
 
10
10
  from sqlframe.standalone import functions as SF
11
11
  from sqlframe.standalone.dataframe import StandaloneDataFrame
12
- from tests.integration.fixtures import StandaloneSession
12
+ from tests.integration.fixtures import StandaloneSession, is_snowflake
13
13
 
14
14
  if t.TYPE_CHECKING:
15
15
  from sqlframe.base.dataframe import BaseDataFrame
@@ -1780,6 +1780,7 @@ def test_with_columns_reference_another(
1780
1780
  compare_frames: t.Callable,
1781
1781
  is_bigquery: t.Callable,
1782
1782
  is_postgres: t.Callable,
1783
+ is_snowflake: t.Callable,
1783
1784
  ):
1784
1785
  # Could consider two options:
1785
1786
  # 1. Use SQLGlot optimizer to properly change the references to be expanded to avoid the issue (a rule already does this)
@@ -1792,6 +1793,14 @@ def test_with_columns_reference_another(
1792
1793
  pytest.skip(
1793
1794
  "Postgres doesn't support having selects with columns that reference each other."
1794
1795
  )
1796
+ if is_snowflake():
1797
+ # Snowflake does allow columns that reference each other but the issue is that if you do this in the final
1798
+ # select the columns are replaced with their alias version to show their display name (the case-sensitive
1799
+ # name provided by the user) and then, since the column is now aliased and case-sensitive, SF thinks
1800
+ # the column doesn't exist since the column of the same case does not exist since it was aliased.
1801
+ pytest.skip(
1802
+ "Bugged behavior introduced display names means that snowflake can no longer reference itself."
1803
+ )
1795
1804
  employee = get_df("employee")
1796
1805
  df = pyspark_employee.withColumns(
1797
1806
  {
@@ -2381,3 +2390,27 @@ def test_union_common_root_again(
2381
2390
  dfs_final = dfs_1.union(dfs_2).union(employee)
2382
2391
 
2383
2392
  compare_frames(df_final, dfs_final, compare_schema=False)
2393
+
2394
+
2395
+ # https://github.com/eakmanrq/sqlframe/issues/277
2396
+ def test_filtering_join_key(
2397
+ pyspark_employee: PySparkDataFrame,
2398
+ pyspark_store: PySparkDataFrame,
2399
+ get_df: t.Callable[[str], BaseDataFrame],
2400
+ compare_frames: t.Callable,
2401
+ ):
2402
+ df = pyspark_employee.join(
2403
+ pyspark_store,
2404
+ on="store_id",
2405
+ how="inner",
2406
+ ).filter(F.col("store_id") > 1)
2407
+
2408
+ employee = get_df("employee")
2409
+ store = get_df("store")
2410
+ dfs = employee.join(
2411
+ store,
2412
+ on="store_id",
2413
+ how="inner",
2414
+ ).filter(SF.col("store_id") > 1)
2415
+
2416
+ compare_frames(df, dfs, compare_schema=False, sort=True)
@@ -55,7 +55,7 @@ def test_with_column_duplicate_alias(standalone_employee: StandaloneDataFrame):
55
55
  # Make sure that the new columns is added with an alias to `fname`
56
56
  assert (
57
57
  df.sql(pretty=False)
58
- == "SELECT `a1`.`employee_id` AS `employee_id`, CAST(`a1`.`age` AS STRING) AS `fname`, CAST(`a1`.`lname` AS STRING) AS `lname`, `a1`.`age` AS `age`, `a1`.`store_id` AS `store_id` FROM VALUES (1, 'Jack', 'Shephard', 37, 1), (2, 'John', 'Locke', 65, 1), (3, 'Kate', 'Austen', 37, 2), (4, 'Claire', 'Littleton', 27, 2), (5, 'Hugo', 'Reyes', 29, 100) AS `a1`(`employee_id`, `fname`, `lname`, `age`, `store_id`)"
58
+ == "SELECT `a1`.`employee_id` AS `employee_id`, CAST(`a1`.`age` AS STRING) AS `fName`, CAST(`a1`.`lname` AS STRING) AS `lname`, `a1`.`age` AS `age`, `a1`.`store_id` AS `store_id` FROM VALUES (1, 'Jack', 'Shephard', 37, 1), (2, 'John', 'Locke', 65, 1), (3, 'Kate', 'Austen', 37, 2), (4, 'Claire', 'Littleton', 27, 2), (5, 'Hugo', 'Reyes', 29, 100) AS `a1`(`employee_id`, `fname`, `lname`, `age`, `store_id`)"
59
59
  )
60
60
 
61
61
 
@@ -86,7 +86,7 @@ def test_transform(standalone_employee: StandaloneDataFrame):
86
86
  df = standalone_employee.transform(cast_all_to_int).transform(sort_columns_asc)
87
87
  assert df.columns == ["age", "employee_id", "fname", "lname", "store_id"]
88
88
  assert df.sql(pretty=False, optimize=False).endswith( # type: ignore
89
- "SELECT CAST(`employee_id` AS INT) AS `employee_id`, CAST(`fname` AS INT) AS `fname`, CAST(`lname` AS INT) AS `lname`, CAST(`age` AS INT) AS `age`, CAST(`store_id` AS INT) AS `store_id` FROM `t51718876`) SELECT `age`, `employee_id`, `fname`, `lname`, `store_id` FROM `t16881256`"
89
+ "SELECT CAST(`employee_id` AS INT) AS `employee_id`, CAST(`fname` AS INT) AS `fname`, CAST(`lname` AS INT) AS `lname`, CAST(`age` AS INT) AS `age`, CAST(`store_id` AS INT) AS `store_id` FROM `t51718876`) SELECT `age` AS `age`, `employee_id` AS `employee_id`, `fname` AS `fname`, `lname` AS `lname`, `store_id` AS `store_id` FROM `t16881256`"
90
90
  )
91
91
 
92
92