sqlframe 1.11.0.tar.gz → 1.13.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (215)
  1. {sqlframe-1.11.0 → sqlframe-1.13.0}/PKG-INFO +1 -1
  2. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/bigquery.md +0 -4
  3. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/duckdb.md +1 -6
  4. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/postgres.md +0 -3
  5. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/snowflake.md +0 -4
  6. {sqlframe-1.11.0 → sqlframe-1.13.0}/setup.py +1 -1
  7. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/_version.py +2 -2
  8. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/column.py +41 -0
  9. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/dataframe.py +23 -2
  10. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/function_alternatives.py +17 -18
  11. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/functions.py +28 -17
  12. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/session.py +4 -1
  13. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/types.py +10 -0
  14. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/util.py +24 -1
  15. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/functions.py +11 -17
  16. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/functions.pyi +0 -1
  17. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/session.py +0 -1
  18. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/functions.py +0 -1
  19. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/session.py +0 -2
  20. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/session.py +0 -2
  21. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/functions.pyi +0 -1
  22. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/session.py +0 -2
  23. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe.egg-info/PKG-INFO +1 -1
  24. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe.egg-info/SOURCES.txt +1 -0
  25. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe.egg-info/requires.txt +1 -1
  26. sqlframe-1.13.0/tests/integration/engines/test_engine_column.py +27 -0
  27. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/test_engine_dataframe.py +25 -19
  28. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/test_int_functions.py +21 -60
  29. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/test_int_dataframe.py +10 -0
  30. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/test_column.py +4 -0
  31. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/test_dataframe.py +14 -0
  32. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/test_functions.py +3 -3
  33. {sqlframe-1.11.0 → sqlframe-1.13.0}/.github/CODEOWNERS +0 -0
  34. {sqlframe-1.11.0 → sqlframe-1.13.0}/.github/workflows/main.workflow.yaml +0 -0
  35. {sqlframe-1.11.0 → sqlframe-1.13.0}/.github/workflows/publish.workflow.yaml +0 -0
  36. {sqlframe-1.11.0 → sqlframe-1.13.0}/.gitignore +0 -0
  37. {sqlframe-1.11.0 → sqlframe-1.13.0}/.pre-commit-config.yaml +0 -0
  38. {sqlframe-1.11.0 → sqlframe-1.13.0}/.readthedocs.yaml +0 -0
  39. {sqlframe-1.11.0 → sqlframe-1.13.0}/LICENSE +0 -0
  40. {sqlframe-1.11.0 → sqlframe-1.13.0}/Makefile +0 -0
  41. {sqlframe-1.11.0 → sqlframe-1.13.0}/README.md +0 -0
  42. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/add_chatgpt_support.md +0 -0
  43. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/add_chatgpt_support/adding_ai_to_meal.jpeg +0 -0
  44. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/add_chatgpt_support/hype_train.gif +0 -0
  45. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/add_chatgpt_support/marvin_paranoid_robot.gif +0 -0
  46. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/add_chatgpt_support/nonsense_sql.png +0 -0
  47. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/add_chatgpt_support/openai_full_rewrite.png +0 -0
  48. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/add_chatgpt_support/openai_replacing_cte_names.png +0 -0
  49. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/add_chatgpt_support/sqlglot_optimized_code.png +0 -0
  50. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/add_chatgpt_support/sunny_shake_head_no.gif +0 -0
  51. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/but_wait_theres_more.gif +0 -0
  52. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/cake.gif +0 -0
  53. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/images/you_get_pyspark_api.gif +0 -0
  54. {sqlframe-1.11.0 → sqlframe-1.13.0}/blogs/sqlframe_universal_dataframe_api.md +0 -0
  55. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/configuration.md +0 -0
  56. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/docs/bigquery.md +0 -0
  57. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/docs/duckdb.md +0 -0
  58. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/docs/images/SF.png +0 -0
  59. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/docs/images/favicon.png +0 -0
  60. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/docs/images/favicon_old.png +0 -0
  61. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/docs/images/sqlframe_diagram.png +0 -0
  62. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/docs/images/sqlframe_logo.png +0 -0
  63. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/docs/postgres.md +0 -0
  64. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/images/SF.png +0 -0
  65. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/images/favicon.png +0 -0
  66. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/images/favicon_old.png +0 -0
  67. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/images/sqlframe_diagram.png +0 -0
  68. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/images/sqlframe_logo.png +0 -0
  69. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/index.md +0 -0
  70. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/requirements.txt +0 -0
  71. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/spark.md +0 -0
  72. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/standalone.md +0 -0
  73. {sqlframe-1.11.0 → sqlframe-1.13.0}/docs/stylesheets/extra.css +0 -0
  74. {sqlframe-1.11.0 → sqlframe-1.13.0}/mkdocs.yml +0 -0
  75. {sqlframe-1.11.0 → sqlframe-1.13.0}/pytest.ini +0 -0
  76. {sqlframe-1.11.0 → sqlframe-1.13.0}/renovate.json +0 -0
  77. {sqlframe-1.11.0 → sqlframe-1.13.0}/setup.cfg +0 -0
  78. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/LICENSE +0 -0
  79. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/__init__.py +0 -0
  80. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/__init__.py +0 -0
  81. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/_typing.py +0 -0
  82. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/catalog.py +0 -0
  83. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/decorators.py +0 -0
  84. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/exceptions.py +0 -0
  85. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/group.py +0 -0
  86. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/mixins/__init__.py +0 -0
  87. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/mixins/catalog_mixins.py +0 -0
  88. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/mixins/dataframe_mixins.py +0 -0
  89. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/mixins/readwriter_mixins.py +0 -0
  90. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/normalize.py +0 -0
  91. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/operations.py +0 -0
  92. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/readerwriter.py +0 -0
  93. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/transforms.py +0 -0
  94. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/base/window.py +0 -0
  95. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/__init__.py +0 -0
  96. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/catalog.py +0 -0
  97. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/column.py +0 -0
  98. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/dataframe.py +0 -0
  99. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/group.py +0 -0
  100. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/readwriter.py +0 -0
  101. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/types.py +0 -0
  102. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/bigquery/window.py +0 -0
  103. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/__init__.py +0 -0
  104. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/catalog.py +0 -0
  105. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/column.py +0 -0
  106. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/dataframe.py +0 -0
  107. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/functions.pyi +0 -0
  108. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/group.py +0 -0
  109. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/readwriter.py +0 -0
  110. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/types.py +0 -0
  111. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/duckdb/window.py +0 -0
  112. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/__init__.py +0 -0
  113. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/catalog.py +0 -0
  114. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/column.py +0 -0
  115. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/dataframe.py +0 -0
  116. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/functions.py +0 -0
  117. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/functions.pyi +0 -0
  118. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/group.py +0 -0
  119. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/readwriter.py +0 -0
  120. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/types.py +0 -0
  121. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/postgres/window.py +0 -0
  122. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/__init__.py +0 -0
  123. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/catalog.py +0 -0
  124. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/column.py +0 -0
  125. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/dataframe.py +0 -0
  126. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/functions.py +0 -0
  127. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/group.py +0 -0
  128. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/readwriter.py +0 -0
  129. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/session.py +0 -0
  130. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/types.py +0 -0
  131. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/redshift/window.py +0 -0
  132. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/__init__.py +0 -0
  133. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/catalog.py +0 -0
  134. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/column.py +0 -0
  135. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/dataframe.py +0 -0
  136. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/functions.py +0 -0
  137. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/group.py +0 -0
  138. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/readwriter.py +0 -0
  139. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/types.py +0 -0
  140. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/snowflake/window.py +0 -0
  141. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/__init__.py +0 -0
  142. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/catalog.py +0 -0
  143. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/column.py +0 -0
  144. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/dataframe.py +0 -0
  145. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/functions.py +0 -0
  146. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/functions.pyi +0 -0
  147. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/group.py +0 -0
  148. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/readwriter.py +0 -0
  149. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/session.py +0 -0
  150. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/types.py +0 -0
  151. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/spark/window.py +0 -0
  152. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/__init__.py +0 -0
  153. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/catalog.py +0 -0
  154. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/column.py +0 -0
  155. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/dataframe.py +0 -0
  156. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/functions.py +0 -0
  157. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/group.py +0 -0
  158. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/readwriter.py +0 -0
  159. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/session.py +0 -0
  160. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/types.py +0 -0
  161. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/standalone/window.py +0 -0
  162. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/testing/__init__.py +0 -0
  163. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe/testing/utils.py +0 -0
  164. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe.egg-info/dependency_links.txt +0 -0
  165. {sqlframe-1.11.0 → sqlframe-1.13.0}/sqlframe.egg-info/top_level.txt +0 -0
  166. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/__init__.py +0 -0
  167. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/common_fixtures.py +0 -0
  168. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/conftest.py +0 -0
  169. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/fixtures/employee.csv +0 -0
  170. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/fixtures/employee.json +0 -0
  171. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/fixtures/employee.parquet +0 -0
  172. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/fixtures/employee_extra_line.csv +0 -0
  173. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/__init__.py +0 -0
  174. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/__init__.py +0 -0
  175. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/bigquery/__init__.py +0 -0
  176. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/bigquery/test_bigquery_catalog.py +0 -0
  177. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/bigquery/test_bigquery_dataframe.py +0 -0
  178. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/bigquery/test_bigquery_session.py +0 -0
  179. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/duck/__init__.py +0 -0
  180. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/duck/test_duckdb_catalog.py +0 -0
  181. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/duck/test_duckdb_dataframe.py +0 -0
  182. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/duck/test_duckdb_reader.py +0 -0
  183. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/duck/test_duckdb_session.py +0 -0
  184. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/postgres/__init__.py +0 -0
  185. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/postgres/test_postgres_catalog.py +0 -0
  186. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/postgres/test_postgres_dataframe.py +0 -0
  187. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/postgres/test_postgres_session.py +0 -0
  188. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/redshift/__init__.py +0 -0
  189. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/redshift/test_redshift_catalog.py +0 -0
  190. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/redshift/test_redshift_session.py +0 -0
  191. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/snowflake/__init__.py +0 -0
  192. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/snowflake/test_snowflake_catalog.py +0 -0
  193. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/snowflake/test_snowflake_dataframe.py +0 -0
  194. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/snowflake/test_snowflake_session.py +0 -0
  195. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/spark/__init__.py +0 -0
  196. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/spark/test_spark_catalog.py +0 -0
  197. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/spark/test_spark_dataframe.py +0 -0
  198. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/test_engine_reader.py +0 -0
  199. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/test_engine_session.py +0 -0
  200. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/test_engine_writer.py +0 -0
  201. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/engines/test_int_testing.py +0 -0
  202. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/fixtures.py +0 -0
  203. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/test_int_dataframe_stats.py +0 -0
  204. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/test_int_grouped_data.py +0 -0
  205. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/integration/test_int_session.py +0 -0
  206. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/types.py +0 -0
  207. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/__init__.py +0 -0
  208. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/__init__.py +0 -0
  209. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/fixtures.py +0 -0
  210. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/test_dataframe_writer.py +0 -0
  211. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/test_session.py +0 -0
  212. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/test_session_case_sensitivity.py +0 -0
  213. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/test_types.py +0 -0
  214. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/standalone/test_window.py +0 -0
  215. {sqlframe-1.11.0 → sqlframe-1.13.0}/tests/unit/test_util.py +0 -0
```diff
--- sqlframe-1.11.0/PKG-INFO
+++ sqlframe-1.13.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 1.11.0
+Version: 1.13.0
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman
```
```diff
--- sqlframe-1.11.0/docs/bigquery.md
+++ sqlframe-1.13.0/docs/bigquery.md
@@ -307,7 +307,6 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [date_diff](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_diff.html)
 * [datediff](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.datediff.html)
 * [date_format](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_format.html)
-    * [The format string should be in BigQuery syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements)
 * [date_sub](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_sub.html)
 * [date_trunc](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_trunc.html)
 * [dayofmonth](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.dayofmonth.html)
@@ -442,9 +441,7 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [toDegrees](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.toDegrees.html)
 * [toRadians](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.toRadians.html)
 * [to_date](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_date.html)
-    * [The format string should be in BigQuery syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements)
 * [to_timestamp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html)
-    * [The format string should be in BigQuery syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements)
 * [translate](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.translate.html)
 * [trim](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.trim.html)
 * [trunc](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.trunc.html)
@@ -454,7 +451,6 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [unbase64](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unbase64.html)
 * [unhex](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unhex.html)
 * [unix_timestamp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unix_timestamp.html)
-    * [The format string should be in BigQuery syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements)
 * [upper](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.upper.html)
 * [var_pop](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.var_pop.html)
 * [var_samp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.var_samp.html)
```
```diff
--- sqlframe-1.11.0/docs/duckdb.md
+++ sqlframe-1.13.0/docs/duckdb.md
@@ -276,8 +276,7 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [dateadd](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.dateadd.html)
 * [date_diff](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_diff.html)
 * [datediff](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.datediff.html)
-* [date_format](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_format.html)
-    * [The format string should be in DuckDB syntax](https://duckdb.org/docs/sql/functions/dateformat.html#format-specifiers)
+* [date_format](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_format.html)
 * [date_sub](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_sub.html)
 * [date_trunc](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_trunc.html)
 * [dayofmonth](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.dayofmonth.html)
@@ -405,11 +404,8 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [toDegrees](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.toDegrees.html)
 * [toRadians](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.toRadians.html)
 * [to_date](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_date.html)
-    * [The format string should be in DuckDB syntax](https://duckdb.org/docs/sql/functions/dateformat.html#format-specifiers)
 * [to_timestamp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html)
-    * [The format string should be in DuckDB syntax](https://duckdb.org/docs/sql/functions/dateformat.html#format-specifiers)
 * [to_unix_timestamp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_unix_timestamp.html)
-    * [The format string should be in DuckDB syntax](https://duckdb.org/docs/sql/functions/dateformat.html#format-specifiers
     * The values must match the format string (null will not be returned if they do not)
 * [translate](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.translate.html)
 * [trim](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.trim.html)
@@ -420,7 +416,6 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [unbase64](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unbase64.html)
 * [unhex](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unhex.html)
 * [unix_timestamp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unix_timestamp.html)
-    * [The format string should be in DuckDB syntax](https://duckdb.org/docs/sql/functions/dateformat.html#format-specifiers)
 * [upper](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.upper.html)
 * [var_pop](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.var_pop.html)
 * [var_samp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.var_samp.html)
```
```diff
--- sqlframe-1.11.0/docs/postgres.md
+++ sqlframe-1.13.0/docs/postgres.md
@@ -284,7 +284,6 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [date_diff](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_diff.html)
 * [datediff](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.datediff.html)
 * [date_format](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_format.html)
-    * [The format string should be in Postgres syntax](https://www.postgresql.org/docs/current/functions-formatting.html#FUNCTIONS-FORMATTING-DATETIME-TABLE)
 * [date_sub](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_sub.html)
 * [date_trunc](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_trunc.html)
     * Rounded whole number is returned
@@ -397,10 +396,8 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [toDegrees](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.toDegrees.html)
 * [toRadians](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.toRadians.html)
 * [to_date](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_date.html)
-    * [The format string should be in Postgres syntax](https://www.postgresql.org/docs/current/functions-formatting.html#FUNCTIONS-FORMATTING-DATETIME-TABLE)
 * [to_number](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_number.html)
 * [to_timestamp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html)
-    * [The format string should be in Postgres syntax](https://www.postgresql.org/docs/current/functions-formatting.html#FUNCTIONS-FORMATTING-DATETIME-TABLE)
 * [translate](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.translate.html)
 * [trim](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.trim.html)
 * [trunc](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.trunc.html)
```
```diff
--- sqlframe-1.11.0/docs/snowflake.md
+++ sqlframe-1.13.0/docs/snowflake.md
@@ -307,7 +307,6 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [date_diff](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_diff.html)
 * [datediff](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.datediff.html)
 * [date_format](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_format.html)
-    * [The format string should be in Snowflake syntax](https://docs.snowflake.com/en/sql-reference/functions-conversion#label-date-time-format-conversion)
 * [date_sub](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_sub.html)
 * [date_trunc](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.date_trunc.html)
 * [dayofmonth](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.dayofmonth.html)
@@ -440,10 +439,8 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [toDegrees](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.toDegrees.html)
 * [toRadians](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.toRadians.html)
 * [to_date](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_date.html)
-    * [The format string should be in Snowflake syntax](https://docs.snowflake.com/en/sql-reference/functions-conversion#label-date-time-format-conversion)
 * [to_number](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_number.html)
 * [to_timestamp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html)
-    * [The format string should be in Snowflake syntax](https://docs.snowflake.com/en/sql-reference/functions-conversion#label-date-time-format-conversion)
 * [translate](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.translate.html)
 * [trim](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.trim.html)
 * [trunc](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.trunc.html)
@@ -451,7 +448,6 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [unbase64](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unbase64.html)
 * [unhex](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unhex.html)
 * [unix_timestamp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.unix_timestamp.html)
-    * [The format string should be in Snowflake syntax](https://docs.snowflake.com/en/sql-reference/functions-conversion#label-date-time-format-conversion)
 * [upper](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.upper.html)
 * [var_pop](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.var_pop.html)
 * [var_samp](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.var_samp.html)
```
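Note: the four documentation diffs above all remove the same caveat. In this release range, format strings for date/time functions are accepted in Spark syntax on every engine and translated to each engine's native specifiers at SQL generation time (see `format_time_from_spark` in `sqlframe/base/util.py` below). A hedged sketch of the user-facing effect, assuming a local DuckDB session:

```python
from sqlframe.duckdb import DuckDBSession
import sqlframe.duckdb.functions as F

session = DuckDBSession()
df = session.createDataFrame([("2024-01-01 12:30:00",)], ["ts"])

# Spark-style "yyyy-MM-dd" is translated to DuckDB's "%Y-%m-%d" for you;
# no engine-specific format string is needed anymore.
df.select(F.date_format("ts", "yyyy-MM-dd").alias("d")).show()
```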
```diff
--- sqlframe-1.11.0/setup.py
+++ sqlframe-1.13.0/setup.py
@@ -42,7 +42,7 @@ setup(
         "pytest-xdist>=3.6,<3.7",
         "pre-commit>=3.5;python_version=='3.8'",
         "pre-commit>=3.7,<3.8;python_version>='3.9'",
-        "ruff>=0.4.4,<0.5",
+        "ruff>=0.4.4,<0.6",
         "types-psycopg2>=2.9,<3",
     ],
     "docs": [
```
```diff
--- sqlframe-1.11.0/sqlframe/_version.py
+++ sqlframe-1.13.0/sqlframe/_version.py
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '1.11.0'
-__version_tuple__ = version_tuple = (1, 11, 0)
+__version__ = version = '1.13.0'
+__version_tuple__ = version_tuple = (1, 13, 0)
```
```diff
--- sqlframe-1.11.0/sqlframe/base/column.py
+++ sqlframe-1.13.0/sqlframe/base/column.py
@@ -407,3 +407,44 @@ class Column:
         window_expression = window.expression.copy()
         window_expression.set("this", self.column_expression)
         return Column(window_expression)
+
+    def getItem(self, key: t.Any) -> Column:
+        """
+        An expression that gets an item at position ``ordinal`` out of a list,
+        or gets an item by key out of a dict.
+
+        .. versionadded:: 1.3.0
+
+        .. versionchanged:: 3.4.0
+            Supports Spark Connect.
+
+        Parameters
+        ----------
+        key
+            a literal value, or a :class:`Column` expression.
+            The result will only be true at a location if the item matches in the column.
+
+            .. deprecated:: 3.0.0
+                :class:`Column` as a parameter is deprecated.
+
+        Returns
+        -------
+        :class:`Column`
+            Column representing the item(s) got at position out of a list or by key out of a dict.
+
+        Examples
+        --------
+        >>> df = spark.createDataFrame([([1, 2], {"key": "value"})], ["l", "d"])
+        >>> df.select(df.l.getItem(0), df.d.getItem("key")).show()
+        +----+------+
+        |l[0]|d[key]|
+        +----+------+
+        |   1| value|
+        +----+------+
+        """
+        element_at = get_func_from_session("element_at")
+        lit = get_func_from_session("lit")
+        key = lit(key) if not isinstance(key, Column) else key
+        if isinstance(key.expression, exp.Literal) and key.expression.is_number:
+            key = key + lit(1)
+        return element_at(self, key)
```
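The new `Column.getItem` is built on `element_at`: Spark's `getItem` is 0-based for arrays while `element_at` is 1-based, so numeric literal keys are incremented by one before delegation. A minimal usage sketch, assuming a local DuckDB session:

```python
from sqlframe.duckdb import DuckDBSession

session = DuckDBSession()
df = session.createDataFrame([([1, 2],)], ["l"])

# PySpark semantics: getItem(0) is the first element; under the hood it
# compiles to a 1-based element_at lookup.
df.select(df["l"].getItem(0).alias("first")).show()
```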
```diff
--- sqlframe-1.11.0/sqlframe/base/dataframe.py
+++ sqlframe-1.13.0/sqlframe/base/dataframe.py
@@ -361,7 +361,7 @@ class _BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
 
         cols = self._ensure_list_of_columns(cols)
         normalize(self.session, expression or self.expression, cols)
-        return cols
+        return list(flatten([self._expand_star(col) for col in cols]))
 
     def _ensure_and_normalize_col(self, col):
         from sqlframe.base.column import Column
@@ -514,6 +514,27 @@ class _BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         select_expressions.append(expression_select_pair)  # type: ignore
         return select_expressions
 
+    def _expand_star(self, col: Column) -> t.List[Column]:
+        from sqlframe.base.column import Column
+
+        if isinstance(col.column_expression, exp.Star):
+            return self._get_outer_select_columns(self.expression)
+        elif (
+            isinstance(col.column_expression, exp.Column)
+            and isinstance(col.column_expression.this, exp.Star)
+            and col.column_expression.args.get("table")
+        ):
+            for cte in self.expression.ctes:
+                if cte.alias_or_name == col.column_expression.args["table"].this:
+                    return [
+                        Column.ensure_col(exp.column(x.column_alias_or_name, cte.alias_or_name))
+                        for x in self._get_outer_select_columns(cte)
+                    ]
+            raise ValueError(
+                f"Could not find table to expand star: {col.column_expression.args['table']}"
+            )
+        return [col]
+
     @t.overload
     def sql(
         self,
@@ -1555,7 +1576,7 @@ class _BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         result = self.session._fetch_rows(sql)
         table = PrettyTable()
         if row := seq_get(result, 0):
-            table.field_names = list(row.asDict().keys())
+            table.field_names = row._unique_field_names
         for row in result:
             table.add_row(list(row))
         print(table)
```
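Together these changes make column normalization expand star expressions eagerly: a bare `*` becomes the outer select's column list, and a CTE-qualified `table.*` becomes that CTE's columns (with a `ValueError` when the qualifier matches no CTE). `show()` also switches to `Row._unique_field_names` (added in `sqlframe/base/types.py` below), so results with duplicate column labels no longer trip up PrettyTable. A hedged sketch of the star expansion:

```python
from sqlframe.duckdb import DuckDBSession

session = DuckDBSession()
df = session.createDataFrame([(1, 2)], ["a", "b"])

# "*" is rewritten into the concrete outer columns (a, b) before SQL
# generation, so downstream operations see an explicit column list.
df.select("*").show()
```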
```diff
--- sqlframe-1.11.0/sqlframe/base/function_alternatives.py
+++ sqlframe-1.13.0/sqlframe/base/function_alternatives.py
@@ -6,11 +6,16 @@ import re
 import typing as t
 
 from sqlglot import exp as expression
+from sqlglot.dialects.dialect import build_formatted_time
 from sqlglot.helper import ensure_list
 from sqlglot.helper import flatten as _flatten
 
 from sqlframe.base.column import Column
-from sqlframe.base.util import get_func_from_session
+from sqlframe.base.util import (
+    format_time_from_spark,
+    get_func_from_session,
+    spark_default_time_format,
+)
 
 if t.TYPE_CHECKING:
     from sqlframe.base._typing import ColumnOrLiteral, ColumnOrName
@@ -715,14 +720,10 @@
 
 
 def from_unixtime_from_timestamp(col: ColumnOrName, format: t.Optional[str] = None) -> Column:
-    from sqlframe.base.session import _BaseSession
-
-    session: _BaseSession = _BaseSession()
     lit = get_func_from_session("lit")
     col_func = get_func_from_session("col")
 
-    if format is None:
-        format = session.DEFAULT_TIME_FORMAT
+    format = lit(format or spark_default_time_format())
     return Column.invoke_expression_over_column(
         Column(
             expression.Anonymous(
@@ -731,7 +732,7 @@
             )
         ),
         expression.TimeToStr,
-        format=lit(format),
+        format=format_time_from_spark(format),  # type: ignore
     )
 
 
@@ -1135,13 +1136,11 @@ def array_intersect_using_intersection(col1: ColumnOrName, col2: ColumnOrName) -
 def element_at_using_brackets(col: ColumnOrName, value: ColumnOrLiteral) -> Column:
     col_func = get_func_from_session("col")
     lit = get_func_from_session("lit")
-    # SQLGlot will auto add 1 to whatever we pass in for the brackets even though the value is already 1 based.
-    if not isinstance(value, int):
-        raise ValueError("This dialect requires the value must be an integer")
-    value_lit = lit(value - 1)
-    return Column(
-        expression.Bracket(this=col_func(col).expression, expressions=[value_lit.expression])
-    )
+    # SQLGlot will auto add 1 to whatever we pass in for the brackets even though the value is already 1 based.
+    value = value if isinstance(value, Column) else lit(value)
+    if [x for x in value.expression.find_all(expression.Literal) if x.is_number]:
+        value = value - lit(1)
+    return Column(expression.Bracket(this=col_func(col).expression, expressions=[value.expression]))  # type: ignore
 
 
 def array_remove_using_filter(col: ColumnOrName, value: ColumnOrLiteral) -> Column:
@@ -1513,10 +1512,10 @@
     format: t.Optional[ColumnOrName] = None,
 ) -> Column:
     from sqlframe.base.functions import to_unix_timestamp
-
-    lit = get_func_from_session("lit")
+    from sqlframe.base.session import _BaseSession
 
     if not format:
-        format = lit("%Y-%m-%d %H:%M:%S")
-
+        format = _BaseSession().output_dialect.TIME_FORMAT
+    else:
+        format = format_time_from_spark(format)
     return to_unix_timestamp(timestamp, format)
```
```diff
--- sqlframe-1.11.0/sqlframe/base/functions.py
+++ sqlframe-1.13.0/sqlframe/base/functions.py
@@ -6,12 +6,14 @@ import decimal
 import logging
 import typing as t
 
+from sqlglot import Dialect
 from sqlglot import exp as expression
 from sqlglot.helper import ensure_list
 from sqlglot.helper import flatten as _flatten
 
 from sqlframe.base.column import Column
 from sqlframe.base.decorators import func_metadata as meta
+from sqlframe.base.util import format_time_from_spark, spark_default_time_format
 
 if t.TYPE_CHECKING:
     from pyspark.sql.session import SparkContext
@@ -695,7 +697,7 @@ def date_format(col: ColumnOrName, format: str) -> Column:
     return Column.invoke_expression_over_column(
         Column(expression.TimeStrToTime(this=Column.ensure_col(col).expression)),
         expression.TimeToStr,
-        format=lit(format),
+        format=format_time_from_spark(format),
     )
 
 
@@ -875,17 +877,21 @@
 
 @meta()
 def to_date(col: ColumnOrName, format: t.Optional[str] = None) -> Column:
+    format = lit(format or spark_default_time_format())
     if format is not None:
         return Column.invoke_expression_over_column(
-            col, expression.TsOrDsToDate, format=lit(format)
+            col, expression.TsOrDsToDate, format=format_time_from_spark(format)
         )
     return Column.invoke_expression_over_column(col, expression.TsOrDsToDate)
 
 
 @meta()
 def to_timestamp(col: ColumnOrName, format: t.Optional[str] = None) -> Column:
+    format = lit(format or spark_default_time_format())
     if format is not None:
-        return Column.invoke_expression_over_column(col, expression.StrToTime, format=lit(format))
+        return Column.invoke_expression_over_column(
+            col, expression.StrToTime, format=format_time_from_spark(format)
+        )
 
     return Column.ensure_col(col).cast("timestamp")
 
@@ -916,23 +922,23 @@ def last_day(col: ColumnOrName) -> Column:
 
 @meta()
 def from_unixtime(col: ColumnOrName, format: t.Optional[str] = None) -> Column:
-    from sqlframe.base.session import _BaseSession
-
-    if format is None:
-        format = _BaseSession().DEFAULT_TIME_FORMAT
-    return Column.invoke_expression_over_column(col, expression.UnixToStr, format=lit(format))
+    format = lit(format or spark_default_time_format())
+    return Column.invoke_expression_over_column(
+        col,
+        expression.UnixToStr,
+        format=format_time_from_spark(format),  # type: ignore
+    )
 
 
 @meta()
 def unix_timestamp(
     timestamp: t.Optional[ColumnOrName] = None, format: t.Optional[str] = None
 ) -> Column:
-    from sqlframe.base.session import _BaseSession
-
-    if format is None:
-        format = _BaseSession().DEFAULT_TIME_FORMAT
+    format = lit(format or spark_default_time_format())
     return Column.invoke_expression_over_column(
-        timestamp, expression.StrToUnix, format=lit(format)
+        timestamp,
+        expression.StrToUnix,
+        format=format_time_from_spark(format),  # type: ignore
     ).cast("bigint")
 
 
@@ -1923,7 +1929,9 @@ def call_function(funcName: str, *cols: ColumnOrName) -> Column:
     cols = ensure_list(cols)  # type: ignore
     if len(cols) > 1:
         return Column.invoke_anonymous_function(cols[0], funcName, *cols[1:])
-    return Column.invoke_anonymous_function(cols[0], funcName)
+    elif len(cols) == 1:
+        return Column.invoke_anonymous_function(cols[0], funcName)
+    return Column.invoke_anonymous_function(None, funcName)
 
 
 # @meta(unsupported_engines="*")
@@ -2028,7 +2036,7 @@ def character_length(str: ColumnOrName) -> Column:
     return Column.invoke_anonymous_function(str, "character_length")
 
 
-@meta(unsupported_engines="*")
+@meta()
 def contains(left: ColumnOrName, right: ColumnOrName) -> Column:
     return Column.invoke_anonymous_function(left, "contains", right)
 
@@ -5104,8 +5112,11 @@ def to_unix_timestamp(
     [Row(r=None)]
     >>> spark.conf.unset("spark.sql.session.timeZone")
     """
+    format = lit(spark_default_time_format()) if format is None else format
     if format is not None:
-        return Column.invoke_expression_over_column(timestamp, expression.StrToUnix, format=format)
+        return Column.invoke_expression_over_column(
+            timestamp, expression.StrToUnix, format=format_time_from_spark(format)
+        )
     else:
         return Column.invoke_expression_over_column(timestamp, expression.StrToUnix)
 
@@ -5322,7 +5333,7 @@ def ucase(str: ColumnOrName) -> Column:
     return Column.invoke_expression_over_column(str, expression.Upper)
 
 
-@meta()
+@meta(unsupported_engines=["bigquery", "snowflake"])
 def unix_date(col: ColumnOrName) -> Column:
     """Returns the number of days since 1970-01-01.
 
```
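In the base functions, `date_format`, `to_date`, `to_timestamp`, `from_unixtime`, `unix_timestamp`, and `to_unix_timestamp` now default to Spark's time format and route every format string through `format_time_from_spark`. (The `format = lit(format or ...)` assignment makes the following `if format is not None:` checks always true, so the no-format branches become dead code.) `call_function` also gains a zero-argument path; a hedged sketch:

```python
import sqlframe.base.functions as F

# Zero-argument call: previously this indexed cols[0] and raised IndexError.
pi = F.call_function("PI")
print(pi.expression.sql())  # PI()
```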
```diff
--- sqlframe-1.11.0/sqlframe/base/session.py
+++ sqlframe-1.13.0/sqlframe/base/session.py
@@ -72,7 +72,6 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, CONN]):
     _df: t.Type[DF]
 
     SANITIZE_COLUMN_NAMES = False
-    DEFAULT_TIME_FORMAT = "yyyy-MM-dd HH:mm:ss"
 
     def __init__(
         self,
@@ -114,6 +113,10 @@
     def _cur(self) -> DBAPICursorWithPandas:
         return self._conn.cursor()
 
+    @property
+    def default_time_format(self) -> str:
+        return self.output_dialect.TIME_FORMAT.strip("'")
+
     def _sanitize_column_name(self, name: str) -> str:
         if self.SANITIZE_COLUMN_NAMES:
             return name.replace("(", "_").replace(")", "_")
```
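`default_time_format` replaces the removed per-engine `DEFAULT_TIME_FORMAT` constants by deriving the format from the output dialect's `TIME_FORMAT` (stripping sqlglot's quoting). A minimal sketch:

```python
from sqlframe.duckdb import DuckDBSession

session = DuckDBSession()
# Derived from sqlglot's DuckDB dialect rather than a hard-coded constant.
print(session.default_time_format)  # e.g. %Y-%m-%d %H:%M:%S
```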
```diff
--- sqlframe-1.11.0/sqlframe/base/types.py
+++ sqlframe-1.13.0/sqlframe/base/types.py
@@ -416,3 +416,13 @@ class Row(tuple):
             )
         else:
             return "<Row(%s)>" % ", ".join(repr(field) for field in self)
+
+    # SQLFrame Specific
+    @property
+    def _unique_field_names(self) -> t.List[str]:
+        fields = []
+        for i, field in enumerate(self.__fields__):
+            if field in fields:
+                field = field + "_" + str(i)
+            fields.append(field)
+        return fields
```
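`_unique_field_names` suffixes duplicate field names with their position, which is what lets `show()` hand them to PrettyTable (PrettyTable rejects duplicate headers). An illustration of the dedup rule:

```python
from sqlframe.base.types import Row

row = Row(a=1, b=2)
print(row._unique_field_names)  # ['a', 'b'] – unchanged when names are unique
# For a row whose __fields__ were ["a", "a"] (e.g. the same column selected
# twice), the property yields ["a", "a_1"].
```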
```diff
--- sqlframe-1.11.0/sqlframe/base/util.py
+++ sqlframe-1.13.0/sqlframe/base/util.py
@@ -13,7 +13,12 @@ if t.TYPE_CHECKING:
     from pyspark.sql.dataframe import SparkSession as PySparkSession
 
     from sqlframe.base import types
-    from sqlframe.base._typing import OptionalPrimitiveType, SchemaInput
+    from sqlframe.base._typing import (
+        ColumnOrLiteral,
+        OptionalPrimitiveType,
+        SchemaInput,
+    )
+    from sqlframe.base.column import Column
     from sqlframe.base.session import _BaseSession
     from sqlframe.base.types import StructType
 
@@ -342,3 +347,21 @@
         ]
     )
     raise NotImplementedError(f"Unsupported data type: {sqlglot_dtype}")
+
+
+def format_time_from_spark(value: ColumnOrLiteral) -> Column:
+    from sqlframe.base.column import Column
+    from sqlframe.base.session import _BaseSession
+
+    lit = get_func_from_session("lit")
+    value = lit(value) if not isinstance(value, Column) else value
+    formatted_time = Dialect["spark"].format_time(value.expression)
+    return Column(
+        _BaseSession()
+        .output_dialect.generator()
+        .format_time(exp.StrToTime(this=exp.Null(), format=formatted_time))
+    )
+
+
+def spark_default_time_format() -> str:
+    return Dialect["spark"].TIME_FORMAT.strip("'")
```
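`format_time_from_spark` parses a Spark-syntax format string with sqlglot's Spark dialect and re-renders it with the session's output dialect; `spark_default_time_format` exposes Spark's default (`yyyy-MM-dd HH:mm:ss`). The translation step in isolation, as a sqlglot-only sketch (assuming a sqlglot version where `Dialect.get_or_raise` returns a dialect instance):

```python
from sqlglot import exp
from sqlglot.dialects.dialect import Dialect

spark = Dialect.get_or_raise("spark")
duckdb = Dialect.get_or_raise("duckdb")

# Spark format -> internal strftime-style mapping -> DuckDB format
fmt = spark.format_time(exp.Literal.string("yyyy-MM-dd HH:mm:ss"))
print(duckdb.generator().format_time(exp.StrToTime(this=exp.Null(), format=fmt)))
# e.g. '%Y-%m-%d %H:%M:%S'
```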
```diff
--- sqlframe-1.11.0/sqlframe/bigquery/functions.py
+++ sqlframe-1.13.0/sqlframe/bigquery/functions.py
@@ -7,7 +7,11 @@
 from sqlglot import exp as sqlglot_expression
 
 import sqlframe.base.functions
-from sqlframe.base.util import get_func_from_session
+from sqlframe.base.util import (
+    format_time_from_spark,
+    get_func_from_session,
+    spark_default_time_format,
+)
 from sqlframe.bigquery.column import Column
 
 if t.TYPE_CHECKING:
@@ -52,6 +56,7 @@ from sqlframe.base.function_alternatives import ( # noqa
     make_date_from_date_func as make_date,
     to_date_from_timestamp as to_date,
     last_day_with_cast as last_day,
+    sha1_force_sha1_and_to_hex as sha,
     sha1_force_sha1_and_to_hex as sha1,
     hash_from_farm_fingerprint as hash,
     base64_from_blob as base64,
@@ -147,23 +152,15 @@ def from_unixtime(col: ColumnOrName, format: t.Optional[str] = None) -> Column:
 
     session: _BaseSession = _BaseSession()
     lit = get_func_from_session("lit")
-    to_timestamp = get_func_from_session("to_timestamp")
 
     expressions = [Column.ensure_col(col).expression]
-    if format is not None:
-        expressions.append(lit(format).expression)
     return Column(
         sqlglot_expression.Anonymous(
             this="FORMAT_TIMESTAMP",
             expressions=[
-                lit(session.DEFAULT_TIME_FORMAT).expression,
-                to_timestamp(
-                    Column(
-                        sqlglot_expression.Anonymous(
-                            this="TIMESTAMP_SECONDS", expressions=expressions
-                        )
-                    ),
-                    format,
+                lit(session.default_time_format).expression,
+                Column(
+                    sqlglot_expression.Anonymous(this="TIMESTAMP_SECONDS", expressions=expressions)
                 ).expression,
             ],
         )
@@ -173,12 +170,9 @@ def from_unixtime(col: ColumnOrName, format: t.Optional[str] = None) -> Column:
 def unix_timestamp(
     timestamp: t.Optional[ColumnOrName] = None, format: t.Optional[str] = None
 ) -> Column:
-    from sqlframe.base.session import _BaseSession
-
     lit = get_func_from_session("lit")
 
-    if format is None:
-        format = _BaseSession().DEFAULT_TIME_FORMAT
+    format = lit(format or spark_default_time_format())
     return Column(
         sqlglot_expression.Anonymous(
             this="UNIX_SECONDS",
@@ -186,7 +180,7 @@
             sqlglot_expression.Anonymous(
                 this="PARSE_TIMESTAMP",
                 expressions=[
-                    lit(format).expression,
+                    format_time_from_spark(format).expression,
                     Column.ensure_col(timestamp).expression,
                     lit("UTC").expression,
                 ],
```
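On BigQuery, `from_unixtime` now formats with the dialect-derived `session.default_time_format`, and `unix_timestamp` pushes the Spark-syntax format through `format_time_from_spark` into `PARSE_TIMESTAMP`. Illustrative only (requires BigQuery credentials):

```python
from sqlframe.bigquery import BigQuerySession
import sqlframe.bigquery.functions as F

session = BigQuerySession()  # assumes default Google application credentials
df = session.createDataFrame([("2024-01-01 00:00:00",)], ["ts"])

# Spark-style format; compiles to roughly
# UNIX_SECONDS(PARSE_TIMESTAMP('%Y-%m-%d %H:%M:%S', ts, 'UTC'))
df.select(F.unix_timestamp("ts", "yyyy-MM-dd HH:mm:ss").alias("unix")).show()
```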
```diff
--- sqlframe-1.11.0/sqlframe/bigquery/functions.pyi
+++ sqlframe-1.13.0/sqlframe/bigquery/functions.pyi
@@ -267,7 +267,6 @@ from sqlframe.base.functions import trunc as trunc
 from sqlframe.base.functions import ucase as ucase
 from sqlframe.base.functions import unbase64 as unbase64
 from sqlframe.base.functions import unhex as unhex
-from sqlframe.base.functions import unix_date as unix_date
 from sqlframe.base.functions import upper as upper
 from sqlframe.base.functions import user as user
 from sqlframe.base.functions import var_pop as var_pop
```
```diff
--- sqlframe-1.11.0/sqlframe/bigquery/session.py
+++ sqlframe-1.13.0/sqlframe/bigquery/session.py
@@ -32,7 +32,6 @@ class BigQuerySession(
     _writer = BigQueryDataFrameWriter
     _df = BigQueryDataFrame
 
-    DEFAULT_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
     QUALIFY_INFO_SCHEMA_WITH_DATABASE = True
     SANITIZE_COLUMN_NAMES = True
 
```
```diff
--- sqlframe-1.11.0/sqlframe/duckdb/functions.py
+++ sqlframe-1.13.0/sqlframe/duckdb/functions.py
@@ -46,5 +46,4 @@ from sqlframe.base.function_alternatives import ( # noqa
     array_max_from_sort as array_max,
     sequence_from_generate_series as sequence,
     try_element_at_zero_based as try_element_at,
-    to_unix_timestamp_include_default_format as to_unix_timestamp,
 )
```
```diff
--- sqlframe-1.11.0/sqlframe/duckdb/session.py
+++ sqlframe-1.13.0/sqlframe/duckdb/session.py
@@ -33,8 +33,6 @@ class DuckDBSession(
     _writer = DuckDBDataFrameWriter
     _df = DuckDBDataFrame
 
-    DEFAULT_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
-
     def __init__(self, conn: t.Optional[DuckDBPyConnection] = None, *args, **kwargs):
         import duckdb
         from duckdb.typing import VARCHAR
```
```diff
--- sqlframe-1.11.0/sqlframe/postgres/session.py
+++ sqlframe-1.13.0/sqlframe/postgres/session.py
@@ -34,8 +34,6 @@ class PostgresSession(
     _writer = PostgresDataFrameWriter
     _df = PostgresDataFrame
 
-    DEFAULT_TIME_FORMAT = "yyyy-MM-dd HH:MI:SS"
-
     def __init__(self, conn: t.Optional[psycopg2_connection] = None):
         if not hasattr(self, "_conn"):
             super().__init__(conn)
```
```diff
--- sqlframe-1.11.0/sqlframe/snowflake/functions.pyi
+++ sqlframe-1.13.0/sqlframe/snowflake/functions.pyi
@@ -207,7 +207,6 @@ from sqlframe.base.functions import (
     trim as trim,
     trunc as trunc,
     ucase as ucase,
-    unix_date as unix_date,
    upper as upper,
     user as user,
     var_pop as var_pop,
```
```diff
--- sqlframe-1.11.0/sqlframe/snowflake/session.py
+++ sqlframe-1.13.0/sqlframe/snowflake/session.py
@@ -57,8 +57,6 @@ class SnowflakeSession(
     _writer = SnowflakeDataFrameWriter
     _df = SnowflakeDataFrame
 
-    DEFAULT_TIME_FORMAT = "YYYY-MM-DD HH:MI:SS"
-
     def __init__(self, conn: t.Optional[SnowflakeConnection] = None):
         import snowflake
 
```
```diff
--- sqlframe-1.11.0/sqlframe.egg-info/PKG-INFO
+++ sqlframe-1.13.0/sqlframe.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 1.11.0
+Version: 1.13.0
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman
```