sqlframe 2.1.0__tar.gz → 2.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (224)
  1. {sqlframe-2.1.0 → sqlframe-2.2.0}/PKG-INFO +2 -2
  2. {sqlframe-2.1.0 → sqlframe-2.2.0}/README.md +1 -1
  3. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/add_chatgpt_support.md +1 -1
  4. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/sqlframe_universal_dataframe_api.md +1 -1
  5. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/bigquery.md +4 -2
  6. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/configuration.md +1 -1
  7. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/duckdb.md +2 -0
  8. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/postgres.md +2 -0
  9. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/snowflake.md +2 -0
  10. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/spark.md +2 -0
  11. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/standalone.md +2 -0
  12. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/_version.py +2 -2
  13. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/dataframe.py +8 -0
  14. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe.egg-info/PKG-INFO +2 -2
  15. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_dataframe.py +11 -0
  16. {sqlframe-2.1.0 → sqlframe-2.2.0}/.github/CODEOWNERS +0 -0
  17. {sqlframe-2.1.0 → sqlframe-2.2.0}/.github/workflows/main.workflow.yaml +0 -0
  18. {sqlframe-2.1.0 → sqlframe-2.2.0}/.github/workflows/publish.workflow.yaml +0 -0
  19. {sqlframe-2.1.0 → sqlframe-2.2.0}/.gitignore +0 -0
  20. {sqlframe-2.1.0 → sqlframe-2.2.0}/.pre-commit-config.yaml +0 -0
  21. {sqlframe-2.1.0 → sqlframe-2.2.0}/.readthedocs.yaml +0 -0
  22. {sqlframe-2.1.0 → sqlframe-2.2.0}/LICENSE +0 -0
  23. {sqlframe-2.1.0 → sqlframe-2.2.0}/Makefile +0 -0
  24. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/add_chatgpt_support/adding_ai_to_meal.jpeg +0 -0
  25. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/add_chatgpt_support/hype_train.gif +0 -0
  26. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/add_chatgpt_support/marvin_paranoid_robot.gif +0 -0
  27. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/add_chatgpt_support/nonsense_sql.png +0 -0
  28. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/add_chatgpt_support/openai_full_rewrite.png +0 -0
  29. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/add_chatgpt_support/openai_replacing_cte_names.png +0 -0
  30. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/add_chatgpt_support/sqlglot_optimized_code.png +0 -0
  31. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/add_chatgpt_support/sunny_shake_head_no.gif +0 -0
  32. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/but_wait_theres_more.gif +0 -0
  33. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/cake.gif +0 -0
  34. {sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/images/you_get_pyspark_api.gif +0 -0
  35. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/docs/bigquery.md +0 -0
  36. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/docs/duckdb.md +0 -0
  37. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/docs/images/SF.png +0 -0
  38. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/docs/images/favicon.png +0 -0
  39. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/docs/images/favicon_old.png +0 -0
  40. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/docs/images/sqlframe_diagram.png +0 -0
  41. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/docs/images/sqlframe_logo.png +0 -0
  42. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/docs/postgres.md +0 -0
  43. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/images/SF.png +0 -0
  44. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/images/favicon.png +0 -0
  45. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/images/favicon_old.png +0 -0
  46. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/images/sqlframe_diagram.png +0 -0
  47. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/images/sqlframe_logo.png +0 -0
  48. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/index.md +0 -0
  49. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/requirements.txt +0 -0
  50. {sqlframe-2.1.0 → sqlframe-2.2.0}/docs/stylesheets/extra.css +0 -0
  51. {sqlframe-2.1.0 → sqlframe-2.2.0}/mkdocs.yml +0 -0
  52. {sqlframe-2.1.0 → sqlframe-2.2.0}/pytest.ini +0 -0
  53. {sqlframe-2.1.0 → sqlframe-2.2.0}/renovate.json +0 -0
  54. {sqlframe-2.1.0 → sqlframe-2.2.0}/setup.cfg +0 -0
  55. {sqlframe-2.1.0 → sqlframe-2.2.0}/setup.py +0 -0
  56. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/LICENSE +0 -0
  57. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/__init__.py +0 -0
  58. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/__init__.py +0 -0
  59. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/_typing.py +0 -0
  60. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/catalog.py +0 -0
  61. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/column.py +0 -0
  62. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/decorators.py +0 -0
  63. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/exceptions.py +0 -0
  64. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/function_alternatives.py +0 -0
  65. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/functions.py +0 -0
  66. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/group.py +0 -0
  67. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/mixins/__init__.py +0 -0
  68. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/mixins/catalog_mixins.py +0 -0
  69. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/mixins/dataframe_mixins.py +0 -0
  70. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/mixins/readwriter_mixins.py +0 -0
  71. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/normalize.py +0 -0
  72. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/operations.py +0 -0
  73. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/readerwriter.py +0 -0
  74. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/session.py +0 -0
  75. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/transforms.py +0 -0
  76. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/types.py +0 -0
  77. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/udf.py +0 -0
  78. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/util.py +0 -0
  79. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/window.py +0 -0
  80. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/__init__.py +0 -0
  81. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/catalog.py +0 -0
  82. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/column.py +0 -0
  83. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/dataframe.py +0 -0
  84. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/functions.py +0 -0
  85. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/functions.pyi +0 -0
  86. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/group.py +0 -0
  87. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/readwriter.py +0 -0
  88. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/session.py +0 -0
  89. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/types.py +0 -0
  90. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/udf.py +0 -0
  91. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/bigquery/window.py +0 -0
  92. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/__init__.py +0 -0
  93. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/catalog.py +0 -0
  94. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/column.py +0 -0
  95. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/dataframe.py +0 -0
  96. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/functions.py +0 -0
  97. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/functions.pyi +0 -0
  98. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/group.py +0 -0
  99. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/readwriter.py +0 -0
  100. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/session.py +0 -0
  101. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/types.py +0 -0
  102. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/udf.py +0 -0
  103. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/duckdb/window.py +0 -0
  104. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/__init__.py +0 -0
  105. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/catalog.py +0 -0
  106. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/column.py +0 -0
  107. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/dataframe.py +0 -0
  108. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/functions.py +0 -0
  109. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/functions.pyi +0 -0
  110. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/group.py +0 -0
  111. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/readwriter.py +0 -0
  112. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/session.py +0 -0
  113. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/types.py +0 -0
  114. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/udf.py +0 -0
  115. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/postgres/window.py +0 -0
  116. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/__init__.py +0 -0
  117. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/catalog.py +0 -0
  118. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/column.py +0 -0
  119. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/dataframe.py +0 -0
  120. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/functions.py +0 -0
  121. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/group.py +0 -0
  122. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/readwriter.py +0 -0
  123. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/session.py +0 -0
  124. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/types.py +0 -0
  125. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/udf.py +0 -0
  126. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/redshift/window.py +0 -0
  127. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/__init__.py +0 -0
  128. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/catalog.py +0 -0
  129. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/column.py +0 -0
  130. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/dataframe.py +0 -0
  131. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/functions.py +0 -0
  132. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/functions.pyi +0 -0
  133. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/group.py +0 -0
  134. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/readwriter.py +0 -0
  135. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/session.py +0 -0
  136. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/types.py +0 -0
  137. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/udf.py +0 -0
  138. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/snowflake/window.py +0 -0
  139. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/__init__.py +0 -0
  140. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/catalog.py +0 -0
  141. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/column.py +0 -0
  142. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/dataframe.py +0 -0
  143. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/functions.py +0 -0
  144. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/functions.pyi +0 -0
  145. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/group.py +0 -0
  146. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/readwriter.py +0 -0
  147. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/session.py +0 -0
  148. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/types.py +0 -0
  149. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/udf.py +0 -0
  150. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/spark/window.py +0 -0
  151. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/__init__.py +0 -0
  152. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/catalog.py +0 -0
  153. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/column.py +0 -0
  154. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/dataframe.py +0 -0
  155. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/functions.py +0 -0
  156. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/group.py +0 -0
  157. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/readwriter.py +0 -0
  158. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/session.py +0 -0
  159. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/types.py +0 -0
  160. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/udf.py +0 -0
  161. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/standalone/window.py +0 -0
  162. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/testing/__init__.py +0 -0
  163. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/testing/utils.py +0 -0
  164. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe.egg-info/SOURCES.txt +0 -0
  165. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe.egg-info/dependency_links.txt +0 -0
  166. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe.egg-info/requires.txt +0 -0
  167. {sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe.egg-info/top_level.txt +0 -0
  168. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/__init__.py +0 -0
  169. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/common_fixtures.py +0 -0
  170. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/conftest.py +0 -0
  171. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/fixtures/employee.csv +0 -0
  172. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/fixtures/employee.json +0 -0
  173. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/fixtures/employee.parquet +0 -0
  174. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/fixtures/employee_extra_line.csv +0 -0
  175. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/__init__.py +0 -0
  176. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/__init__.py +0 -0
  177. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/bigquery/__init__.py +0 -0
  178. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/bigquery/test_bigquery_catalog.py +0 -0
  179. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/bigquery/test_bigquery_dataframe.py +0 -0
  180. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/bigquery/test_bigquery_session.py +0 -0
  181. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/duck/__init__.py +0 -0
  182. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/duck/test_duckdb_catalog.py +0 -0
  183. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/duck/test_duckdb_dataframe.py +0 -0
  184. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/duck/test_duckdb_reader.py +0 -0
  185. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/duck/test_duckdb_session.py +0 -0
  186. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/duck/test_duckdb_udf.py +0 -0
  187. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/postgres/__init__.py +0 -0
  188. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/postgres/test_postgres_catalog.py +0 -0
  189. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/postgres/test_postgres_dataframe.py +0 -0
  190. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/postgres/test_postgres_session.py +0 -0
  191. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/redshift/__init__.py +0 -0
  192. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/redshift/test_redshift_catalog.py +0 -0
  193. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/redshift/test_redshift_session.py +0 -0
  194. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/snowflake/__init__.py +0 -0
  195. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/snowflake/test_snowflake_catalog.py +0 -0
  196. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/snowflake/test_snowflake_dataframe.py +0 -0
  197. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/snowflake/test_snowflake_session.py +0 -0
  198. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/spark/__init__.py +0 -0
  199. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/spark/test_spark_catalog.py +0 -0
  200. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/spark/test_spark_dataframe.py +0 -0
  201. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/test_engine_column.py +0 -0
  202. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/test_engine_dataframe.py +0 -0
  203. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/test_engine_reader.py +0 -0
  204. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/test_engine_session.py +0 -0
  205. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/test_engine_writer.py +0 -0
  206. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/test_int_functions.py +0 -0
  207. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/engines/test_int_testing.py +0 -0
  208. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/fixtures.py +0 -0
  209. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/test_int_dataframe.py +0 -0
  210. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/test_int_dataframe_stats.py +0 -0
  211. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/test_int_grouped_data.py +0 -0
  212. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/integration/test_int_session.py +0 -0
  213. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/types.py +0 -0
  214. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/__init__.py +0 -0
  215. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/__init__.py +0 -0
  216. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/fixtures.py +0 -0
  217. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_column.py +0 -0
  218. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_dataframe_writer.py +0 -0
  219. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_functions.py +0 -0
  220. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_session.py +0 -0
  221. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_session_case_sensitivity.py +0 -0
  222. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_types.py +0 -0
  223. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_window.py +0 -0
  224. {sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/test_util.py +0 -0
{sqlframe-2.1.0 → sqlframe-2.2.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 2.1.0
+Version: 2.2.0
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman
@@ -90,7 +90,7 @@ from sqlframe.bigquery import functions as F
 from sqlframe.bigquery import Window
 
 session = BigQuerySession()
-table_path = "bigquery-public-data.samples.natality"
+table_path = '"bigquery-public-data".samples.natality'
 # Top 5 years with the greatest year-over-year % change in new families with single child
 df = (
     session.table(table_path)

{sqlframe-2.1.0 → sqlframe-2.2.0}/README.md
@@ -60,7 +60,7 @@ from sqlframe.bigquery import functions as F
 from sqlframe.bigquery import Window
 
 session = BigQuerySession()
-table_path = "bigquery-public-data.samples.natality"
+table_path = '"bigquery-public-data".samples.natality'
 # Top 5 years with the greatest year-over-year % change in new families with single child
 df = (
     session.table(table_path)

{sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/add_chatgpt_support.md
@@ -47,7 +47,7 @@ from sqlframe.bigquery import functions as F
 from sqlframe.bigquery import Window
 
 session = BigQuerySession()
-table_path = "bigquery-public-data.samples.natality"
+table_path = '"bigquery-public-data".samples.natality'
 # Top 5 years with the greatest year-over-year % change in new families with single child
 df = (
     session.table(table_path)

{sqlframe-2.1.0 → sqlframe-2.2.0}/blogs/sqlframe_universal_dataframe_api.md
@@ -39,7 +39,7 @@ from sqlframe.bigquery import Window
 
 # Unique to SQLFrame: Ability to connect directly to BigQuery
 session = BigQuerySession()
-table_path = "bigquery-public-data.samples.natality"
+table_path = '"bigquery-public-data".samples.natality'
 # Get the top 5 years with the greatest year-over-year % change in new families with a single child
 df = (
     session.table(table_path)

{sqlframe-2.1.0 → sqlframe-2.2.0}/docs/bigquery.md
@@ -72,7 +72,7 @@ from sqlframe.bigquery import functions as F
 
 session = BigQuerySession(default_dataset="sqlframe.db1")
 (
-    session.table("bigquery-public-data.samples.natality")
+    session.table('"bigquery-public-data".samples.natality')
     .select(F.call_function("FARM_FINGERPRINT", F.col("source")).alias("source_hash"))
     .show()
 )
@@ -86,7 +86,7 @@ from sqlframe.bigquery import functions as F
 from sqlframe.bigquery import Window
 
 session = BigQuerySession(default_dataset="sqlframe.db1")
-table_path = "bigquery-public-data.samples.natality"
+table_path = '"bigquery-public-data".samples.natality'
 # Get columns in the table
 print(session.catalog.listColumns(table_path))
 # Get the top 5 years with the greatest year-over-year % change in new families with a single child
@@ -214,6 +214,8 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [intersectAll](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.intersectAll.html)
 * [join](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.join.html)
 * [limit](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.limit.html)
+* lineage
+    * Get lineage for a specific column. [Returns a SQLGlot Node](https://sqlglot.com/sqlglot/lineage.html#Node). Can be used to get lineage SQL or HTML representation.
 * [na](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.na.html)
 * [orderBy](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.orderBy.html)
 * [persist](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.persist.html)

{sqlframe-2.1.0 → sqlframe-2.2.0}/docs/configuration.md
@@ -62,7 +62,7 @@ from sqlframe.bigquery import functions as F
 from sqlframe.bigquery import Window
 
 session = BigQuerySession()
-table_path = "bigquery-public-data.samples.natality"
+table_path = '"bigquery-public-data".samples.natality'
 # Top 5 years with the greatest year-over-year % change in new families with single child
 df = (
     session.table(table_path)

{sqlframe-2.1.0 → sqlframe-2.2.0}/docs/duckdb.md
@@ -187,6 +187,8 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [intersectAll](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.intersectAll.html)
 * [join](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.join.html)
 * [limit](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.limit.html)
+* lineage
+    * Get lineage for a specific column. [Returns a SQLGlot Node](https://sqlglot.com/sqlglot/lineage.html#Node). Can be used to get lineage SQL or HTML representation.
 * [na](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.na.html)
 * [orderBy](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.orderBy.html)
 * [persist](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.persist.html)

{sqlframe-2.1.0 → sqlframe-2.2.0}/docs/postgres.md
@@ -198,6 +198,8 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [intersectAll](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.intersectAll.html)
 * [join](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.join.html)
 * [limit](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.limit.html)
+* lineage
+    * Get lineage for a specific column. [Returns a SQLGlot Node](https://sqlglot.com/sqlglot/lineage.html#Node). Can be used to get lineage SQL or HTML representation.
 * [na](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.na.html)
 * [orderBy](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.orderBy.html)
 * [persist](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.persist.html)

{sqlframe-2.1.0 → sqlframe-2.2.0}/docs/snowflake.md
@@ -209,6 +209,8 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [intersectAll](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.intersectAll.html)
 * [join](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.join.html)
 * [limit](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.limit.html)
+* lineage
+    * Get lineage for a specific column. [Returns a SQLGlot Node](https://sqlglot.com/sqlglot/lineage.html#Node). Can be used to get lineage SQL or HTML representation.
 * [na](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.na.html)
 * [orderBy](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.orderBy.html)
 * [persist](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.persist.html)

{sqlframe-2.1.0 → sqlframe-2.2.0}/docs/spark.md
@@ -156,6 +156,8 @@ df.show(5)
 * [intersectAll](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.intersectAll.html)
 * [join](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.join.html)
 * [limit](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.limit.html)
+* lineage
+    * Get lineage for a specific column. [Returns a SQLGlot Node](https://sqlglot.com/sqlglot/lineage.html#Node). Can be used to get lineage SQL or HTML representation.
 * [na](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.na.html)
 * [orderBy](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.orderBy.html)
 * [persist](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.persist.html)

{sqlframe-2.1.0 → sqlframe-2.2.0}/docs/standalone.md
@@ -133,6 +133,8 @@ See something that you would like to see supported? [Open an issue](https://gith
 * [intersectAll](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.intersectAll.html)
 * [join](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.join.html)
 * [limit](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.limit.html)
+* lineage
+    * Get lineage for a specific column. [Returns a SQLGlot Node](https://sqlglot.com/sqlglot/lineage.html#Node). Can be used to get lineage SQL or HTML representation.
 * [na](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.na.html)
 * [orderBy](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.orderBy.html)
 * [persist](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.DataFrame.persist.html)

{sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/_version.py
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '2.1.0'
-__version_tuple__ = version_tuple = (2, 1, 0)
+__version__ = version = '2.2.0'
+__version_tuple__ = version_tuple = (2, 2, 0)

{sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe/base/dataframe.py
@@ -17,6 +17,7 @@ import sqlglot
 from prettytable import PrettyTable
 from sqlglot import Dialect
 from sqlglot import expressions as exp
+from sqlglot import lineage as sqlglot_lineage
 from sqlglot.helper import ensure_list, flatten, object_to_dict, seq_get
 from sqlglot.optimizer.pushdown_projections import pushdown_projections
 from sqlglot.optimizer.qualify import qualify
@@ -1613,6 +1614,13 @@ class _BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
             0,
         )
 
+    def lineage(self, col: ColumnOrName, optimize: bool = True) -> sqlglot_lineage.Node:
+        return sqlglot_lineage.lineage(
+            column=self._ensure_and_normalize_col(col).alias_or_name,
+            sql=self._get_expressions(optimize=optimize)[0],
+            schema=self.session.catalog._schema,
+        )
+
     def toPandas(self) -> pd.DataFrame:
         return self.session._fetchdf(self._get_expressions(optimize=False))
 
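A quick usage sketch of the new lineage method above (hedged: the StandaloneSession and createDataFrame call here are illustrative and not part of this diff; .source.sql() is the accessor exercised by the new test further down, and walk()/to_html() are the SQLGlot Node methods behind the "lineage SQL or HTML representation" note in the docs entries):

from sqlframe.standalone import StandaloneSession

session = StandaloneSession()
# Illustrative toy frame; any SQLFrame DataFrame exposes the same method.
df = session.createDataFrame(
    [(1, "Jack", 37)], schema=["employee_id", "fname", "age"]
)

node = df.lineage("age")   # sqlglot.lineage.Node for the "age" column
print(node.source.sql())   # SQL of the step that produced "age"
for step in node.walk():   # walk the lineage graph, root first
    print(step.name)
# In a notebook, node.to_html() should render the graph as HTML.
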
{sqlframe-2.1.0 → sqlframe-2.2.0}/sqlframe.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 2.1.0
+Version: 2.2.0
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman
@@ -90,7 +90,7 @@ from sqlframe.bigquery import functions as F
 from sqlframe.bigquery import Window
 
 session = BigQuerySession()
-table_path = "bigquery-public-data.samples.natality"
+table_path = '"bigquery-public-data".samples.natality'
 # Top 5 years with the greatest year-over-year % change in new families with single child
 df = (
     session.table(table_path)

{sqlframe-2.1.0 → sqlframe-2.2.0}/tests/unit/standalone/test_dataframe.py
@@ -137,3 +137,14 @@ def test_expand_star_table_alias(standalone_employee: StandaloneDataFrame):
         standalone_employee.alias("blah").select("blah.*").sql(pretty=False, optimize=False)
         == "WITH `t51718876` AS (SELECT CAST(`employee_id` AS INT) AS `employee_id`, CAST(`fname` AS STRING) AS `fname`, CAST(`lname` AS STRING) AS `lname`, CAST(`age` AS INT) AS `age`, CAST(`store_id` AS INT) AS `store_id` FROM VALUES (1, 'Jack', 'Shephard', 37, 1), (2, 'John', 'Locke', 65, 1), (3, 'Kate', 'Austen', 37, 2), (4, 'Claire', 'Littleton', 27, 2), (5, 'Hugo', 'Reyes', 29, 100) AS `a1`(`employee_id`, `fname`, `lname`, `age`, `store_id`)), `t37842204` AS (SELECT `employee_id`, `fname`, `lname`, `age`, `store_id` FROM `t51718876`) SELECT `t37842204`.`employee_id`, `t37842204`.`fname`, `t37842204`.`lname`, `t37842204`.`age`, `t37842204`.`store_id` FROM `t37842204`"
     )
+
+
+def test_lineage(standalone_employee: StandaloneDataFrame):
+    assert (
+        standalone_employee.lineage("age").source.sql()
+        == "SELECT a1.age AS age FROM (VALUES (1, 'Jack', 'Shephard', 37, 1), (2, 'John', 'Locke', 65, 1), (3, 'Kate', 'Austen', 37, 2), (4, 'Claire', 'Littleton', 27, 2), (5, 'Hugo', 'Reyes', 29, 100)) AS a1(employee_id, fname, lname, age, store_id)"
+    )
+    assert (
+        standalone_employee.session.sql("SELECT * FROM employee").lineage("age").source.sql()
+        == "SELECT employee.age AS age FROM employee AS employee"
+    )