sqlframe 3.26.0__tar.gz → 3.27.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (397) hide show
  1. {sqlframe-3.26.0 → sqlframe-3.27.1}/PKG-INFO +1 -1
  2. sqlframe-3.27.1/docs/docs/images/SF.png +0 -0
  3. sqlframe-3.27.1/docs/docs/images/favicon.png +0 -0
  4. sqlframe-3.27.1/docs/docs/images/sqlframe_logo.png +0 -0
  5. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/duckdb.md +1 -0
  6. sqlframe-3.27.1/docs/images/SF.png +0 -0
  7. sqlframe-3.27.1/docs/images/favicon.png +0 -0
  8. sqlframe-3.27.1/docs/images/sqlframe_logo.png +0 -0
  9. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/_version.py +2 -2
  10. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/functions.py +1 -1
  11. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/readerwriter.py +91 -3
  12. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/session.py +15 -16
  13. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/readwriter.py +7 -3
  14. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/functions.pyi +1 -0
  15. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/readwriter.py +8 -5
  16. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/readwriter.py +10 -4
  17. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe.egg-info/PKG-INFO +1 -1
  18. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe.egg-info/SOURCES.txt +3 -4
  19. sqlframe-3.27.1/tests/unit/duck/test_reader_options.py +116 -0
  20. sqlframe-3.27.1/tests/unit/spark/test_reader_options.py +152 -0
  21. sqlframe-3.27.1/tests/unit/test_base_reader_options.py +136 -0
  22. sqlframe-3.26.0/docs/docs/images/SF.png +0 -0
  23. sqlframe-3.26.0/docs/docs/images/favicon.png +0 -0
  24. sqlframe-3.26.0/docs/docs/images/favicon_old.png +0 -0
  25. sqlframe-3.26.0/docs/docs/images/sqlframe_diagram.png +0 -0
  26. sqlframe-3.26.0/docs/docs/images/sqlframe_logo.png +0 -0
  27. sqlframe-3.26.0/docs/images/SF.png +0 -0
  28. sqlframe-3.26.0/docs/images/favicon.png +0 -0
  29. sqlframe-3.26.0/docs/images/favicon_old.png +0 -0
  30. sqlframe-3.26.0/docs/images/sqlframe_diagram.png +0 -0
  31. sqlframe-3.26.0/docs/images/sqlframe_logo.png +0 -0
  32. {sqlframe-3.26.0 → sqlframe-3.27.1}/.github/CODEOWNERS +0 -0
  33. {sqlframe-3.26.0 → sqlframe-3.27.1}/.github/workflows/main.workflow.yaml +0 -0
  34. {sqlframe-3.26.0 → sqlframe-3.27.1}/.github/workflows/publish.workflow.yaml +0 -0
  35. {sqlframe-3.26.0 → sqlframe-3.27.1}/.gitignore +0 -0
  36. {sqlframe-3.26.0 → sqlframe-3.27.1}/.pre-commit-config.yaml +0 -0
  37. {sqlframe-3.26.0 → sqlframe-3.27.1}/.readthedocs.yaml +0 -0
  38. {sqlframe-3.26.0 → sqlframe-3.27.1}/LICENSE +0 -0
  39. {sqlframe-3.26.0 → sqlframe-3.27.1}/Makefile +0 -0
  40. {sqlframe-3.26.0 → sqlframe-3.27.1}/README.md +0 -0
  41. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/add_chatgpt_support.md +0 -0
  42. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/add_chatgpt_support/adding_ai_to_meal.jpeg +0 -0
  43. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/add_chatgpt_support/hype_train.gif +0 -0
  44. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/add_chatgpt_support/marvin_paranoid_robot.gif +0 -0
  45. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/add_chatgpt_support/nonsense_sql.png +0 -0
  46. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/add_chatgpt_support/openai_full_rewrite.png +0 -0
  47. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/add_chatgpt_support/openai_replacing_cte_names.png +0 -0
  48. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/add_chatgpt_support/sqlglot_optimized_code.png +0 -0
  49. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/add_chatgpt_support/sunny_shake_head_no.gif +0 -0
  50. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/but_wait_theres_more.gif +0 -0
  51. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/cake.gif +0 -0
  52. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/images/you_get_pyspark_api.gif +0 -0
  53. {sqlframe-3.26.0 → sqlframe-3.27.1}/blogs/sqlframe_universal_dataframe_api.md +0 -0
  54. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/bigquery.md +0 -0
  55. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/configuration.md +0 -0
  56. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/databricks.md +0 -0
  57. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/docs/bigquery.md +0 -0
  58. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/docs/duckdb.md +0 -0
  59. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/docs/postgres.md +0 -0
  60. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/index.md +0 -0
  61. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/postgres.md +0 -0
  62. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/redshift.md +0 -0
  63. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/requirements.txt +0 -0
  64. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/snowflake.md +0 -0
  65. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/spark.md +0 -0
  66. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/standalone.md +0 -0
  67. {sqlframe-3.26.0 → sqlframe-3.27.1}/docs/stylesheets/extra.css +0 -0
  68. {sqlframe-3.26.0 → sqlframe-3.27.1}/mkdocs.yml +0 -0
  69. {sqlframe-3.26.0 → sqlframe-3.27.1}/pytest.ini +0 -0
  70. {sqlframe-3.26.0 → sqlframe-3.27.1}/renovate.json +0 -0
  71. {sqlframe-3.26.0 → sqlframe-3.27.1}/setup.cfg +0 -0
  72. {sqlframe-3.26.0 → sqlframe-3.27.1}/setup.py +0 -0
  73. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/LICENSE +0 -0
  74. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/__init__.py +0 -0
  75. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/__init__.py +0 -0
  76. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/_typing.py +0 -0
  77. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/catalog.py +0 -0
  78. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/column.py +0 -0
  79. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/dataframe.py +0 -0
  80. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/decorators.py +0 -0
  81. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/exceptions.py +0 -0
  82. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/function_alternatives.py +0 -0
  83. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/group.py +0 -0
  84. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/mixins/__init__.py +0 -0
  85. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/mixins/catalog_mixins.py +0 -0
  86. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/mixins/dataframe_mixins.py +0 -0
  87. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/mixins/readwriter_mixins.py +0 -0
  88. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/mixins/table_mixins.py +0 -0
  89. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/normalize.py +0 -0
  90. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/operations.py +0 -0
  91. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/table.py +0 -0
  92. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/transforms.py +0 -0
  93. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/types.py +0 -0
  94. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/udf.py +0 -0
  95. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/util.py +0 -0
  96. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/base/window.py +0 -0
  97. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/__init__.py +0 -0
  98. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/catalog.py +0 -0
  99. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/column.py +0 -0
  100. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/dataframe.py +0 -0
  101. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/functions.py +0 -0
  102. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/functions.pyi +0 -0
  103. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/group.py +0 -0
  104. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/readwriter.py +0 -0
  105. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/session.py +0 -0
  106. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/table.py +0 -0
  107. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/types.py +0 -0
  108. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/udf.py +0 -0
  109. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/bigquery/window.py +0 -0
  110. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/__init__.py +0 -0
  111. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/catalog.py +0 -0
  112. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/column.py +0 -0
  113. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/dataframe.py +0 -0
  114. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/functions.py +0 -0
  115. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/functions.pyi +0 -0
  116. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/group.py +0 -0
  117. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/session.py +0 -0
  118. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/table.py +0 -0
  119. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/types.py +0 -0
  120. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/udf.py +0 -0
  121. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/databricks/window.py +0 -0
  122. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/__init__.py +0 -0
  123. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/catalog.py +0 -0
  124. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/column.py +0 -0
  125. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/dataframe.py +0 -0
  126. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/functions.py +0 -0
  127. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/group.py +0 -0
  128. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/session.py +0 -0
  129. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/table.py +0 -0
  130. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/types.py +0 -0
  131. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/udf.py +0 -0
  132. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/duckdb/window.py +0 -0
  133. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/__init__.py +0 -0
  134. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/catalog.py +0 -0
  135. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/column.py +0 -0
  136. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/dataframe.py +0 -0
  137. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/functions.py +0 -0
  138. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/functions.pyi +0 -0
  139. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/group.py +0 -0
  140. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/readwriter.py +0 -0
  141. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/session.py +0 -0
  142. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/table.py +0 -0
  143. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/types.py +0 -0
  144. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/udf.py +0 -0
  145. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/postgres/window.py +0 -0
  146. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/py.typed +0 -0
  147. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/__init__.py +0 -0
  148. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/catalog.py +0 -0
  149. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/column.py +0 -0
  150. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/dataframe.py +0 -0
  151. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/functions.py +0 -0
  152. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/group.py +0 -0
  153. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/readwriter.py +0 -0
  154. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/session.py +0 -0
  155. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/table.py +0 -0
  156. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/types.py +0 -0
  157. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/udf.py +0 -0
  158. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/redshift/window.py +0 -0
  159. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/__init__.py +0 -0
  160. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/catalog.py +0 -0
  161. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/column.py +0 -0
  162. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/dataframe.py +0 -0
  163. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/functions.py +0 -0
  164. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/functions.pyi +0 -0
  165. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/group.py +0 -0
  166. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/readwriter.py +0 -0
  167. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/session.py +0 -0
  168. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/table.py +0 -0
  169. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/types.py +0 -0
  170. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/udf.py +0 -0
  171. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/snowflake/window.py +0 -0
  172. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/__init__.py +0 -0
  173. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/catalog.py +0 -0
  174. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/column.py +0 -0
  175. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/dataframe.py +0 -0
  176. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/functions.py +0 -0
  177. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/functions.pyi +0 -0
  178. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/group.py +0 -0
  179. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/session.py +0 -0
  180. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/table.py +0 -0
  181. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/types.py +0 -0
  182. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/udf.py +0 -0
  183. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/spark/window.py +0 -0
  184. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/__init__.py +0 -0
  185. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/catalog.py +0 -0
  186. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/column.py +0 -0
  187. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/dataframe.py +0 -0
  188. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/functions.py +0 -0
  189. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/group.py +0 -0
  190. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/readwriter.py +0 -0
  191. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/session.py +0 -0
  192. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/table.py +0 -0
  193. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/types.py +0 -0
  194. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/udf.py +0 -0
  195. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/standalone/window.py +0 -0
  196. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/testing/__init__.py +0 -0
  197. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe/testing/utils.py +0 -0
  198. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe.egg-info/dependency_links.txt +0 -0
  199. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe.egg-info/requires.txt +0 -0
  200. {sqlframe-3.26.0 → sqlframe-3.27.1}/sqlframe.egg-info/top_level.txt +0 -0
  201. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/__init__.py +0 -0
  202. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/common_fixtures.py +0 -0
  203. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/conftest.py +0 -0
  204. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee.csv +0 -0
  205. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee.json +0 -0
  206. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee.parquet +0 -0
  207. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/.part-00000-e5965c7b-e58f-4d3c-ad56-002876814e3a-c000.snappy.parquet.crc +0 -0
  208. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/.part-00002-3fed7f18-370f-4b16-b232-504d6194eb52-c000.snappy.parquet.crc +0 -0
  209. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/.part-00004-143c5da1-d5ab-4706-8e84-0d2a324c6894-c000.snappy.parquet.crc +0 -0
  210. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/.part-00006-64f07e25-c30e-4075-acc6-b3c69c4ce80b-c000.snappy.parquet.crc +0 -0
  211. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/.part-00008-89ccad8d-df73-4ad5-8850-82ef3884db60-c000.snappy.parquet.crc +0 -0
  212. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/.part-00010-812b3382-8c7f-4c4e-9bcd-09ce8664f6e0-c000.snappy.parquet.crc +0 -0
  213. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/_delta_log/.00000000000000000000.json.crc +0 -0
  214. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/_delta_log/00000000000000000000.json +0 -0
  215. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/part-00000-e5965c7b-e58f-4d3c-ad56-002876814e3a-c000.snappy.parquet +0 -0
  216. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/part-00002-3fed7f18-370f-4b16-b232-504d6194eb52-c000.snappy.parquet +0 -0
  217. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/part-00004-143c5da1-d5ab-4706-8e84-0d2a324c6894-c000.snappy.parquet +0 -0
  218. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/part-00006-64f07e25-c30e-4075-acc6-b3c69c4ce80b-c000.snappy.parquet +0 -0
  219. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/part-00008-89ccad8d-df73-4ad5-8850-82ef3884db60-c000.snappy.parquet +0 -0
  220. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_delta/part-00010-812b3382-8c7f-4c4e-9bcd-09ce8664f6e0-c000.snappy.parquet +0 -0
  221. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/employee_extra_line.csv +0 -0
  222. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/issue_219.csv +0 -0
  223. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds1.sql +0 -0
  224. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds10.sql +0 -0
  225. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds11.sql +0 -0
  226. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds12.sql +0 -0
  227. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds13.sql +0 -0
  228. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds14.sql +0 -0
  229. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds15.sql +0 -0
  230. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds16.sql +0 -0
  231. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds17.sql +0 -0
  232. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds18.sql +0 -0
  233. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds19.sql +0 -0
  234. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds2.sql +0 -0
  235. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds20.sql +0 -0
  236. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds21.sql +0 -0
  237. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds22.sql +0 -0
  238. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds23.sql +0 -0
  239. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds24.sql +0 -0
  240. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds25.sql +0 -0
  241. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds26.sql +0 -0
  242. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds27.sql +0 -0
  243. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds28.sql +0 -0
  244. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds29.sql +0 -0
  245. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds3.sql +0 -0
  246. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds30.sql +0 -0
  247. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds31.sql +0 -0
  248. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds32.sql +0 -0
  249. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds33.sql +0 -0
  250. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds34.sql +0 -0
  251. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds35.sql +0 -0
  252. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds36.sql +0 -0
  253. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds37.sql +0 -0
  254. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds38.sql +0 -0
  255. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds39.sql +0 -0
  256. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds4.sql +0 -0
  257. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds40.sql +0 -0
  258. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds41.sql +0 -0
  259. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds42.sql +0 -0
  260. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds43.sql +0 -0
  261. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds44.sql +0 -0
  262. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds45.sql +0 -0
  263. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds46.sql +0 -0
  264. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds47.sql +0 -0
  265. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds48.sql +0 -0
  266. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds49.sql +0 -0
  267. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds5.sql +0 -0
  268. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds50.sql +0 -0
  269. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds51.sql +0 -0
  270. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds52.sql +0 -0
  271. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds53.sql +0 -0
  272. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds54.sql +0 -0
  273. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds55.sql +0 -0
  274. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds56.sql +0 -0
  275. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds57.sql +0 -0
  276. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds58.sql +0 -0
  277. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds59.sql +0 -0
  278. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds6.sql +0 -0
  279. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds60.sql +0 -0
  280. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds61.sql +0 -0
  281. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds62.sql +0 -0
  282. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds63.sql +0 -0
  283. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds64.sql +0 -0
  284. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds65.sql +0 -0
  285. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds66.sql +0 -0
  286. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds67.sql +0 -0
  287. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds68.sql +0 -0
  288. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds69.sql +0 -0
  289. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds7.sql +0 -0
  290. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds70.sql +0 -0
  291. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds71.sql +0 -0
  292. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds72.sql +0 -0
  293. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds73.sql +0 -0
  294. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds74.sql +0 -0
  295. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds75.sql +0 -0
  296. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds76.sql +0 -0
  297. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds77.sql +0 -0
  298. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds78.sql +0 -0
  299. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds79.sql +0 -0
  300. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds8.sql +0 -0
  301. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds80.sql +0 -0
  302. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds81.sql +0 -0
  303. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds82.sql +0 -0
  304. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds83.sql +0 -0
  305. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds84.sql +0 -0
  306. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds85.sql +0 -0
  307. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds86.sql +0 -0
  308. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds87.sql +0 -0
  309. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds88.sql +0 -0
  310. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds89.sql +0 -0
  311. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds9.sql +0 -0
  312. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds90.sql +0 -0
  313. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds91.sql +0 -0
  314. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds92.sql +0 -0
  315. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds93.sql +0 -0
  316. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds94.sql +0 -0
  317. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds95.sql +0 -0
  318. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds96.sql +0 -0
  319. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds97.sql +0 -0
  320. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds98.sql +0 -0
  321. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/fixtures/tpcds/tpcds99.sql +0 -0
  322. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/__init__.py +0 -0
  323. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/__init__.py +0 -0
  324. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/bigquery/__init__.py +0 -0
  325. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/bigquery/test_bigquery_catalog.py +0 -0
  326. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/bigquery/test_bigquery_dataframe.py +0 -0
  327. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/bigquery/test_bigquery_session.py +0 -0
  328. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/databricks/__init__.py +0 -0
  329. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/databricks/test_databricks_catalog.py +0 -0
  330. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/databricks/test_databricks_dataframe.py +0 -0
  331. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/databricks/test_databricks_session.py +0 -0
  332. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/duck/__init__.py +0 -0
  333. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/duck/test_duckdb_activate.py +0 -0
  334. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/duck/test_duckdb_catalog.py +0 -0
  335. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/duck/test_duckdb_dataframe.py +0 -0
  336. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/duck/test_duckdb_reader.py +0 -0
  337. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/duck/test_duckdb_session.py +0 -0
  338. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/duck/test_duckdb_udf.py +0 -0
  339. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/duck/test_tpcds.py +0 -0
  340. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/postgres/__init__.py +0 -0
  341. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/postgres/test_postgres_activate.py +0 -0
  342. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/postgres/test_postgres_catalog.py +0 -0
  343. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/postgres/test_postgres_dataframe.py +0 -0
  344. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/postgres/test_postgres_session.py +0 -0
  345. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/redshift/__init__.py +0 -0
  346. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/redshift/test_redshift_catalog.py +0 -0
  347. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/redshift/test_redshift_session.py +0 -0
  348. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/snowflake/__init__.py +0 -0
  349. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/snowflake/test_snowflake_catalog.py +0 -0
  350. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/snowflake/test_snowflake_dataframe.py +0 -0
  351. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/snowflake/test_snowflake_session.py +0 -0
  352. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/spark/__init__.py +0 -0
  353. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/spark/test_spark_catalog.py +0 -0
  354. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/spark/test_spark_dataframe.py +0 -0
  355. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/test_engine_column.py +0 -0
  356. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/test_engine_dataframe.py +0 -0
  357. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/test_engine_reader.py +0 -0
  358. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/test_engine_session.py +0 -0
  359. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/test_engine_table.py +0 -0
  360. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/test_engine_writer.py +0 -0
  361. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/test_int_functions.py +0 -0
  362. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/engines/test_int_testing.py +0 -0
  363. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/fixtures.py +0 -0
  364. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/test_int_dataframe.py +0 -0
  365. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/test_int_dataframe_stats.py +0 -0
  366. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/test_int_grouped_data.py +0 -0
  367. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/integration/test_int_session.py +0 -0
  368. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/types.py +0 -0
  369. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/__init__.py +0 -0
  370. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/bigquery/__init__.py +0 -0
  371. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/bigquery/test_activate.py +0 -0
  372. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/conftest.py +0 -0
  373. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/databricks/__init__.py +0 -0
  374. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/databricks/test_activate.py +0 -0
  375. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/duck/__init__.py +0 -0
  376. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/duck/test_activate.py +0 -0
  377. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/postgres/__init__.py +0 -0
  378. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/postgres/test_activate.py +0 -0
  379. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/redshift/__init__.py +0 -0
  380. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/redshift/test_activate.py +0 -0
  381. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/snowflake/__init__.py +0 -0
  382. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/snowflake/test_activate.py +0 -0
  383. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/spark/__init__.py +0 -0
  384. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/spark/test_activate.py +0 -0
  385. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/__init__.py +0 -0
  386. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/fixtures.py +0 -0
  387. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_activate.py +0 -0
  388. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_column.py +0 -0
  389. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_dataframe.py +0 -0
  390. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_dataframe_writer.py +0 -0
  391. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_functions.py +0 -0
  392. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_session.py +0 -0
  393. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_session_case_sensitivity.py +0 -0
  394. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_types.py +0 -0
  395. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/standalone/test_window.py +0 -0
  396. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/test_activate.py +0 -0
  397. {sqlframe-3.26.0 → sqlframe-3.27.1}/tests/unit/test_util.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sqlframe
3
- Version: 3.26.0
3
+ Version: 3.27.1
4
4
  Summary: Turning PySpark Into a Universal DataFrame API
5
5
  Home-page: https://github.com/eakmanrq/sqlframe
6
6
  Author: Ryan Eakman
Binary file
@@ -417,6 +417,7 @@ See something that you would like to see supported? [Open an issue](https://gith
417
417
  * [percent_rank](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.percent_rank.html)
418
418
  * [percentile](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.percentile.html)
419
419
  * [percentile_approx](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.percentile_approx.html)
420
+ * [product](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.product.html)
420
421
  * [position](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.position.html)
421
422
  * [pow](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.pow.html)
422
423
  * [quarter](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.quarter.html)
Binary file
Binary file
@@ -17,5 +17,5 @@ __version__: str
17
17
  __version_tuple__: VERSION_TUPLE
18
18
  version_tuple: VERSION_TUPLE
19
19
 
20
- __version__ = version = '3.26.0'
21
- __version_tuple__ = version_tuple = (3, 26, 0)
20
+ __version__ = version = '3.27.1'
21
+ __version_tuple__ = version_tuple = (3, 27, 1)
@@ -4944,7 +4944,7 @@ def printf(format: ColumnOrName, *cols: ColumnOrName) -> Column:
4944
4944
  return Column.invoke_anonymous_function(format, "printf", *cols)
4945
4945
 
4946
4946
 
4947
- @meta(unsupported_engines=["*", "spark", "databricks"])
4947
+ @meta(unsupported_engines=["bigquery", "postgres", "redshift", "snowflake", "spark", "databricks"])
4948
4948
  def product(col: ColumnOrName) -> Column:
4949
4949
  """
4950
4950
  Aggregate function: returns the product of the values in a group.
@@ -38,6 +38,7 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
38
38
  def __init__(self, spark: SESSION):
39
39
  self._session = spark
40
40
  self.state_format_to_read: t.Optional[str] = None
41
+ self.state_options: t.Dict[str, OptionalPrimitiveType] = {}
41
42
 
42
43
  @property
43
44
  def session(self) -> SESSION:
@@ -107,6 +108,88 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
107
108
  self.state_format_to_read = source
108
109
  return self
109
110
 
111
+ def options(self, **options: OptionalPrimitiveType) -> "Self":
112
+ """Adds input options for the underlying data source.
113
+
114
+ .. versionadded:: 1.4.0
115
+
116
+ .. versionchanged:: 3.4.0
117
+ Supports Spark Connect.
118
+
119
+ Parameters
120
+ ----------
121
+ **options : dict
122
+ The dictionary of string keys and primitive-type values.
123
+
124
+ Examples
125
+ --------
126
+ >>> spark.read.options(inferSchema=True, header=True)
127
+ <...readwriter.DataFrameReader object ...>
128
+
129
+ Specify the option 'nullValue' and 'header' with reading a CSV file.
130
+
131
+ >>> import tempfile
132
+ >>> with tempfile.TemporaryDirectory() as d:
133
+ ... # Write a DataFrame into a CSV file with a header.
134
+ ... df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
135
+ ... df.write.option("header", True).mode("overwrite").format("csv").save(d)
136
+ ...
137
+ ... # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
138
+ ... # and 'header' option set to `True`.
139
+ ... spark.read.options(
140
+ ... nullValue="Hyukjin Kwon",
141
+ ... header=True
142
+ ... ).format('csv').load(d).show()
143
+ +---+----+
144
+ |age|name|
145
+ +---+----+
146
+ |100|NULL|
147
+ +---+----+
148
+ """
149
+
150
+ self.state_options = {**self.state_options, **options}
151
+ return self
152
+
153
+ def option(self, key: str, value: OptionalPrimitiveType) -> "Self":
154
+ """Adds an input option for the underlying data source.
155
+
156
+ .. versionadded:: 1.4.0
157
+
158
+ .. versionchanged:: 3.4.0
159
+ Supports Spark Connect.
160
+
161
+ Parameters
162
+ ----------
163
+ key : str
164
+ The key of the option.
165
+ value :
166
+ The value of the option.
167
+
168
+ Examples
169
+ --------
170
+ >>> spark.read.option("inferSchema", True)
171
+ <...readwriter.DataFrameReader object ...>
172
+
173
+ Specify the option 'nullValue' and 'header' with reading a CSV file.
174
+
175
+ >>> import tempfile
176
+ >>> with tempfile.TemporaryDirectory() as d:
177
+ ... # Write a DataFrame into a CSV file with a header.
178
+ ... df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
179
+ ... df.write.option("header", True).mode("overwrite").format("csv").save(d)
180
+ ...
181
+ ... # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
182
+ ... # and 'header' option set to `True`.
183
+ ... spark.read.option("nullValue", "Hyukjin Kwon").option("header", True).format('csv').load(d).show()
184
+ +---+----+
185
+ |age|name|
186
+ +---+----+
187
+ |100|NULL|
188
+ +---+----+
189
+ """
190
+ self.state_options[key] = value
191
+ return self
192
+
110
193
  def load(
111
194
  self,
112
195
  path: t.Optional[PathOrPaths] = None,
@@ -220,7 +303,9 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
220
303
  modifiedAfter=modifiedAfter,
221
304
  allowNonNumericNumbers=allowNonNumericNumbers,
222
305
  )
223
- return self.load(path=path, format="json", schema=schema, **options)
306
+ # Apply previously set options, with method-specific options taking precedence
307
+ all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
308
+ return self.load(path=path, format="json", schema=schema, **all_options)
224
309
 
225
310
  def parquet(self, *paths: str, **options: OptionalPrimitiveType) -> DF:
226
311
  """
@@ -263,7 +348,8 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
263
348
  |100|Hyukjin Kwon|
264
349
  +---+------------+
265
350
  """
266
- dfs = [self.load(path=path, format="parquet", **options) for path in paths] # type: ignore
351
+ all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
352
+ dfs = [self.load(path=path, format="parquet", **all_options) for path in paths] # type: ignore
267
353
  return reduce(lambda a, b: a.union(b), dfs)
268
354
 
269
355
  def csv(
@@ -384,7 +470,9 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
384
470
  modifiedAfter=modifiedAfter,
385
471
  unescapedQuoteHandling=unescapedQuoteHandling,
386
472
  )
387
- return self.load(path=path, format="csv", schema=schema, **options)
473
+ # Apply previously set options, with method-specific options taking precedence
474
+ all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
475
+ return self.load(path=path, format="csv", schema=schema, **all_options)
388
476
 
389
477
 
390
478
  class _BaseDataFrameWriter(t.Generic[SESSION, DF]):
@@ -193,22 +193,21 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
193
193
  def getActiveSession(self) -> Self:
194
194
  return self
195
195
 
196
- def range(self, *args):
197
- start = 0
198
- step = 1
199
- numPartitions = None
200
- if len(args) == 1:
201
- end = args[0]
202
- elif len(args) == 2:
203
- start, end = args
204
- elif len(args) == 3:
205
- start, end, step = args
206
- elif len(args) == 4:
207
- start, end, step, numPartitions = args
208
- else:
209
- raise ValueError(
210
- "range() takes 1 to 4 positional arguments but {} were given".format(len(args))
211
- )
196
+ def range(
197
+ self,
198
+ start: int,
199
+ end: t.Optional[int] = None,
200
+ step: int = 1,
201
+ numPartitions: t.Optional[int] = None,
202
+ ):
203
+ # Single-argument form: treat the sole value as `end` with start=0 (NOTE(review): range(0) falls into the error branch below — confirm intended)
204
+ if end is None:
205
+ if start:
206
+ end = start
207
+ start = 0
208
+ else:
209
+ raise ValueError("range() requires an 'end' value")
210
+
212
211
  if numPartitions is not None:
213
212
  logger.warning("numPartitions is not supported")
214
213
  return self.createDataFrame([[x] for x in range(start, end, step)], schema={"id": "long"})
@@ -94,11 +94,15 @@ class DatabricksDataFrameReader(
94
94
  """
95
95
  assert path is not None, "path is required"
96
96
  assert isinstance(path, str), "path must be a string"
97
+
98
+ # Merge state_options with provided options, with provided options taking precedence
99
+ merged_options = {**self.state_options, **options}
100
+
97
101
  format = format or self.state_format_to_read or _infer_format(path)
98
102
  fs_prefix, filepath = split_filepath(path)
99
103
 
100
104
  if fs_prefix == "":
101
- return super().load(path, format, schema, **options)
105
+ return super().load(path, format, schema, **merged_options)
102
106
 
103
107
  if schema:
104
108
  column_mapping = ensure_column_mapping(schema)
@@ -116,7 +120,7 @@ class DatabricksDataFrameReader(
116
120
  paths = ",".join([f"{path}" for path in ensure_list(path)])
117
121
 
118
122
  format_options: dict[str, OptionalPrimitiveType] = {
119
- k: v for k, v in options.items() if v is not None
123
+ k: v for k, v in merged_options.items() if v is not None
120
124
  }
121
125
  format_options["format"] = format
122
126
  format_options["schemaEvolutionMode"] = "none"
@@ -137,7 +141,7 @@ class DatabricksDataFrameReader(
137
141
  qualify=False,
138
142
  )
139
143
  if select_columns == [exp.Star()] and df.schema:
140
- return self.load(path=path, format=format, schema=df.schema, **options)
144
+ return self.load(path=path, format=format, schema=df.schema, **merged_options)
141
145
  self.session._last_loaded_file = path # type: ignore
142
146
  return df
143
147
 
@@ -148,6 +148,7 @@ from sqlframe.base.functions import percentile_approx as percentile_approx
148
148
  from sqlframe.base.functions import position as position
149
149
  from sqlframe.base.functions import pow as pow
150
150
  from sqlframe.base.functions import power as power
151
+ from sqlframe.base.functions import product as product
151
152
  from sqlframe.base.functions import quarter as quarter
152
153
  from sqlframe.base.functions import radians as radians
153
154
  from sqlframe.base.functions import rand as rand
@@ -75,31 +75,34 @@ class DuckDBDataFrameReader(
75
75
  |100|NULL|
76
76
  +---+----+
77
77
  """
78
+ # Merge state_options with provided options, with provided options taking precedence
79
+ merged_options = {**self.state_options, **options}
80
+
78
81
  format = format or self.state_format_to_read
79
82
  if schema:
80
83
  column_mapping = ensure_column_mapping(schema)
81
84
  select_column_mapping = column_mapping.copy()
82
- if options.get("filename"):
85
+ if merged_options.get("filename"):
83
86
  select_column_mapping["filename"] = "VARCHAR"
84
87
  select_columns = [x.expression for x in self._to_casted_columns(select_column_mapping)]
85
88
  if format == "csv":
86
89
  duckdb_columns = ", ".join(
87
90
  [f"'{column}': '{dtype}'" for column, dtype in column_mapping.items()]
88
91
  )
89
- options["columns"] = "{" + duckdb_columns + "}"
92
+ merged_options["columns"] = "{" + duckdb_columns + "}"
90
93
  else:
91
94
  select_columns = [exp.Star()]
92
95
  if format == "delta":
93
96
  from_clause = f"delta_scan('{path}')"
94
97
  elif format:
95
- options.pop("inferSchema", None)
98
+ merged_options.pop("inferSchema", None)
96
99
  paths = ",".join([f"'{path}'" for path in ensure_list(path)])
97
- from_clause = f"read_{format}([{paths}], {to_csv(options)})"
100
+ from_clause = f"read_{format}([{paths}], {to_csv(merged_options)})"
98
101
  else:
99
102
  from_clause = f"'{path}'"
100
103
  df = self.session.sql(exp.select(*select_columns).from_(from_clause), qualify=False)
101
104
  if select_columns == [exp.Star()]:
102
- return self.load(path=path, format=format, schema=df.schema, **options)
105
+ return self.load(path=path, format=format, schema=df.schema, **merged_options)
103
106
  self.session._last_loaded_file = path # type: ignore
104
107
  return df
105
108
 
@@ -78,6 +78,10 @@ class SparkDataFrameReader(
78
78
  """
79
79
  assert path is not None, "path is required"
80
80
  assert isinstance(path, str), "path must be a string"
81
+
82
+ # Merge state_options with provided options, with provided options taking precedence
83
+ merged_options = {**self.state_options, **options}
84
+
81
85
  format = format or self.state_format_to_read or _infer_format(path)
82
86
  if schema:
83
87
  column_mapping = ensure_column_mapping(schema)
@@ -93,11 +97,13 @@ class SparkDataFrameReader(
93
97
  from_clause = f"delta.`{path}`"
94
98
  elif format:
95
99
  paths = ",".join([f"{path}" for path in ensure_list(path)])
96
- tmp_view_key = options.get("_tmp_view_key_", f"{generate_random_identifier()}_vw")
97
- options["_tmp_view_key_"] = tmp_view_key
100
+ tmp_view_key = merged_options.get(
101
+ "_tmp_view_key_", f"{generate_random_identifier()}_vw"
102
+ )
103
+ merged_options["_tmp_view_key_"] = tmp_view_key
98
104
 
99
105
  format_options: dict[str, OptionalPrimitiveType] = {
100
- k: v for k, v in options.items() if v is not None
106
+ k: v for k, v in merged_options.items() if v is not None
101
107
  }
102
108
  format_options.pop("_tmp_view_key_")
103
109
  format_options["path"] = paths
@@ -121,7 +127,7 @@ class SparkDataFrameReader(
121
127
  qualify=False,
122
128
  )
123
129
  if select_columns == [exp.Star()] and df.schema:
124
- return self.load(path=path, format=format, schema=df.schema, **options)
130
+ return self.load(path=path, format=format, schema=df.schema, **merged_options)
125
131
  self.session._last_loaded_file = path # type: ignore
126
132
  return df
127
133
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sqlframe
3
- Version: 3.26.0
3
+ Version: 3.27.1
4
4
  Summary: Turning PySpark Into a Universal DataFrame API
5
5
  Home-page: https://github.com/eakmanrq/sqlframe
6
6
  Author: Ryan Eakman
@@ -41,13 +41,9 @@ docs/docs/duckdb.md
41
41
  docs/docs/postgres.md
42
42
  docs/docs/images/SF.png
43
43
  docs/docs/images/favicon.png
44
- docs/docs/images/favicon_old.png
45
- docs/docs/images/sqlframe_diagram.png
46
44
  docs/docs/images/sqlframe_logo.png
47
45
  docs/images/SF.png
48
46
  docs/images/favicon.png
49
- docs/images/favicon_old.png
50
- docs/images/sqlframe_diagram.png
51
47
  docs/images/sqlframe_logo.png
52
48
  docs/stylesheets/extra.css
53
49
  sqlframe/LICENSE
@@ -359,6 +355,7 @@ tests/integration/engines/spark/test_spark_dataframe.py
359
355
  tests/unit/__init__.py
360
356
  tests/unit/conftest.py
361
357
  tests/unit/test_activate.py
358
+ tests/unit/test_base_reader_options.py
362
359
  tests/unit/test_util.py
363
360
  tests/unit/bigquery/__init__.py
364
361
  tests/unit/bigquery/test_activate.py
@@ -366,6 +363,7 @@ tests/unit/databricks/__init__.py
366
363
  tests/unit/databricks/test_activate.py
367
364
  tests/unit/duck/__init__.py
368
365
  tests/unit/duck/test_activate.py
366
+ tests/unit/duck/test_reader_options.py
369
367
  tests/unit/postgres/__init__.py
370
368
  tests/unit/postgres/test_activate.py
371
369
  tests/unit/redshift/__init__.py
@@ -374,6 +372,7 @@ tests/unit/snowflake/__init__.py
374
372
  tests/unit/snowflake/test_activate.py
375
373
  tests/unit/spark/__init__.py
376
374
  tests/unit/spark/test_activate.py
375
+ tests/unit/spark/test_reader_options.py
377
376
  tests/unit/standalone/__init__.py
378
377
  tests/unit/standalone/fixtures.py
379
378
  tests/unit/standalone/test_activate.py
@@ -0,0 +1,116 @@
1
+ from unittest.mock import MagicMock, patch
2
+
3
+ import pytest
4
+
5
+ from sqlframe.base.readerwriter import _BaseDataFrameReader
6
+ from sqlframe.duckdb import DuckDBDataFrameReader
7
+
8
+
9
+ @pytest.fixture
10
+ def mock_duckdb_session():
11
+ """Create a mock DuckDBSession for testing."""
12
+ session = MagicMock()
13
+ session.input_dialect = "duckdb"
14
+ session.sql = MagicMock()
15
+ session._last_loaded_file = None
16
+ return session
17
+
18
+
19
+ @pytest.fixture
20
+ def reader(mock_duckdb_session):
21
+ """Create a DataFrameReader instance for testing."""
22
+ return DuckDBDataFrameReader(mock_duckdb_session)
23
+
24
+
25
+ def test_options_initialization(reader):
26
+ """Test that options are correctly initialized."""
27
+ assert reader.state_options == {}
28
+
29
+
30
+ def test_options_method(reader):
31
+ """Test that the options method correctly stores options."""
32
+ reader.options(inferSchema=True, header=True)
33
+ assert reader.state_options == {"inferSchema": True, "header": True}
34
+
35
+
36
+ def test_option_method(reader):
37
+ """Test that the option method correctly stores a single option."""
38
+ reader.option("inferSchema", True)
39
+ assert reader.state_options == {"inferSchema": True}
40
+
41
+
42
+ def test_options_and_option_methods_together(reader):
43
+ """Test that option and options methods can be used together."""
44
+ reader.options(inferSchema=True, header=True)
45
+ reader.option("delimiter", ",")
46
+ assert reader.state_options == {"inferSchema": True, "header": True, "delimiter": ","}
47
+
48
+
49
+ def test_options_override(reader):
50
+ """Test that options override previous values with the same key."""
51
+ reader.options(inferSchema=True, header=True)
52
+ reader.options(inferSchema=False, nullValue="NULL")
53
+ assert reader.state_options == {"inferSchema": False, "header": True, "nullValue": "NULL"}
54
+
55
+
56
+ def test_option_override(reader):
57
+ """Test that option overrides previous values with the same key."""
58
+ reader.option("inferSchema", True)
59
+ reader.option("inferSchema", False)
60
+ assert reader.state_options == {"inferSchema": False}
61
+
62
+
63
+ @patch.object(DuckDBDataFrameReader, "load")
64
+ def test_csv_uses_options(mock_load, reader):
65
+ """Test that the csv method uses the stored options."""
66
+ # Setup
67
+ mock_load.return_value = MagicMock()
68
+ reader.options(inferSchema=True, header=True, delimiter=",")
69
+
70
+ # Execute
71
+ reader.csv("test.csv", sep="|")
72
+
73
+ # Assert
74
+ call_args = mock_load.call_args
75
+ assert call_args.kwargs["format"] == "csv"
76
+ assert call_args.kwargs["path"] == "test.csv"
77
+ assert call_args.kwargs["inferSchema"] is True
78
+ assert call_args.kwargs["header"] is True
79
+ assert call_args.kwargs["sep"] == "|" # Method-specific overrides global
80
+ assert call_args.kwargs["delimiter"] == ","
81
+
82
+
83
+ @patch.object(DuckDBDataFrameReader, "load")
84
+ def test_parquet_uses_options(mock_load, reader):
85
+ """Test that the parquet method uses the stored options."""
86
+ # Setup
87
+ mock_load.return_value = MagicMock()
88
+ reader.options(compression="snappy", row_group_size=1000)
89
+
90
+ # Execute
91
+ reader.parquet("test.parquet")
92
+
93
+ # Assert
94
+ call_args = mock_load.call_args
95
+ assert call_args.kwargs["format"] == "parquet"
96
+ assert call_args.kwargs["path"] == "test.parquet"
97
+ assert call_args.kwargs["compression"] == "snappy"
98
+ assert call_args.kwargs["row_group_size"] == 1000
99
+
100
+
101
+ @patch.object(DuckDBDataFrameReader, "load")
102
+ def test_method_specific_options_override_global_options(mock_load, reader):
103
+ """Test that method-specific options override global options."""
104
+ # Setup
105
+ mock_load.return_value = MagicMock()
106
+ reader.options(header=True, delimiter=",")
107
+
108
+ # Execute
109
+ reader.csv("test.csv", header=False)
110
+
111
+ # Assert
112
+ call_args = mock_load.call_args
113
+ assert call_args.kwargs["header"] is False # Method-specific overrides global
114
+ assert call_args.kwargs["delimiter"] == ","
115
+ assert call_args.kwargs["format"] == "csv"
116
+ assert call_args.kwargs["path"] == "test.csv"
@@ -0,0 +1,152 @@
1
+ from unittest.mock import MagicMock, patch
2
+
3
+ import pytest
4
+
5
+ from sqlframe.base.readerwriter import _BaseDataFrameReader
6
+ from sqlframe.spark import SparkDataFrameReader, SparkSession
7
+
8
+
9
+ @pytest.fixture
10
+ def mock_spark_session():
11
+ """Create a mock SparkSession for testing."""
12
+ session = MagicMock()
13
+ session.input_dialect = "spark"
14
+ session.spark_session = MagicMock()
15
+ session.sql = MagicMock()
16
+ session._last_loaded_file = None
17
+ return session
18
+
19
+
20
+ @pytest.fixture
21
+ def reader(mock_spark_session):
22
+ """Create a DataFrameReader instance for testing."""
23
+ return SparkDataFrameReader(mock_spark_session)
24
+
25
+
26
+ def test_options_initialization(reader):
27
+ """Test that options are correctly initialized."""
28
+ assert reader.state_options == {}
29
+
30
+
31
+ def test_options_method(reader):
32
+ """Test that the options method correctly stores options."""
33
+ reader.options(inferSchema=True, header=True)
34
+ assert reader.state_options == {"inferSchema": True, "header": True}
35
+
36
+
37
+ def test_option_method(reader):
38
+ """Test that the option method correctly stores a single option."""
39
+ reader.option("inferSchema", True)
40
+ assert reader.state_options == {"inferSchema": True}
41
+
42
+
43
+ def test_options_and_option_methods_together(reader):
44
+ """Test that option and options methods can be used together."""
45
+ reader.options(inferSchema=True, header=True)
46
+ reader.option("delimiter", ",")
47
+ assert reader.state_options == {"inferSchema": True, "header": True, "delimiter": ","}
48
+
49
+
50
+ def test_options_override(reader):
51
+ """Test that options override previous values with the same key."""
52
+ reader.options(inferSchema=True, header=True)
53
+ reader.options(inferSchema=False, nullValue="NULL")
54
+ assert reader.state_options == {"inferSchema": False, "header": True, "nullValue": "NULL"}
55
+
56
+
57
+ def test_option_override(reader):
58
+ """Test that option overrides previous values with the same key."""
59
+ reader.option("inferSchema", True)
60
+ reader.option("inferSchema", False)
61
+ assert reader.state_options == {"inferSchema": False}
62
+
63
+
64
+ @patch("sqlframe.spark.readwriter.SparkDataFrameReader.load")
65
+ def test_json_uses_options(mock_load, reader):
66
+ """Test that the json method uses the stored options."""
67
+ reader.options(inferSchema=True, header=True)
68
+ reader.json("test.json")
69
+
70
+ # Get the actual call arguments from the mock
71
+ # The call_args is a tuple (args, kwargs)
72
+ call_args = mock_load.call_args
73
+
74
+ # Check that the method was called with the correct options
75
+ assert call_args.kwargs["inferSchema"] is True
76
+ assert call_args.kwargs["header"] is True
77
+ assert call_args.kwargs["format"] == "json"
78
+ assert call_args.kwargs["path"] == "test.json"
79
+
80
+
81
+ @patch("sqlframe.spark.readwriter.SparkDataFrameReader.load")
82
+ def test_csv_uses_options(mock_load, reader):
83
+ """Test that the csv method uses the stored options."""
84
+ reader.options(inferSchema=True, header=True)
85
+ reader.csv("test.csv", comment="TEST_COMMENT")
86
+
87
+ # Get the actual call arguments from the mock
88
+ call_args = mock_load.call_args
89
+
90
+ # Check that the method was called with the correct options
91
+ assert call_args.kwargs["inferSchema"] is True
92
+ assert call_args.kwargs["header"] is True
93
+ assert call_args.kwargs["comment"] == "TEST_COMMENT"
94
+ assert call_args.kwargs["format"] == "csv"
95
+ assert call_args.kwargs["path"] == "test.csv"
96
+
97
+
98
+ @patch("sqlframe.spark.readwriter.SparkDataFrameReader.load")
99
+ def test_parquet_uses_options(mock_load, reader):
100
+ """Test that the parquet method uses the stored options."""
101
+ reader.options(inferSchema=True)
102
+ reader.parquet("test.parquet")
103
+
104
+ # Get the actual call arguments from the mock
105
+ call_args = mock_load.call_args
106
+
107
+ # Check that the method was called with the correct options
108
+ assert call_args.kwargs["inferSchema"] is True
109
+ assert call_args.kwargs["format"] == "parquet"
110
+ assert call_args.kwargs["path"] == "test.parquet"
111
+
112
+
113
+ @patch("sqlframe.spark.readwriter.SparkDataFrameReader.load")
114
+ def test_method_specific_options_override_global_options(mock_load, reader):
115
+ """Test that method-specific options override global options."""
116
+ reader.options(inferSchema=True, header=True)
117
+ reader.csv("test.csv", inferSchema=False, sep="|")
118
+
119
+ # Get the actual call arguments from the mock
120
+ call_args = mock_load.call_args
121
+
122
+ # Check that the method was called with the correct options
123
+ # Method-specific options should take precedence
124
+ assert call_args.kwargs["inferSchema"] is False
125
+ assert call_args.kwargs["header"] is True
126
+ assert call_args.kwargs["sep"] == "|"
127
+ assert call_args.kwargs["format"] == "csv"
128
+ assert call_args.kwargs["path"] == "test.csv"
129
+
130
+
131
+ def test_load_merges_options(reader):
132
+ """Test that the load method merges options from state_options."""
133
+ reader.options(inferSchema=True, header=True)
134
+
135
+ # Need to patch at a different level since we're testing the actual load method
136
+ with patch.object(reader.session, "sql") as mock_sql:
137
+ mock_sql.return_value = MagicMock()
138
+ mock_sql.return_value.schema = None
139
+
140
+ reader.load("test.csv", format="csv", nullValue="NULL")
141
+
142
+ # Get the actual SQL that would be executed
143
+ # The SQL should include the options from both state_options and method arguments
144
+ mock_session_sql_call = reader.session.spark_session.sql.call_args[0][0]
145
+
146
+ # Check that all options are included in the SQL
147
+ assert "inferSchema" in mock_session_sql_call
148
+ assert "header" in mock_session_sql_call
149
+ assert "nullValue" in mock_session_sql_call
150
+ assert "path" in mock_session_sql_call
151
+ assert "USING csv" in mock_session_sql_call
152
+ assert "test.csv" in mock_session_sql_call