snowpark-connect 0.20.2 (py3-none-any.whl)

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.

This version of snowpark-connect has been flagged as a potentially problematic release.

Files changed (879)
  1. snowflake/snowpark_connect/__init__.py +23 -0
  2. snowflake/snowpark_connect/analyze_plan/__init__.py +3 -0
  3. snowflake/snowpark_connect/analyze_plan/map_tree_string.py +38 -0
  4. snowflake/snowpark_connect/column_name_handler.py +735 -0
  5. snowflake/snowpark_connect/config.py +576 -0
  6. snowflake/snowpark_connect/constants.py +47 -0
  7. snowflake/snowpark_connect/control_server.py +52 -0
  8. snowflake/snowpark_connect/dataframe_name_handler.py +54 -0
  9. snowflake/snowpark_connect/date_time_format_mapping.py +399 -0
  10. snowflake/snowpark_connect/empty_dataframe.py +18 -0
  11. snowflake/snowpark_connect/error/__init__.py +11 -0
  12. snowflake/snowpark_connect/error/error_mapping.py +6174 -0
  13. snowflake/snowpark_connect/error/error_utils.py +321 -0
  14. snowflake/snowpark_connect/error/exceptions.py +24 -0
  15. snowflake/snowpark_connect/execute_plan/__init__.py +3 -0
  16. snowflake/snowpark_connect/execute_plan/map_execution_command.py +204 -0
  17. snowflake/snowpark_connect/execute_plan/map_execution_root.py +173 -0
  18. snowflake/snowpark_connect/execute_plan/utils.py +183 -0
  19. snowflake/snowpark_connect/expression/__init__.py +3 -0
  20. snowflake/snowpark_connect/expression/literal.py +90 -0
  21. snowflake/snowpark_connect/expression/map_cast.py +343 -0
  22. snowflake/snowpark_connect/expression/map_expression.py +293 -0
  23. snowflake/snowpark_connect/expression/map_extension.py +104 -0
  24. snowflake/snowpark_connect/expression/map_sql_expression.py +633 -0
  25. snowflake/snowpark_connect/expression/map_udf.py +142 -0
  26. snowflake/snowpark_connect/expression/map_unresolved_attribute.py +241 -0
  27. snowflake/snowpark_connect/expression/map_unresolved_extract_value.py +85 -0
  28. snowflake/snowpark_connect/expression/map_unresolved_function.py +9450 -0
  29. snowflake/snowpark_connect/expression/map_unresolved_star.py +218 -0
  30. snowflake/snowpark_connect/expression/map_update_fields.py +164 -0
  31. snowflake/snowpark_connect/expression/map_window_function.py +258 -0
  32. snowflake/snowpark_connect/expression/typer.py +125 -0
  33. snowflake/snowpark_connect/includes/__init__.py +0 -0
  34. snowflake/snowpark_connect/includes/jars/antlr4-runtime-4.9.3.jar +0 -0
  35. snowflake/snowpark_connect/includes/jars/commons-cli-1.5.0.jar +0 -0
  36. snowflake/snowpark_connect/includes/jars/commons-codec-1.16.1.jar +0 -0
  37. snowflake/snowpark_connect/includes/jars/commons-collections-3.2.2.jar +0 -0
  38. snowflake/snowpark_connect/includes/jars/commons-collections4-4.4.jar +0 -0
  39. snowflake/snowpark_connect/includes/jars/commons-compiler-3.1.9.jar +0 -0
  40. snowflake/snowpark_connect/includes/jars/commons-compress-1.26.0.jar +0 -0
  41. snowflake/snowpark_connect/includes/jars/commons-crypto-1.1.0.jar +0 -0
  42. snowflake/snowpark_connect/includes/jars/commons-dbcp-1.4.jar +0 -0
  43. snowflake/snowpark_connect/includes/jars/commons-io-2.16.1.jar +0 -0
  44. snowflake/snowpark_connect/includes/jars/commons-lang-2.6.jar +0 -0
  45. snowflake/snowpark_connect/includes/jars/commons-lang3-3.12.0.jar +0 -0
  46. snowflake/snowpark_connect/includes/jars/commons-logging-1.1.3.jar +0 -0
  47. snowflake/snowpark_connect/includes/jars/commons-math3-3.6.1.jar +0 -0
  48. snowflake/snowpark_connect/includes/jars/commons-pool-1.5.4.jar +0 -0
  49. snowflake/snowpark_connect/includes/jars/commons-text-1.10.0.jar +0 -0
  50. snowflake/snowpark_connect/includes/jars/hadoop-client-api-3.3.4.jar +0 -0
  51. snowflake/snowpark_connect/includes/jars/jackson-annotations-2.15.2.jar +0 -0
  52. snowflake/snowpark_connect/includes/jars/jackson-core-2.15.2.jar +0 -0
  53. snowflake/snowpark_connect/includes/jars/jackson-core-asl-1.9.13.jar +0 -0
  54. snowflake/snowpark_connect/includes/jars/jackson-databind-2.15.2.jar +0 -0
  55. snowflake/snowpark_connect/includes/jars/jackson-dataformat-yaml-2.15.2.jar +0 -0
  56. snowflake/snowpark_connect/includes/jars/jackson-datatype-jsr310-2.15.2.jar +0 -0
  57. snowflake/snowpark_connect/includes/jars/jackson-mapper-asl-1.9.13.jar +0 -0
  58. snowflake/snowpark_connect/includes/jars/jackson-module-scala_2.12-2.15.2.jar +0 -0
  59. snowflake/snowpark_connect/includes/jars/json4s-ast_2.12-3.7.0-M11.jar +0 -0
  60. snowflake/snowpark_connect/includes/jars/json4s-core_2.12-3.7.0-M11.jar +0 -0
  61. snowflake/snowpark_connect/includes/jars/json4s-jackson_2.12-3.7.0-M11.jar +0 -0
  62. snowflake/snowpark_connect/includes/jars/json4s-scalap_2.12-3.7.0-M11.jar +0 -0
  63. snowflake/snowpark_connect/includes/jars/kryo-shaded-4.0.2.jar +0 -0
  64. snowflake/snowpark_connect/includes/jars/log4j-1.2-api-2.20.0.jar +0 -0
  65. snowflake/snowpark_connect/includes/jars/log4j-api-2.20.0.jar +0 -0
  66. snowflake/snowpark_connect/includes/jars/log4j-core-2.20.0.jar +0 -0
  67. snowflake/snowpark_connect/includes/jars/log4j-slf4j2-impl-2.20.0.jar +0 -0
  68. snowflake/snowpark_connect/includes/jars/paranamer-2.8.jar +0 -0
  69. snowflake/snowpark_connect/includes/jars/scala-collection-compat_2.12-2.7.0.jar +0 -0
  70. snowflake/snowpark_connect/includes/jars/scala-compiler-2.12.18.jar +0 -0
  71. snowflake/snowpark_connect/includes/jars/scala-library-2.12.18.jar +0 -0
  72. snowflake/snowpark_connect/includes/jars/scala-parser-combinators_2.12-2.3.0.jar +0 -0
  73. snowflake/snowpark_connect/includes/jars/scala-reflect-2.12.18.jar +0 -0
  74. snowflake/snowpark_connect/includes/jars/scala-xml_2.12-2.1.0.jar +0 -0
  75. snowflake/snowpark_connect/includes/jars/slf4j-api-2.0.7.jar +0 -0
  76. snowflake/snowpark_connect/includes/jars/spark-catalyst_2.12-3.5.6.jar +0 -0
  77. snowflake/snowpark_connect/includes/jars/spark-common-utils_2.12-3.5.6.jar +0 -0
  78. snowflake/snowpark_connect/includes/jars/spark-core_2.12-3.5.6.jar +0 -0
  79. snowflake/snowpark_connect/includes/jars/spark-graphx_2.12-3.5.6.jar +0 -0
  80. snowflake/snowpark_connect/includes/jars/spark-hive-thriftserver_2.12-3.5.6.jar +0 -0
  81. snowflake/snowpark_connect/includes/jars/spark-hive_2.12-3.5.6.jar +0 -0
  82. snowflake/snowpark_connect/includes/jars/spark-kubernetes_2.12-3.5.6.jar +0 -0
  83. snowflake/snowpark_connect/includes/jars/spark-kvstore_2.12-3.5.6.jar +0 -0
  84. snowflake/snowpark_connect/includes/jars/spark-launcher_2.12-3.5.6.jar +0 -0
  85. snowflake/snowpark_connect/includes/jars/spark-mesos_2.12-3.5.6.jar +0 -0
  86. snowflake/snowpark_connect/includes/jars/spark-mllib-local_2.12-3.5.6.jar +0 -0
  87. snowflake/snowpark_connect/includes/jars/spark-mllib_2.12-3.5.6.jar +0 -0
  88. snowflake/snowpark_connect/includes/jars/spark-network-common_2.12-3.5.6.jar +0 -0
  89. snowflake/snowpark_connect/includes/jars/spark-network-shuffle_2.12-3.5.6.jar +0 -0
  90. snowflake/snowpark_connect/includes/jars/spark-repl_2.12-3.5.6.jar +0 -0
  91. snowflake/snowpark_connect/includes/jars/spark-sketch_2.12-3.5.6.jar +0 -0
  92. snowflake/snowpark_connect/includes/jars/spark-sql-api_2.12-3.5.6.jar +0 -0
  93. snowflake/snowpark_connect/includes/jars/spark-sql_2.12-3.5.6.jar +0 -0
  94. snowflake/snowpark_connect/includes/jars/spark-streaming_2.12-3.5.6.jar +0 -0
  95. snowflake/snowpark_connect/includes/jars/spark-tags_2.12-3.5.6.jar +0 -0
  96. snowflake/snowpark_connect/includes/jars/spark-unsafe_2.12-3.5.6.jar +0 -0
  97. snowflake/snowpark_connect/includes/jars/spark-yarn_2.12-3.5.6.jar +0 -0
  98. snowflake/snowpark_connect/includes/python/__init__.py +21 -0
  99. snowflake/snowpark_connect/includes/python/pyspark/__init__.py +173 -0
  100. snowflake/snowpark_connect/includes/python/pyspark/_globals.py +71 -0
  101. snowflake/snowpark_connect/includes/python/pyspark/_typing.pyi +43 -0
  102. snowflake/snowpark_connect/includes/python/pyspark/accumulators.py +341 -0
  103. snowflake/snowpark_connect/includes/python/pyspark/broadcast.py +383 -0
  104. snowflake/snowpark_connect/includes/python/pyspark/cloudpickle/__init__.py +8 -0
  105. snowflake/snowpark_connect/includes/python/pyspark/cloudpickle/cloudpickle.py +948 -0
  106. snowflake/snowpark_connect/includes/python/pyspark/cloudpickle/cloudpickle_fast.py +844 -0
  107. snowflake/snowpark_connect/includes/python/pyspark/cloudpickle/compat.py +18 -0
  108. snowflake/snowpark_connect/includes/python/pyspark/conf.py +276 -0
  109. snowflake/snowpark_connect/includes/python/pyspark/context.py +2601 -0
  110. snowflake/snowpark_connect/includes/python/pyspark/daemon.py +218 -0
  111. snowflake/snowpark_connect/includes/python/pyspark/errors/__init__.py +70 -0
  112. snowflake/snowpark_connect/includes/python/pyspark/errors/error_classes.py +889 -0
  113. snowflake/snowpark_connect/includes/python/pyspark/errors/exceptions/__init__.py +16 -0
  114. snowflake/snowpark_connect/includes/python/pyspark/errors/exceptions/base.py +228 -0
  115. snowflake/snowpark_connect/includes/python/pyspark/errors/exceptions/captured.py +307 -0
  116. snowflake/snowpark_connect/includes/python/pyspark/errors/exceptions/connect.py +190 -0
  117. snowflake/snowpark_connect/includes/python/pyspark/errors/tests/__init__.py +16 -0
  118. snowflake/snowpark_connect/includes/python/pyspark/errors/tests/test_errors.py +60 -0
  119. snowflake/snowpark_connect/includes/python/pyspark/errors/utils.py +116 -0
  120. snowflake/snowpark_connect/includes/python/pyspark/files.py +165 -0
  121. snowflake/snowpark_connect/includes/python/pyspark/find_spark_home.py +95 -0
  122. snowflake/snowpark_connect/includes/python/pyspark/install.py +203 -0
  123. snowflake/snowpark_connect/includes/python/pyspark/instrumentation_utils.py +190 -0
  124. snowflake/snowpark_connect/includes/python/pyspark/java_gateway.py +248 -0
  125. snowflake/snowpark_connect/includes/python/pyspark/join.py +118 -0
  126. snowflake/snowpark_connect/includes/python/pyspark/ml/__init__.py +71 -0
  127. snowflake/snowpark_connect/includes/python/pyspark/ml/_typing.pyi +84 -0
  128. snowflake/snowpark_connect/includes/python/pyspark/ml/base.py +414 -0
  129. snowflake/snowpark_connect/includes/python/pyspark/ml/classification.py +4332 -0
  130. snowflake/snowpark_connect/includes/python/pyspark/ml/clustering.py +2188 -0
  131. snowflake/snowpark_connect/includes/python/pyspark/ml/common.py +146 -0
  132. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/__init__.py +44 -0
  133. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/base.py +346 -0
  134. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/classification.py +382 -0
  135. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/evaluation.py +291 -0
  136. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/feature.py +258 -0
  137. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/functions.py +77 -0
  138. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/io_utils.py +335 -0
  139. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/pipeline.py +262 -0
  140. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/summarizer.py +120 -0
  141. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/tuning.py +579 -0
  142. snowflake/snowpark_connect/includes/python/pyspark/ml/connect/util.py +173 -0
  143. snowflake/snowpark_connect/includes/python/pyspark/ml/deepspeed/__init__.py +16 -0
  144. snowflake/snowpark_connect/includes/python/pyspark/ml/deepspeed/deepspeed_distributor.py +165 -0
  145. snowflake/snowpark_connect/includes/python/pyspark/ml/deepspeed/tests/test_deepspeed_distributor.py +306 -0
  146. snowflake/snowpark_connect/includes/python/pyspark/ml/dl_util.py +150 -0
  147. snowflake/snowpark_connect/includes/python/pyspark/ml/evaluation.py +1166 -0
  148. snowflake/snowpark_connect/includes/python/pyspark/ml/feature.py +7474 -0
  149. snowflake/snowpark_connect/includes/python/pyspark/ml/fpm.py +543 -0
  150. snowflake/snowpark_connect/includes/python/pyspark/ml/functions.py +842 -0
  151. snowflake/snowpark_connect/includes/python/pyspark/ml/image.py +271 -0
  152. snowflake/snowpark_connect/includes/python/pyspark/ml/linalg/__init__.py +1382 -0
  153. snowflake/snowpark_connect/includes/python/pyspark/ml/model_cache.py +55 -0
  154. snowflake/snowpark_connect/includes/python/pyspark/ml/param/__init__.py +602 -0
  155. snowflake/snowpark_connect/includes/python/pyspark/ml/param/_shared_params_code_gen.py +368 -0
  156. snowflake/snowpark_connect/includes/python/pyspark/ml/param/shared.py +878 -0
  157. snowflake/snowpark_connect/includes/python/pyspark/ml/pipeline.py +451 -0
  158. snowflake/snowpark_connect/includes/python/pyspark/ml/recommendation.py +748 -0
  159. snowflake/snowpark_connect/includes/python/pyspark/ml/regression.py +3335 -0
  160. snowflake/snowpark_connect/includes/python/pyspark/ml/stat.py +523 -0
  161. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/__init__.py +16 -0
  162. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_classification.py +53 -0
  163. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_evaluation.py +50 -0
  164. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_feature.py +43 -0
  165. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_function.py +114 -0
  166. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_pipeline.py +47 -0
  167. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_summarizer.py +43 -0
  168. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_tuning.py +46 -0
  169. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py +238 -0
  170. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_evaluation.py +194 -0
  171. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_feature.py +156 -0
  172. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_pipeline.py +184 -0
  173. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_summarizer.py +78 -0
  174. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_tuning.py +292 -0
  175. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_parity_torch_data_loader.py +50 -0
  176. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_parity_torch_distributor.py +152 -0
  177. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_algorithms.py +456 -0
  178. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_base.py +96 -0
  179. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_dl_util.py +186 -0
  180. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_evaluation.py +77 -0
  181. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_feature.py +401 -0
  182. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_functions.py +528 -0
  183. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_image.py +82 -0
  184. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_linalg.py +409 -0
  185. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_model_cache.py +55 -0
  186. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_param.py +441 -0
  187. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_persistence.py +546 -0
  188. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_pipeline.py +71 -0
  189. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_stat.py +52 -0
  190. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_training_summary.py +494 -0
  191. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_util.py +85 -0
  192. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_wrapper.py +138 -0
  193. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/__init__.py +16 -0
  194. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_cv_io_basic.py +151 -0
  195. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_cv_io_nested.py +97 -0
  196. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_cv_io_pipeline.py +143 -0
  197. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_tuning.py +551 -0
  198. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_tvs_io_basic.py +137 -0
  199. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_tvs_io_nested.py +96 -0
  200. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_tvs_io_pipeline.py +142 -0
  201. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/__init__.py +16 -0
  202. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/data.py +100 -0
  203. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/distributor.py +1133 -0
  204. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/log_communication.py +198 -0
  205. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/tests/__init__.py +16 -0
  206. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/tests/test_data_loader.py +137 -0
  207. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/tests/test_distributor.py +561 -0
  208. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/tests/test_log_communication.py +172 -0
  209. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/torch_run_process_wrapper.py +83 -0
  210. snowflake/snowpark_connect/includes/python/pyspark/ml/tree.py +434 -0
  211. snowflake/snowpark_connect/includes/python/pyspark/ml/tuning.py +1741 -0
  212. snowflake/snowpark_connect/includes/python/pyspark/ml/util.py +749 -0
  213. snowflake/snowpark_connect/includes/python/pyspark/ml/wrapper.py +465 -0
  214. snowflake/snowpark_connect/includes/python/pyspark/mllib/__init__.py +44 -0
  215. snowflake/snowpark_connect/includes/python/pyspark/mllib/_typing.pyi +33 -0
  216. snowflake/snowpark_connect/includes/python/pyspark/mllib/classification.py +989 -0
  217. snowflake/snowpark_connect/includes/python/pyspark/mllib/clustering.py +1318 -0
  218. snowflake/snowpark_connect/includes/python/pyspark/mllib/common.py +174 -0
  219. snowflake/snowpark_connect/includes/python/pyspark/mllib/evaluation.py +691 -0
  220. snowflake/snowpark_connect/includes/python/pyspark/mllib/feature.py +1085 -0
  221. snowflake/snowpark_connect/includes/python/pyspark/mllib/fpm.py +233 -0
  222. snowflake/snowpark_connect/includes/python/pyspark/mllib/linalg/__init__.py +1653 -0
  223. snowflake/snowpark_connect/includes/python/pyspark/mllib/linalg/distributed.py +1662 -0
  224. snowflake/snowpark_connect/includes/python/pyspark/mllib/random.py +698 -0
  225. snowflake/snowpark_connect/includes/python/pyspark/mllib/recommendation.py +389 -0
  226. snowflake/snowpark_connect/includes/python/pyspark/mllib/regression.py +1067 -0
  227. snowflake/snowpark_connect/includes/python/pyspark/mllib/stat/KernelDensity.py +59 -0
  228. snowflake/snowpark_connect/includes/python/pyspark/mllib/stat/__init__.py +34 -0
  229. snowflake/snowpark_connect/includes/python/pyspark/mllib/stat/_statistics.py +409 -0
  230. snowflake/snowpark_connect/includes/python/pyspark/mllib/stat/distribution.py +39 -0
  231. snowflake/snowpark_connect/includes/python/pyspark/mllib/stat/test.py +86 -0
  232. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/__init__.py +16 -0
  233. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_algorithms.py +353 -0
  234. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_feature.py +192 -0
  235. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_linalg.py +680 -0
  236. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_stat.py +206 -0
  237. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_streaming_algorithms.py +471 -0
  238. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_util.py +108 -0
  239. snowflake/snowpark_connect/includes/python/pyspark/mllib/tree.py +888 -0
  240. snowflake/snowpark_connect/includes/python/pyspark/mllib/util.py +659 -0
  241. snowflake/snowpark_connect/includes/python/pyspark/pandas/__init__.py +165 -0
  242. snowflake/snowpark_connect/includes/python/pyspark/pandas/_typing.py +52 -0
  243. snowflake/snowpark_connect/includes/python/pyspark/pandas/accessors.py +989 -0
  244. snowflake/snowpark_connect/includes/python/pyspark/pandas/base.py +1804 -0
  245. snowflake/snowpark_connect/includes/python/pyspark/pandas/categorical.py +822 -0
  246. snowflake/snowpark_connect/includes/python/pyspark/pandas/config.py +539 -0
  247. snowflake/snowpark_connect/includes/python/pyspark/pandas/correlation.py +262 -0
  248. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/__init__.py +16 -0
  249. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/base.py +519 -0
  250. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/binary_ops.py +98 -0
  251. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/boolean_ops.py +426 -0
  252. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/categorical_ops.py +141 -0
  253. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/complex_ops.py +145 -0
  254. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/date_ops.py +127 -0
  255. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/datetime_ops.py +171 -0
  256. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/null_ops.py +83 -0
  257. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/num_ops.py +588 -0
  258. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/string_ops.py +154 -0
  259. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/timedelta_ops.py +101 -0
  260. snowflake/snowpark_connect/includes/python/pyspark/pandas/data_type_ops/udt_ops.py +29 -0
  261. snowflake/snowpark_connect/includes/python/pyspark/pandas/datetimes.py +891 -0
  262. snowflake/snowpark_connect/includes/python/pyspark/pandas/exceptions.py +150 -0
  263. snowflake/snowpark_connect/includes/python/pyspark/pandas/extensions.py +388 -0
  264. snowflake/snowpark_connect/includes/python/pyspark/pandas/frame.py +13738 -0
  265. snowflake/snowpark_connect/includes/python/pyspark/pandas/generic.py +3560 -0
  266. snowflake/snowpark_connect/includes/python/pyspark/pandas/groupby.py +4448 -0
  267. snowflake/snowpark_connect/includes/python/pyspark/pandas/indexes/__init__.py +21 -0
  268. snowflake/snowpark_connect/includes/python/pyspark/pandas/indexes/base.py +2783 -0
  269. snowflake/snowpark_connect/includes/python/pyspark/pandas/indexes/category.py +773 -0
  270. snowflake/snowpark_connect/includes/python/pyspark/pandas/indexes/datetimes.py +843 -0
  271. snowflake/snowpark_connect/includes/python/pyspark/pandas/indexes/multi.py +1323 -0
  272. snowflake/snowpark_connect/includes/python/pyspark/pandas/indexes/numeric.py +210 -0
  273. snowflake/snowpark_connect/includes/python/pyspark/pandas/indexes/timedelta.py +197 -0
  274. snowflake/snowpark_connect/includes/python/pyspark/pandas/indexing.py +1862 -0
  275. snowflake/snowpark_connect/includes/python/pyspark/pandas/internal.py +1680 -0
  276. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/__init__.py +48 -0
  277. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/common.py +76 -0
  278. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/frame.py +63 -0
  279. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/general_functions.py +43 -0
  280. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/groupby.py +93 -0
  281. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/indexes.py +184 -0
  282. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/resample.py +101 -0
  283. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/scalars.py +29 -0
  284. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/series.py +69 -0
  285. snowflake/snowpark_connect/includes/python/pyspark/pandas/missing/window.py +168 -0
  286. snowflake/snowpark_connect/includes/python/pyspark/pandas/mlflow.py +238 -0
  287. snowflake/snowpark_connect/includes/python/pyspark/pandas/namespace.py +3807 -0
  288. snowflake/snowpark_connect/includes/python/pyspark/pandas/numpy_compat.py +260 -0
  289. snowflake/snowpark_connect/includes/python/pyspark/pandas/plot/__init__.py +17 -0
  290. snowflake/snowpark_connect/includes/python/pyspark/pandas/plot/core.py +1213 -0
  291. snowflake/snowpark_connect/includes/python/pyspark/pandas/plot/matplotlib.py +928 -0
  292. snowflake/snowpark_connect/includes/python/pyspark/pandas/plot/plotly.py +261 -0
  293. snowflake/snowpark_connect/includes/python/pyspark/pandas/resample.py +816 -0
  294. snowflake/snowpark_connect/includes/python/pyspark/pandas/series.py +7440 -0
  295. snowflake/snowpark_connect/includes/python/pyspark/pandas/sql_formatter.py +308 -0
  296. snowflake/snowpark_connect/includes/python/pyspark/pandas/sql_processor.py +394 -0
  297. snowflake/snowpark_connect/includes/python/pyspark/pandas/strings.py +2371 -0
  298. snowflake/snowpark_connect/includes/python/pyspark/pandas/supported_api_gen.py +378 -0
  299. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/__init__.py +16 -0
  300. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/__init__.py +16 -0
  301. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_any_all.py +177 -0
  302. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_apply_func.py +575 -0
  303. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_binary_ops.py +235 -0
  304. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_combine.py +653 -0
  305. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_compute.py +463 -0
  306. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_corrwith.py +86 -0
  307. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_cov.py +151 -0
  308. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_cumulative.py +139 -0
  309. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_describe.py +458 -0
  310. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_eval.py +86 -0
  311. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_melt.py +202 -0
  312. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_missing_data.py +520 -0
  313. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_pivot.py +361 -0
  314. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/__init__.py +16 -0
  315. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/__init__.py +16 -0
  316. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py +40 -0
  317. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_apply_func.py +42 -0
  318. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_binary_ops.py +40 -0
  319. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_combine.py +37 -0
  320. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_compute.py +60 -0
  321. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_corrwith.py +40 -0
  322. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_cov.py +40 -0
  323. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_cumulative.py +90 -0
  324. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_describe.py +40 -0
  325. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_eval.py +40 -0
  326. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_melt.py +40 -0
  327. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py +42 -0
  328. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_pivot.py +37 -0
  329. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/__init__.py +16 -0
  330. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_base.py +36 -0
  331. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_binary_ops.py +42 -0
  332. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_boolean_ops.py +47 -0
  333. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_categorical_ops.py +55 -0
  334. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_complex_ops.py +40 -0
  335. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_date_ops.py +47 -0
  336. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_datetime_ops.py +47 -0
  337. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_null_ops.py +42 -0
  338. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py +43 -0
  339. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_ops.py +47 -0
  340. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_reverse.py +43 -0
  341. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_string_ops.py +47 -0
  342. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_timedelta_ops.py +47 -0
  343. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_udt_ops.py +40 -0
  344. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/testing_utils.py +226 -0
  345. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/__init__.py +16 -0
  346. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_align.py +39 -0
  347. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_basic_slow.py +55 -0
  348. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_cov_corrwith.py +39 -0
  349. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_dot_frame.py +39 -0
  350. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_dot_series.py +39 -0
  351. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_index.py +39 -0
  352. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_series.py +39 -0
  353. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_setitem_frame.py +43 -0
  354. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_setitem_series.py +43 -0
  355. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/__init__.py +16 -0
  356. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_attrs.py +40 -0
  357. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_constructor.py +39 -0
  358. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_conversion.py +42 -0
  359. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_reindexing.py +42 -0
  360. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_reshaping.py +37 -0
  361. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_spark.py +40 -0
  362. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_take.py +42 -0
  363. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_time_series.py +48 -0
  364. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_truncate.py +40 -0
  365. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/__init__.py +16 -0
  366. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_aggregate.py +40 -0
  367. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_apply_func.py +41 -0
  368. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_cumulative.py +67 -0
  369. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_describe.py +40 -0
  370. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_groupby.py +55 -0
  371. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_head_tail.py +40 -0
  372. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_index.py +38 -0
  373. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_missing_data.py +55 -0
  374. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_split_apply.py +39 -0
  375. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_stat.py +38 -0
  376. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/__init__.py +16 -0
  377. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_align.py +40 -0
  378. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py +50 -0
  379. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_category.py +73 -0
  380. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py +39 -0
  381. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_indexing.py +40 -0
  382. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_reindex.py +40 -0
  383. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_rename.py +40 -0
  384. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_reset_index.py +48 -0
  385. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_timedelta.py +39 -0
  386. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/io/__init__.py +16 -0
  387. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/io/test_parity_io.py +40 -0
  388. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/__init__.py +16 -0
  389. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_frame_plot.py +45 -0
  390. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_frame_plot_matplotlib.py +45 -0
  391. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_frame_plot_plotly.py +49 -0
  392. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_series_plot.py +37 -0
  393. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_series_plot_matplotlib.py +53 -0
  394. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_series_plot_plotly.py +45 -0
  395. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/__init__.py +16 -0
  396. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_all_any.py +38 -0
  397. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_arg_ops.py +37 -0
  398. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_as_of.py +37 -0
  399. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_as_type.py +38 -0
  400. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_compute.py +37 -0
  401. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_conversion.py +40 -0
  402. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py +40 -0
  403. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_index.py +38 -0
  404. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py +40 -0
  405. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_series.py +37 -0
  406. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_sort.py +38 -0
  407. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_stat.py +38 -0
  408. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_categorical.py +66 -0
  409. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_config.py +37 -0
  410. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_csv.py +37 -0
  411. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_dataframe_conversion.py +42 -0
  412. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_dataframe_spark_io.py +39 -0
  413. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_default_index.py +49 -0
  414. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ewm.py +37 -0
  415. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_expanding.py +39 -0
  416. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_extension.py +49 -0
  417. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_frame_spark.py +53 -0
  418. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_generic_functions.py +43 -0
  419. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_indexing.py +49 -0
  420. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_indexops_spark.py +39 -0
  421. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_internal.py +41 -0
  422. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_namespace.py +39 -0
  423. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_numpy_compat.py +60 -0
  424. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames.py +48 -0
  425. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py +39 -0
  426. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py +44 -0
  427. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_rolling.py +84 -0
  428. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_repr.py +37 -0
  429. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_resample.py +45 -0
  430. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_reshape.py +39 -0
  431. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_rolling.py +39 -0
  432. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_scalars.py +37 -0
  433. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_series_conversion.py +39 -0
  434. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_series_datetime.py +39 -0
  435. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_series_string.py +39 -0
  436. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_spark_functions.py +39 -0
  437. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_sql.py +43 -0
  438. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_stats.py +37 -0
  439. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_typedef.py +36 -0
  440. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_utils.py +37 -0
  441. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_window.py +39 -0
  442. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/__init__.py +16 -0
  443. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_base.py +107 -0
  444. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_binary_ops.py +224 -0
  445. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_boolean_ops.py +825 -0
  446. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_categorical_ops.py +562 -0
  447. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_complex_ops.py +368 -0
  448. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_date_ops.py +257 -0
  449. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_datetime_ops.py +260 -0
  450. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_null_ops.py +178 -0
  451. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py +184 -0
  452. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_num_ops.py +497 -0
  453. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_num_reverse.py +140 -0
  454. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_string_ops.py +354 -0
  455. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_timedelta_ops.py +219 -0
  456. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_udt_ops.py +192 -0
  457. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/testing_utils.py +228 -0
  458. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/__init__.py +16 -0
  459. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_align.py +118 -0
  460. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_basic_slow.py +198 -0
  461. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_cov_corrwith.py +181 -0
  462. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_dot_frame.py +103 -0
  463. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_dot_series.py +141 -0
  464. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_index.py +109 -0
  465. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_series.py +136 -0
  466. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_setitem_frame.py +125 -0
  467. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_setitem_series.py +217 -0
  468. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/__init__.py +16 -0
  469. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_attrs.py +384 -0
  470. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_constructor.py +598 -0
  471. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_conversion.py +73 -0
  472. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_reindexing.py +869 -0
  473. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_reshaping.py +487 -0
  474. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_spark.py +309 -0
  475. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_take.py +156 -0
  476. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_time_series.py +149 -0
  477. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_truncate.py +163 -0
  478. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/__init__.py +16 -0
  479. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_aggregate.py +311 -0
  480. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_apply_func.py +524 -0
  481. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_cumulative.py +419 -0
  482. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_describe.py +144 -0
  483. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_groupby.py +979 -0
  484. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_head_tail.py +234 -0
  485. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_index.py +206 -0
  486. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_missing_data.py +421 -0
  487. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_split_apply.py +187 -0
  488. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_stat.py +397 -0
  489. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/__init__.py +16 -0
  490. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_align.py +100 -0
  491. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_base.py +2743 -0
  492. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_category.py +484 -0
  493. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_datetime.py +276 -0
  494. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_indexing.py +432 -0
  495. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_reindex.py +310 -0
  496. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_rename.py +257 -0
  497. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_reset_index.py +160 -0
  498. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_timedelta.py +128 -0
  499. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/io/__init__.py +16 -0
  500. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/io/test_io.py +137 -0
  501. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/__init__.py +16 -0
  502. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_frame_plot.py +170 -0
  503. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_frame_plot_matplotlib.py +547 -0
  504. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_frame_plot_plotly.py +285 -0
  505. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_series_plot.py +106 -0
  506. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_series_plot_matplotlib.py +409 -0
  507. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_series_plot_plotly.py +247 -0
  508. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/__init__.py +16 -0
  509. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_all_any.py +105 -0
  510. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_arg_ops.py +197 -0
  511. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_as_of.py +137 -0
  512. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_as_type.py +227 -0
  513. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_compute.py +634 -0
  514. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_conversion.py +88 -0
  515. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_cumulative.py +139 -0
  516. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_index.py +475 -0
  517. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_missing_data.py +265 -0
  518. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_series.py +818 -0
  519. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_sort.py +162 -0
  520. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_stat.py +780 -0
  521. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_categorical.py +741 -0
  522. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_config.py +160 -0
  523. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_csv.py +453 -0
  524. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_dataframe_conversion.py +281 -0
  525. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_dataframe_spark_io.py +487 -0
  526. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_default_index.py +109 -0
  527. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ewm.py +434 -0
  528. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_expanding.py +253 -0
  529. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_extension.py +152 -0
  530. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_frame_spark.py +162 -0
  531. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_generic_functions.py +234 -0
  532. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_indexing.py +1339 -0
  533. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_indexops_spark.py +82 -0
  534. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_internal.py +124 -0
  535. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_namespace.py +638 -0
  536. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_numpy_compat.py +200 -0
  537. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ops_on_diff_frames.py +1355 -0
  538. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby.py +655 -0
  539. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_expanding.py +113 -0
  540. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_rolling.py +118 -0
  541. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_repr.py +192 -0
  542. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_resample.py +346 -0
  543. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_reshape.py +495 -0
  544. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_rolling.py +263 -0
  545. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_scalars.py +59 -0
  546. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_series_conversion.py +85 -0
  547. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_series_datetime.py +364 -0
  548. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_series_string.py +362 -0
  549. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_spark_functions.py +46 -0
  550. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_sql.py +123 -0
  551. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_stats.py +581 -0
  552. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_typedef.py +447 -0
  553. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_utils.py +301 -0
  554. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_window.py +465 -0
  555. snowflake/snowpark_connect/includes/python/pyspark/pandas/typedef/__init__.py +18 -0
  556. snowflake/snowpark_connect/includes/python/pyspark/pandas/typedef/typehints.py +874 -0
  557. snowflake/snowpark_connect/includes/python/pyspark/pandas/usage_logging/__init__.py +143 -0
  558. snowflake/snowpark_connect/includes/python/pyspark/pandas/usage_logging/usage_logger.py +132 -0
  559. snowflake/snowpark_connect/includes/python/pyspark/pandas/utils.py +1063 -0
  560. snowflake/snowpark_connect/includes/python/pyspark/pandas/window.py +2702 -0
  561. snowflake/snowpark_connect/includes/python/pyspark/profiler.py +489 -0
  562. snowflake/snowpark_connect/includes/python/pyspark/py.typed +1 -0
  563. snowflake/snowpark_connect/includes/python/pyspark/python/pyspark/shell.py +123 -0
  564. snowflake/snowpark_connect/includes/python/pyspark/rdd.py +5518 -0
  565. snowflake/snowpark_connect/includes/python/pyspark/rddsampler.py +115 -0
  566. snowflake/snowpark_connect/includes/python/pyspark/resource/__init__.py +38 -0
  567. snowflake/snowpark_connect/includes/python/pyspark/resource/information.py +69 -0
  568. snowflake/snowpark_connect/includes/python/pyspark/resource/profile.py +317 -0
  569. snowflake/snowpark_connect/includes/python/pyspark/resource/requests.py +539 -0
  570. snowflake/snowpark_connect/includes/python/pyspark/resource/tests/__init__.py +16 -0
  571. snowflake/snowpark_connect/includes/python/pyspark/resource/tests/test_resources.py +83 -0
  572. snowflake/snowpark_connect/includes/python/pyspark/resultiterable.py +45 -0
  573. snowflake/snowpark_connect/includes/python/pyspark/serializers.py +681 -0
  574. snowflake/snowpark_connect/includes/python/pyspark/shell.py +123 -0
  575. snowflake/snowpark_connect/includes/python/pyspark/shuffle.py +854 -0
  576. snowflake/snowpark_connect/includes/python/pyspark/sql/__init__.py +75 -0
  577. snowflake/snowpark_connect/includes/python/pyspark/sql/_typing.pyi +80 -0
  578. snowflake/snowpark_connect/includes/python/pyspark/sql/avro/__init__.py +18 -0
  579. snowflake/snowpark_connect/includes/python/pyspark/sql/avro/functions.py +188 -0
  580. snowflake/snowpark_connect/includes/python/pyspark/sql/catalog.py +1270 -0
  581. snowflake/snowpark_connect/includes/python/pyspark/sql/column.py +1431 -0
  582. snowflake/snowpark_connect/includes/python/pyspark/sql/conf.py +99 -0
  583. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/__init__.py +18 -0
  584. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/_typing.py +90 -0
  585. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/avro/__init__.py +18 -0
  586. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/avro/functions.py +107 -0
  587. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/catalog.py +356 -0
  588. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/client/__init__.py +22 -0
  589. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/client/artifact.py +412 -0
  590. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/client/core.py +1689 -0
  591. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/client/reattach.py +340 -0
  592. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/column.py +514 -0
  593. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/conf.py +128 -0
  594. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/conversion.py +490 -0
  595. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/dataframe.py +2172 -0
  596. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/expressions.py +1056 -0
  597. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/functions.py +3937 -0
  598. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/group.py +418 -0
  599. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/plan.py +2289 -0
  600. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/__init__.py +25 -0
  601. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/base_pb2.py +203 -0
  602. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/base_pb2.pyi +2718 -0
  603. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/base_pb2_grpc.py +423 -0
  604. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/catalog_pb2.py +109 -0
  605. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/catalog_pb2.pyi +1130 -0
  606. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/commands_pb2.py +141 -0
  607. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/commands_pb2.pyi +1766 -0
  608. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/common_pb2.py +47 -0
  609. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/common_pb2.pyi +123 -0
  610. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/example_plugins_pb2.py +53 -0
  611. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/example_plugins_pb2.pyi +112 -0
  612. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/expressions_pb2.py +107 -0
  613. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/expressions_pb2.pyi +1507 -0
  614. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/relations_pb2.py +195 -0
  615. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/relations_pb2.pyi +3613 -0
  616. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/types_pb2.py +95 -0
  617. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/proto/types_pb2.pyi +980 -0
  618. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/protobuf/__init__.py +18 -0
  619. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/protobuf/functions.py +166 -0
  620. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/readwriter.py +861 -0
  621. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/session.py +952 -0
  622. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/streaming/__init__.py +22 -0
  623. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/streaming/query.py +295 -0
  624. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/streaming/readwriter.py +618 -0
  625. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/streaming/worker/__init__.py +18 -0
  626. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/streaming/worker/foreach_batch_worker.py +87 -0
  627. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/streaming/worker/listener_worker.py +100 -0
  628. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/types.py +301 -0
  629. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/udf.py +296 -0
  630. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/udtf.py +200 -0
  631. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/utils.py +58 -0
  632. snowflake/snowpark_connect/includes/python/pyspark/sql/connect/window.py +266 -0
  633. snowflake/snowpark_connect/includes/python/pyspark/sql/context.py +818 -0
  634. snowflake/snowpark_connect/includes/python/pyspark/sql/dataframe.py +5973 -0
  635. snowflake/snowpark_connect/includes/python/pyspark/sql/functions.py +15889 -0
  636. snowflake/snowpark_connect/includes/python/pyspark/sql/group.py +547 -0
  637. snowflake/snowpark_connect/includes/python/pyspark/sql/observation.py +152 -0
  638. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/__init__.py +21 -0
  639. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/_typing/__init__.pyi +344 -0
  640. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/_typing/protocols/__init__.pyi +17 -0
  641. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/_typing/protocols/frame.pyi +20 -0
  642. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/_typing/protocols/series.pyi +20 -0
  643. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/conversion.py +671 -0
  644. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/functions.py +480 -0
  645. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/functions.pyi +132 -0
  646. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/group_ops.py +523 -0
  647. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/map_ops.py +216 -0
  648. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/serializers.py +1019 -0
  649. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/typehints.py +172 -0
  650. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/types.py +972 -0
  651. snowflake/snowpark_connect/includes/python/pyspark/sql/pandas/utils.py +86 -0
  652. snowflake/snowpark_connect/includes/python/pyspark/sql/protobuf/__init__.py +18 -0
  653. snowflake/snowpark_connect/includes/python/pyspark/sql/protobuf/functions.py +334 -0
  654. snowflake/snowpark_connect/includes/python/pyspark/sql/readwriter.py +2159 -0
  655. snowflake/snowpark_connect/includes/python/pyspark/sql/session.py +2088 -0
  656. snowflake/snowpark_connect/includes/python/pyspark/sql/sql_formatter.py +84 -0
  657. snowflake/snowpark_connect/includes/python/pyspark/sql/streaming/__init__.py +21 -0
  658. snowflake/snowpark_connect/includes/python/pyspark/sql/streaming/listener.py +1050 -0
  659. snowflake/snowpark_connect/includes/python/pyspark/sql/streaming/query.py +746 -0
  660. snowflake/snowpark_connect/includes/python/pyspark/sql/streaming/readwriter.py +1652 -0
  661. snowflake/snowpark_connect/includes/python/pyspark/sql/streaming/state.py +288 -0
  662. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/__init__.py +16 -0
  663. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/__init__.py +16 -0
  664. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/client/__init__.py +16 -0
  665. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/client/test_artifact.py +420 -0
  666. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/client/test_client.py +358 -0
  667. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/__init__.py +16 -0
  668. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/test_parity_foreach.py +36 -0
  669. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/test_parity_foreach_batch.py +44 -0
  670. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/test_parity_listener.py +116 -0
  671. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/test_parity_streaming.py +35 -0
  672. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_connect_basic.py +3612 -0
  673. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_connect_column.py +1042 -0
  674. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_connect_function.py +2381 -0
  675. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_connect_plan.py +1060 -0
  676. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_arrow.py +163 -0
  677. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_arrow_map.py +38 -0
  678. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_arrow_python_udf.py +48 -0
  679. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_catalog.py +36 -0
  680. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_column.py +55 -0
  681. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_conf.py +36 -0
  682. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_dataframe.py +96 -0
  683. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_datasources.py +44 -0
  684. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_errors.py +36 -0
  685. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_functions.py +59 -0
  686. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_group.py +36 -0
  687. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_cogrouped_map.py +59 -0
  688. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py +74 -0
  689. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map_with_state.py +62 -0
  690. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_map.py +58 -0
  691. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_udf.py +70 -0
  692. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_udf_grouped_agg.py +50 -0
  693. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_udf_scalar.py +68 -0
  694. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_udf_window.py +40 -0
  695. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_readwriter.py +46 -0
  696. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_serde.py +44 -0
  697. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_types.py +100 -0
  698. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_udf.py +100 -0
  699. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_udtf.py +163 -0
  700. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_session.py +181 -0
  701. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_utils.py +42 -0
  702. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/__init__.py +16 -0
  703. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_cogrouped_map.py +623 -0
  704. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py +869 -0
  705. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_grouped_map_with_state.py +342 -0
  706. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_map.py +436 -0
  707. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf.py +363 -0
  708. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py +592 -0
  709. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_scalar.py +1503 -0
  710. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints.py +392 -0
  711. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints_with_future_annotations.py +375 -0
  712. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py +411 -0
  713. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/__init__.py +16 -0
  714. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/test_streaming.py +401 -0
  715. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/test_streaming_foreach.py +295 -0
  716. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py +106 -0
  717. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/test_streaming_listener.py +558 -0
  718. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_arrow.py +1346 -0
  719. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_arrow_map.py +182 -0
  720. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_arrow_python_udf.py +202 -0
  721. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_catalog.py +503 -0
  722. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_column.py +225 -0
  723. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_conf.py +83 -0
  724. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_context.py +201 -0
  725. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_dataframe.py +1931 -0
  726. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_datasources.py +256 -0
  727. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_errors.py +69 -0
  728. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_functions.py +1349 -0
  729. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_group.py +53 -0
  730. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_pandas_sqlmetrics.py +68 -0
  731. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_readwriter.py +283 -0
  732. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_serde.py +155 -0
  733. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_session.py +412 -0
  734. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_types.py +1581 -0
  735. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_udf.py +961 -0
  736. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_udf_profiler.py +165 -0
  737. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_udtf.py +1456 -0
  738. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_utils.py +1686 -0
  739. snowflake/snowpark_connect/includes/python/pyspark/sql/types.py +2558 -0
  740. snowflake/snowpark_connect/includes/python/pyspark/sql/udf.py +714 -0
  741. snowflake/snowpark_connect/includes/python/pyspark/sql/udtf.py +325 -0
  742. snowflake/snowpark_connect/includes/python/pyspark/sql/utils.py +339 -0
  743. snowflake/snowpark_connect/includes/python/pyspark/sql/window.py +492 -0
  744. snowflake/snowpark_connect/includes/python/pyspark/statcounter.py +165 -0
  745. snowflake/snowpark_connect/includes/python/pyspark/status.py +112 -0
  746. snowflake/snowpark_connect/includes/python/pyspark/storagelevel.py +97 -0
  747. snowflake/snowpark_connect/includes/python/pyspark/streaming/__init__.py +22 -0
  748. snowflake/snowpark_connect/includes/python/pyspark/streaming/context.py +471 -0
  749. snowflake/snowpark_connect/includes/python/pyspark/streaming/dstream.py +933 -0
  750. snowflake/snowpark_connect/includes/python/pyspark/streaming/kinesis.py +205 -0
  751. snowflake/snowpark_connect/includes/python/pyspark/streaming/listener.py +83 -0
  752. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/__init__.py +16 -0
  753. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/test_context.py +184 -0
  754. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/test_dstream.py +706 -0
  755. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/test_kinesis.py +118 -0
  756. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/test_listener.py +160 -0
  757. snowflake/snowpark_connect/includes/python/pyspark/streaming/util.py +168 -0
  758. snowflake/snowpark_connect/includes/python/pyspark/taskcontext.py +502 -0
  759. snowflake/snowpark_connect/includes/python/pyspark/testing/__init__.py +21 -0
  760. snowflake/snowpark_connect/includes/python/pyspark/testing/connectutils.py +199 -0
  761. snowflake/snowpark_connect/includes/python/pyspark/testing/mllibutils.py +30 -0
  762. snowflake/snowpark_connect/includes/python/pyspark/testing/mlutils.py +275 -0
  763. snowflake/snowpark_connect/includes/python/pyspark/testing/objects.py +121 -0
  764. snowflake/snowpark_connect/includes/python/pyspark/testing/pandasutils.py +714 -0
  765. snowflake/snowpark_connect/includes/python/pyspark/testing/sqlutils.py +168 -0
  766. snowflake/snowpark_connect/includes/python/pyspark/testing/streamingutils.py +178 -0
  767. snowflake/snowpark_connect/includes/python/pyspark/testing/utils.py +636 -0
  768. snowflake/snowpark_connect/includes/python/pyspark/tests/__init__.py +16 -0
  769. snowflake/snowpark_connect/includes/python/pyspark/tests/test_appsubmit.py +306 -0
  770. snowflake/snowpark_connect/includes/python/pyspark/tests/test_broadcast.py +196 -0
  771. snowflake/snowpark_connect/includes/python/pyspark/tests/test_conf.py +44 -0
  772. snowflake/snowpark_connect/includes/python/pyspark/tests/test_context.py +346 -0
  773. snowflake/snowpark_connect/includes/python/pyspark/tests/test_daemon.py +89 -0
  774. snowflake/snowpark_connect/includes/python/pyspark/tests/test_install_spark.py +124 -0
  775. snowflake/snowpark_connect/includes/python/pyspark/tests/test_join.py +69 -0
  776. snowflake/snowpark_connect/includes/python/pyspark/tests/test_memory_profiler.py +167 -0
  777. snowflake/snowpark_connect/includes/python/pyspark/tests/test_pin_thread.py +194 -0
  778. snowflake/snowpark_connect/includes/python/pyspark/tests/test_profiler.py +168 -0
  779. snowflake/snowpark_connect/includes/python/pyspark/tests/test_rdd.py +939 -0
  780. snowflake/snowpark_connect/includes/python/pyspark/tests/test_rddbarrier.py +52 -0
  781. snowflake/snowpark_connect/includes/python/pyspark/tests/test_rddsampler.py +66 -0
  782. snowflake/snowpark_connect/includes/python/pyspark/tests/test_readwrite.py +368 -0
  783. snowflake/snowpark_connect/includes/python/pyspark/tests/test_serializers.py +257 -0
  784. snowflake/snowpark_connect/includes/python/pyspark/tests/test_shuffle.py +267 -0
  785. snowflake/snowpark_connect/includes/python/pyspark/tests/test_stage_sched.py +153 -0
  786. snowflake/snowpark_connect/includes/python/pyspark/tests/test_statcounter.py +130 -0
  787. snowflake/snowpark_connect/includes/python/pyspark/tests/test_taskcontext.py +350 -0
  788. snowflake/snowpark_connect/includes/python/pyspark/tests/test_util.py +97 -0
  789. snowflake/snowpark_connect/includes/python/pyspark/tests/test_worker.py +271 -0
  790. snowflake/snowpark_connect/includes/python/pyspark/traceback_utils.py +81 -0
  791. snowflake/snowpark_connect/includes/python/pyspark/util.py +416 -0
  792. snowflake/snowpark_connect/includes/python/pyspark/version.py +19 -0
  793. snowflake/snowpark_connect/includes/python/pyspark/worker.py +1307 -0
  794. snowflake/snowpark_connect/includes/python/pyspark/worker_util.py +46 -0
  795. snowflake/snowpark_connect/proto/__init__.py +10 -0
  796. snowflake/snowpark_connect/proto/control_pb2.py +35 -0
  797. snowflake/snowpark_connect/proto/control_pb2.pyi +38 -0
  798. snowflake/snowpark_connect/proto/control_pb2_grpc.py +183 -0
  799. snowflake/snowpark_connect/proto/snowflake_expression_ext_pb2.py +35 -0
  800. snowflake/snowpark_connect/proto/snowflake_expression_ext_pb2.pyi +53 -0
  801. snowflake/snowpark_connect/proto/snowflake_rdd_pb2.pyi +39 -0
  802. snowflake/snowpark_connect/proto/snowflake_relation_ext_pb2.py +47 -0
  803. snowflake/snowpark_connect/proto/snowflake_relation_ext_pb2.pyi +111 -0
  804. snowflake/snowpark_connect/relation/__init__.py +3 -0
  805. snowflake/snowpark_connect/relation/catalogs/__init__.py +12 -0
  806. snowflake/snowpark_connect/relation/catalogs/abstract_spark_catalog.py +287 -0
  807. snowflake/snowpark_connect/relation/catalogs/snowflake_catalog.py +467 -0
  808. snowflake/snowpark_connect/relation/catalogs/utils.py +51 -0
  809. snowflake/snowpark_connect/relation/io_utils.py +76 -0
  810. snowflake/snowpark_connect/relation/map_aggregate.py +322 -0
  811. snowflake/snowpark_connect/relation/map_catalog.py +151 -0
  812. snowflake/snowpark_connect/relation/map_column_ops.py +1068 -0
  813. snowflake/snowpark_connect/relation/map_crosstab.py +48 -0
  814. snowflake/snowpark_connect/relation/map_extension.py +412 -0
  815. snowflake/snowpark_connect/relation/map_join.py +341 -0
  816. snowflake/snowpark_connect/relation/map_local_relation.py +326 -0
  817. snowflake/snowpark_connect/relation/map_map_partitions.py +146 -0
  818. snowflake/snowpark_connect/relation/map_relation.py +253 -0
  819. snowflake/snowpark_connect/relation/map_row_ops.py +716 -0
  820. snowflake/snowpark_connect/relation/map_sample_by.py +35 -0
  821. snowflake/snowpark_connect/relation/map_show_string.py +50 -0
  822. snowflake/snowpark_connect/relation/map_sql.py +1874 -0
  823. snowflake/snowpark_connect/relation/map_stats.py +324 -0
  824. snowflake/snowpark_connect/relation/map_subquery_alias.py +32 -0
  825. snowflake/snowpark_connect/relation/map_udtf.py +288 -0
  826. snowflake/snowpark_connect/relation/read/__init__.py +7 -0
  827. snowflake/snowpark_connect/relation/read/jdbc_read_dbapi.py +668 -0
  828. snowflake/snowpark_connect/relation/read/map_read.py +367 -0
  829. snowflake/snowpark_connect/relation/read/map_read_csv.py +142 -0
  830. snowflake/snowpark_connect/relation/read/map_read_jdbc.py +108 -0
  831. snowflake/snowpark_connect/relation/read/map_read_json.py +344 -0
  832. snowflake/snowpark_connect/relation/read/map_read_parquet.py +194 -0
  833. snowflake/snowpark_connect/relation/read/map_read_socket.py +59 -0
  834. snowflake/snowpark_connect/relation/read/map_read_table.py +109 -0
  835. snowflake/snowpark_connect/relation/read/map_read_text.py +106 -0
  836. snowflake/snowpark_connect/relation/read/reader_config.py +399 -0
  837. snowflake/snowpark_connect/relation/read/utils.py +155 -0
  838. snowflake/snowpark_connect/relation/stage_locator.py +161 -0
  839. snowflake/snowpark_connect/relation/utils.py +219 -0
  840. snowflake/snowpark_connect/relation/write/__init__.py +3 -0
  841. snowflake/snowpark_connect/relation/write/jdbc_write_dbapi.py +339 -0
  842. snowflake/snowpark_connect/relation/write/map_write.py +436 -0
  843. snowflake/snowpark_connect/relation/write/map_write_jdbc.py +48 -0
  844. snowflake/snowpark_connect/resources/java_udfs-1.0-SNAPSHOT.jar +0 -0
  845. snowflake/snowpark_connect/resources_initializer.py +75 -0
  846. snowflake/snowpark_connect/server.py +1136 -0
  847. snowflake/snowpark_connect/start_server.py +32 -0
  848. snowflake/snowpark_connect/tcm.py +8 -0
  849. snowflake/snowpark_connect/type_mapping.py +1003 -0
  850. snowflake/snowpark_connect/typed_column.py +94 -0
  851. snowflake/snowpark_connect/utils/__init__.py +3 -0
  852. snowflake/snowpark_connect/utils/artifacts.py +48 -0
  853. snowflake/snowpark_connect/utils/attribute_handling.py +72 -0
  854. snowflake/snowpark_connect/utils/cache.py +84 -0
  855. snowflake/snowpark_connect/utils/concurrent.py +124 -0
  856. snowflake/snowpark_connect/utils/context.py +390 -0
  857. snowflake/snowpark_connect/utils/describe_query_cache.py +231 -0
  858. snowflake/snowpark_connect/utils/interrupt.py +85 -0
  859. snowflake/snowpark_connect/utils/io_utils.py +35 -0
  860. snowflake/snowpark_connect/utils/pandas_udtf_utils.py +117 -0
  861. snowflake/snowpark_connect/utils/profiling.py +47 -0
  862. snowflake/snowpark_connect/utils/session.py +180 -0
  863. snowflake/snowpark_connect/utils/snowpark_connect_logging.py +38 -0
  864. snowflake/snowpark_connect/utils/telemetry.py +513 -0
  865. snowflake/snowpark_connect/utils/udf_cache.py +392 -0
  866. snowflake/snowpark_connect/utils/udf_helper.py +328 -0
  867. snowflake/snowpark_connect/utils/udf_utils.py +310 -0
  868. snowflake/snowpark_connect/utils/udtf_helper.py +420 -0
  869. snowflake/snowpark_connect/utils/udtf_utils.py +799 -0
  870. snowflake/snowpark_connect/utils/xxhash64.py +247 -0
  871. snowflake/snowpark_connect/version.py +6 -0
  872. snowpark_connect-0.20.2.data/scripts/snowpark-connect +71 -0
  873. snowpark_connect-0.20.2.data/scripts/snowpark-session +11 -0
  874. snowpark_connect-0.20.2.data/scripts/snowpark-submit +354 -0
  875. snowpark_connect-0.20.2.dist-info/METADATA +37 -0
  876. snowpark_connect-0.20.2.dist-info/RECORD +879 -0
  877. snowpark_connect-0.20.2.dist-info/WHEEL +5 -0
  878. snowpark_connect-0.20.2.dist-info/licenses/LICENSE.txt +202 -0
  879. snowpark_connect-0.20.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2159 @@
1
+ #
2
+ # Licensed to the Apache Software Foundation (ASF) under one or more
3
+ # contributor license agreements. See the NOTICE file distributed with
4
+ # this work for additional information regarding copyright ownership.
5
+ # The ASF licenses this file to You under the Apache License, Version 2.0
6
+ # (the "License"); you may not use this file except in compliance with
7
+ # the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ #
17
+ import sys
18
+ from typing import cast, overload, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING, Union
19
+
20
+ from py4j.java_gateway import JavaClass, JavaObject
21
+
22
+ from pyspark import RDD, since
23
+ from pyspark.sql.column import _to_seq, _to_java_column, Column
24
+ from pyspark.sql.types import StructType
25
+ from pyspark.sql import utils
26
+ from pyspark.sql.utils import to_str
27
+ from pyspark.errors import PySparkTypeError, PySparkValueError
28
+
29
+ if TYPE_CHECKING:
30
+ from pyspark.sql._typing import OptionalPrimitiveType, ColumnOrName
31
+ from pyspark.sql.session import SparkSession
32
+ from pyspark.sql.dataframe import DataFrame
33
+ from pyspark.sql.streaming import StreamingQuery
34
+
35
+ __all__ = ["DataFrameReader", "DataFrameWriter", "DataFrameWriterV2"]
36
+
37
+ PathOrPaths = Union[str, List[str]]
38
+ TupleOrListOfString = Union[List[str], Tuple[str, ...]]
39
+
40
+
41
+ class OptionUtils:
42
+ def _set_opts(
43
+ self,
44
+ schema: Optional[Union[StructType, str]] = None,
45
+ **options: "OptionalPrimitiveType",
46
+ ) -> None:
47
+ """
48
+ Set named options, filtering out any whose value is None.
49
+ """
50
+ if schema is not None:
51
+ self.schema(schema) # type: ignore[attr-defined]
52
+ for k, v in options.items():
53
+ if v is not None:
54
+ self.option(k, v) # type: ignore[attr-defined]
55
+
56
+
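# A minimal sketch of the behavior documented above: _set_opts drops any option
# whose value is None instead of forwarding it. `_ToyReader` below is a
# hypothetical stand-in for a reader and is not part of this package.
class _ToyReader(OptionUtils):
    def __init__(self):
        self.opts = {}
    def schema(self, s):
        self.opts["schema"] = s
        return self
    def option(self, k, v):
        self.opts[k] = v
        return self

_r = _ToyReader()
_r._set_opts(schema=None, header=True, sep=None)
print(_r.opts)  # {'header': True} -- the None-valued options were skipped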
57
+ class DataFrameReader(OptionUtils):
58
+ """
59
+ Interface used to load a :class:`DataFrame` from external storage systems
60
+ (e.g. file systems, key-value stores, etc). Use :attr:`SparkSession.read`
61
+ to access this.
62
+
63
+ .. versionadded:: 1.4.0
64
+
65
+ .. versionchanged:: 3.4.0
66
+ Supports Spark Connect.
67
+ """
68
+
69
+ def __init__(self, spark: "SparkSession"):
70
+ self._jreader = spark._jsparkSession.read()
71
+ self._spark = spark
72
+
73
+ def _df(self, jdf: JavaObject) -> "DataFrame":
74
+ from pyspark.sql.dataframe import DataFrame
75
+
76
+ return DataFrame(jdf, self._spark)
77
+
78
+ def format(self, source: str) -> "DataFrameReader":
79
+ """Specifies the input data source format.
80
+
81
+ .. versionadded:: 1.4.0
82
+
83
+ .. versionchanged:: 3.4.0
84
+ Supports Spark Connect.
85
+
86
+ Parameters
87
+ ----------
88
+ source : str
89
+ string, name of the data source, e.g. 'json', 'parquet'.
90
+
91
+ Examples
92
+ --------
93
+ >>> spark.read.format('json')
94
+ <...readwriter.DataFrameReader object ...>
95
+
96
+ Write a DataFrame into a JSON file and read it back.
97
+
98
+ >>> import tempfile
99
+ >>> with tempfile.TemporaryDirectory() as d:
100
+ ... # Write a DataFrame into a JSON file
101
+ ... spark.createDataFrame(
102
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
103
+ ... ).write.mode("overwrite").format("json").save(d)
104
+ ...
105
+ ... # Read the JSON file as a DataFrame.
106
+ ... spark.read.format('json').load(d).show()
107
+ +---+------------+
108
+ |age| name|
109
+ +---+------------+
110
+ |100|Hyukjin Kwon|
111
+ +---+------------+
112
+ """
113
+ self._jreader = self._jreader.format(source)
114
+ return self
115
+
116
+ def schema(self, schema: Union[StructType, str]) -> "DataFrameReader":
117
+ """Specifies the input schema.
118
+
119
+ Some data sources (e.g. JSON) can infer the input schema automatically from data.
120
+ By specifying the schema here, the underlying data source can skip the schema
121
+ inference step, and thus speed up data loading.
122
+
123
+ .. versionadded:: 1.4.0
124
+
125
+ .. versionchanged:: 3.4.0
126
+ Supports Spark Connect.
127
+
128
+ Parameters
129
+ ----------
130
+ schema : :class:`pyspark.sql.types.StructType` or str
131
+ a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
132
+ (For example ``col0 INT, col1 DOUBLE``).
133
+
134
+ Examples
135
+ --------
136
+ >>> spark.read.schema("col0 INT, col1 DOUBLE")
137
+ <...readwriter.DataFrameReader object ...>
138
+
139
+ Specify the schema when reading a CSV file.
140
+
141
+ >>> import tempfile
142
+ >>> with tempfile.TemporaryDirectory() as d:
143
+ ... spark.read.schema("col0 INT, col1 DOUBLE").format("csv").load(d).printSchema()
144
+ root
145
+ |-- col0: integer (nullable = true)
146
+ |-- col1: double (nullable = true)
147
+ """
148
+ from pyspark.sql import SparkSession
149
+
150
+ spark = SparkSession._getActiveSessionOrCreate()
151
+ if isinstance(schema, StructType):
152
+ jschema = spark._jsparkSession.parseDataType(schema.json())
153
+ self._jreader = self._jreader.schema(jschema)
154
+ elif isinstance(schema, str):
155
+ self._jreader = self._jreader.schema(schema)
156
+ else:
157
+ raise PySparkTypeError(
158
+ error_class="NOT_STR_OR_STRUCT",
159
+ message_parameters={
160
+ "arg_name": "schema",
161
+ "arg_type": type(schema).__name__,
162
+ },
163
+ )
164
+ return self
165
+
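# The doctests above pass the schema as a DDL string; a StructType works the
# same way (it is serialized via schema.json() and parsed on the JVM side).
# A brief sketch, assuming an active session `spark` and a CSV directory `d`:
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType

struct_schema = StructType(
    [
        StructField("col0", IntegerType(), True),
        StructField("col1", DoubleType(), True),
    ]
)
spark.read.schema(struct_schema).format("csv").load(d).printSchema()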
166
+ def option(self, key: str, value: "OptionalPrimitiveType") -> "DataFrameReader":
167
+ """
168
+ Adds an input option for the underlying data source.
169
+
170
+ .. versionadded:: 1.5.0
171
+
172
+ .. versionchanged:: 3.4.0
173
+ Supports Spark Connect.
174
+
175
+ Parameters
176
+ ----------
177
+ key : str
178
+ The key for the option to set.
179
+ value
180
+ The value for the option to set.
181
+
182
+ Examples
183
+ --------
184
+ >>> spark.read.option("key", "value")
185
+ <...readwriter.DataFrameReader object ...>
186
+
187
+ Specify the option 'nullValue' when reading a CSV file.
188
+
189
+ >>> import tempfile
190
+ >>> with tempfile.TemporaryDirectory() as d:
191
+ ... # Write a DataFrame into a CSV file
192
+ ... df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
193
+ ... df.write.mode("overwrite").format("csv").save(d)
194
+ ...
195
+ ... # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon'.
196
+ ... spark.read.schema(df.schema).option(
197
+ ... "nullValue", "Hyukjin Kwon").format('csv').load(d).show()
198
+ +---+----+
199
+ |age|name|
200
+ +---+----+
201
+ |100|NULL|
202
+ +---+----+
203
+ """
204
+ self._jreader = self._jreader.option(key, to_str(value))
205
+ return self
206
+
207
+ def options(self, **options: "OptionalPrimitiveType") -> "DataFrameReader":
208
+ """
209
+ Adds input options for the underlying data source.
210
+
211
+ .. versionadded:: 1.4.0
212
+
213
+ .. versionchanged:: 3.4.0
214
+ Supports Spark Connect.
215
+
216
+ Parameters
217
+ ----------
218
+ **options : dict
219
+ The dictionary of string keys and primitive-type values.
220
+
221
+ Examples
222
+ --------
223
+ >>> spark.read.options(key="value")
224
+ <...readwriter.DataFrameReader object ...>
225
+
226
+ Specify the options 'nullValue' and 'header' when reading a CSV file.
227
+
228
+ >>> import tempfile
229
+ >>> with tempfile.TemporaryDirectory() as d:
230
+ ... # Write a DataFrame into a CSV file with a header.
231
+ ... df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
232
+ ... df.write.option("header", True).mode("overwrite").format("csv").save(d)
233
+ ...
234
+ ... # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
235
+ ... # and 'header' option set to `True`.
236
+ ... spark.read.options(
237
+ ... nullValue="Hyukjin Kwon",
238
+ ... header=True
239
+ ... ).format('csv').load(d).show()
240
+ +---+----+
241
+ |age|name|
242
+ +---+----+
243
+ |100|NULL|
244
+ +---+----+
245
+ """
246
+ for k in options:
247
+ self._jreader = self._jreader.option(k, to_str(options[k]))
248
+ return self
249
+
250
+ def load(
251
+ self,
252
+ path: Optional[PathOrPaths] = None,
253
+ format: Optional[str] = None,
254
+ schema: Optional[Union[StructType, str]] = None,
255
+ **options: "OptionalPrimitiveType",
256
+ ) -> "DataFrame":
257
+ """Loads data from a data source and returns it as a :class:`DataFrame`.
258
+
259
+ .. versionadded:: 1.4.0
260
+
261
+ .. versionchanged:: 3.4.0
262
+ Supports Spark Connect.
263
+
264
+ Parameters
265
+ ----------
266
+ path : str or list, optional
267
+ optional string or a list of strings for file-system backed data sources.
268
+ format : str, optional
269
+ optional string for the format of the data source. Defaults to 'parquet'.
270
+ schema : :class:`pyspark.sql.types.StructType` or str, optional
271
+ optional :class:`pyspark.sql.types.StructType` for the input schema
272
+ or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
273
+ **options : dict
274
+ all other string options
275
+
276
+ Examples
277
+ --------
278
+ Load a CSV file with format, schema and options specified.
279
+
280
+ >>> import tempfile
281
+ >>> with tempfile.TemporaryDirectory() as d:
282
+ ... # Write a DataFrame into a CSV file with a header
283
+ ... df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
284
+ ... df.write.option("header", True).mode("overwrite").format("csv").save(d)
285
+ ...
286
+ ... # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
287
+ ... # and 'header' option set to `True`.
288
+ ... df = spark.read.load(
289
+ ... d, schema=df.schema, format="csv", nullValue="Hyukjin Kwon", header=True)
290
+ ... df.printSchema()
291
+ ... df.show()
292
+ root
293
+ |-- age: long (nullable = true)
294
+ |-- name: string (nullable = true)
295
+ +---+----+
296
+ |age|name|
297
+ +---+----+
298
+ |100|NULL|
299
+ +---+----+
300
+ """
301
+ if format is not None:
302
+ self.format(format)
303
+ if schema is not None:
304
+ self.schema(schema)
305
+ self.options(**options)
306
+ if isinstance(path, str):
307
+ return self._df(self._jreader.load(path))
308
+ elif path is not None:
309
+ if type(path) != list:
310
+ path = [path] # type: ignore[list-item]
311
+ assert self._spark._sc._jvm is not None
312
+ return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
313
+ else:
314
+ return self._df(self._jreader.load())
315
+
316
+ def json(
317
+ self,
318
+ path: Union[str, List[str], RDD[str]],
319
+ schema: Optional[Union[StructType, str]] = None,
320
+ primitivesAsString: Optional[Union[bool, str]] = None,
321
+ prefersDecimal: Optional[Union[bool, str]] = None,
322
+ allowComments: Optional[Union[bool, str]] = None,
323
+ allowUnquotedFieldNames: Optional[Union[bool, str]] = None,
324
+ allowSingleQuotes: Optional[Union[bool, str]] = None,
325
+ allowNumericLeadingZero: Optional[Union[bool, str]] = None,
326
+ allowBackslashEscapingAnyCharacter: Optional[Union[bool, str]] = None,
327
+ mode: Optional[str] = None,
328
+ columnNameOfCorruptRecord: Optional[str] = None,
329
+ dateFormat: Optional[str] = None,
330
+ timestampFormat: Optional[str] = None,
331
+ multiLine: Optional[Union[bool, str]] = None,
332
+ allowUnquotedControlChars: Optional[Union[bool, str]] = None,
333
+ lineSep: Optional[str] = None,
334
+ samplingRatio: Optional[Union[float, str]] = None,
335
+ dropFieldIfAllNull: Optional[Union[bool, str]] = None,
336
+ encoding: Optional[str] = None,
337
+ locale: Optional[str] = None,
338
+ pathGlobFilter: Optional[Union[bool, str]] = None,
339
+ recursiveFileLookup: Optional[Union[bool, str]] = None,
340
+ modifiedBefore: Optional[Union[bool, str]] = None,
341
+ modifiedAfter: Optional[Union[bool, str]] = None,
342
+ allowNonNumericNumbers: Optional[Union[bool, str]] = None,
343
+ ) -> "DataFrame":
344
+ """
345
+ Loads JSON files and returns the results as a :class:`DataFrame`.
346
+
347
+ `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
348
+ For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
349
+
350
+ If the ``schema`` parameter is not specified, this function goes
351
+ through the input once to determine the input schema.
352
+
353
+ .. versionadded:: 1.4.0
354
+
355
+ .. versionchanged:: 3.4.0
356
+ Supports Spark Connect.
357
+
358
+ Parameters
359
+ ----------
360
+ path : str, list or :class:`RDD`
361
+ string representing the path to the JSON dataset, or a list of paths,
362
+ or RDD of Strings storing JSON objects.
363
+ schema : :class:`pyspark.sql.types.StructType` or str, optional
364
+ an optional :class:`pyspark.sql.types.StructType` for the input schema or
365
+ a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
366
+
367
+ Other Parameters
368
+ ----------------
369
+ Extra options
370
+ For the extra options, refer to
371
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
372
+ for the version you use.
373
+
374
+ .. # noqa
375
+
376
+ Examples
377
+ --------
378
+ Write a DataFrame into a JSON file and read it back.
379
+
380
+ >>> import tempfile
381
+ >>> with tempfile.TemporaryDirectory() as d:
382
+ ... # Write a DataFrame into a JSON file
383
+ ... spark.createDataFrame(
384
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
385
+ ... ).write.mode("overwrite").format("json").save(d)
386
+ ...
387
+ ... # Read the JSON file as a DataFrame.
388
+ ... spark.read.json(d).show()
389
+ +---+------------+
390
+ |age| name|
391
+ +---+------------+
392
+ |100|Hyukjin Kwon|
393
+ +---+------------+
394
+ """
395
+ self._set_opts(
396
+ schema=schema,
397
+ primitivesAsString=primitivesAsString,
398
+ prefersDecimal=prefersDecimal,
399
+ allowComments=allowComments,
400
+ allowUnquotedFieldNames=allowUnquotedFieldNames,
401
+ allowSingleQuotes=allowSingleQuotes,
402
+ allowNumericLeadingZero=allowNumericLeadingZero,
403
+ allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
404
+ mode=mode,
405
+ columnNameOfCorruptRecord=columnNameOfCorruptRecord,
406
+ dateFormat=dateFormat,
407
+ timestampFormat=timestampFormat,
408
+ multiLine=multiLine,
409
+ allowUnquotedControlChars=allowUnquotedControlChars,
410
+ lineSep=lineSep,
411
+ samplingRatio=samplingRatio,
412
+ dropFieldIfAllNull=dropFieldIfAllNull,
413
+ encoding=encoding,
414
+ locale=locale,
415
+ pathGlobFilter=pathGlobFilter,
416
+ recursiveFileLookup=recursiveFileLookup,
417
+ modifiedBefore=modifiedBefore,
418
+ modifiedAfter=modifiedAfter,
419
+ allowNonNumericNumbers=allowNonNumericNumbers,
420
+ )
421
+ if isinstance(path, str):
422
+ path = [path]
423
+ if type(path) == list:
424
+ assert self._spark._sc._jvm is not None
425
+ return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path)))
426
+ elif isinstance(path, RDD):
427
+
428
+ def func(iterator: Iterable) -> Iterable:
429
+ for x in iterator:
430
+ if not isinstance(x, str):
431
+ x = str(x)
432
+ if isinstance(x, str):
433
+ x = x.encode("utf-8")
434
+ yield x
435
+
436
+ keyed = path.mapPartitions(func)
437
+ keyed._bypass_serializer = True # type: ignore[attr-defined]
438
+ assert self._spark._jvm is not None
439
+ jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
440
+ return self._df(self._jreader.json(jrdd))
441
+ else:
442
+ raise PySparkTypeError(
443
+ error_class="NOT_STR_OR_LIST_OF_RDD",
444
+ message_parameters={
445
+ "arg_name": "path",
446
+ "arg_type": type(path).__name__,
447
+ },
448
+ )
449
+
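# The doctest above reads JSON from a directory; json() also accepts an RDD of
# JSON strings (classic, py4j-backed sessions only). A minimal sketch, assuming
# an active session `spark`:
json_rdd = spark.sparkContext.parallelize(['{"age": 100, "name": "Hyukjin Kwon"}'])
spark.read.json(json_rdd).show()  # one row: age=100, name='Hyukjin Kwon'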
450
+ def table(self, tableName: str) -> "DataFrame":
451
+ """Returns the specified table as a :class:`DataFrame`.
452
+
453
+ .. versionadded:: 1.4.0
454
+
455
+ .. versionchanged:: 3.4.0
456
+ Supports Spark Connect.
457
+
458
+ Parameters
459
+ ----------
460
+ tableName : str
461
+ string, name of the table.
462
+
463
+ Examples
464
+ --------
465
+ >>> df = spark.range(10)
466
+ >>> df.createOrReplaceTempView('tblA')
467
+ >>> spark.read.table('tblA').show()
468
+ +---+
469
+ | id|
470
+ +---+
471
+ | 0|
472
+ | 1|
473
+ | 2|
474
+ | 3|
475
+ | 4|
476
+ | 5|
477
+ | 6|
478
+ | 7|
479
+ | 8|
480
+ | 9|
481
+ +---+
482
+ >>> _ = spark.sql("DROP TABLE tblA")
483
+ """
484
+ return self._df(self._jreader.table(tableName))
485
+
486
+ def parquet(self, *paths: str, **options: "OptionalPrimitiveType") -> "DataFrame":
487
+ """
488
+ Loads Parquet files, returning the result as a :class:`DataFrame`.
489
+
490
+ .. versionadded:: 1.4.0
491
+
492
+ .. versionchanged:: 3.4.0
493
+ Supports Spark Connect.
494
+
495
+ Parameters
496
+ ----------
497
+ paths : str
498
+
499
+ Other Parameters
500
+ ----------------
501
+ **options
502
+ For the extra options, refer to
503
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#data-source-option>`_
504
+ for the version you use.
505
+
506
+ .. # noqa
507
+
508
+ Examples
509
+ --------
510
+ Write a DataFrame into a Parquet file and read it back.
511
+
512
+ >>> import tempfile
513
+ >>> with tempfile.TemporaryDirectory() as d:
514
+ ... # Write a DataFrame into a Parquet file
515
+ ... spark.createDataFrame(
516
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
517
+ ... ).write.mode("overwrite").format("parquet").save(d)
518
+ ...
519
+ ... # Read the Parquet file as a DataFrame.
520
+ ... spark.read.parquet(d).show()
521
+ +---+------------+
522
+ |age| name|
523
+ +---+------------+
524
+ |100|Hyukjin Kwon|
525
+ +---+------------+
526
+ """
527
+ mergeSchema = options.get("mergeSchema", None)
528
+ pathGlobFilter = options.get("pathGlobFilter", None)
529
+ modifiedBefore = options.get("modifiedBefore", None)
530
+ modifiedAfter = options.get("modifiedAfter", None)
531
+ recursiveFileLookup = options.get("recursiveFileLookup", None)
532
+ datetimeRebaseMode = options.get("datetimeRebaseMode", None)
533
+ int96RebaseMode = options.get("int96RebaseMode", None)
534
+ self._set_opts(
535
+ mergeSchema=mergeSchema,
536
+ pathGlobFilter=pathGlobFilter,
537
+ recursiveFileLookup=recursiveFileLookup,
538
+ modifiedBefore=modifiedBefore,
539
+ modifiedAfter=modifiedAfter,
540
+ datetimeRebaseMode=datetimeRebaseMode,
541
+ int96RebaseMode=int96RebaseMode,
542
+ )
543
+
544
+ return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths)))
545
+
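# The parquet() doctest above uses defaults; the keyword options recognized in
# the code (mergeSchema, pathGlobFilter, recursiveFileLookup, ...) can be
# passed directly. A small sketch, assuming `d` is a directory of Parquet files:
spark.read.parquet(d, mergeSchema=True, recursiveFileLookup=True).printSchema()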
546
+ def text(
547
+ self,
548
+ paths: PathOrPaths,
549
+ wholetext: bool = False,
550
+ lineSep: Optional[str] = None,
551
+ pathGlobFilter: Optional[Union[bool, str]] = None,
552
+ recursiveFileLookup: Optional[Union[bool, str]] = None,
553
+ modifiedBefore: Optional[Union[bool, str]] = None,
554
+ modifiedAfter: Optional[Union[bool, str]] = None,
555
+ ) -> "DataFrame":
556
+ """
557
+ Loads text files and returns a :class:`DataFrame` whose schema starts with a
558
+ string column named "value", and followed by partitioned columns if there
559
+ are any.
560
+ The text files must be encoded as UTF-8.
561
+
562
+ By default, each line in the text file is a new row in the resulting DataFrame.
563
+
564
+ .. versionadded:: 1.6.0
565
+
566
+ .. versionchanged:: 3.4.0
567
+ Supports Spark Connect.
568
+
569
+ Parameters
570
+ ----------
571
+ paths : str or list
572
+ string, or list of strings, for input path(s).
573
+
574
+ Other Parameters
575
+ ----------------
576
+ Extra options
577
+ For the extra options, refer to
578
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-text.html#data-source-option>`_
579
+ for the version you use.
580
+
581
+ .. # noqa
582
+
583
+ Examples
584
+ --------
585
+ Write a DataFrame into a text file and read it back.
586
+
587
+ >>> import tempfile
588
+ >>> with tempfile.TemporaryDirectory() as d:
589
+ ... # Write a DataFrame into a text file
590
+ ... df = spark.createDataFrame([("a",), ("b",), ("c",)], schema=["alphabets"])
591
+ ... df.write.mode("overwrite").format("text").save(d)
592
+ ...
593
+ ... # Read the text file as a DataFrame.
594
+ ... spark.read.schema(df.schema).text(d).sort("alphabets").show()
595
+ +---------+
596
+ |alphabets|
597
+ +---------+
598
+ | a|
599
+ | b|
600
+ | c|
601
+ +---------+
602
+ """
603
+ self._set_opts(
604
+ wholetext=wholetext,
605
+ lineSep=lineSep,
606
+ pathGlobFilter=pathGlobFilter,
607
+ recursiveFileLookup=recursiveFileLookup,
608
+ modifiedBefore=modifiedBefore,
609
+ modifiedAfter=modifiedAfter,
610
+ )
611
+
612
+ if isinstance(paths, str):
613
+ paths = [paths]
614
+ assert self._spark._sc._jvm is not None
615
+ return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
616
+
617
+ def csv(
618
+ self,
619
+ path: PathOrPaths,
620
+ schema: Optional[Union[StructType, str]] = None,
621
+ sep: Optional[str] = None,
622
+ encoding: Optional[str] = None,
623
+ quote: Optional[str] = None,
624
+ escape: Optional[str] = None,
625
+ comment: Optional[str] = None,
626
+ header: Optional[Union[bool, str]] = None,
627
+ inferSchema: Optional[Union[bool, str]] = None,
628
+ ignoreLeadingWhiteSpace: Optional[Union[bool, str]] = None,
629
+ ignoreTrailingWhiteSpace: Optional[Union[bool, str]] = None,
630
+ nullValue: Optional[str] = None,
631
+ nanValue: Optional[str] = None,
632
+ positiveInf: Optional[str] = None,
633
+ negativeInf: Optional[str] = None,
634
+ dateFormat: Optional[str] = None,
635
+ timestampFormat: Optional[str] = None,
636
+ maxColumns: Optional[Union[int, str]] = None,
637
+ maxCharsPerColumn: Optional[Union[int, str]] = None,
638
+ maxMalformedLogPerPartition: Optional[Union[int, str]] = None,
639
+ mode: Optional[str] = None,
640
+ columnNameOfCorruptRecord: Optional[str] = None,
641
+ multiLine: Optional[Union[bool, str]] = None,
642
+ charToEscapeQuoteEscaping: Optional[str] = None,
643
+ samplingRatio: Optional[Union[float, str]] = None,
644
+ enforceSchema: Optional[Union[bool, str]] = None,
645
+ emptyValue: Optional[str] = None,
646
+ locale: Optional[str] = None,
647
+ lineSep: Optional[str] = None,
648
+ pathGlobFilter: Optional[Union[bool, str]] = None,
649
+ recursiveFileLookup: Optional[Union[bool, str]] = None,
650
+ modifiedBefore: Optional[Union[bool, str]] = None,
651
+ modifiedAfter: Optional[Union[bool, str]] = None,
652
+ unescapedQuoteHandling: Optional[str] = None,
653
+ ) -> "DataFrame":
654
+ r"""Loads a CSV file and returns the result as a :class:`DataFrame`.
655
+
656
+ This function will go through the input once to determine the input schema if
657
+ ``inferSchema`` is enabled. To avoid going through the entire data once, disable
658
+ ``inferSchema`` option or specify the schema explicitly using ``schema``.
659
+
660
+ .. versionadded:: 2.0.0
661
+
662
+ .. versionchanged:: 3.4.0
663
+ Supports Spark Connect.
664
+
665
+ Parameters
666
+ ----------
667
+ path : str or list
668
+ string, or list of strings, for input path(s),
669
+ or RDD of Strings storing CSV rows.
670
+ schema : :class:`pyspark.sql.types.StructType` or str, optional
671
+ an optional :class:`pyspark.sql.types.StructType` for the input schema
672
+ or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
673
+
674
+ Other Parameters
675
+ ----------------
676
+ Extra options
677
+ For the extra options, refer to
678
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
679
+ for the version you use.
680
+
681
+ .. # noqa
682
+
683
+ Examples
684
+ --------
685
+ Write a DataFrame into a CSV file and read it back.
686
+
687
+ >>> import tempfile
688
+ >>> with tempfile.TemporaryDirectory() as d:
689
+ ... # Write a DataFrame into a CSV file
690
+ ... df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
691
+ ... df.write.mode("overwrite").format("csv").save(d)
692
+ ...
693
+ ... # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon'.
694
+ ... spark.read.csv(d, schema=df.schema, nullValue="Hyukjin Kwon").show()
695
+ +---+----+
696
+ |age|name|
697
+ +---+----+
698
+ |100|NULL|
699
+ +---+----+
700
+ """
701
+ self._set_opts(
702
+ schema=schema,
703
+ sep=sep,
704
+ encoding=encoding,
705
+ quote=quote,
706
+ escape=escape,
707
+ comment=comment,
708
+ header=header,
709
+ inferSchema=inferSchema,
710
+ ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
711
+ ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
712
+ nullValue=nullValue,
713
+ nanValue=nanValue,
714
+ positiveInf=positiveInf,
715
+ negativeInf=negativeInf,
716
+ dateFormat=dateFormat,
717
+ timestampFormat=timestampFormat,
718
+ maxColumns=maxColumns,
719
+ maxCharsPerColumn=maxCharsPerColumn,
720
+ maxMalformedLogPerPartition=maxMalformedLogPerPartition,
721
+ mode=mode,
722
+ columnNameOfCorruptRecord=columnNameOfCorruptRecord,
723
+ multiLine=multiLine,
724
+ charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
725
+ samplingRatio=samplingRatio,
726
+ enforceSchema=enforceSchema,
727
+ emptyValue=emptyValue,
728
+ locale=locale,
729
+ lineSep=lineSep,
730
+ pathGlobFilter=pathGlobFilter,
731
+ recursiveFileLookup=recursiveFileLookup,
732
+ modifiedBefore=modifiedBefore,
733
+ modifiedAfter=modifiedAfter,
734
+ unescapedQuoteHandling=unescapedQuoteHandling,
735
+ )
736
+ if isinstance(path, str):
737
+ path = [path]
738
+ if type(path) == list:
739
+ assert self._spark._sc._jvm is not None
740
+ return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path)))
741
+ elif isinstance(path, RDD):
742
+
743
+ def func(iterator):
744
+ for x in iterator:
745
+ if not isinstance(x, str):
746
+ x = str(x)
747
+ if isinstance(x, str):
748
+ x = x.encode("utf-8")
749
+ yield x
750
+
751
+ keyed = path.mapPartitions(func)
752
+ keyed._bypass_serializer = True
753
+ jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
754
+ # see SPARK-22112
755
+ # There is no JVM API for creating a DataFrame directly from an RDD of CSV strings.
756
+ # We can do it by first creating a JVM Dataset[String] and then using the JVM API
757
+ # that builds a DataFrame from that Dataset.
758
+ jdataset = self._spark._jsparkSession.createDataset(
759
+ jrdd.rdd(), self._spark._jvm.Encoders.STRING()
760
+ )
761
+ return self._df(self._jreader.csv(jdataset))
762
+ else:
763
+ raise PySparkTypeError(
764
+ error_class="NOT_STR_OR_LIST_OF_RDD",
765
+ message_parameters={
766
+ "arg_name": "path",
767
+ "arg_type": type(path).__name__,
768
+ },
769
+ )
770
+
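# Like json(), csv() also accepts an RDD of CSV strings (classic sessions only),
# using the Dataset[String] bridge described in the SPARK-22112 comment above.
# A minimal sketch, assuming an active session `spark`:
csv_rdd = spark.sparkContext.parallelize(["age,name", "100,Hyukjin Kwon"])
spark.read.csv(csv_rdd, header=True, inferSchema=True).show()  # one row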
771
+ def orc(
772
+ self,
773
+ path: PathOrPaths,
774
+ mergeSchema: Optional[bool] = None,
775
+ pathGlobFilter: Optional[Union[bool, str]] = None,
776
+ recursiveFileLookup: Optional[Union[bool, str]] = None,
777
+ modifiedBefore: Optional[Union[bool, str]] = None,
778
+ modifiedAfter: Optional[Union[bool, str]] = None,
779
+ ) -> "DataFrame":
780
+ """Loads ORC files, returning the result as a :class:`DataFrame`.
781
+
782
+ .. versionadded:: 1.5.0
783
+
784
+ .. versionchanged:: 3.4.0
785
+ Supports Spark Connect.
786
+
787
+ Parameters
788
+ ----------
789
+ path : str or list
790
+
791
+ Other Parameters
792
+ ----------------
793
+ Extra options
794
+ For the extra options, refer to
795
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-orc.html#data-source-option>`_
796
+ for the version you use.
797
+
798
+ .. # noqa
799
+
800
+ Examples
801
+ --------
802
+ Write a DataFrame into an ORC file and read it back.
803
+
804
+ >>> import tempfile
805
+ >>> with tempfile.TemporaryDirectory() as d:
806
+ ... # Write a DataFrame into an ORC file
807
+ ... spark.createDataFrame(
808
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
809
+ ... ).write.mode("overwrite").format("orc").save(d)
810
+ ...
811
+ ... # Read the ORC file as a DataFrame.
812
+ ... spark.read.orc(d).show()
813
+ +---+------------+
814
+ |age| name|
815
+ +---+------------+
816
+ |100|Hyukjin Kwon|
817
+ +---+------------+
818
+ """
819
+ self._set_opts(
820
+ mergeSchema=mergeSchema,
821
+ pathGlobFilter=pathGlobFilter,
822
+ modifiedBefore=modifiedBefore,
823
+ modifiedAfter=modifiedAfter,
824
+ recursiveFileLookup=recursiveFileLookup,
825
+ )
826
+ if isinstance(path, str):
827
+ path = [path]
828
+ return self._df(self._jreader.orc(_to_seq(self._spark._sc, path)))
829
+
830
+ @overload
831
+ def jdbc(
832
+ self, url: str, table: str, *, properties: Optional[Dict[str, str]] = None
833
+ ) -> "DataFrame":
834
+ ...
835
+
836
+ @overload
837
+ def jdbc(
838
+ self,
839
+ url: str,
840
+ table: str,
841
+ column: str,
842
+ lowerBound: Union[int, str],
843
+ upperBound: Union[int, str],
844
+ numPartitions: int,
845
+ *,
846
+ properties: Optional[Dict[str, str]] = None,
847
+ ) -> "DataFrame":
848
+ ...
849
+
850
+ @overload
851
+ def jdbc(
852
+ self,
853
+ url: str,
854
+ table: str,
855
+ *,
856
+ predicates: List[str],
857
+ properties: Optional[Dict[str, str]] = None,
858
+ ) -> "DataFrame":
859
+ ...
860
+
861
+ def jdbc(
862
+ self,
863
+ url: str,
864
+ table: str,
865
+ column: Optional[str] = None,
866
+ lowerBound: Optional[Union[int, str]] = None,
867
+ upperBound: Optional[Union[int, str]] = None,
868
+ numPartitions: Optional[int] = None,
869
+ predicates: Optional[List[str]] = None,
870
+ properties: Optional[Dict[str, str]] = None,
871
+ ) -> "DataFrame":
872
+ """
873
+ Construct a :class:`DataFrame` representing the database table named ``table``
874
+ accessible via JDBC URL ``url`` and connection ``properties``.
875
+
876
+ Partitions of the table will be retrieved in parallel if either ``column`` or
877
+ ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
878
+ are needed when ``column`` is specified.
879
+
880
+ If both ``column`` and ``predicates`` are specified, ``column`` will be used.
881
+
882
+ .. versionadded:: 1.4.0
883
+
884
+ .. versionchanged:: 3.4.0
885
+ Supports Spark Connect.
886
+
887
+ Parameters
888
+ ----------
889
+ table : str
890
+ the name of the table
891
+ column : str, optional
892
+ alias of ``partitionColumn`` option. Refer to ``partitionColumn`` in
893
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html#data-source-option>`_
894
+ for the version you use.
895
+ predicates : list, optional
896
+ a list of expressions suitable for inclusion in WHERE clauses;
897
+ each one defines one partition of the :class:`DataFrame`
898
+ properties : dict, optional
899
+ a dictionary of JDBC database connection arguments. Normally at
900
+ least properties "user" and "password" with their corresponding values.
901
+ For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
902
+
903
+ Other Parameters
904
+ ----------------
905
+ Extra options
906
+ For the extra options, refer to
907
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html#data-source-option>`_
908
+ for the version you use.
909
+
910
+ .. # noqa
911
+
912
+ Notes
913
+ -----
914
+ Don't create too many partitions in parallel on a large cluster;
915
+ otherwise Spark might crash your external database systems.
916
+
917
+ Returns
918
+ -------
919
+ :class:`DataFrame`
920
+ """
921
+ if properties is None:
922
+ properties = dict()
923
+ assert self._spark._sc._gateway is not None
924
+ jprop = JavaClass(
925
+ "java.util.Properties",
926
+ self._spark._sc._gateway._gateway_client,
927
+ )()
928
+ for k in properties:
929
+ jprop.setProperty(k, properties[k])
930
+ if column is not None:
931
+ assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
932
+ assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
933
+ assert (
934
+ numPartitions is not None
935
+ ), "numPartitions can not be None when ``column`` is specified"
936
+ return self._df(
937
+ self._jreader.jdbc(
938
+ url, table, column, int(lowerBound), int(upperBound), int(numPartitions), jprop
939
+ )
940
+ )
941
+ if predicates is not None:
942
+ gateway = self._spark._sc._gateway
943
+ assert gateway is not None
944
+ jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
945
+ return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
946
+ return self._df(self._jreader.jdbc(url, table, jprop))
947
+
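# A minimal sketch of the two parallel-read paths described above; the JDBC URL, table,
# column names, and credentials are hypothetical and assume a reachable database.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
props = {"user": "SYSTEM", "password": "mypassword"}  # hypothetical credentials

# One partition per slice of the "id" range (column + lowerBound/upperBound + numPartitions).
people_by_range = spark.read.jdbc(
    url="jdbc:postgresql://localhost:5432/testdb",  # hypothetical URL
    table="people",
    column="id",
    lowerBound=1,
    upperBound=100_000,
    numPartitions=4,
    properties=props,
)

# One partition per predicate.
people_by_predicate = spark.read.jdbc(
    url="jdbc:postgresql://localhost:5432/testdb",
    table="people",
    predicates=["country = 'KR'", "country = 'US'"],
    properties=props,
)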
948
+
949
+ class DataFrameWriter(OptionUtils):
950
+ """
951
+ Interface used to write a :class:`DataFrame` to external storage systems
952
+ (e.g. file systems, key-value stores, etc). Use :attr:`DataFrame.write`
953
+ to access this.
954
+
955
+ .. versionadded:: 1.4.0
956
+
957
+ .. versionchanged:: 3.4.0
958
+ Supports Spark Connect.
959
+ """
960
+
961
+ def __init__(self, df: "DataFrame"):
962
+ self._df = df
963
+ self._spark = df.sparkSession
964
+ self._jwrite = df._jdf.write()
965
+
966
+ def _sq(self, jsq: JavaObject) -> "StreamingQuery":
967
+ from pyspark.sql.streaming import StreamingQuery
968
+
969
+ return StreamingQuery(jsq)
970
+
971
+ def mode(self, saveMode: Optional[str]) -> "DataFrameWriter":
972
+ """Specifies the behavior when data or table already exists.
973
+
974
+ Options include:
975
+
976
+ * `append`: Append contents of this :class:`DataFrame` to existing data.
977
+ * `overwrite`: Overwrite existing data.
978
+ * `error` or `errorifexists`: Throw an exception if data already exists.
979
+ * `ignore`: Silently ignore this operation if data already exists.
980
+
981
+ .. versionadded:: 1.4.0
982
+
983
+ .. versionchanged:: 3.4.0
984
+ Supports Spark Connect.
985
+
986
+ Examples
987
+ --------
988
+ Raise an error when writing to an existing path.
989
+
990
+ >>> import tempfile
991
+ >>> with tempfile.TemporaryDirectory() as d:
992
+ ... spark.createDataFrame(
993
+ ... [{"age": 80, "name": "Xinrong Meng"}]
994
+ ... ).write.mode("error").format("parquet").save(d) # doctest: +SKIP
995
+ Traceback (most recent call last):
996
+ ...
997
+ ...AnalysisException: ...
998
+
999
+ Write a DataFrame into a Parquet file with various save modes, and read it back.
1000
+
1001
+ >>> with tempfile.TemporaryDirectory() as d:
1002
+ ... # Overwrite the path with a new Parquet file
1003
+ ... spark.createDataFrame(
1004
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
1005
+ ... ).write.mode("overwrite").format("parquet").save(d)
1006
+ ...
1007
+ ... # Append another DataFrame into the Parquet file
1008
+ ... spark.createDataFrame(
1009
+ ... [{"age": 120, "name": "Takuya Ueshin"}]
1010
+ ... ).write.mode("append").format("parquet").save(d)
1011
+ ...
1012
+ ... # This write is silently skipped because data already exists ("ignore" mode).
1013
+ ... spark.createDataFrame(
1014
+ ... [{"age": 140, "name": "Haejoon Lee"}]
1015
+ ... ).write.mode("ignore").format("parquet").save(d)
1016
+ ...
1017
+ ... # Read the Parquet file as a DataFrame.
1018
+ ... spark.read.parquet(d).show()
1019
+ +---+-------------+
1020
+ |age| name|
1021
+ +---+-------------+
1022
+ |120|Takuya Ueshin|
1023
+ |100| Hyukjin Kwon|
1024
+ +---+-------------+
1025
+ """
1026
+ # At the JVM side, the default value of mode is already set to "error".
1027
+ # So, if the given saveMode is None, we will not call JVM-side's mode method.
1028
+ if saveMode is not None:
1029
+ self._jwrite = self._jwrite.mode(saveMode)
1030
+ return self
1031
+
1032
+ def format(self, source: str) -> "DataFrameWriter":
1033
+ """Specifies the underlying output data source.
1034
+
1035
+ .. versionadded:: 1.4.0
1036
+
1037
+ .. versionchanged:: 3.4.0
1038
+ Supports Spark Connect.
1039
+
1040
+ Parameters
1041
+ ----------
1042
+ source : str
1043
+ string, name of the data source, e.g. 'json', 'parquet'.
1044
+
1045
+ Examples
1046
+ --------
1047
+ >>> spark.range(1).write.format('parquet')
1048
+ <...readwriter.DataFrameWriter object ...>
1049
+
1050
+ Write a DataFrame into a Parquet file and read it back.
1051
+
1052
+ >>> import tempfile
1053
+ >>> with tempfile.TemporaryDirectory() as d:
1054
+ ... # Write a DataFrame into a Parquet file
1055
+ ... spark.createDataFrame(
1056
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
1057
+ ... ).write.mode("overwrite").format("parquet").save(d)
1058
+ ...
1059
+ ... # Read the Parquet file as a DataFrame.
1060
+ ... spark.read.format('parquet').load(d).show()
1061
+ +---+------------+
1062
+ |age| name|
1063
+ +---+------------+
1064
+ |100|Hyukjin Kwon|
1065
+ +---+------------+
1066
+ """
1067
+ self._jwrite = self._jwrite.format(source)
1068
+ return self
1069
+
1070
+ def option(self, key: str, value: "OptionalPrimitiveType") -> "DataFrameWriter":
1071
+ """
1072
+ Adds an output option for the underlying data source.
1073
+
1074
+ .. versionadded:: 1.5.0
1075
+
1076
+ .. versionchanged:: 3.4.0
1077
+ Supports Spark Connect.
1078
+
1079
+ Parameters
1080
+ ----------
1081
+ key : str
1082
+ The key for the option to set.
1083
+ value
1084
+ The value for the option to set.
1085
+
1086
+ Examples
1087
+ --------
1088
+ >>> spark.range(1).write.option("key", "value")
1089
+ <...readwriter.DataFrameWriter object ...>
1090
+
1091
+ Specify the option 'nullValue' when writing a CSV file.
1092
+
1093
+ >>> import tempfile
1094
+ >>> with tempfile.TemporaryDirectory() as d:
1095
+ ... # Write a DataFrame into a CSV file with 'nullValue' option set to 'Hyukjin Kwon'.
1096
+ ... df = spark.createDataFrame([(100, None)], "age INT, name STRING")
1097
+ ... df.write.option("nullValue", "Hyukjin Kwon").mode("overwrite").format("csv").save(d)
1098
+ ...
1099
+ ... # Read the CSV file as a DataFrame.
1100
+ ... spark.read.schema(df.schema).format('csv').load(d).show()
1101
+ +---+------------+
1102
+ |age| name|
1103
+ +---+------------+
1104
+ |100|Hyukjin Kwon|
1105
+ +---+------------+
1106
+ """
1107
+
1108
+ self._jwrite = self._jwrite.option(key, to_str(value))
1109
+ return self
1110
+
1111
+ def options(self, **options: "OptionalPrimitiveType") -> "DataFrameWriter":
1112
+ """
1113
+ Adds output options for the underlying data source.
1114
+
1115
+ .. versionadded:: 1.4.0
1116
+
1117
+ .. versionchanged:: 3.4.0
1118
+ Supports Spark Connect.
1119
+
1120
+ Parameters
1121
+ ----------
1122
+ **options : dict
1123
+ The dictionary of string keys and primitive-type values.
1124
+
1125
+ Examples
1126
+ --------
1127
+ >>> spark.range(1).write.option("key", "value")
1128
+ <...readwriter.DataFrameWriter object ...>
1129
+
1130
+ Specify the options 'nullValue' and 'header' when writing a CSV file.
1131
+
1132
+ >>> from pyspark.sql.types import StructType, StructField, StringType, IntegerType
1133
+ >>> schema = StructType([
1134
+ ... StructField("age", IntegerType(), True),
1135
+ ... StructField("name", StringType(), True),
1136
+ ... ])
1137
+ >>> import tempfile
1138
+ >>> with tempfile.TemporaryDirectory() as d:
1139
+ ... # Write a DataFrame into a CSV file with 'nullValue' option set to 'Hyukjin Kwon',
1140
+ ... # and 'header' option set to `True`.
1141
+ ... df = spark.createDataFrame([(100, None)], schema=schema)
1142
+ ... df.write.options(nullValue="Hyukjin Kwon", header=True).mode(
1143
+ ... "overwrite").format("csv").save(d)
1144
+ ...
1145
+ ... # Read the CSV file as a DataFrame.
1146
+ ... spark.read.option("header", True).format('csv').load(d).show()
1147
+ +---+------------+
1148
+ |age| name|
1149
+ +---+------------+
1150
+ |100|Hyukjin Kwon|
1151
+ +---+------------+
1152
+ """
1153
+ for k in options:
1154
+ self._jwrite = self._jwrite.option(k, to_str(options[k]))
1155
+ return self
1156
+
1157
+ @overload
1158
+ def partitionBy(self, *cols: str) -> "DataFrameWriter":
1159
+ ...
1160
+
1161
+ @overload
1162
+ def partitionBy(self, *cols: List[str]) -> "DataFrameWriter":
1163
+ ...
1164
+
1165
+ def partitionBy(self, *cols: Union[str, List[str]]) -> "DataFrameWriter":
1166
+ """Partitions the output by the given columns on the file system.
1167
+
1168
+ If specified, the output is laid out on the file system similarly
1169
+ to Hive's partitioning scheme.
1170
+
1171
+ .. versionadded:: 1.4.0
1172
+
1173
+ .. versionchanged:: 3.4.0
1174
+ Supports Spark Connect.
1175
+
1176
+ Parameters
1177
+ ----------
1178
+ cols : str or list
1179
+ name of columns
1180
+
1181
+ Examples
1182
+ --------
1183
+ Write a DataFrame into a Parquet file in a partitioned manner, and read it back.
1184
+
1185
+ >>> import tempfile
1186
+ >>> import os
1187
+ >>> with tempfile.TemporaryDirectory() as d:
1188
+ ... # Write a DataFrame into a Parquet file in a partitioned manner.
1189
+ ... spark.createDataFrame(
1190
+ ... [{"age": 100, "name": "Hyukjin Kwon"}, {"age": 120, "name": "Ruifeng Zheng"}]
1191
+ ... ).write.partitionBy("name").mode("overwrite").format("parquet").save(d)
1192
+ ...
1193
+ ... # Read the Parquet file as a DataFrame.
1194
+ ... spark.read.parquet(d).sort("age").show()
1195
+ ...
1196
+ ... # Read one partition as a DataFrame.
1197
+ ... spark.read.parquet(f"{d}{os.path.sep}name=Hyukjin Kwon").show()
1198
+ +---+-------------+
1199
+ |age| name|
1200
+ +---+-------------+
1201
+ |100| Hyukjin Kwon|
1202
+ |120|Ruifeng Zheng|
1203
+ +---+-------------+
1204
+ +---+
1205
+ |age|
1206
+ +---+
1207
+ |100|
1208
+ +---+
1209
+ """
1210
+ if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
1211
+ cols = cols[0] # type: ignore[assignment]
1212
+ self._jwrite = self._jwrite.partitionBy(
1213
+ _to_seq(self._spark._sc, cast(Iterable["ColumnOrName"], cols))
1214
+ )
1215
+ return self
1216
+
1217
+ @overload
1218
+ def bucketBy(self, numBuckets: int, col: str, *cols: str) -> "DataFrameWriter":
1219
+ ...
1220
+
1221
+ @overload
1222
+ def bucketBy(self, numBuckets: int, col: TupleOrListOfString) -> "DataFrameWriter":
1223
+ ...
1224
+
1225
+ def bucketBy(
1226
+ self, numBuckets: int, col: Union[str, TupleOrListOfString], *cols: Optional[str]
1227
+ ) -> "DataFrameWriter":
1228
+ """Buckets the output by the given columns. If specified,
1229
+ the output is laid out on the file system similarly to Hive's bucketing scheme,
1230
+ but it uses a different bucket hash function and is not compatible with Hive's bucketing.
1231
+
1232
+ .. versionadded:: 2.3.0
1233
+
1234
+ .. versionchanged:: 3.4.0
1235
+ Supports Spark Connect.
1236
+
1237
+ Parameters
1238
+ ----------
1239
+ numBuckets : int
1240
+ the number of buckets to save
1241
+ col : str, list or tuple
1242
+ a name of a column, or a list of names.
1243
+ cols : str
1244
+ additional names (optional). If `col` is a list it should be empty.
1245
+
1246
+ Notes
1247
+ -----
1248
+ Applicable for file-based data sources in combination with
1249
+ :py:meth:`DataFrameWriter.saveAsTable`.
1250
+
1251
+ Examples
1252
+ --------
1253
+ Write a DataFrame into a Parquet file in a bucketed manner, and read it back.
1254
+
1255
+ >>> from pyspark.sql.functions import input_file_name
1256
+ >>> # Write a DataFrame into a Parquet file in a bucketed manner.
1257
+ ... _ = spark.sql("DROP TABLE IF EXISTS bucketed_table")
1258
+ >>> spark.createDataFrame([
1259
+ ... (100, "Hyukjin Kwon"), (120, "Hyukjin Kwon"), (140, "Haejoon Lee")],
1260
+ ... schema=["age", "name"]
1261
+ ... ).write.bucketBy(2, "name").mode("overwrite").saveAsTable("bucketed_table")
1262
+ >>> # Read the Parquet file as a DataFrame.
1263
+ ... spark.read.table("bucketed_table").sort("age").show()
1264
+ +---+------------+
1265
+ |age| name|
1266
+ +---+------------+
1267
+ |100|Hyukjin Kwon|
1268
+ |120|Hyukjin Kwon|
1269
+ |140| Haejoon Lee|
1270
+ +---+------------+
1271
+ >>> _ = spark.sql("DROP TABLE bucketed_table")
1272
+ """
1273
+ if not isinstance(numBuckets, int):
1274
+ raise PySparkTypeError(
1275
+ error_class="NOT_INT",
1276
+ message_parameters={
1277
+ "arg_name": "numBuckets",
1278
+ "arg_type": type(numBuckets).__name__,
1279
+ },
1280
+ )
1281
+
1282
+ if isinstance(col, (list, tuple)):
1283
+ if cols:
1284
+ raise PySparkValueError(
1285
+ error_class="CANNOT_SET_TOGETHER",
1286
+ message_parameters={
1287
+ "arg_list": f"`col` of type {type(col).__name__} and `cols`",
1288
+ },
1289
+ )
1290
+
1291
+ col, cols = col[0], col[1:] # type: ignore[assignment]
1292
+
1293
+ for c in cols:
1294
+ if not isinstance(c, str):
1295
+ raise PySparkTypeError(
1296
+ error_class="NOT_LIST_OF_STR",
1297
+ message_parameters={
1298
+ "arg_name": "cols",
1299
+ "arg_type": type(c).__name__,
1300
+ },
1301
+ )
1302
+ if not isinstance(col, str):
1303
+ raise PySparkTypeError(
1304
+ error_class="NOT_LIST_OF_STR",
1305
+ message_parameters={
1306
+ "arg_name": "col",
1307
+ "arg_type": type(col).__name__,
1308
+ },
1309
+ )
1310
+
1311
+ self._jwrite = self._jwrite.bucketBy(
1312
+ numBuckets, col, _to_seq(self._spark._sc, cast(Iterable["ColumnOrName"], cols))
1313
+ )
1314
+ return self
1315
+
1316
+ @overload
1317
+ def sortBy(self, col: str, *cols: str) -> "DataFrameWriter":
1318
+ ...
1319
+
1320
+ @overload
1321
+ def sortBy(self, col: TupleOrListOfString) -> "DataFrameWriter":
1322
+ ...
1323
+
1324
+ def sortBy(
1325
+ self, col: Union[str, TupleOrListOfString], *cols: Optional[str]
1326
+ ) -> "DataFrameWriter":
1327
+ """Sorts the output in each bucket by the given columns on the file system.
1328
+
1329
+ .. versionadded:: 2.3.0
1330
+
1331
+ .. versionchanged:: 3.4.0
1332
+ Supports Spark Connect.
1333
+
1334
+ Parameters
1335
+ ----------
1336
+ col : str, tuple or list
1337
+ a name of a column, or a list of names.
1338
+ cols : str
1339
+ additional names (optional). If `col` is a list it should be empty.
1340
+
1341
+ Examples
1342
+ --------
1343
+ Write a DataFrame into a Parquet file in a sorted-bucketed manner, and read it back.
1344
+
1345
+ >>> from pyspark.sql.functions import input_file_name
1346
+ >>> # Write a DataFrame into a Parquet file in a sorted-bucketed manner.
1347
+ ... _ = spark.sql("DROP TABLE IF EXISTS sorted_bucketed_table")
1348
+ >>> spark.createDataFrame([
1349
+ ... (100, "Hyukjin Kwon"), (120, "Hyukjin Kwon"), (140, "Haejoon Lee")],
1350
+ ... schema=["age", "name"]
1351
+ ... ).write.bucketBy(1, "name").sortBy("age").mode(
1352
+ ... "overwrite").saveAsTable("sorted_bucketed_table")
1353
+ >>> # Read the Parquet file as a DataFrame.
1354
+ ... spark.read.table("sorted_bucketed_table").sort("age").show()
1355
+ +---+------------+
1356
+ |age| name|
1357
+ +---+------------+
1358
+ |100|Hyukjin Kwon|
1359
+ |120|Hyukjin Kwon|
1360
+ |140| Haejoon Lee|
1361
+ +---+------------+
1362
+ >>> _ = spark.sql("DROP TABLE sorted_bucketed_table")
1363
+ """
1364
+ if isinstance(col, (list, tuple)):
1365
+ if cols:
1366
+ raise PySparkValueError(
1367
+ error_class="CANNOT_SET_TOGETHER",
1368
+ message_parameters={
1369
+ "arg_list": f"`col` of type {type(col).__name__} and `cols`",
1370
+ },
1371
+ )
1372
+
1373
+ col, cols = col[0], col[1:] # type: ignore[assignment]
1374
+
1375
+ for c in cols:
1376
+ if not isinstance(c, str):
1377
+ raise PySparkTypeError(
1378
+ error_class="NOT_LIST_OF_STR",
1379
+ message_parameters={
1380
+ "arg_name": "cols",
1381
+ "arg_type": type(c).__name__,
1382
+ },
1383
+ )
1384
+ if not isinstance(col, str):
1385
+ raise PySparkTypeError(
1386
+ error_class="NOT_LIST_OF_STR",
1387
+ message_parameters={
1388
+ "arg_name": "col",
1389
+ "arg_type": type(col).__name__,
1390
+ },
1391
+ )
1392
+
1393
+ self._jwrite = self._jwrite.sortBy(
1394
+ col, _to_seq(self._spark._sc, cast(Iterable["ColumnOrName"], cols))
1395
+ )
1396
+ return self
1397
+
1398
+ def save(
1399
+ self,
1400
+ path: Optional[str] = None,
1401
+ format: Optional[str] = None,
1402
+ mode: Optional[str] = None,
1403
+ partitionBy: Optional[Union[str, List[str]]] = None,
1404
+ **options: "OptionalPrimitiveType",
1405
+ ) -> None:
1406
+ """Saves the contents of the :class:`DataFrame` to a data source.
1407
+
1408
+ The data source is specified by the ``format`` and a set of ``options``.
1409
+ If ``format`` is not specified, the default data source configured by
1410
+ ``spark.sql.sources.default`` will be used.
1411
+
1412
+ .. versionadded:: 1.4.0
1413
+
1414
+ .. versionchanged:: 3.4.0
1415
+ Supports Spark Connect.
1416
+
1417
+ Parameters
1418
+ ----------
1419
+ path : str, optional
1420
+ the path in a Hadoop supported file system
1421
+ format : str, optional
1422
+ the format used to save
1423
+ mode : str, optional
1424
+ specifies the behavior of the save operation when data already exists.
1425
+
1426
+ * ``append``: Append contents of this :class:`DataFrame` to existing data.
1427
+ * ``overwrite``: Overwrite existing data.
1428
+ * ``ignore``: Silently ignore this operation if data already exists.
1429
+ * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
1430
+ exists.
1431
+ partitionBy : list, optional
1432
+ names of partitioning columns
1433
+ **options : dict
1434
+ all other string options
1435
+
1436
+ Examples
1437
+ --------
1438
+ Write a DataFrame into a JSON file and read it back.
1439
+
1440
+ >>> import tempfile
1441
+ >>> with tempfile.TemporaryDirectory() as d:
1442
+ ... # Write a DataFrame into a JSON file
1443
+ ... spark.createDataFrame(
1444
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
1445
+ ... ).write.mode("overwrite").format("json").save(d)
1446
+ ...
1447
+ ... # Read the JSON file as a DataFrame.
1448
+ ... spark.read.format('json').load(d).show()
1449
+ +---+------------+
1450
+ |age| name|
1451
+ +---+------------+
1452
+ |100|Hyukjin Kwon|
1453
+ +---+------------+
1454
+ """
1455
+ self.mode(mode).options(**options)
1456
+ if partitionBy is not None:
1457
+ self.partitionBy(partitionBy)
1458
+ if format is not None:
1459
+ self.format(format)
1460
+ if path is None:
1461
+ self._jwrite.save()
1462
+ else:
1463
+ self._jwrite.save(path)
1464
+
1465
+ def insertInto(self, tableName: str, overwrite: Optional[bool] = None) -> None:
1466
+ """Inserts the content of the :class:`DataFrame` to the specified table.
1467
+
1468
+ It requires that the schema of the :class:`DataFrame` is the same as the
1469
+ schema of the table.
1470
+
1471
+ .. versionadded:: 1.4.0
1472
+
1473
+ .. versionchanged:: 3.4.0
1474
+ Supports Spark Connect.
1475
+
1476
+ Parameters
1477
+ ----------
1478
+ overwrite : bool, optional
1479
+ If true, overwrites existing data. Disabled by default
1480
+
1481
+ Notes
1482
+ -----
1483
+ Unlike :meth:`DataFrameWriter.saveAsTable`, :meth:`DataFrameWriter.insertInto` ignores
1484
+ the column names and just uses position-based resolution.
1485
+
1486
+ Examples
1487
+ --------
1488
+ >>> _ = spark.sql("DROP TABLE IF EXISTS tblA")
1489
+ >>> df = spark.createDataFrame([
1490
+ ... (100, "Hyukjin Kwon"), (120, "Hyukjin Kwon"), (140, "Haejoon Lee")],
1491
+ ... schema=["age", "name"]
1492
+ ... )
1493
+ >>> df.write.saveAsTable("tblA")
1494
+
1495
+ Insert the data into the 'tblA' table, but with different column names.
1496
+
1497
+ >>> df.selectExpr("age AS col1", "name AS col2").write.insertInto("tblA")
1498
+ >>> spark.read.table("tblA").sort("age").show()
1499
+ +---+------------+
1500
+ |age| name|
1501
+ +---+------------+
1502
+ |100|Hyukjin Kwon|
1503
+ |100|Hyukjin Kwon|
1504
+ |120|Hyukjin Kwon|
1505
+ |120|Hyukjin Kwon|
1506
+ |140| Haejoon Lee|
1507
+ |140| Haejoon Lee|
1508
+ +---+------------+
1509
+ >>> _ = spark.sql("DROP TABLE tblA")
1510
+ """
1511
+ if overwrite is not None:
1512
+ self.mode("overwrite" if overwrite else "append")
1513
+ self._jwrite.insertInto(tableName)
1514
+
1515
+ def saveAsTable(
1516
+ self,
1517
+ name: str,
1518
+ format: Optional[str] = None,
1519
+ mode: Optional[str] = None,
1520
+ partitionBy: Optional[Union[str, List[str]]] = None,
1521
+ **options: "OptionalPrimitiveType",
1522
+ ) -> None:
1523
+ """Saves the content of the :class:`DataFrame` as the specified table.
1524
+
1525
+ If the table already exists, the behavior of this function depends on the
1526
+ save mode, specified by the `mode` function (defaults to throwing an exception).
1527
+ When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
1528
+ the same as that of the existing table.
1529
+
1530
+ * `append`: Append contents of this :class:`DataFrame` to existing data.
1531
+ * `overwrite`: Overwrite existing data.
1532
+ * `error` or `errorifexists`: Throw an exception if data already exists.
1533
+ * `ignore`: Silently ignore this operation if data already exists.
1534
+
1535
+ .. versionadded:: 1.4.0
1536
+
1537
+ .. versionchanged:: 3.4.0
1538
+ Supports Spark Connect.
1539
+
1540
+ Notes
1541
+ -----
1542
+ When `mode` is `Append`, if there is an existing table, we will use the format and
1543
+ options of the existing table. The column order in the schema of the :class:`DataFrame`
1544
+ doesn't need to be the same as that of the existing table. Unlike
1545
+ :meth:`DataFrameWriter.insertInto`, :meth:`DataFrameWriter.saveAsTable` will use the
1546
+ column names to find the correct column positions.
1547
+
1548
+ Parameters
1549
+ ----------
1550
+ name : str
1551
+ the table name
1552
+ format : str, optional
1553
+ the format used to save
1554
+ mode : str, optional
1555
+ one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
1556
+ (default: error)
1557
+ partitionBy : str or list
1558
+ names of partitioning columns
1559
+ **options : dict
1560
+ all other string options
1561
+
1562
+ Examples
1563
+ --------
1564
+ Create a table from a DataFrame, and read it back.
1565
+
1566
+ >>> _ = spark.sql("DROP TABLE IF EXISTS tblA")
1567
+ >>> spark.createDataFrame([
1568
+ ... (100, "Hyukjin Kwon"), (120, "Hyukjin Kwon"), (140, "Haejoon Lee")],
1569
+ ... schema=["age", "name"]
1570
+ ... ).write.saveAsTable("tblA")
1571
+ >>> spark.read.table("tblA").sort("age").show()
1572
+ +---+------------+
1573
+ |age| name|
1574
+ +---+------------+
1575
+ |100|Hyukjin Kwon|
1576
+ |120|Hyukjin Kwon|
1577
+ |140| Haejoon Lee|
1578
+ +---+------------+
1579
+ >>> _ = spark.sql("DROP TABLE tblA")
1580
+ """
1581
+ self.mode(mode).options(**options)
1582
+ if partitionBy is not None:
1583
+ self.partitionBy(partitionBy)
1584
+ if format is not None:
1585
+ self.format(format)
1586
+ self._jwrite.saveAsTable(name)
1587
+
1588
+ def json(
1589
+ self,
1590
+ path: str,
1591
+ mode: Optional[str] = None,
1592
+ compression: Optional[str] = None,
1593
+ dateFormat: Optional[str] = None,
1594
+ timestampFormat: Optional[str] = None,
1595
+ lineSep: Optional[str] = None,
1596
+ encoding: Optional[str] = None,
1597
+ ignoreNullFields: Optional[Union[bool, str]] = None,
1598
+ ) -> None:
1599
+ """Saves the content of the :class:`DataFrame` in JSON format
1600
+ (`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
1601
+ specified path.
1602
+
1603
+ .. versionadded:: 1.4.0
1604
+
1605
+ .. versionchanged:: 3.4.0
1606
+ Supports Spark Connect.
1607
+
1608
+ Parameters
1609
+ ----------
1610
+ path : str
1611
+ the path in any Hadoop supported file system
1612
+ mode : str, optional
1613
+ specifies the behavior of the save operation when data already exists.
1614
+
1615
+ * ``append``: Append contents of this :class:`DataFrame` to existing data.
1616
+ * ``overwrite``: Overwrite existing data.
1617
+ * ``ignore``: Silently ignore this operation if data already exists.
1618
+ * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
1619
+ exists.
1620
+
1621
+ Other Parameters
1622
+ ----------------
1623
+ Extra options
1624
+ For the extra options, refer to
1625
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
1626
+ for the version you use.
1627
+
1628
+ .. # noqa
1629
+
1630
+ Examples
1631
+ --------
1632
+ Write a DataFrame into a JSON file and read it back.
1633
+
1634
+ >>> import tempfile
1635
+ >>> with tempfile.TemporaryDirectory() as d:
1636
+ ... # Write a DataFrame into a JSON file
1637
+ ... spark.createDataFrame(
1638
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
1639
+ ... ).write.json(d, mode="overwrite")
1640
+ ...
1641
+ ... # Read the JSON file as a DataFrame.
1642
+ ... spark.read.format("json").load(d).show()
1643
+ +---+------------+
1644
+ |age| name|
1645
+ +---+------------+
1646
+ |100|Hyukjin Kwon|
1647
+ +---+------------+
1648
+ """
1649
+ self.mode(mode)
1650
+ self._set_opts(
1651
+ compression=compression,
1652
+ dateFormat=dateFormat,
1653
+ timestampFormat=timestampFormat,
1654
+ lineSep=lineSep,
1655
+ encoding=encoding,
1656
+ ignoreNullFields=ignoreNullFields,
1657
+ )
1658
+ self._jwrite.json(path)
1659
+
1660
+ def parquet(
1661
+ self,
1662
+ path: str,
1663
+ mode: Optional[str] = None,
1664
+ partitionBy: Optional[Union[str, List[str]]] = None,
1665
+ compression: Optional[str] = None,
1666
+ ) -> None:
1667
+ """Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
1668
+
1669
+ .. versionadded:: 1.4.0
1670
+
1671
+ .. versionchanged:: 3.4.0
1672
+ Supports Spark Connect.
1673
+
1674
+ Parameters
1675
+ ----------
1676
+ path : str
1677
+ the path in any Hadoop supported file system
1678
+ mode : str, optional
1679
+ specifies the behavior of the save operation when data already exists.
1680
+
1681
+ * ``append``: Append contents of this :class:`DataFrame` to existing data.
1682
+ * ``overwrite``: Overwrite existing data.
1683
+ * ``ignore``: Silently ignore this operation if data already exists.
1684
+ * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
1685
+ exists.
1686
+ partitionBy : str or list, optional
1687
+ names of partitioning columns
1688
+
1689
+ Other Parameters
1690
+ ----------------
1691
+ Extra options
1692
+ For the extra options, refer to
1693
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#data-source-option>`_
1694
+ for the version you use.
1695
+
1696
+ .. # noqa
1697
+
1698
+ Examples
1699
+ --------
1700
+ Write a DataFrame into a Parquet file and read it back.
1701
+
1702
+ >>> import tempfile
1703
+ >>> with tempfile.TemporaryDirectory() as d:
1704
+ ... # Write a DataFrame into a Parquet file
1705
+ ... spark.createDataFrame(
1706
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
1707
+ ... ).write.parquet(d, mode="overwrite")
1708
+ ...
1709
+ ... # Read the Parquet file as a DataFrame.
1710
+ ... spark.read.format("parquet").load(d).show()
1711
+ +---+------------+
1712
+ |age| name|
1713
+ +---+------------+
1714
+ |100|Hyukjin Kwon|
1715
+ +---+------------+
1716
+ """
1717
+ self.mode(mode)
1718
+ if partitionBy is not None:
1719
+ self.partitionBy(partitionBy)
1720
+ self._set_opts(compression=compression)
1721
+ self._jwrite.parquet(path)
1722
+
1723
+ def text(
1724
+ self, path: str, compression: Optional[str] = None, lineSep: Optional[str] = None
1725
+ ) -> None:
1726
+ """Saves the content of the DataFrame in a text file at the specified path.
1727
+ The text files will be encoded as UTF-8.
1728
+
1729
+ .. versionadded:: 1.6.0
1730
+
1731
+ .. versionchanged:: 3.4.0
1732
+ Supports Spark Connect.
1733
+
1734
+ Parameters
1735
+ ----------
1736
+ path : str
1737
+ the path in any Hadoop supported file system
1738
+
1739
+ Other Parameters
1740
+ ----------------
1741
+ Extra options
1742
+ For the extra options, refer to
1743
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-text.html#data-source-option>`_
1744
+ for the version you use.
1745
+
1746
+ .. # noqa
1747
+
1748
+ Notes
1749
+ -----
1750
+ The DataFrame must have only one column that is of string type.
1751
+ Each row becomes a new line in the output file.
1752
+
1753
+ Examples
1754
+ --------
1755
+ Write a DataFrame into a text file and read it back.
1756
+
1757
+ >>> import tempfile
1758
+ >>> with tempfile.TemporaryDirectory() as d:
1759
+ ... # Write a DataFrame into a text file
1760
+ ... df = spark.createDataFrame([("a",), ("b",), ("c",)], schema=["alphabets"])
1761
+ ... df.write.mode("overwrite").text(d)
1762
+ ...
1763
+ ... # Read the text file as a DataFrame.
1764
+ ... spark.read.schema(df.schema).format("text").load(d).sort("alphabets").show()
1765
+ +---------+
1766
+ |alphabets|
1767
+ +---------+
1768
+ | a|
1769
+ | b|
1770
+ | c|
1771
+ +---------+
1772
+ """
1773
+ self._set_opts(compression=compression, lineSep=lineSep)
1774
+ self._jwrite.text(path)
1775
+
1776
+ def csv(
1777
+ self,
1778
+ path: str,
1779
+ mode: Optional[str] = None,
1780
+ compression: Optional[str] = None,
1781
+ sep: Optional[str] = None,
1782
+ quote: Optional[str] = None,
1783
+ escape: Optional[str] = None,
1784
+ header: Optional[Union[bool, str]] = None,
1785
+ nullValue: Optional[str] = None,
1786
+ escapeQuotes: Optional[Union[bool, str]] = None,
1787
+ quoteAll: Optional[Union[bool, str]] = None,
1788
+ dateFormat: Optional[str] = None,
1789
+ timestampFormat: Optional[str] = None,
1790
+ ignoreLeadingWhiteSpace: Optional[Union[bool, str]] = None,
1791
+ ignoreTrailingWhiteSpace: Optional[Union[bool, str]] = None,
1792
+ charToEscapeQuoteEscaping: Optional[str] = None,
1793
+ encoding: Optional[str] = None,
1794
+ emptyValue: Optional[str] = None,
1795
+ lineSep: Optional[str] = None,
1796
+ ) -> None:
1797
+ r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
1798
+
1799
+ .. versionadded:: 2.0.0
1800
+
1801
+ .. versionchanged:: 3.4.0
1802
+ Supports Spark Connect.
1803
+
1804
+ Parameters
1805
+ ----------
1806
+ path : str
1807
+ the path in any Hadoop supported file system
1808
+ mode : str, optional
1809
+ specifies the behavior of the save operation when data already exists.
1810
+
1811
+ * ``append``: Append contents of this :class:`DataFrame` to existing data.
1812
+ * ``overwrite``: Overwrite existing data.
1813
+ * ``ignore``: Silently ignore this operation if data already exists.
1814
+ * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
1815
+ exists.
1816
+
1817
+ Other Parameters
1818
+ ----------------
1819
+ Extra options
1820
+ For the extra options, refer to
1821
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
1822
+ for the version you use.
1823
+
1824
+ .. # noqa
1825
+
1826
+ Examples
1827
+ --------
1828
+ Write a DataFrame into a CSV file and read it back.
1829
+
1830
+ >>> import tempfile
1831
+ >>> with tempfile.TemporaryDirectory() as d:
1832
+ ... # Write a DataFrame into a CSV file
1833
+ ... df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
1834
+ ... df.write.csv(d, mode="overwrite")
1835
+ ...
1836
+ ... # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon'.
1837
+ ... spark.read.schema(df.schema).format("csv").option(
1838
+ ... "nullValue", "Hyukjin Kwon").load(d).show()
1839
+ +---+----+
1840
+ |age|name|
1841
+ +---+----+
1842
+ |100|NULL|
1843
+ +---+----+
1844
+ """
1845
+ self.mode(mode)
1846
+ self._set_opts(
1847
+ compression=compression,
1848
+ sep=sep,
1849
+ quote=quote,
1850
+ escape=escape,
1851
+ header=header,
1852
+ nullValue=nullValue,
1853
+ escapeQuotes=escapeQuotes,
1854
+ quoteAll=quoteAll,
1855
+ dateFormat=dateFormat,
1856
+ timestampFormat=timestampFormat,
1857
+ ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
1858
+ ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
1859
+ charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
1860
+ encoding=encoding,
1861
+ emptyValue=emptyValue,
1862
+ lineSep=lineSep,
1863
+ )
1864
+ self._jwrite.csv(path)
1865
+
1866
+ def orc(
1867
+ self,
1868
+ path: str,
1869
+ mode: Optional[str] = None,
1870
+ partitionBy: Optional[Union[str, List[str]]] = None,
1871
+ compression: Optional[str] = None,
1872
+ ) -> None:
1873
+ """Saves the content of the :class:`DataFrame` in ORC format at the specified path.
1874
+
1875
+ .. versionadded:: 1.5.0
1876
+
1877
+ .. versionchanged:: 3.4.0
1878
+ Supports Spark Connect.
1879
+
1880
+ Parameters
1881
+ ----------
1882
+ path : str
1883
+ the path in any Hadoop supported file system
1884
+ mode : str, optional
1885
+ specifies the behavior of the save operation when data already exists.
1886
+
1887
+ * ``append``: Append contents of this :class:`DataFrame` to existing data.
1888
+ * ``overwrite``: Overwrite existing data.
1889
+ * ``ignore``: Silently ignore this operation if data already exists.
1890
+ * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
1891
+ exists.
1892
+ partitionBy : str or list, optional
1893
+ names of partitioning columns
1894
+
1895
+ Other Parameters
1896
+ ----------------
1897
+ Extra options
1898
+ For the extra options, refer to
1899
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-orc.html#data-source-option>`_
1900
+ for the version you use.
1901
+
1902
+ .. # noqa
1903
+
1904
+ Examples
1905
+ --------
1906
+ Write a DataFrame into an ORC file and read it back.
1907
+
1908
+ >>> import tempfile
1909
+ >>> with tempfile.TemporaryDirectory() as d:
1910
+ ... # Write a DataFrame into an ORC file
1911
+ ... spark.createDataFrame(
1912
+ ... [{"age": 100, "name": "Hyukjin Kwon"}]
1913
+ ... ).write.orc(d, mode="overwrite")
1914
+ ...
1915
+ ... # Read the ORC file as a DataFrame.
1916
+ ... spark.read.format("orc").load(d).show()
1917
+ +---+------------+
1918
+ |age| name|
1919
+ +---+------------+
1920
+ |100|Hyukjin Kwon|
1921
+ +---+------------+
1922
+ """
1923
+ self.mode(mode)
1924
+ if partitionBy is not None:
1925
+ self.partitionBy(partitionBy)
1926
+ self._set_opts(compression=compression)
1927
+ self._jwrite.orc(path)
1928
+
1929
+ def jdbc(
1930
+ self,
1931
+ url: str,
1932
+ table: str,
1933
+ mode: Optional[str] = None,
1934
+ properties: Optional[Dict[str, str]] = None,
1935
+ ) -> None:
1936
+ """Saves the content of the :class:`DataFrame` to an external database table via JDBC.
1937
+
1938
+ .. versionadded:: 1.4.0
1939
+
1940
+ .. versionchanged:: 3.4.0
1941
+ Supports Spark Connect.
1942
+
1943
+ Parameters
1944
+ ----------
1945
+ table : str
1946
+ Name of the table in the external database.
1947
+ mode : str, optional
1948
+ specifies the behavior of the save operation when data already exists.
1949
+
1950
+ * ``append``: Append contents of this :class:`DataFrame` to existing data.
1951
+ * ``overwrite``: Overwrite existing data.
1952
+ * ``ignore``: Silently ignore this operation if data already exists.
1953
+ * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
1954
+ exists.
1955
+ properties : dict
1956
+ a dictionary of JDBC database connection arguments, normally including at
1957
+ least the "user" and "password" properties with their corresponding values.
1958
+ For example: { 'user' : 'SYSTEM', 'password' : 'mypassword' }
1959
+
1960
+ Other Parameters
1961
+ ----------------
1962
+ Extra options
1963
+ For the extra options, refer to
1964
+ `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html#data-source-option>`_
1965
+ for the version you use.
1966
+
1967
+ .. # noqa
1968
+
1969
+ Notes
1970
+ -----
1971
+ Don't create too many partitions in parallel on a large cluster;
1972
+ otherwise Spark might crash your external database systems.
1973
+ """
1974
+ if properties is None:
1975
+ properties = dict()
1976
+
1977
+ assert self._spark._sc._gateway is not None
1978
+ jprop = JavaClass(
1979
+ "java.util.Properties",
1980
+ self._spark._sc._gateway._gateway_client,
1981
+ )()
1982
+ for k in properties:
1983
+ jprop.setProperty(k, properties[k])
1984
+ self.mode(mode)._jwrite.jdbc(url, table, jprop)
1985
+
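# A minimal sketch of appending a DataFrame to an external table over JDBC; the URL,
# table name, and credentials are hypothetical and assume a reachable database.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(100, "Hyukjin Kwon")], schema=["age", "name"])
df.write.jdbc(
    url="jdbc:postgresql://localhost:5432/testdb",  # hypothetical URL
    table="people",
    mode="append",
    properties={"user": "SYSTEM", "password": "mypassword"},  # hypothetical credentials
)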
1986
+
1987
+ class DataFrameWriterV2:
1988
+ """
1989
+ Interface used to write a :class:`pyspark.sql.dataframe.DataFrame`
1990
+ to external storage using the v2 API.
1991
+
1992
+ .. versionadded:: 3.1.0
1993
+
1994
+ .. versionchanged:: 3.4.0
1995
+ Supports Spark Connect.
1996
+ """
1997
+
1998
+ def __init__(self, df: "DataFrame", table: str):
1999
+ self._df = df
2000
+ self._spark = df.sparkSession
2001
+ self._jwriter = df._jdf.writeTo(table)
2002
+
2003
+ @since(3.1)
2004
+ def using(self, provider: str) -> "DataFrameWriterV2":
2005
+ """
2006
+ Specifies a provider for the underlying output data source.
2007
+ Spark's default catalog supports "parquet", "json", etc.
2008
+ """
2009
+ self._jwriter.using(provider)
2010
+ return self
2011
+
2012
+ @since(3.1)
2013
+ def option(self, key: str, value: "OptionalPrimitiveType") -> "DataFrameWriterV2":
2014
+ """
2015
+ Add a write option.
2016
+ """
2017
+ self._jwriter.option(key, to_str(value))
2018
+ return self
2019
+
2020
+ @since(3.1)
2021
+ def options(self, **options: "OptionalPrimitiveType") -> "DataFrameWriterV2":
2022
+ """
2023
+ Add write options.
2024
+ """
2025
+ options = {k: to_str(v) for k, v in options.items()}
2026
+ self._jwriter.options(options)
2027
+ return self
2028
+
2029
+ @since(3.1)
2030
+ def tableProperty(self, property: str, value: str) -> "DataFrameWriterV2":
2031
+ """
2032
+ Add table property.
2033
+ """
2034
+ self._jwriter.tableProperty(property, value)
2035
+ return self
2036
+
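# A minimal sketch of a typical DataFrameWriterV2 builder chain, assuming a configured v2
# catalog; "my_catalog.db.people" and the table property name are hypothetical (supported
# properties depend on the catalog).
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(100, "Hyukjin Kwon")], schema=["age", "name"])
(
    df.writeTo("my_catalog.db.people")       # DataFrameWriterV2 for that table
    .using("parquet")                        # provider for the table to be created
    .option("compression", "snappy")         # single write option
    .tableProperty("created-by", "example")  # illustrative table property
    .create()
)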
2037
+ @since(3.1)
2038
+ def partitionedBy(self, col: Column, *cols: Column) -> "DataFrameWriterV2":
2039
+ """
2040
+ Partition the output table created by `create`, `createOrReplace`, or `replace` using
2041
+ the given columns or transforms.
2042
+
2043
+ When specified, the table data will be stored by these values for efficient reads.
2044
+
2045
+ For example, when a table is partitioned by day, it may be stored
2046
+ in a directory layout like:
2047
+
2048
+ * `table/day=2019-06-01/`
2049
+ * `table/day=2019-06-02/`
2050
+
2051
+ Partitioning is one of the most widely used techniques to optimize physical data layout.
2052
+ It provides a coarse-grained index for skipping unnecessary data reads when queries have
2053
+ predicates on the partitioned columns. In order for partitioning to work well, the number
2054
+ of distinct values in each column should typically be less than tens of thousands.
2055
+
2056
+ `col` and `cols` support only the following functions:
2057
+
2058
+ * :py:func:`pyspark.sql.functions.years`
2059
+ * :py:func:`pyspark.sql.functions.months`
2060
+ * :py:func:`pyspark.sql.functions.days`
2061
+ * :py:func:`pyspark.sql.functions.hours`
2062
+ * :py:func:`pyspark.sql.functions.bucket`
2063
+
2064
+ """
2065
+ col = _to_java_column(col)
2066
+ cols = _to_seq(self._spark._sc, [_to_java_column(c) for c in cols])
2067
+ self._jwriter.partitionedBy(col, cols)
2068
+ return self
2069
+
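# A minimal sketch of partitioning a v2 table by a date transform, assuming a configured
# v2 catalog; "my_catalog.db.events" is a hypothetical identifier.
from pyspark.sql import SparkSession
from pyspark.sql.functions import days, to_date

spark = SparkSession.builder.getOrCreate()
events = spark.createDataFrame(
    [("2019-06-01", 1), ("2019-06-02", 2)], schema=["day", "value"]
).withColumn("day", to_date("day"))
events.writeTo("my_catalog.db.events").using("parquet").partitionedBy(days("day")).create()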
2070
+ @since(3.1)
2071
+ def create(self) -> None:
2072
+ """
2073
+ Create a new table from the contents of the data frame.
2074
+
2075
+ The new table's schema, partition layout, properties, and other configuration will be
2076
+ based on the configuration set on this writer.
2077
+ """
2078
+ self._jwriter.create()
2079
+
2080
+ @since(3.1)
2081
+ def replace(self) -> None:
2082
+ """
2083
+ Replace an existing table with the contents of the data frame.
2084
+
2085
+ The existing table's schema, partition layout, properties, and other configuration will be
2086
+ replaced with the contents of the data frame and the configuration set on this writer.
2087
+ """
2088
+ self._jwriter.replace()
2089
+
2090
+ @since(3.1)
2091
+ def createOrReplace(self) -> None:
2092
+ """
2093
+ Create a new table or replace an existing table with the contents of the data frame.
2094
+
2095
+ The output table's schema, partition layout, properties,
2096
+ and other configuration will be based on the contents of the data frame
2097
+ and the configuration set on this writer.
2098
+ If the table exists, its configuration and data will be replaced.
2099
+ """
2100
+ self._jwriter.createOrReplace()
2101
+
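# A minimal sketch: create-or-replace a v2 table, then append more rows to it, assuming a
# configured v2 catalog; the table identifier is hypothetical.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
spark.createDataFrame([(100, "Hyukjin Kwon")], schema=["age", "name"]).writeTo(
    "my_catalog.db.people"
).using("parquet").createOrReplace()
spark.createDataFrame([(120, "Takuya Ueshin")], schema=["age", "name"]).writeTo(
    "my_catalog.db.people"
).append()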
2102
+ @since(3.1)
2103
+ def append(self) -> None:
2104
+ """
2105
+ Append the contents of the data frame to the output table.
2106
+ """
2107
+ self._jwriter.append()
2108
+
2109
+ @since(3.1)
2110
+ def overwrite(self, condition: Column) -> None:
2111
+ """
2112
+ Overwrite rows matching the given filter condition with the contents of the data frame in
2113
+ the output table.
2114
+ """
2115
+ condition = _to_java_column(condition)
2116
+ self._jwriter.overwrite(condition)
2117
+
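# A minimal sketch of a conditional overwrite, assuming a configured v2 catalog and a
# hypothetical table: rows with age = 100 in the target are deleted and replaced by `updates`.
from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()
updates = spark.createDataFrame([(100, "Hyukjin Kwon")], schema=["age", "name"])
updates.writeTo("my_catalog.db.people").overwrite(col("age") == 100)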
2118
+ @since(3.1)
2119
+ def overwritePartitions(self) -> None:
2120
+ """
2121
+ Overwrite all partitions for which the data frame contains at least one row with the contents
2122
+ of the data frame in the output table.
2123
+
2124
+ This operation is equivalent to Hive's `INSERT OVERWRITE ... PARTITION`, which replaces
2125
+ partitions dynamically depending on the contents of the data frame.
2126
+ """
2127
+ self._jwriter.overwritePartitions()
2128
+
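# A minimal sketch of dynamic partition overwrite, assuming a configured v2 catalog and a
# hypothetical day-partitioned table: only the partitions present in `updates` are rewritten.
from pyspark.sql import SparkSession
from pyspark.sql.functions import to_date

spark = SparkSession.builder.getOrCreate()
updates = spark.createDataFrame([("2019-06-01", 42)], schema=["day", "value"]).withColumn(
    "day", to_date("day")
)
updates.writeTo("my_catalog.db.events").overwritePartitions()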
2129
+
2130
+ def _test() -> None:
2131
+ import doctest
2132
+ import os
2133
+ import py4j
2134
+ from pyspark.context import SparkContext
2135
+ from pyspark.sql import SparkSession
2136
+ import pyspark.sql.readwriter
2137
+
2138
+ os.chdir(os.environ["SPARK_HOME"])
2139
+
2140
+ globs = pyspark.sql.readwriter.__dict__.copy()
2141
+ sc = SparkContext("local[4]", "PythonTest")
2142
+ try:
2143
+ spark = SparkSession._getActiveSessionOrCreate()
2144
+ except py4j.protocol.Py4JError:
2145
+ spark = SparkSession(sc)
2146
+
2147
+ globs["spark"] = spark
2148
+ (failure_count, test_count) = doctest.testmod(
2149
+ pyspark.sql.readwriter,
2150
+ globs=globs,
2151
+ optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF,
2152
+ )
2153
+ spark.stop()
2154
+ if failure_count:
2155
+ sys.exit(-1)
2156
+
2157
+
2158
+ if __name__ == "__main__":
2159
+ _test()