snowpark-connect 0.24.0__py3-none-any.whl → 0.26.0__py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries, as they appear in their respective public registries. It is provided for informational purposes only.

This version of snowpark-connect might be problematic.

Files changed (484)
  1. snowflake/snowpark_connect/column_name_handler.py +116 -4
  2. snowflake/snowpark_connect/config.py +23 -0
  3. snowflake/snowpark_connect/constants.py +0 -29
  4. snowflake/snowpark_connect/dataframe_container.py +22 -0
  5. snowflake/snowpark_connect/execute_plan/map_execution_command.py +56 -1
  6. snowflake/snowpark_connect/expression/literal.py +13 -2
  7. snowflake/snowpark_connect/expression/map_cast.py +5 -8
  8. snowflake/snowpark_connect/expression/map_sql_expression.py +23 -1
  9. snowflake/snowpark_connect/expression/map_udf.py +88 -29
  10. snowflake/snowpark_connect/expression/map_unresolved_attribute.py +199 -15
  11. snowflake/snowpark_connect/expression/map_unresolved_extract_value.py +44 -16
  12. snowflake/snowpark_connect/expression/map_unresolved_function.py +840 -367
  13. snowflake/snowpark_connect/expression/map_unresolved_star.py +3 -2
  14. snowflake/snowpark_connect/hidden_column.py +39 -0
  15. snowflake/snowpark_connect/includes/jars/hadoop-client-api-trimmed-3.3.4.jar +0 -0
  16. snowflake/snowpark_connect/includes/jars/json4s-native_2.12-3.7.0-M11.jar +0 -0
  17. snowflake/snowpark_connect/includes/jars/paranamer-2.8.3.jar +0 -0
  18. snowflake/snowpark_connect/includes/jars/sas-scala-udf_2.12-0.1.0.jar +0 -0
  19. snowflake/snowpark_connect/includes/jars/{hadoop-client-api-3.3.4.jar → spark-connect-client-jvm_2.12-3.5.6.jar} +0 -0
  20. snowflake/snowpark_connect/relation/map_column_ops.py +17 -4
  21. snowflake/snowpark_connect/relation/map_extension.py +52 -11
  22. snowflake/snowpark_connect/relation/map_join.py +258 -62
  23. snowflake/snowpark_connect/relation/map_map_partitions.py +9 -4
  24. snowflake/snowpark_connect/relation/map_relation.py +12 -1
  25. snowflake/snowpark_connect/relation/map_row_ops.py +8 -1
  26. snowflake/snowpark_connect/relation/map_sql.py +88 -11
  27. snowflake/snowpark_connect/relation/map_udtf.py +100 -46
  28. snowflake/snowpark_connect/relation/read/map_read.py +3 -3
  29. snowflake/snowpark_connect/relation/read/map_read_jdbc.py +1 -1
  30. snowflake/snowpark_connect/relation/read/map_read_json.py +8 -1
  31. snowflake/snowpark_connect/relation/read/map_read_table.py +1 -9
  32. snowflake/snowpark_connect/relation/read/reader_config.py +3 -1
  33. snowflake/snowpark_connect/relation/utils.py +44 -0
  34. snowflake/snowpark_connect/relation/write/map_write.py +175 -75
  35. snowflake/snowpark_connect/resources_initializer.py +47 -6
  36. snowflake/snowpark_connect/server.py +26 -4
  37. snowflake/snowpark_connect/type_mapping.py +29 -25
  38. snowflake/snowpark_connect/typed_column.py +14 -0
  39. snowflake/snowpark_connect/utils/artifacts.py +23 -0
  40. snowflake/snowpark_connect/utils/concurrent.py +4 -0
  41. snowflake/snowpark_connect/utils/context.py +6 -1
  42. snowflake/snowpark_connect/utils/external_udxf_cache.py +36 -0
  43. snowflake/snowpark_connect/utils/scala_udf_utils.py +596 -0
  44. snowflake/snowpark_connect/utils/session.py +4 -0
  45. snowflake/snowpark_connect/utils/telemetry.py +6 -17
  46. snowflake/snowpark_connect/utils/udf_helper.py +2 -0
  47. snowflake/snowpark_connect/utils/udf_utils.py +22 -1
  48. snowflake/snowpark_connect/utils/udtf_utils.py +1 -0
  49. snowflake/snowpark_connect/version.py +1 -1
  50. {snowpark_connect-0.24.0.dist-info → snowpark_connect-0.26.0.dist-info}/METADATA +1 -1
  51. snowpark_connect-0.26.0.dist-info/RECORD +481 -0
  52. snowflake/snowpark_connect/includes/jars/scala-compiler-2.12.18.jar +0 -0
  53. snowflake/snowpark_connect/includes/jars/spark-kubernetes_2.12-3.5.6.jar +0 -0
  54. snowflake/snowpark_connect/includes/jars/spark-mllib_2.12-3.5.6.jar +0 -0
  55. snowflake/snowpark_connect/includes/jars/spark-streaming_2.12-3.5.6.jar +0 -0
  56. snowflake/snowpark_connect/includes/python/pyspark/errors/tests/__init__.py +0 -16
  57. snowflake/snowpark_connect/includes/python/pyspark/errors/tests/test_errors.py +0 -60
  58. snowflake/snowpark_connect/includes/python/pyspark/ml/deepspeed/tests/test_deepspeed_distributor.py +0 -306
  59. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/__init__.py +0 -16
  60. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_classification.py +0 -53
  61. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_evaluation.py +0 -50
  62. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_feature.py +0 -43
  63. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_function.py +0 -114
  64. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_pipeline.py +0 -47
  65. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_summarizer.py +0 -43
  66. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_connect_tuning.py +0 -46
  67. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_classification.py +0 -238
  68. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_evaluation.py +0 -194
  69. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_feature.py +0 -156
  70. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_pipeline.py +0 -184
  71. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_summarizer.py +0 -78
  72. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_legacy_mode_tuning.py +0 -292
  73. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_parity_torch_data_loader.py +0 -50
  74. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/connect/test_parity_torch_distributor.py +0 -152
  75. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_algorithms.py +0 -456
  76. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_base.py +0 -96
  77. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_dl_util.py +0 -186
  78. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_evaluation.py +0 -77
  79. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_feature.py +0 -401
  80. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_functions.py +0 -528
  81. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_image.py +0 -82
  82. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_linalg.py +0 -409
  83. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_model_cache.py +0 -55
  84. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_param.py +0 -441
  85. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_persistence.py +0 -546
  86. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_pipeline.py +0 -71
  87. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_stat.py +0 -52
  88. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_training_summary.py +0 -494
  89. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_util.py +0 -85
  90. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/test_wrapper.py +0 -138
  91. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/__init__.py +0 -16
  92. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_cv_io_basic.py +0 -151
  93. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_cv_io_nested.py +0 -97
  94. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_cv_io_pipeline.py +0 -143
  95. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_tuning.py +0 -551
  96. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_tvs_io_basic.py +0 -137
  97. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_tvs_io_nested.py +0 -96
  98. snowflake/snowpark_connect/includes/python/pyspark/ml/tests/tuning/test_tvs_io_pipeline.py +0 -142
  99. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/tests/__init__.py +0 -16
  100. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/tests/test_data_loader.py +0 -137
  101. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/tests/test_distributor.py +0 -561
  102. snowflake/snowpark_connect/includes/python/pyspark/ml/torch/tests/test_log_communication.py +0 -172
  103. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/__init__.py +0 -16
  104. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_algorithms.py +0 -353
  105. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_feature.py +0 -192
  106. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_linalg.py +0 -680
  107. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_stat.py +0 -206
  108. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_streaming_algorithms.py +0 -471
  109. snowflake/snowpark_connect/includes/python/pyspark/mllib/tests/test_util.py +0 -108
  110. snowflake/snowpark_connect/includes/python/pyspark/pandas/spark/__init__.py +0 -16
  111. snowflake/snowpark_connect/includes/python/pyspark/pandas/spark/accessors.py +0 -1281
  112. snowflake/snowpark_connect/includes/python/pyspark/pandas/spark/functions.py +0 -203
  113. snowflake/snowpark_connect/includes/python/pyspark/pandas/spark/utils.py +0 -202
  114. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/__init__.py +0 -16
  115. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/__init__.py +0 -16
  116. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_any_all.py +0 -177
  117. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_apply_func.py +0 -575
  118. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_binary_ops.py +0 -235
  119. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_combine.py +0 -653
  120. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_compute.py +0 -463
  121. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_corrwith.py +0 -86
  122. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_cov.py +0 -151
  123. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_cumulative.py +0 -139
  124. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_describe.py +0 -458
  125. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_eval.py +0 -86
  126. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_melt.py +0 -202
  127. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_missing_data.py +0 -520
  128. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/computation/test_pivot.py +0 -361
  129. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/__init__.py +0 -16
  130. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/__init__.py +0 -16
  131. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_any_all.py +0 -40
  132. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_apply_func.py +0 -42
  133. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_binary_ops.py +0 -40
  134. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_combine.py +0 -37
  135. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_compute.py +0 -60
  136. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_corrwith.py +0 -40
  137. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_cov.py +0 -40
  138. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_cumulative.py +0 -90
  139. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_describe.py +0 -40
  140. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_eval.py +0 -40
  141. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_melt.py +0 -40
  142. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py +0 -42
  143. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/computation/test_parity_pivot.py +0 -37
  144. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/__init__.py +0 -16
  145. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_base.py +0 -36
  146. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_binary_ops.py +0 -42
  147. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_boolean_ops.py +0 -47
  148. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_categorical_ops.py +0 -55
  149. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_complex_ops.py +0 -40
  150. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_date_ops.py +0 -47
  151. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_datetime_ops.py +0 -47
  152. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_null_ops.py +0 -42
  153. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_arithmetic.py +0 -43
  154. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_ops.py +0 -47
  155. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_num_reverse.py +0 -43
  156. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_string_ops.py +0 -47
  157. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_timedelta_ops.py +0 -47
  158. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/test_parity_udt_ops.py +0 -40
  159. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/data_type_ops/testing_utils.py +0 -226
  160. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/__init__.py +0 -16
  161. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_align.py +0 -39
  162. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_basic_slow.py +0 -55
  163. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_cov_corrwith.py +0 -39
  164. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_dot_frame.py +0 -39
  165. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_dot_series.py +0 -39
  166. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_index.py +0 -39
  167. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_series.py +0 -39
  168. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_setitem_frame.py +0 -43
  169. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/diff_frames_ops/test_parity_setitem_series.py +0 -43
  170. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/__init__.py +0 -16
  171. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_attrs.py +0 -40
  172. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_constructor.py +0 -39
  173. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_conversion.py +0 -42
  174. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_reindexing.py +0 -42
  175. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_reshaping.py +0 -37
  176. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_spark.py +0 -40
  177. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_take.py +0 -42
  178. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_time_series.py +0 -48
  179. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/frame/test_parity_truncate.py +0 -40
  180. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/__init__.py +0 -16
  181. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_aggregate.py +0 -40
  182. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_apply_func.py +0 -41
  183. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_cumulative.py +0 -67
  184. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_describe.py +0 -40
  185. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_groupby.py +0 -55
  186. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_head_tail.py +0 -40
  187. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_index.py +0 -38
  188. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_missing_data.py +0 -55
  189. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_split_apply.py +0 -39
  190. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/groupby/test_parity_stat.py +0 -38
  191. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/__init__.py +0 -16
  192. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_align.py +0 -40
  193. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py +0 -50
  194. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_category.py +0 -73
  195. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_datetime.py +0 -39
  196. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_indexing.py +0 -40
  197. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_reindex.py +0 -40
  198. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_rename.py +0 -40
  199. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_reset_index.py +0 -48
  200. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/indexes/test_parity_timedelta.py +0 -39
  201. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/io/__init__.py +0 -16
  202. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/io/test_parity_io.py +0 -40
  203. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/__init__.py +0 -16
  204. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_frame_plot.py +0 -45
  205. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_frame_plot_matplotlib.py +0 -45
  206. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_frame_plot_plotly.py +0 -49
  207. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_series_plot.py +0 -37
  208. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_series_plot_matplotlib.py +0 -53
  209. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/plot/test_parity_series_plot_plotly.py +0 -45
  210. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/__init__.py +0 -16
  211. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_all_any.py +0 -38
  212. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_arg_ops.py +0 -37
  213. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_as_of.py +0 -37
  214. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_as_type.py +0 -38
  215. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_compute.py +0 -37
  216. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_conversion.py +0 -40
  217. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py +0 -40
  218. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_index.py +0 -38
  219. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py +0 -40
  220. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_series.py +0 -37
  221. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_sort.py +0 -38
  222. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/series/test_parity_stat.py +0 -38
  223. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_categorical.py +0 -66
  224. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_config.py +0 -37
  225. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_csv.py +0 -37
  226. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_dataframe_conversion.py +0 -42
  227. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_dataframe_spark_io.py +0 -39
  228. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_default_index.py +0 -49
  229. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ewm.py +0 -37
  230. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_expanding.py +0 -39
  231. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_extension.py +0 -49
  232. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_frame_spark.py +0 -53
  233. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_generic_functions.py +0 -43
  234. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_indexing.py +0 -49
  235. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_indexops_spark.py +0 -39
  236. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_internal.py +0 -41
  237. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_namespace.py +0 -39
  238. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_numpy_compat.py +0 -60
  239. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames.py +0 -48
  240. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py +0 -39
  241. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py +0 -44
  242. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_rolling.py +0 -84
  243. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_repr.py +0 -37
  244. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_resample.py +0 -45
  245. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_reshape.py +0 -39
  246. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_rolling.py +0 -39
  247. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_scalars.py +0 -37
  248. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_series_conversion.py +0 -39
  249. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_series_datetime.py +0 -39
  250. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_series_string.py +0 -39
  251. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_spark_functions.py +0 -39
  252. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_sql.py +0 -43
  253. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_stats.py +0 -37
  254. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_typedef.py +0 -36
  255. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_utils.py +0 -37
  256. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/connect/test_parity_window.py +0 -39
  257. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/__init__.py +0 -16
  258. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_base.py +0 -107
  259. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_binary_ops.py +0 -224
  260. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_boolean_ops.py +0 -825
  261. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_categorical_ops.py +0 -562
  262. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_complex_ops.py +0 -368
  263. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_date_ops.py +0 -257
  264. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_datetime_ops.py +0 -260
  265. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_null_ops.py +0 -178
  266. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_num_arithmetic.py +0 -184
  267. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_num_ops.py +0 -497
  268. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_num_reverse.py +0 -140
  269. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_string_ops.py +0 -354
  270. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_timedelta_ops.py +0 -219
  271. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/test_udt_ops.py +0 -192
  272. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/data_type_ops/testing_utils.py +0 -228
  273. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/__init__.py +0 -16
  274. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_align.py +0 -118
  275. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_basic_slow.py +0 -198
  276. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_cov_corrwith.py +0 -181
  277. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_dot_frame.py +0 -103
  278. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_dot_series.py +0 -141
  279. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_index.py +0 -109
  280. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_series.py +0 -136
  281. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_setitem_frame.py +0 -125
  282. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/diff_frames_ops/test_setitem_series.py +0 -217
  283. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/__init__.py +0 -16
  284. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_attrs.py +0 -384
  285. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_constructor.py +0 -598
  286. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_conversion.py +0 -73
  287. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_reindexing.py +0 -869
  288. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_reshaping.py +0 -487
  289. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_spark.py +0 -309
  290. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_take.py +0 -156
  291. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_time_series.py +0 -149
  292. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/frame/test_truncate.py +0 -163
  293. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/__init__.py +0 -16
  294. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_aggregate.py +0 -311
  295. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_apply_func.py +0 -524
  296. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_cumulative.py +0 -419
  297. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_describe.py +0 -144
  298. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_groupby.py +0 -979
  299. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_head_tail.py +0 -234
  300. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_index.py +0 -206
  301. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_missing_data.py +0 -421
  302. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_split_apply.py +0 -187
  303. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/groupby/test_stat.py +0 -397
  304. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/__init__.py +0 -16
  305. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_align.py +0 -100
  306. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_base.py +0 -2743
  307. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_category.py +0 -484
  308. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_datetime.py +0 -276
  309. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_indexing.py +0 -432
  310. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_reindex.py +0 -310
  311. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_rename.py +0 -257
  312. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_reset_index.py +0 -160
  313. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/indexes/test_timedelta.py +0 -128
  314. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/io/__init__.py +0 -16
  315. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/io/test_io.py +0 -137
  316. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/__init__.py +0 -16
  317. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_frame_plot.py +0 -170
  318. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_frame_plot_matplotlib.py +0 -547
  319. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_frame_plot_plotly.py +0 -285
  320. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_series_plot.py +0 -106
  321. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_series_plot_matplotlib.py +0 -409
  322. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/plot/test_series_plot_plotly.py +0 -247
  323. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/__init__.py +0 -16
  324. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_all_any.py +0 -105
  325. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_arg_ops.py +0 -197
  326. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_as_of.py +0 -137
  327. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_as_type.py +0 -227
  328. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_compute.py +0 -634
  329. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_conversion.py +0 -88
  330. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_cumulative.py +0 -139
  331. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_index.py +0 -475
  332. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_missing_data.py +0 -265
  333. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_series.py +0 -818
  334. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_sort.py +0 -162
  335. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/series/test_stat.py +0 -780
  336. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_categorical.py +0 -741
  337. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_config.py +0 -160
  338. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_csv.py +0 -453
  339. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_dataframe_conversion.py +0 -281
  340. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_dataframe_spark_io.py +0 -487
  341. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_default_index.py +0 -109
  342. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ewm.py +0 -434
  343. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_expanding.py +0 -253
  344. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_extension.py +0 -152
  345. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_frame_spark.py +0 -162
  346. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_generic_functions.py +0 -234
  347. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_indexing.py +0 -1339
  348. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_indexops_spark.py +0 -82
  349. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_internal.py +0 -124
  350. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_namespace.py +0 -638
  351. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_numpy_compat.py +0 -200
  352. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ops_on_diff_frames.py +0 -1355
  353. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby.py +0 -655
  354. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_expanding.py +0 -113
  355. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_rolling.py +0 -118
  356. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_repr.py +0 -192
  357. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_resample.py +0 -346
  358. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_reshape.py +0 -495
  359. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_rolling.py +0 -263
  360. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_scalars.py +0 -59
  361. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_series_conversion.py +0 -85
  362. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_series_datetime.py +0 -364
  363. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_series_string.py +0 -362
  364. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_spark_functions.py +0 -46
  365. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_sql.py +0 -123
  366. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_stats.py +0 -581
  367. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_typedef.py +0 -447
  368. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_utils.py +0 -301
  369. snowflake/snowpark_connect/includes/python/pyspark/pandas/tests/test_window.py +0 -465
  370. snowflake/snowpark_connect/includes/python/pyspark/resource/tests/__init__.py +0 -16
  371. snowflake/snowpark_connect/includes/python/pyspark/resource/tests/test_resources.py +0 -83
  372. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/__init__.py +0 -16
  373. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/__init__.py +0 -16
  374. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/client/__init__.py +0 -16
  375. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/client/test_artifact.py +0 -420
  376. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/client/test_client.py +0 -358
  377. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/__init__.py +0 -16
  378. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/test_parity_foreach.py +0 -36
  379. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/test_parity_foreach_batch.py +0 -44
  380. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/test_parity_listener.py +0 -116
  381. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/streaming/test_parity_streaming.py +0 -35
  382. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_connect_basic.py +0 -3612
  383. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_connect_column.py +0 -1042
  384. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_connect_function.py +0 -2381
  385. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_connect_plan.py +0 -1060
  386. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_arrow.py +0 -163
  387. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_arrow_map.py +0 -38
  388. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_arrow_python_udf.py +0 -48
  389. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_catalog.py +0 -36
  390. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_column.py +0 -55
  391. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_conf.py +0 -36
  392. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_dataframe.py +0 -96
  393. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_datasources.py +0 -44
  394. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_errors.py +0 -36
  395. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_functions.py +0 -59
  396. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_group.py +0 -36
  397. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_cogrouped_map.py +0 -59
  398. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py +0 -74
  399. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map_with_state.py +0 -62
  400. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_map.py +0 -58
  401. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_udf.py +0 -70
  402. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_udf_grouped_agg.py +0 -50
  403. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_udf_scalar.py +0 -68
  404. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_pandas_udf_window.py +0 -40
  405. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_readwriter.py +0 -46
  406. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_serde.py +0 -44
  407. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_types.py +0 -100
  408. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_udf.py +0 -100
  409. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_parity_udtf.py +0 -163
  410. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_session.py +0 -181
  411. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/connect/test_utils.py +0 -42
  412. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/__init__.py +0 -16
  413. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_cogrouped_map.py +0 -623
  414. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py +0 -869
  415. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_grouped_map_with_state.py +0 -342
  416. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_map.py +0 -436
  417. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf.py +0 -363
  418. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py +0 -592
  419. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_scalar.py +0 -1503
  420. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints.py +0 -392
  421. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_typehints_with_future_annotations.py +0 -375
  422. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py +0 -411
  423. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/__init__.py +0 -16
  424. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/test_streaming.py +0 -401
  425. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/test_streaming_foreach.py +0 -295
  426. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py +0 -106
  427. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/streaming/test_streaming_listener.py +0 -558
  428. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_arrow.py +0 -1346
  429. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_arrow_map.py +0 -182
  430. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_arrow_python_udf.py +0 -202
  431. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_catalog.py +0 -503
  432. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_column.py +0 -225
  433. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_conf.py +0 -83
  434. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_context.py +0 -201
  435. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_dataframe.py +0 -1931
  436. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_datasources.py +0 -256
  437. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_errors.py +0 -69
  438. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_functions.py +0 -1349
  439. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_group.py +0 -53
  440. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_pandas_sqlmetrics.py +0 -68
  441. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_readwriter.py +0 -283
  442. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_serde.py +0 -155
  443. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_session.py +0 -412
  444. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_types.py +0 -1581
  445. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_udf.py +0 -961
  446. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_udf_profiler.py +0 -165
  447. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_udtf.py +0 -1456
  448. snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_utils.py +0 -1686
  449. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/__init__.py +0 -16
  450. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/test_context.py +0 -184
  451. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/test_dstream.py +0 -706
  452. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/test_kinesis.py +0 -118
  453. snowflake/snowpark_connect/includes/python/pyspark/streaming/tests/test_listener.py +0 -160
  454. snowflake/snowpark_connect/includes/python/pyspark/tests/__init__.py +0 -16
  455. snowflake/snowpark_connect/includes/python/pyspark/tests/test_appsubmit.py +0 -306
  456. snowflake/snowpark_connect/includes/python/pyspark/tests/test_broadcast.py +0 -196
  457. snowflake/snowpark_connect/includes/python/pyspark/tests/test_conf.py +0 -44
  458. snowflake/snowpark_connect/includes/python/pyspark/tests/test_context.py +0 -346
  459. snowflake/snowpark_connect/includes/python/pyspark/tests/test_daemon.py +0 -89
  460. snowflake/snowpark_connect/includes/python/pyspark/tests/test_install_spark.py +0 -124
  461. snowflake/snowpark_connect/includes/python/pyspark/tests/test_join.py +0 -69
  462. snowflake/snowpark_connect/includes/python/pyspark/tests/test_memory_profiler.py +0 -167
  463. snowflake/snowpark_connect/includes/python/pyspark/tests/test_pin_thread.py +0 -194
  464. snowflake/snowpark_connect/includes/python/pyspark/tests/test_profiler.py +0 -168
  465. snowflake/snowpark_connect/includes/python/pyspark/tests/test_rdd.py +0 -939
  466. snowflake/snowpark_connect/includes/python/pyspark/tests/test_rddbarrier.py +0 -52
  467. snowflake/snowpark_connect/includes/python/pyspark/tests/test_rddsampler.py +0 -66
  468. snowflake/snowpark_connect/includes/python/pyspark/tests/test_readwrite.py +0 -368
  469. snowflake/snowpark_connect/includes/python/pyspark/tests/test_serializers.py +0 -257
  470. snowflake/snowpark_connect/includes/python/pyspark/tests/test_shuffle.py +0 -267
  471. snowflake/snowpark_connect/includes/python/pyspark/tests/test_stage_sched.py +0 -153
  472. snowflake/snowpark_connect/includes/python/pyspark/tests/test_statcounter.py +0 -130
  473. snowflake/snowpark_connect/includes/python/pyspark/tests/test_taskcontext.py +0 -350
  474. snowflake/snowpark_connect/includes/python/pyspark/tests/test_util.py +0 -97
  475. snowflake/snowpark_connect/includes/python/pyspark/tests/test_worker.py +0 -271
  476. snowpark_connect-0.24.0.dist-info/RECORD +0 -898
  477. {snowpark_connect-0.24.0.data → snowpark_connect-0.26.0.data}/scripts/snowpark-connect +0 -0
  478. {snowpark_connect-0.24.0.data → snowpark_connect-0.26.0.data}/scripts/snowpark-session +0 -0
  479. {snowpark_connect-0.24.0.data → snowpark_connect-0.26.0.data}/scripts/snowpark-submit +0 -0
  480. {snowpark_connect-0.24.0.dist-info → snowpark_connect-0.26.0.dist-info}/WHEEL +0 -0
  481. {snowpark_connect-0.24.0.dist-info → snowpark_connect-0.26.0.dist-info}/licenses/LICENSE-binary +0 -0
  482. {snowpark_connect-0.24.0.dist-info → snowpark_connect-0.26.0.dist-info}/licenses/LICENSE.txt +0 -0
  483. {snowpark_connect-0.24.0.dist-info → snowpark_connect-0.26.0.dist-info}/licenses/NOTICE-binary +0 -0
  484. {snowpark_connect-0.24.0.dist-info → snowpark_connect-0.26.0.dist-info}/top_level.txt +0 -0
snowflake/snowpark_connect/includes/python/pyspark/sql/tests/test_arrow.py (deleted)
@@ -1,1346 +0,0 @@
1
- #
2
- # Licensed to the Apache Software Foundation (ASF) under one or more
3
- # contributor license agreements. See the NOTICE file distributed with
4
- # this work for additional information regarding copyright ownership.
5
- # The ASF licenses this file to You under the Apache License, Version 2.0
6
- # (the "License"); you may not use this file except in compliance with
7
- # the License. You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
- #
17
-
18
- import datetime
19
- import os
20
- import threading
21
- import calendar
22
- import time
23
- import unittest
24
- import warnings
25
- from distutils.version import LooseVersion
26
- from typing import cast
27
- from collections import namedtuple
28
- import sys
29
-
30
- from pyspark import SparkContext, SparkConf
31
- from pyspark.sql import Row, SparkSession
32
- from pyspark.sql.functions import rand, udf, assert_true, lit
33
- from pyspark.sql.types import (
34
- StructType,
35
- StringType,
36
- IntegerType,
37
- LongType,
38
- FloatType,
39
- DoubleType,
40
- DecimalType,
41
- DateType,
42
- TimestampType,
43
- TimestampNTZType,
44
- BinaryType,
45
- StructField,
46
- ArrayType,
47
- MapType,
48
- NullType,
49
- DayTimeIntervalType,
50
- )
51
- from pyspark.testing.objects import ExamplePoint, ExamplePointUDT
52
- from pyspark.testing.sqlutils import (
53
- ReusedSQLTestCase,
54
- have_pandas,
55
- have_pyarrow,
56
- pandas_requirement_message,
57
- pyarrow_requirement_message,
58
- )
59
- from pyspark.testing.utils import QuietTest
60
- from pyspark.errors import ArithmeticException, PySparkTypeError, UnsupportedOperationException
61
-
62
- if have_pandas:
63
- import pandas as pd
64
- from pandas.testing import assert_frame_equal
65
-
66
- if have_pyarrow:
67
- import pyarrow as pa # noqa: F401
68
-
69
-
70
- class ArrowTestsMixin:
71
- @classmethod
72
- def setUpClass(cls):
73
- from datetime import date, datetime
74
- from decimal import Decimal
75
-
76
- super().setUpClass()
77
- cls.warnings_lock = threading.Lock()
78
-
79
- # Synchronize default timezone between Python and Java
80
- cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
81
- tz = "America/Los_Angeles"
82
- os.environ["TZ"] = tz
83
- time.tzset()
84
-
85
- cls.spark.conf.set("spark.sql.session.timeZone", tz)
86
-
87
- # Test fallback
88
- cls.spark.conf.set("spark.sql.execution.arrow.enabled", "false")
89
- assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "false"
90
- cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
91
- assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "true"
92
-
93
- cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")
94
- assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "true"
95
- cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
96
- assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "false"
97
-
98
- # Enable Arrow optimization in this tests.
99
- cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
100
- # Disable fallback by default to easily detect the failures.
101
- cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
102
-
103
- cls.schema_wo_null = StructType(
104
- [
105
- StructField("1_str_t", StringType(), True),
106
- StructField("2_int_t", IntegerType(), True),
107
- StructField("3_long_t", LongType(), True),
108
- StructField("4_float_t", FloatType(), True),
109
- StructField("5_double_t", DoubleType(), True),
110
- StructField("6_decimal_t", DecimalType(38, 18), True),
111
- StructField("7_date_t", DateType(), True),
112
- StructField("8_timestamp_t", TimestampType(), True),
113
- StructField("9_binary_t", BinaryType(), True),
114
- ]
115
- )
116
- cls.schema = cls.schema_wo_null.add("10_null_t", NullType(), True)
117
- cls.data_wo_null = [
118
- (
119
- "a",
120
- 1,
121
- 10,
122
- 0.2,
123
- 2.0,
124
- Decimal("2.0"),
125
- date(1969, 1, 1),
126
- datetime(1969, 1, 1, 1, 1, 1),
127
- bytearray(b"a"),
128
- ),
129
- (
130
- "b",
131
- 2,
132
- 20,
133
- 0.4,
134
- 4.0,
135
- Decimal("4.0"),
136
- date(2012, 2, 2),
137
- datetime(2012, 2, 2, 2, 2, 2),
138
- bytearray(b"bb"),
139
- ),
140
- (
141
- "c",
142
- 3,
143
- 30,
144
- 0.8,
145
- 6.0,
146
- Decimal("6.0"),
147
- date(2100, 3, 3),
148
- datetime(2100, 3, 3, 3, 3, 3),
149
- bytearray(b"ccc"),
150
- ),
151
- (
152
- "d",
153
- 4,
154
- 40,
155
- 1.0,
156
- 8.0,
157
- Decimal("8.0"),
158
- date(2262, 4, 12),
159
- datetime(2262, 3, 3, 3, 3, 3),
160
- bytearray(b"dddd"),
161
- ),
162
- ]
163
- cls.data = [tuple(list(d) + [None]) for d in cls.data_wo_null]
164
-
165
- @classmethod
166
- def tearDownClass(cls):
167
- del os.environ["TZ"]
168
- if cls.tz_prev is not None:
169
- os.environ["TZ"] = cls.tz_prev
170
- time.tzset()
171
- super().tearDownClass()
172
-
173
- def create_pandas_data_frame(self):
174
- import numpy as np
175
-
176
- data_dict = {}
177
- for j, name in enumerate(self.schema.names):
178
- data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
179
- # need to convert these to numpy types first
180
- data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
181
- data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
182
- return pd.DataFrame(data=data_dict)
183
-
184
- @property
185
- def create_np_arrs(self):
186
- import numpy as np
187
-
188
- int_dtypes = ["int8", "int16", "int32", "int64"]
189
- float_dtypes = ["float32", "float64"]
190
- return (
191
- [np.array([1, 2]).astype(t) for t in int_dtypes]
192
- + [np.array([0.1, 0.2]).astype(t) for t in float_dtypes]
193
- + [np.array([[1], [2]]).astype(t) for t in int_dtypes]
194
- + [np.array([[0.1], [0.2]]).astype(t) for t in float_dtypes]
195
- + [np.array([[1, 1, 1], [2, 2, 2]]).astype(t) for t in int_dtypes]
196
- + [np.array([[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]]).astype(t) for t in float_dtypes]
197
- )
198
-
199
-    @unittest.skipIf(
-        not have_pyarrow or LooseVersion(pa.__version__) >= "2.0",
-        "will not fallback with pyarrow>=2.0",
-    )
-    def test_toPandas_fallback_enabled(self):
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
-            schema = StructType([StructField("a", ArrayType(StructType()), True)])
-            df = self.spark.createDataFrame([([Row()],)], schema=schema)
-            with QuietTest(self.sc):
-                with self.warnings_lock:
-                    with warnings.catch_warnings(record=True) as warns:
-                        # we want the warnings to appear even if this test is run from a subclass
-                        warnings.simplefilter("always")
-                        pdf = df.toPandas()
-                        # Catch and check the last UserWarning.
-                        user_warns = [
-                            warn.message for warn in warns if isinstance(warn.message, UserWarning)
-                        ]
-                        self.assertTrue(len(user_warns) > 0)
-                        self.assertTrue("Attempting non-optimization" in str(user_warns[-1]))
-                        assert_frame_equal(pdf, pd.DataFrame({"a": [[Row()]]}))
-
-    @unittest.skipIf(
-        not have_pyarrow or LooseVersion(pa.__version__) >= "2.0",
-        "will not fallback with pyarrow>=2.0",
-    )
-    def test_toPandas_fallback_disabled(self):
-        schema = StructType([StructField("a", ArrayType(StructType()), True)])
-        df = self.spark.createDataFrame([(None,)], schema=schema)
-        with QuietTest(self.sc):
-            with self.warnings_lock:
-                with self.assertRaises(PySparkTypeError) as pe:
-                    df.toPandas()
-
-                self.check_error(
-                    exception=pe.exception,
-                    error_class="UNSUPPORTED_DATA_TYPE_FOR_ARROW_VERSION",
-                    message_parameters={"data_type": "Array of StructType"},
-                )
-
-    def test_toPandas_empty_df_arrow_enabled(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_empty_df_arrow_enabled(arrow_enabled)
-
-    def check_toPandas_empty_df_arrow_enabled(self, arrow_enabled):
-        # SPARK-30537 test that toPandas() on an empty dataframe has the correct dtypes
-        # when arrow is enabled
-        from datetime import date
-        from decimal import Decimal
-
-        schema = StructType(
-            [
-                StructField("a", StringType(), True),
-                StructField("b", IntegerType(), True),
-                StructField("c", TimestampType(), True),
-                StructField("d", NullType(), True),
-                StructField("e", LongType(), True),
-                StructField("f", FloatType(), True),
-                StructField("g", DateType(), True),
-                StructField("h", BinaryType(), True),
-                StructField("i", DecimalType(38, 18), True),
-                StructField("k", TimestampNTZType(), True),
-                StructField("L", DayTimeIntervalType(0, 3), True),
-            ]
-        )
-        df = self.spark.createDataFrame([], schema=schema)
-        non_empty_df = self.spark.createDataFrame(
-            [
-                (
-                    "a",
-                    1,
-                    datetime.datetime(1969, 1, 1, 1, 1, 1),
-                    None,
-                    10,
-                    0.2,
-                    date(1969, 1, 1),
-                    bytearray(b"a"),
-                    Decimal("2.0"),
-                    datetime.datetime(1969, 1, 1, 1, 1, 1),
-                    datetime.timedelta(microseconds=123),
-                )
-            ],
-            schema=schema,
-        )
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            pdf = df.toPandas()
-            pdf_non_empty = non_empty_df.toPandas()
-            self.assertTrue(pdf.dtypes.equals(pdf_non_empty.dtypes))
-
-    def test_null_conversion(self):
-        df_null = self.spark.createDataFrame(
-            [tuple([None for _ in range(len(self.data_wo_null[0]))])] + self.data_wo_null
-        )
-        pdf = df_null.toPandas()
-        null_counts = pdf.isnull().sum().tolist()
-        self.assertTrue(all([c == 1 for c in null_counts]))
-
-    def _toPandas_arrow_toggle(self, df):
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
-            pdf = df.toPandas()
-
-        pdf_arrow = df.toPandas()
-
-        return pdf, pdf_arrow
-
-    def test_toPandas_arrow_toggle(self):
-        df = self.spark.createDataFrame(self.data, schema=self.schema)
-        pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
-        expected = self.create_pandas_data_frame()
-        assert_frame_equal(expected, pdf)
-        assert_frame_equal(expected, pdf_arrow)
-
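The _toPandas_arrow_toggle helper above leans on the harness's sql_conf context manager; a hedged standalone equivalent of the same comparison (reusing the illustrative spark session from the earlier sketch):

df = spark.range(3).toDF("i")

spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "false")
pdf_plain = df.toPandas()  # row-based conversion path

spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
pdf_arrow = df.toPandas()  # Arrow-optimized conversion path

assert pdf_plain.equals(pdf_arrow)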
-    def test_create_data_frame_to_pandas_timestamp_ntz(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_create_data_frame_to_pandas_timestamp_ntz(arrow_enabled)
-
-    def check_create_data_frame_to_pandas_timestamp_ntz(self, arrow_enabled):
-        # SPARK-36626: Test TimestampNTZ in createDataFrame and toPandas
-        with self.sql_conf({"spark.sql.session.timeZone": "America/Los_Angeles"}):
-            origin = pd.DataFrame({"a": [datetime.datetime(2012, 2, 2, 2, 2, 2)]})
-            df = self.spark.createDataFrame(
-                origin, schema=StructType([StructField("a", TimestampNTZType(), True)])
-            )
-            df.selectExpr("assert_true('2012-02-02 02:02:02' == CAST(a AS STRING))").collect()
-
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                pdf = df.toPandas()
-                assert_frame_equal(origin, pdf)
-
-    def test_create_data_frame_to_pandas_day_time_interval(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_create_data_frame_to_pandas_day_time_interval(arrow_enabled)
-
-    def check_create_data_frame_to_pandas_day_time_interval(self, arrow_enabled):
-        # SPARK-37279: Test DayTimeInterval in createDataFrame and toPandas
-        origin = pd.DataFrame({"a": [datetime.timedelta(microseconds=123)]})
-        df = self.spark.createDataFrame(origin)
-        df.select(
-            assert_true(lit("INTERVAL '0 00:00:00.000123' DAY TO SECOND") == df.a.cast("string"))
-        ).collect()
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            pdf = df.toPandas()
-            assert_frame_equal(origin, pdf)
-
-    def test_toPandas_respect_session_timezone(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_respect_session_timezone(arrow_enabled)
-
-    def check_toPandas_respect_session_timezone(self, arrow_enabled):
-        df = self.spark.createDataFrame(self.data, schema=self.schema)
-
-        timezone = "America/Los_Angeles"
-        with self.sql_conf({"spark.sql.session.timeZone": timezone}):
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                pdf_la = df.toPandas()
-
-        timezone = "America/New_York"
-        with self.sql_conf({"spark.sql.session.timeZone": timezone}):
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                pdf_ny = df.toPandas()
-
-                self.assertFalse(pdf_ny.equals(pdf_la))
-
-                from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz
-
-                pdf_la_corrected = pdf_la.copy()
-                for field in self.schema:
-                    if isinstance(field.dataType, TimestampType):
-                        pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
-                            pdf_la_corrected[field.name], timezone
-                        )
-                assert_frame_equal(pdf_ny, pdf_la_corrected)
-
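What the timezone test checks, as a hedged sketch outside the harness: TimestampType values are rendered in the session time zone on collection, so flipping the zone changes the wall-clock values (the example frame is illustrative):

import datetime

df = spark.createDataFrame([(datetime.datetime(2023, 1, 1, 12, 0),)], "ts timestamp")

spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
pdf_la = df.toPandas()

spark.conf.set("spark.sql.session.timeZone", "America/New_York")
pdf_ny = df.toPandas()

# Same instant, rendered three hours apart for this date.
assert not pdf_ny.equals(pdf_la)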
-    def test_pandas_round_trip(self):
-        pdf = self.create_pandas_data_frame()
-        df = self.spark.createDataFrame(self.data, schema=self.schema)
-        pdf_arrow = df.toPandas()
-        assert_frame_equal(pdf_arrow, pdf)
-
-    def test_pandas_self_destruct(self):
-        import pyarrow as pa
-
-        rows = 2**10
-        cols = 4
-        expected_bytes = rows * cols * 8
-        df = self.spark.range(0, rows).select(*[rand() for _ in range(cols)])
-        # Test the self_destruct behavior by testing _collect_as_arrow directly
-        allocation_before = pa.total_allocated_bytes()
-        batches = df._collect_as_arrow(split_batches=True)
-        table = pa.Table.from_batches(batches)
-        del batches
-        pdf_split = table.to_pandas(self_destruct=True, split_blocks=True, use_threads=False)
-        allocation_after = pa.total_allocated_bytes()
-        difference = allocation_after - allocation_before
-        # Should be around 1x the data size (table should not hold on to any memory)
-        self.assertGreaterEqual(difference, 0.9 * expected_bytes)
-        self.assertLessEqual(difference, 1.1 * expected_bytes)
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.selfDestruct.enabled": False}):
-            no_self_destruct_pdf = df.toPandas()
-            # Note while memory usage is 2x data size here (both table and pdf hold on to
-            # memory), in this case Arrow still only tracks 1x worth of memory (since the
-            # batches are not allocated by Arrow in this case), so we can't make any
-            # assertions here
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.selfDestruct.enabled": True}):
-            self_destruct_pdf = df.toPandas()
-
-        assert_frame_equal(pdf_split, no_self_destruct_pdf)
-        assert_frame_equal(pdf_split, self_destruct_pdf)
-
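The self-destruct test exercises a plain pyarrow feature; a minimal sketch with a toy table (not from the removed file) of why peak memory stays near one copy of the data:

import pyarrow as pa

table = pa.table({"x": list(range(1000))})
# self_destruct=True releases each Arrow buffer as soon as it has been copied
# into the pandas blocks; split_blocks and use_threads=False make the
# column-by-column release effective.
pdf = table.to_pandas(self_destruct=True, split_blocks=True, use_threads=False)
del table  # the table must not be used again; its buffers may already be freed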
-    def test_filtered_frame(self):
-        df = self.spark.range(3).toDF("i")
-        pdf = df.filter("i < 0").toPandas()
-        self.assertEqual(len(pdf.columns), 1)
-        self.assertEqual(pdf.columns[0], "i")
-        self.assertTrue(pdf.empty)
-
-    def test_no_partition_frame(self):
-        schema = StructType([StructField("field1", StringType(), True)])
-        df = self.spark.createDataFrame(self.sc.emptyRDD(), schema)
-        pdf = df.toPandas()
-        self.assertEqual(len(pdf.columns), 1)
-        self.assertEqual(pdf.columns[0], "field1")
-        self.assertTrue(pdf.empty)
-
-    def test_propagates_spark_exception(self):
-        with QuietTest(self.sc):
-            self.check_propagates_spark_exception()
-
-    def check_propagates_spark_exception(self):
-        df = self.spark.range(3).toDF("i")
-
-        def raise_exception():
-            raise RuntimeError("My error")
-
-        exception_udf = udf(raise_exception, IntegerType())
-        df = df.withColumn("error", exception_udf())
-
-        with self.assertRaisesRegex(Exception, "My error"):
-            df.toPandas()
-
-    def _createDataFrame_toggle(self, data, schema=None):
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
-            df_no_arrow = self.spark.createDataFrame(data, schema=schema)
-
-        df_arrow = self.spark.createDataFrame(data, schema=schema)
-
-        return df_no_arrow, df_arrow
-
-    def test_createDataFrame_toggle(self):
-        pdf = self.create_pandas_data_frame()
-        df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
-        self.assertEqual(df_no_arrow.collect(), df_arrow.collect())
-
-    def test_createDataFrame_respect_session_timezone(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_createDataFrame_respect_session_timezone(arrow_enabled)
-
-    def check_createDataFrame_respect_session_timezone(self, arrow_enabled):
-        from datetime import timedelta
-
-        pdf = self.create_pandas_data_frame()
-        timezone = "America/Los_Angeles"
-        with self.sql_conf({"spark.sql.session.timeZone": timezone}):
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                df_la = self.spark.createDataFrame(pdf, schema=self.schema)
-                result_la = df_la.collect()
-
-        timezone = "America/New_York"
-        with self.sql_conf({"spark.sql.session.timeZone": timezone}):
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                df_ny = self.spark.createDataFrame(pdf, schema=self.schema)
-                result_ny = df_ny.collect()
-
-                self.assertNotEqual(result_ny, result_la)
-
-                # Correct result_la by adjusting for the 3-hour difference between Los Angeles and New York
-                result_la_corrected = [
-                    Row(
-                        **{
-                            k: v - timedelta(hours=3) if k == "8_timestamp_t" else v
-                            for k, v in row.asDict().items()
-                        }
-                    )
-                    for row in result_la
-                ]
-                self.assertEqual(result_ny, result_la_corrected)
-
-    def test_createDataFrame_with_schema(self):
-        pdf = self.create_pandas_data_frame()
-        df = self.spark.createDataFrame(pdf, schema=self.schema)
-        self.assertEqual(self.schema, df.schema)
-        pdf_arrow = df.toPandas()
-        assert_frame_equal(pdf_arrow, pdf)
-
-    def test_createDataFrame_with_incorrect_schema(self):
-        with QuietTest(self.sc):
-            self.check_createDataFrame_with_incorrect_schema()
-
-    def check_createDataFrame_with_incorrect_schema(self):
-        pdf = self.create_pandas_data_frame()
-        fields = list(self.schema)
-        fields[5], fields[6] = fields[6], fields[5]  # swap decimal with date
-        wrong_schema = StructType(fields)
-        with self.sql_conf({"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
-            with self.assertRaises(Exception) as context:
-                self.spark.createDataFrame(pdf, schema=wrong_schema)
-
-            # the exception provides us with the column that is incorrect
-            exception = context.exception
-            self.assertTrue(hasattr(exception, "args"))
-            self.assertEqual(len(exception.args), 1)
-            self.assertRegex(
-                exception.args[0],
-                "with name '7_date_t' to Arrow Array \\(decimal128\\(38, 18\\)\\)",
-            )
-
-            # the inner exception provides us with the incorrect types
-            exception = exception.__context__
-            self.assertTrue(hasattr(exception, "args"))
-            self.assertEqual(len(exception.args), 1)
-            self.assertRegex(exception.args[0], "[Dd]ecimal.*got.*date")
-
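As a hedged aside on the conf used above: flipping convertToArrowArraySafely to true makes the pandas-to-Arrow cast validate eagerly, so a lossy conversion fails at createDataFrame time (the exact exception type varies by Spark version):

import pandas as pd

spark.conf.set("spark.sql.execution.pandas.convertToArrowArraySafely", "true")
pdf = pd.DataFrame({"a": [1.5]})
try:
    spark.createDataFrame(pdf, schema="a int")  # 1.5 -> int would truncate
except Exception as exc:
    print(type(exc).__name__, exc)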
-    def test_createDataFrame_with_names(self):
-        pdf = self.create_pandas_data_frame()
-        new_names = list(map(str, range(len(self.schema.fieldNames()))))
-        # Test that schema as a list of column names gets applied
-        df = self.spark.createDataFrame(pdf, schema=list(new_names))
-        self.assertEqual(df.schema.fieldNames(), new_names)
-        # Test that schema as tuple of column names gets applied
-        df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
-        self.assertEqual(df.schema.fieldNames(), new_names)
-
-    def test_createDataFrame_column_name_encoding(self):
-        pdf = pd.DataFrame({"a": [1]})
-        columns = self.spark.createDataFrame(pdf).columns
-        self.assertTrue(isinstance(columns[0], str))
-        self.assertEqual(columns[0], "a")
-        columns = self.spark.createDataFrame(pdf, ["b"]).columns
-        self.assertTrue(isinstance(columns[0], str))
-        self.assertEqual(columns[0], "b")
-
-    def test_createDataFrame_with_single_data_type(self):
-        with QuietTest(self.sc):
-            self.check_createDataFrame_with_single_data_type()
-
-    def check_createDataFrame_with_single_data_type(self):
-        for schema in ["int", IntegerType()]:
-            with self.subTest(schema=schema):
-                with self.assertRaises(PySparkTypeError) as pe:
-                    self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema=schema).collect()
-
-                self.check_error(
-                    exception=pe.exception,
-                    error_class="UNSUPPORTED_DATA_TYPE_FOR_ARROW",
-                    message_parameters={"data_type": "IntegerType()"},
-                )
-
-    def test_createDataFrame_does_not_modify_input(self):
-        # Some series get converted for Spark to consume; this makes sure the input is unchanged
-        pdf = self.create_pandas_data_frame()
-        # Use a nanosecond value to make sure it is not truncated
-        pdf.iloc[0, 7] = pd.Timestamp(1)
-        # Integers with nulls will get NaNs filled with 0 and will be cast
-        pdf.iloc[1, 1] = None
-        pdf_copy = pdf.copy(deep=True)
-        self.spark.createDataFrame(pdf, schema=self.schema)
-        self.assertTrue(pdf.equals(pdf_copy))
-
-    def test_schema_conversion_roundtrip(self):
-        from pyspark.sql.pandas.types import from_arrow_schema, to_arrow_schema
-
-        arrow_schema = to_arrow_schema(self.schema)
-        schema_rt = from_arrow_schema(arrow_schema, prefer_timestamp_ntz=True)
-        self.assertEqual(self.schema, schema_rt)
-
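The round-trip above in standalone form; a minimal sketch with a one-field schema (the schema itself is illustrative):

from pyspark.sql.pandas.types import from_arrow_schema, to_arrow_schema
from pyspark.sql.types import LongType, StructField, StructType

schema = StructType([StructField("id", LongType(), True)])
arrow_schema = to_arrow_schema(schema)
# Converting to an Arrow schema and back should be lossless.
assert from_arrow_schema(arrow_schema, prefer_timestamp_ntz=True) == schema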
-    def test_createDataFrame_with_ndarray(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_createDataFrame_with_ndarray(arrow_enabled)
-
-    def check_createDataFrame_with_ndarray(self, arrow_enabled):
-        import numpy as np
-
-        dtypes = ["tinyint", "smallint", "int", "bigint", "float", "double"]
-        expected_dtypes = (
-            [[("value", t)] for t in dtypes]
-            + [[("value", t)] for t in dtypes]
-            + [[("_1", t), ("_2", t), ("_3", t)] for t in dtypes]
-        )
-        arrs = self.create_np_arrs
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            for arr, dtypes in zip(arrs, expected_dtypes):
-                df = self.spark.createDataFrame(arr)
-                self.assertEqual(df.dtypes, dtypes)
-                # assert on the comparison so it actually runs; reshape because
-                # collect() yields one Row per record, so 1-D input comes back as (n, 1)
-                self.assertTrue(np.array_equal(np.array(df.collect()).reshape(arr.shape), arr))
-
-            with self.assertRaisesRegex(
-                ValueError, "NumPy array input should be of 1 or 2 dimensions"
-            ):
-                self.spark.createDataFrame(np.array(0))
-
-    def test_createDataFrame_with_array_type(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_createDataFrame_with_array_type(arrow_enabled)
-
-    def check_createDataFrame_with_array_type(self, arrow_enabled):
-        pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [["x", "y"], ["y", "z"]]})
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            df = self.spark.createDataFrame(pdf)
-            result = df.collect()
-            expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
-            for r in range(len(expected)):
-                for e in range(len(expected[r])):
-                    self.assertTrue(expected[r][e] == result[r][e])
-
-    def test_toPandas_with_array_type(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_with_array_type(arrow_enabled)
-
-    def check_toPandas_with_array_type(self, arrow_enabled):
-        expected = [([1, 2], ["x", "y"]), ([3, 4], ["y", "z"])]
-        array_schema = StructType(
-            [StructField("a", ArrayType(IntegerType())), StructField("b", ArrayType(StringType()))]
-        )
-        df = self.spark.createDataFrame(expected, schema=array_schema)
-        pdf = df.toPandas()
-        result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
-        for r in range(len(expected)):
-            for e in range(len(expected[r])):
-                self.assertTrue(expected[r][e] == result[r][e])
-
-    def test_createDataFrame_with_map_type(self):
-        with QuietTest(self.sc):
-            for arrow_enabled in [True, False]:
-                with self.subTest(arrow_enabled=arrow_enabled):
-                    self.check_createDataFrame_with_map_type(arrow_enabled)
-
-    def check_createDataFrame_with_map_type(self, arrow_enabled):
-        map_data = [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]
-
-        pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4], "m": map_data})
-        for schema in (
-            "id long, m map<string, long>",
-            StructType().add("id", LongType()).add("m", MapType(StringType(), LongType())),
-        ):
-            with self.subTest(schema=schema):
-                with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                    if arrow_enabled and LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
-                        with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
-                            self.spark.createDataFrame(pdf, schema=schema).collect()
-                    else:
-                        df = self.spark.createDataFrame(pdf, schema=schema)
-
-                        result = df.collect()
-
-                        for row in result:
-                            i, m = row
-                            self.assertEqual(m, map_data[i])
-
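The same pyarrow version gate recurs in the map-type tests below; a hedged standalone form of the guard:

import pyarrow as pa
from distutils.version import LooseVersion  # mirrors the test file's version helper

# MapType over Arrow needs pyarrow >= 2.0.0, hence the repeated check.
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
    raise RuntimeError("MapType with Arrow enabled requires pyarrow >= 2.0.0")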
-    def test_createDataFrame_with_struct_type(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_createDataFrame_with_struct_type(arrow_enabled)
-
-    def check_createDataFrame_with_struct_type(self, arrow_enabled):
-        pdf = pd.DataFrame(
-            {"a": [Row(1, "a"), Row(2, "b")], "b": [{"s": 3, "t": "x"}, {"s": 4, "t": "y"}]}
-        )
-        for schema in (
-            "a struct<x int, y string>, b struct<s int, t string>",
-            StructType()
-            .add("a", StructType().add("x", LongType()).add("y", StringType()))
-            .add("b", StructType().add("s", LongType()).add("t", StringType())),
-        ):
-            with self.subTest(schema=schema):
-                with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                    df = self.spark.createDataFrame(pdf, schema)
-                    result = df.collect()
-                    expected = [(rec[0], Row(**rec[1])) for rec in pdf.to_records(index=False)]
-                    for r in range(len(expected)):
-                        for e in range(len(expected[r])):
-                            self.assertTrue(
-                                expected[r][e] == result[r][e], f"{expected[r][e]} == {result[r][e]}"
-                            )
-
-    def test_createDataFrame_with_string_dtype(self):
-        # SPARK-34521: spark.createDataFrame does not support Pandas StringDtype extension type
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
-            data = [["abc"], ["def"], [None], ["ghi"], [None]]
-            pandas_df = pd.DataFrame(data, columns=["col"], dtype="string")
-            schema = StructType([StructField("col", StringType(), True)])
-            df = self.spark.createDataFrame(pandas_df, schema=schema)
-
-            # dtypes won't match. Pandas has two different ways to store string columns:
-            # using ndarray (when dtype isn't specified) or using a StringArray when dtype="string".
-            # When calling dataframe#toPandas() it will use the ndarray version.
-            # Changing that to use a StringArray would be backwards incompatible.
-            assert_frame_equal(pandas_df, df.toPandas(), check_dtype=False)
-
-    def test_createDataFrame_with_int64(self):
-        # Nullable Int64 extension dtype should round-trip (cf. SPARK-34521, which covered StringDtype)
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
-            pandas_df = pd.DataFrame({"col": [1, 2, 3, None]}, dtype="Int64")
-            df = self.spark.createDataFrame(pandas_df)
-            assert_frame_equal(pandas_df, df.toPandas(), check_dtype=False)
-
-    def test_toPandas_with_map_type(self):
-        with QuietTest(self.sc):
-            for arrow_enabled in [True, False]:
-                with self.subTest(arrow_enabled=arrow_enabled):
-                    self.check_toPandas_with_map_type(arrow_enabled)
-
-    def check_toPandas_with_map_type(self, arrow_enabled):
-        origin = pd.DataFrame(
-            {"id": [0, 1, 2, 3], "m": [{}, {"a": 1}, {"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}]}
-        )
-
-        for schema in [
-            "id long, m map<string, long>",
-            StructType().add("id", LongType()).add("m", MapType(StringType(), LongType())),
-        ]:
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
-                df = self.spark.createDataFrame(origin, schema=schema)
-
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                if arrow_enabled and LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
-                    with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
-                        df.toPandas()
-                else:
-                    pdf = df.toPandas()
-                    assert_frame_equal(origin, pdf)
-
-    def test_toPandas_with_map_type_nulls(self):
-        with QuietTest(self.sc):
-            for arrow_enabled in [True, False]:
-                with self.subTest(arrow_enabled=arrow_enabled):
-                    self.check_toPandas_with_map_type_nulls(arrow_enabled)
-
-    def check_toPandas_with_map_type_nulls(self, arrow_enabled):
-        origin = pd.DataFrame(
-            {"id": [0, 1, 2, 3, 4], "m": [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]}
-        )
-
-        for schema in [
-            "id long, m map<string, long>",
-            StructType().add("id", LongType()).add("m", MapType(StringType(), LongType())),
-        ]:
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
-                df = self.spark.createDataFrame(origin, schema=schema)
-
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-                if arrow_enabled and LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
-                    with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
-                        df.toPandas()
-                else:
-                    pdf = df.toPandas()
-                    assert_frame_equal(origin, pdf)
-
-    def test_createDataFrame_with_int_col_names(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_createDataFrame_with_int_col_names(arrow_enabled)
-
-    def check_createDataFrame_with_int_col_names(self, arrow_enabled):
-        import numpy as np
-
-        pdf = pd.DataFrame(np.random.rand(4, 2))
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            df = self.spark.createDataFrame(pdf)
-            pdf_col_names = [str(c) for c in pdf.columns]
-            self.assertEqual(pdf_col_names, df.columns)
-
-    @unittest.skipIf(
-        not have_pyarrow or LooseVersion(pa.__version__) >= "2.0",
-        "will not fallback with pyarrow>=2.0",
-    )
-    def test_createDataFrame_fallback_enabled(self):
-        with QuietTest(self.sc):
-            with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
-                with warnings.catch_warnings(record=True) as warns:
-                    # we want the warnings to appear even if this test is run from a subclass
-                    warnings.simplefilter("always")
-                    df = self.spark.createDataFrame(
-                        pd.DataFrame({"a": [[Row()]]}), "a: array<struct<>>"
-                    )
-                    # Catch and check the last UserWarning.
-                    user_warns = [
-                        warn.message for warn in warns if isinstance(warn.message, UserWarning)
-                    ]
-                    self.assertTrue(len(user_warns) > 0)
-                    self.assertTrue("Attempting non-optimization" in str(user_warns[-1]))
-                    self.assertEqual(df.collect(), [Row(a=[Row()])])
-
-    @unittest.skipIf(
-        not have_pyarrow or LooseVersion(pa.__version__) >= "2.0",
-        "will not fallback with pyarrow>=2.0",
-    )
-    def test_createDataFrame_fallback_disabled(self):
-        with QuietTest(self.sc):
-            with self.assertRaises(PySparkTypeError) as pe:
-                self.spark.createDataFrame(pd.DataFrame({"a": [[Row()]]}), "a: array<struct<>>")
-
-            self.check_error(
-                exception=pe.exception,
-                error_class="UNSUPPORTED_DATA_TYPE_FOR_ARROW_VERSION",
-                message_parameters={"data_type": "Array of StructType"},
-            )
-
-    # Regression test for SPARK-23314
-    def test_timestamp_dst(self):
-        # Daylight saving time in Los Angeles ended on Sun, Nov 1, 2015 at 2:00 am
-        dt = [
-            datetime.datetime(2015, 11, 1, 0, 30),
-            datetime.datetime(2015, 11, 1, 1, 30),
-            datetime.datetime(2015, 11, 1, 2, 30),
-        ]
-        pdf = pd.DataFrame({"time": dt})
-
-        df_from_python = self.spark.createDataFrame(dt, "timestamp").toDF("time")
-        df_from_pandas = self.spark.createDataFrame(pdf)
-
-        assert_frame_equal(pdf, df_from_python.toPandas())
-        assert_frame_equal(pdf, df_from_pandas.toPandas())
-
-    # Regression test for SPARK-28003
-    def test_timestamp_nat(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_timestamp_nat(arrow_enabled)
-
-    def check_timestamp_nat(self, arrow_enabled):
-        dt = [pd.NaT, pd.Timestamp("2019-06-11"), None] * 100
-        pdf = pd.DataFrame({"time": dt})
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            df = self.spark.createDataFrame(pdf)
-
-            assert_frame_equal(pdf, df.toPandas())
-
-    def test_toPandas_batch_order(self):
-        def delay_first_part(partition_index, iterator):
-            if partition_index == 0:
-                time.sleep(0.1)
-            return iterator
-
-        # Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python
-        def run_test(num_records, num_parts, max_records, use_delay=False):
-            df = self.spark.range(num_records, numPartitions=num_parts).toDF("a")
-            if use_delay:
-                df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF()
-            with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}):
-                pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
-                assert_frame_equal(pdf, pdf_arrow)
-
-        cases = [
-            (1024, 512, 2),  # Use large num partitions for more likely collecting out of order
-            (64, 8, 2, True),  # Use delay in first partition to force collecting out of order
-            (64, 64, 1),  # Test single batch per partition
-            (64, 1, 64),  # Test single partition, single batch
-            (64, 1, 8),  # Test single partition, multiple batches
-            (30, 7, 2),  # Test different sized partitions
-        ]
-
-        for case in cases:
-            run_test(*case)
-
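All of the batch-order cases pivot on one knob; a hedged sketch of it in isolation (the spark session is assumed from the earlier sketches):

# Cap each Arrow RecordBatch at 2 rows: 8 rows over 4 partitions then produce
# several batches that the driver has to reassemble in partition order.
spark.conf.set("spark.sql.execution.arrow.maxRecordsPerBatch", "2")
pdf = spark.range(8, numPartitions=4).toPandas()
assert list(pdf["id"]) == list(range(8))  # order preserved after reassembly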
-    def test_createDataFrame_with_category_type(self):
-        pdf = pd.DataFrame({"A": ["a", "b", "c", "a"]})
-        pdf["B"] = pdf["A"].astype("category")
-        category_first_element = dict(enumerate(pdf["B"].cat.categories))[0]
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
-            arrow_df = self.spark.createDataFrame(pdf)
-            arrow_type = arrow_df.dtypes[1][1]
-            result_arrow = arrow_df.toPandas()
-            arrow_first_category_element = result_arrow["B"][0]
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
-            df = self.spark.createDataFrame(pdf)
-            spark_type = df.dtypes[1][1]
-            result_spark = df.toPandas()
-            spark_first_category_element = result_spark["B"][0]
-
-        assert_frame_equal(result_spark, result_arrow)
-
-        # ensure original category elements are string
-        self.assertIsInstance(category_first_element, str)
-        # with or without Arrow, the categorical column must map to a string column in Spark
-        self.assertEqual(spark_type, "string")
-        self.assertEqual(arrow_type, "string")
-        self.assertIsInstance(arrow_first_category_element, str)
-        self.assertIsInstance(spark_first_category_element, str)
-
-    def test_createDataFrame_with_float_index(self):
-        # SPARK-32098: float index should not produce duplicated or truncated Spark DataFrame
-        self.assertEqual(
-            self.spark.createDataFrame(pd.DataFrame({"a": [1, 2, 3]}, index=[2.0, 3.0, 4.0]))
-            .distinct()
-            .count(),
-            3,
-        )
-
-    def test_no_partition_toPandas(self):
-        # SPARK-32301: toPandas should work from a Spark DataFrame with no partitions
-        # Forward-ported from SPARK-32300.
-        pdf = self.spark.sparkContext.emptyRDD().toDF("col1 int").toPandas()
-        self.assertEqual(len(pdf), 0)
-        self.assertEqual(list(pdf.columns), ["col1"])
-
-    def test_createDataFrame_empty_partition(self):
-        pdf = pd.DataFrame({"c1": [1], "c2": ["string"]})
-        df = self.spark.createDataFrame(pdf)
-        self.assertEqual([Row(c1=1, c2="string")], df.collect())
-        self.assertGreater(self.spark.sparkContext.defaultParallelism, len(pdf))
-
-    def test_toPandas_error(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_error(arrow_enabled)
-
-    def check_toPandas_error(self, arrow_enabled):
-        with self.sql_conf(
-            {
-                "spark.sql.ansi.enabled": True,
-                "spark.sql.execution.arrow.pyspark.enabled": arrow_enabled,
-            }
-        ):
-            with self.assertRaises(ArithmeticException):
-                self.spark.sql("select 1/0").toPandas()
-
-    def test_toPandas_duplicate_field_names(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_duplicate_field_names(arrow_enabled)
-
-    def check_toPandas_duplicate_field_names(self, arrow_enabled):
-        data = [Row(Row("a", 1), Row(2, 3, "b", 4, "c")), Row(Row("x", 6), Row(7, 8, "y", 9, "z"))]
-        schema = (
-            StructType()
-            .add("struct", StructType().add("x", StringType()).add("x", IntegerType()))
-            .add(
-                "struct",
-                StructType()
-                .add("a", IntegerType())
-                .add("x", IntegerType())
-                .add("x", StringType())
-                .add("y", IntegerType())
-                .add("y", StringType()),
-            )
-        )
-        for struct_in_pandas in ["legacy", "row", "dict"]:
-            df = self.spark.createDataFrame(data, schema=schema)
-
-            with self.subTest(struct_in_pandas=struct_in_pandas):
-                with self.sql_conf(
-                    {
-                        "spark.sql.execution.arrow.pyspark.enabled": arrow_enabled,
-                        "spark.sql.execution.pandas.structHandlingMode": struct_in_pandas,
-                    }
-                ):
-                    if arrow_enabled and struct_in_pandas == "legacy":
-                        with self.assertRaisesRegex(
-                            UnsupportedOperationException, "DUPLICATED_FIELD_NAME_IN_ARROW_STRUCT"
-                        ):
-                            df.toPandas()
-                    else:
-                        if struct_in_pandas == "dict":
-                            expected = pd.DataFrame(
-                                [
-                                    [
-                                        {"x_0": "a", "x_1": 1},
-                                        {"a": 2, "x_0": 3, "x_1": "b", "y_0": 4, "y_1": "c"},
-                                    ],
-                                    [
-                                        {"x_0": "x", "x_1": 6},
-                                        {"a": 7, "x_0": 8, "x_1": "y", "y_0": 9, "y_1": "z"},
-                                    ],
-                                ],
-                                columns=schema.names,
-                            )
-                        else:
-                            expected = pd.DataFrame.from_records(data, columns=schema.names)
-                        assert_frame_equal(df.toPandas(), expected)
-
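A hedged note on the three structHandlingMode values exercised above: "row" keeps Row objects, "legacy" raises under Arrow when struct field names collide, and "dict" disambiguates them with positional suffixes:

# With "dict", a struct typed <x string, x int> surfaces in pandas as a dict
# keyed x_0, x_1 instead of raising DUPLICATED_FIELD_NAME_IN_ARROW_STRUCT.
spark.conf.set("spark.sql.execution.pandas.structHandlingMode", "dict")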
-    def test_createDataFrame_duplicate_field_names(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_createDataFrame_duplicate_field_names(arrow_enabled)
-
-    def check_createDataFrame_duplicate_field_names(self, arrow_enabled):
-        schema = (
-            StructType()
-            .add("struct", StructType().add("x", StringType()).add("x", IntegerType()))
-            .add(
-                "struct",
-                StructType()
-                .add("a", IntegerType())
-                .add("x", IntegerType())
-                .add("x", StringType())
-                .add("y", IntegerType())
-                .add("y", StringType()),
-            )
-        )
-
-        data = [Row(Row("a", 1), Row(2, 3, "b", 4, "c")), Row(Row("x", 6), Row(7, 8, "y", 9, "z"))]
-        pdf = pd.DataFrame.from_records(data, columns=schema.names)
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            df = self.spark.createDataFrame(pdf, schema)
-
-        self.assertEqual(df.collect(), data)
-
-    @unittest.skipIf(
-        LooseVersion(pd.__version__) >= LooseVersion("2.0.0"),
-        "TODO(SPARK-43506): Enable ArrowTests.test_toPandas_empty_columns for pandas 2.0.0.",
-    )
-    def test_toPandas_empty_columns(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_empty_columns(arrow_enabled)
-
-    def check_toPandas_empty_columns(self, arrow_enabled):
-        df = self.spark.range(2).select([])
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            assert_frame_equal(df.toPandas(), pd.DataFrame(columns=[], index=range(2)))
-
-    def test_createDataFrame_nested_timestamp(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_createDataFrame_nested_timestamp(arrow_enabled)
-
-    def check_createDataFrame_nested_timestamp(self, arrow_enabled):
-        schema = (
-            StructType()
-            .add("ts", TimestampType())
-            .add("ts_ntz", TimestampNTZType())
-            .add(
-                "struct", StructType().add("ts", TimestampType()).add("ts_ntz", TimestampNTZType())
-            )
-            .add("array", ArrayType(TimestampType()))
-            .add("array_ntz", ArrayType(TimestampNTZType()))
-            .add("map", MapType(StringType(), TimestampType()))
-            .add("map_ntz", MapType(StringType(), TimestampNTZType()))
-        )
-        data = [
-            Row(
-                datetime.datetime(2023, 1, 1, 0, 0, 0),
-                datetime.datetime(2023, 1, 1, 0, 0, 0),
-                Row(
-                    datetime.datetime(2023, 1, 1, 0, 0, 0),
-                    datetime.datetime(2023, 1, 1, 0, 0, 0),
-                ),
-                [datetime.datetime(2023, 1, 1, 0, 0, 0)],
-                [datetime.datetime(2023, 1, 1, 0, 0, 0)],
-                dict(ts=datetime.datetime(2023, 1, 1, 0, 0, 0)),
-                dict(ts_ntz=datetime.datetime(2023, 1, 1, 0, 0, 0)),
-            )
-        ]
-        pdf = pd.DataFrame.from_records(data, columns=schema.names)
-
-        with self.sql_conf(
-            {
-                "spark.sql.session.timeZone": "America/New_York",
-                "spark.sql.execution.arrow.pyspark.enabled": arrow_enabled,
-            }
-        ):
-            df = self.spark.createDataFrame(pdf, schema)
-
-            expected = Row(
-                ts=datetime.datetime(2022, 12, 31, 21, 0, 0),
-                ts_ntz=datetime.datetime(2023, 1, 1, 0, 0, 0),
-                struct=Row(
-                    ts=datetime.datetime(2022, 12, 31, 21, 0, 0),
-                    ts_ntz=datetime.datetime(2023, 1, 1, 0, 0, 0),
-                ),
-                array=[datetime.datetime(2022, 12, 31, 21, 0, 0)],
-                array_ntz=[datetime.datetime(2023, 1, 1, 0, 0, 0)],
-                map=dict(ts=datetime.datetime(2022, 12, 31, 21, 0, 0)),
-                map_ntz=dict(ts_ntz=datetime.datetime(2023, 1, 1, 0, 0, 0)),
-            )
-
-            self.assertEqual(df.first(), expected)
-
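Why ts lands on 2022-12-31 21:00 in the expected row above (a hedged reading: the suite appears to pin the process zone to America/Los_Angeles via the TZ variable that tearDownClass restores): the naive input is interpreted in the session zone, America/New_York, and rendered back in the process's local zone, three hours earlier, while the NTZ columns stay untouched. The arithmetic, verifiable in isolation:

from datetime import datetime
from zoneinfo import ZoneInfo

ny = datetime(2023, 1, 1, 0, 0, tzinfo=ZoneInfo("America/New_York"))
la = ny.astimezone(ZoneInfo("America/Los_Angeles"))
assert la.replace(tzinfo=None) == datetime(2022, 12, 31, 21, 0)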
-    @unittest.skipIf(sys.version_info < (3, 9), "zoneinfo is available from Python 3.9+")
-    def test_toPandas_timestamp_tzinfo(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_timestamp_tzinfo(arrow_enabled)
-
-    def check_toPandas_timestamp_tzinfo(self, arrow_enabled):
-        # SPARK-47202: Test timestamp with tzinfo in toPandas and createDataFrame
-        from zoneinfo import ZoneInfo
-
-        ts_tzinfo = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=ZoneInfo("America/Los_Angeles"))
-        data = pd.DataFrame({"a": [ts_tzinfo]})
-        df = self.spark.createDataFrame(data)
-
-        with self.sql_conf(
-            {
-                "spark.sql.execution.arrow.pyspark.enabled": arrow_enabled,
-            }
-        ):
-            pdf = df.toPandas()
-
-            expected = pd.DataFrame(
-                # Spark unsets tzinfo and converts the values to local times.
-                {"a": [datetime.datetime.fromtimestamp(calendar.timegm(ts_tzinfo.utctimetuple()))]}
-            )
-
-            assert_frame_equal(pdf, expected)
-
-    def test_toPandas_nested_timestamp(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_nested_timestamp(arrow_enabled)
-
-    def check_toPandas_nested_timestamp(self, arrow_enabled):
-        schema = (
-            StructType()
-            .add("ts", TimestampType())
-            .add("ts_ntz", TimestampNTZType())
-            .add(
-                "struct", StructType().add("ts", TimestampType()).add("ts_ntz", TimestampNTZType())
-            )
-            .add("array", ArrayType(TimestampType()))
-            .add("array_ntz", ArrayType(TimestampNTZType()))
-            .add("map", MapType(StringType(), TimestampType()))
-            .add("map_ntz", MapType(StringType(), TimestampNTZType()))
-        )
-        data = [
-            Row(
-                datetime.datetime(2023, 1, 1, 0, 0, 0),
-                datetime.datetime(2023, 1, 1, 0, 0, 0),
-                Row(
-                    datetime.datetime(2023, 1, 1, 0, 0, 0),
-                    datetime.datetime(2023, 1, 1, 0, 0, 0),
-                ),
-                [datetime.datetime(2023, 1, 1, 0, 0, 0)],
-                [datetime.datetime(2023, 1, 1, 0, 0, 0)],
-                dict(ts=datetime.datetime(2023, 1, 1, 0, 0, 0)),
-                dict(ts_ntz=datetime.datetime(2023, 1, 1, 0, 0, 0)),
-            )
-        ]
-        df = self.spark.createDataFrame(data, schema)
-
-        with self.sql_conf(
-            {
-                "spark.sql.session.timeZone": "America/New_York",
-                "spark.sql.execution.arrow.pyspark.enabled": arrow_enabled,
-                "spark.sql.execution.pandas.structHandlingMode": "row",
-            }
-        ):
-            pdf = df.toPandas()
-
-            expected = pd.DataFrame(
-                {
-                    "ts": [datetime.datetime(2023, 1, 1, 3, 0, 0)],
-                    "ts_ntz": [datetime.datetime(2023, 1, 1, 0, 0, 0)],
-                    "struct": [
-                        Row(
-                            datetime.datetime(2023, 1, 1, 3, 0, 0),
-                            datetime.datetime(2023, 1, 1, 0, 0, 0),
-                        )
-                    ],
-                    "array": [[datetime.datetime(2023, 1, 1, 3, 0, 0)]],
-                    "array_ntz": [[datetime.datetime(2023, 1, 1, 0, 0, 0)]],
-                    "map": [dict(ts=datetime.datetime(2023, 1, 1, 3, 0, 0))],
-                    "map_ntz": [dict(ts_ntz=datetime.datetime(2023, 1, 1, 0, 0, 0))],
-                }
-            )
-
-            assert_frame_equal(pdf, expected)
-
-    def test_createDataFrame_udt(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_createDataFrame_udt(arrow_enabled)
-
-    def check_createDataFrame_udt(self, arrow_enabled):
-        schema = (
-            StructType()
-            .add("point", ExamplePointUDT())
-            .add("struct", StructType().add("point", ExamplePointUDT()))
-            .add("array", ArrayType(ExamplePointUDT()))
-            .add("map", MapType(StringType(), ExamplePointUDT()))
-        )
-        data = [
-            Row(
-                ExamplePoint(1.0, 2.0),
-                Row(ExamplePoint(3.0, 4.0)),
-                [ExamplePoint(5.0, 6.0)],
-                dict(point=ExamplePoint(7.0, 8.0)),
-            )
-        ]
-        pdf = pd.DataFrame.from_records(data, columns=schema.names)
-
-        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrow_enabled}):
-            df = self.spark.createDataFrame(pdf, schema)
-
-        self.assertEqual(df.collect(), data)
-
-    def test_toPandas_udt(self):
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_toPandas_udt(arrow_enabled)
-
-    def check_toPandas_udt(self, arrow_enabled):
-        schema = (
-            StructType()
-            .add("point", ExamplePointUDT())
-            .add("struct", StructType().add("point", ExamplePointUDT()))
-            .add("array", ArrayType(ExamplePointUDT()))
-            .add("map", MapType(StringType(), ExamplePointUDT()))
-        )
-        data = [
-            Row(
-                ExamplePoint(1.0, 2.0),
-                Row(ExamplePoint(3.0, 4.0)),
-                [ExamplePoint(5.0, 6.0)],
-                dict(point=ExamplePoint(7.0, 8.0)),
-            )
-        ]
-        df = self.spark.createDataFrame(data, schema)
-
-        with self.sql_conf(
-            {
-                "spark.sql.execution.arrow.pyspark.enabled": arrow_enabled,
-                "spark.sql.execution.pandas.structHandlingMode": "row",
-            }
-        ):
-            pdf = df.toPandas()
-
-            expected = pd.DataFrame.from_records(data, columns=schema.names)
-
-            assert_frame_equal(pdf, expected)
-
-    def test_create_dataframe_namedtuples(self):
-        # SPARK-44980: Inherited namedtuples in createDataFrame
-        for arrow_enabled in [True, False]:
-            with self.subTest(arrow_enabled=arrow_enabled):
-                self.check_create_dataframe_namedtuples(arrow_enabled)
-
-    def check_create_dataframe_namedtuples(self, arrow_enabled):
-        MyTuple = namedtuple("MyTuple", ["a", "b", "c"])
-
-        class MyInheritedTuple(MyTuple):
-            pass
-
-        with self.sql_conf(
-            {
-                "spark.sql.execution.arrow.pyspark.enabled": arrow_enabled,
-            }
-        ):
-            df = self.spark.createDataFrame([MyInheritedTuple(1, 2, 3)])
-            self.assertEqual(df.first(), Row(a=1, b=2, c=3))
-
-            df = self.spark.createDataFrame([MyInheritedTuple(1, 2, MyInheritedTuple(1, 2, 3))])
-            self.assertEqual(df.first(), Row(a=1, b=2, c=Row(a=1, b=2, c=3)))
-
-    def test_negative_and_zero_batch_size(self):
-        # SPARK-47068: Negative and zero values should behave as an unlimited batch size.
-        with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 0}):
-            pdf = pd.DataFrame({"a": [123]})
-            assert_frame_equal(pdf, self.spark.createDataFrame(pdf).toPandas())
-
-        with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": -1}):
-            pdf = pd.DataFrame({"a": [123]})
-            assert_frame_equal(pdf, self.spark.createDataFrame(pdf).toPandas())
-
-
-@unittest.skipIf(
-    not have_pandas or not have_pyarrow,
-    cast(str, pandas_requirement_message or pyarrow_requirement_message),
-)
-class ArrowTests(ArrowTestsMixin, ReusedSQLTestCase):
-    pass
-
-
-@unittest.skipIf(
-    not have_pandas or not have_pyarrow,
-    cast(str, pandas_requirement_message or pyarrow_requirement_message),
-)
-class MaxResultArrowTests(unittest.TestCase):
-    # These tests are kept separate because 'spark.driver.maxResultSize' is a
-    # static configuration of the Spark context.
-
-    @classmethod
-    def setUpClass(cls):
-        cls.spark = SparkSession(
-            SparkContext(
-                "local[4]", cls.__name__, conf=SparkConf().set("spark.driver.maxResultSize", "10k")
-            )
-        )
-
-        # Explicitly enable Arrow and disable fallback.
-        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
-        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
-
-    @classmethod
-    def tearDownClass(cls):
-        if hasattr(cls, "spark"):
-            cls.spark.stop()
-
-    def test_exception_by_max_results(self):
-        with self.assertRaisesRegex(Exception, "is bigger than"):
-            self.spark.range(0, 10000, 1, 100).toPandas()
-
-
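Why this fixture is separate, as a hedged standalone sketch: static confs cannot be changed on a live session, so spark.driver.maxResultSize has to be set on the SparkConf before the context starts (the names below are illustrative):

from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession

# Static conf: must be in place before the SparkContext exists;
# spark.conf.set() on a running session would have no effect on it.
conf = SparkConf().set("spark.driver.maxResultSize", "10k")
spark = SparkSession(SparkContext("local[4]", "max-result-demo", conf=conf))
# Collecting results larger than ~10k now fails with an "... is bigger than ..." error.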
-class EncryptionArrowTests(ArrowTests):
-    @classmethod
-    def conf(cls):
-        return super(EncryptionArrowTests, cls).conf().set("spark.io.encryption.enabled", "true")
-
-
-class RDDBasedArrowTests(ArrowTests):
-    @classmethod
-    def conf(cls):
-        return (
-            super(RDDBasedArrowTests, cls)
-            .conf()
-            .set("spark.sql.execution.arrow.localRelationThreshold", "0")
-            # to test multiple partitions
-            .set("spark.sql.execution.arrow.maxRecordsPerBatch", "2")
-        )
-
-
-if __name__ == "__main__":
-    from pyspark.sql.tests.test_arrow import *  # noqa: F401
-
-    try:
-        import xmlrunner  # type: ignore
-
-        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
-    except ImportError:
-        testRunner = None
-    unittest.main(testRunner=testRunner, verbosity=2)