mindspore 2.3.0__cp39-none-any.whl → 2.3.0rc2__cp39-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (423)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +0 -1512
  3. mindspore/__init__.py +1 -2
  4. mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
  6. mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
  7. mindspore/_checkparam.py +25 -5
  8. mindspore/_extends/graph_kernel/model/graph_parallel.py +1 -1
  9. mindspore/_extends/parse/__init__.py +2 -2
  10. mindspore/_extends/parse/compile_config.py +0 -29
  11. mindspore/_extends/parse/namespace.py +2 -2
  12. mindspore/_extends/parse/parser.py +5 -21
  13. mindspore/_extends/parse/resources.py +7 -5
  14. mindspore/_extends/parse/standard_method.py +59 -40
  15. mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
  16. mindspore/amp.py +5 -26
  17. mindspore/bin/cache_admin +0 -0
  18. mindspore/bin/cache_server +0 -0
  19. mindspore/boost/adasum.py +1 -1
  20. mindspore/boost/base.py +1 -1
  21. mindspore/boost/boost_cell_wrapper.py +1 -1
  22. mindspore/boost/grad_freeze.py +2 -2
  23. mindspore/boost/less_batch_normalization.py +6 -9
  24. mindspore/common/__init__.py +1 -8
  25. mindspore/common/_register_for_tensor.py +9 -8
  26. mindspore/common/api.py +65 -275
  27. mindspore/common/dtype.py +4 -8
  28. mindspore/common/dump.py +5 -2
  29. mindspore/common/jit_config.py +1 -1
  30. mindspore/common/lazy_inline.py +2 -14
  31. mindspore/common/parameter.py +15 -14
  32. mindspore/common/recompute.py +5 -20
  33. mindspore/common/sparse_tensor.py +6 -21
  34. mindspore/common/tensor.py +52 -100
  35. mindspore/communication/__init__.py +11 -6
  36. mindspore/communication/management.py +94 -92
  37. mindspore/context.py +18 -180
  38. mindspore/dataset/engine/datasets.py +46 -69
  39. mindspore/dataset/engine/datasets_user_defined.py +53 -72
  40. mindspore/dataset/engine/datasets_vision.py +2 -2
  41. mindspore/dataset/engine/queue.py +38 -56
  42. mindspore/dataset/engine/validators.py +5 -11
  43. mindspore/dataset/vision/__init__.py +5 -5
  44. mindspore/dataset/vision/c_transforms.py +5 -5
  45. mindspore/dataset/vision/py_transforms_util.py +1 -1
  46. mindspore/dataset/vision/transforms.py +46 -591
  47. mindspore/dataset/vision/utils.py +1 -121
  48. mindspore/dataset/vision/validators.py +3 -9
  49. mindspore/hal/__init__.py +1 -7
  50. mindspore/hal/device.py +1 -1
  51. mindspore/include/api/model.h +0 -3
  52. mindspore/include/dataset/vision.h +2 -54
  53. mindspore/include/mindapi/base/types.h +0 -1
  54. mindspore/lib/libdnnl.so.2 +0 -0
  55. mindspore/lib/libmindspore.so +0 -0
  56. mindspore/lib/libmindspore_backend.so +0 -0
  57. mindspore/lib/libmindspore_common.so +0 -0
  58. mindspore/lib/libmindspore_core.so +0 -0
  59. mindspore/lib/libmindspore_glog.so.0 +0 -0
  60. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  61. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  62. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  63. mindspore/lib/libmindspore_shared_lib.so +0 -0
  64. mindspore/lib/libmpi_adapter.so +0 -0
  65. mindspore/lib/libmpi_collective.so +0 -0
  66. mindspore/lib/libnnacl.so +0 -0
  67. mindspore/lib/libopencv_core.so.4.5 +0 -0
  68. mindspore/lib/libps_cache.so +0 -0
  69. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +0 -35
  70. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  71. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
  72. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  73. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +0 -72
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  75. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/{aclnn_all_finite.h → aclnn_add_custom.h} +11 -9
  76. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +1 -1
  77. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +1 -1
  78. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
  79. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +12 -184
  80. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +15 -7
  81. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +15 -7
  82. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
  83. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
  84. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +31 -77
  85. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +31 -77
  86. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64/libcust_opmaster_rt2.0.so +0 -0
  87. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
  88. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +5 -4
  89. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so +0 -0
  90. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  91. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  92. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  93. mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
  94. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  95. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
  96. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
  97. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
  98. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +286 -275
  99. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
  100. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
  101. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
  102. mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
  103. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/add_impl.h +0 -1
  104. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +0 -1
  105. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -3
  106. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/backend_param.h +0 -5
  107. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/cast/cast_tiling.h +45 -1
  108. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/compare/compare_impl.h +0 -1
  109. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_impl.h +4 -8
  110. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/flash_attention_score_tiling.h +4 -11
  111. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/flash_attention_score/kernel/flash_attention_score_mix_hwsync.h +0 -18
  112. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_kernel.h +0 -6
  113. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/internal_rtbackend.h +75 -1
  114. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/kernel/matmul.h +5 -5
  115. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +3 -18
  116. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_common_tiling.h +5 -5
  117. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/pp_matmul_info.h +2 -2
  118. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_data.h +3 -36
  119. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/kernel/matmul_stridedslice_fusion.h +2 -2
  120. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +4 -22
  121. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +2 -16
  122. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +3 -1
  123. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_impl.h +4 -5
  124. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +4 -9
  125. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/attention_param.h +2 -5
  126. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +0 -1
  127. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_qkv_param.h +4 -10
  128. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +12 -0
  129. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +0 -1
  130. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +0 -1
  131. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +1 -1
  132. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/backend.h +2 -10
  133. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/elewise_utils.h +1 -5
  134. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log.h +0 -1
  135. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +0 -17
  136. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/math.h +7 -2
  137. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
  138. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
  139. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
  140. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
  141. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
  142. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
  143. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
  144. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
  145. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
  146. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
  147. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
  148. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
  149. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
  150. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
  151. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
  152. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
  153. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
  154. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
  155. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
  156. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
  157. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
  158. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
  159. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
  160. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
  161. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
  162. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
  163. mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
  164. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  165. mindspore/mindrecord/filewriter.py +2 -2
  166. mindspore/mint/__init__.py +40 -720
  167. mindspore/mint/nn/__init__.py +7 -89
  168. mindspore/mint/nn/functional.py +16 -165
  169. mindspore/mint/optim/adamw.py +16 -15
  170. mindspore/nn/__init__.py +2 -0
  171. mindspore/nn/cell.py +98 -97
  172. mindspore/nn/extend/basic.py +2 -2
  173. mindspore/nn/extend/embedding.py +1 -1
  174. mindspore/nn/extend/layer/normalization.py +5 -7
  175. mindspore/nn/generator.py +297 -0
  176. mindspore/nn/layer/activation.py +3 -4
  177. mindspore/nn/layer/basic.py +16 -79
  178. mindspore/nn/layer/conv.py +8 -17
  179. mindspore/nn/layer/embedding.py +4 -1
  180. mindspore/nn/layer/math.py +1 -1
  181. mindspore/nn/layer/normalization.py +1 -1
  182. mindspore/nn/layer/pooling.py +0 -5
  183. mindspore/nn/layer/rnn_cells.py +2 -2
  184. mindspore/nn/loss/loss.py +19 -19
  185. mindspore/nn/optim/adasum.py +1 -1
  186. mindspore/nn/optim/sgd.py +2 -3
  187. mindspore/nn/probability/distribution/exponential.py +1 -1
  188. mindspore/nn/probability/distribution/geometric.py +1 -1
  189. mindspore/nn/probability/distribution/logistic.py +1 -1
  190. mindspore/nn/wrap/cell_wrapper.py +1 -25
  191. mindspore/nn/wrap/loss_scale.py +1 -24
  192. mindspore/numpy/array_ops.py +1 -5
  193. mindspore/numpy/dtypes.py +3 -3
  194. mindspore/numpy/math_ops.py +8 -8
  195. mindspore/ops/__init__.py +1 -1
  196. mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -75
  197. mindspore/ops/_vmap/vmap_array_ops.py +0 -27
  198. mindspore/ops/_vmap/vmap_math_ops.py +1 -29
  199. mindspore/ops/_vmap/vmap_nn_ops.py +18 -19
  200. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +8 -34
  201. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +9 -2
  202. mindspore/ops/auto_generate/gen_arg_handler.py +0 -26
  203. mindspore/ops/auto_generate/gen_extend_func.py +27 -603
  204. mindspore/ops/auto_generate/gen_ops_def.py +203 -993
  205. mindspore/ops/auto_generate/gen_ops_prim.py +402 -1946
  206. mindspore/ops/auto_generate/pyboost_inner_prim.py +20 -90
  207. mindspore/ops/composite/base.py +6 -3
  208. mindspore/ops/composite/math_ops.py +1 -1
  209. mindspore/ops/composite/multitype_ops/_compile_utils.py +17 -24
  210. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  211. mindspore/ops/extend/__init__.py +3 -2
  212. mindspore/ops/extend/array_func.py +51 -10
  213. mindspore/ops/extend/nn_func.py +78 -2
  214. mindspore/ops/function/__init__.py +13 -8
  215. mindspore/ops/function/array_func.py +179 -455
  216. mindspore/ops/function/clip_func.py +1 -1
  217. mindspore/ops/function/grad/grad_func.py +3 -3
  218. mindspore/ops/function/math_func.py +103 -117
  219. mindspore/ops/function/nn_func.py +163 -275
  220. mindspore/ops/function/other_func.py +2 -2
  221. mindspore/ops/function/random_func.py +69 -202
  222. mindspore/ops/function/sparse_func.py +4 -4
  223. mindspore/ops/functional.py +327 -332
  224. mindspore/ops/operations/__init__.py +3 -13
  225. mindspore/ops/operations/_grad_ops.py +27 -3
  226. mindspore/ops/operations/_inner_ops.py +356 -53
  227. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  228. mindspore/ops/operations/_tensor_array.py +8 -8
  229. mindspore/ops/operations/array_ops.py +65 -82
  230. mindspore/ops/operations/comm_ops.py +93 -784
  231. mindspore/ops/operations/custom_ops.py +28 -51
  232. mindspore/ops/operations/debug_ops.py +4 -4
  233. mindspore/ops/operations/inner_ops.py +2 -2
  234. mindspore/ops/operations/manually_defined/ops_def.py +4 -304
  235. mindspore/ops/operations/math_ops.py +50 -3
  236. mindspore/ops/operations/nn_ops.py +247 -14
  237. mindspore/ops/operations/other_ops.py +3 -3
  238. mindspore/ops/operations/random_ops.py +1 -1
  239. mindspore/ops/operations/sparse_ops.py +1 -1
  240. mindspore/ops/primitive.py +8 -9
  241. mindspore/ops/silent_check.py +5 -5
  242. mindspore/ops_generate/arg_dtype_cast.py +9 -2
  243. mindspore/ops_generate/arg_handler.py +0 -26
  244. mindspore/ops_generate/gen_aclnn_implement.py +4 -1
  245. mindspore/ops_generate/gen_ops.py +4 -26
  246. mindspore/ops_generate/gen_pyboost_func.py +12 -41
  247. mindspore/ops_generate/gen_utils.py +0 -21
  248. mindspore/ops_generate/pyboost_utils.py +2 -7
  249. mindspore/ops_generate/template.py +0 -1
  250. mindspore/parallel/_auto_parallel_context.py +1 -21
  251. mindspore/parallel/_tensor.py +5 -0
  252. mindspore/parallel/_transformer/transformer.py +1 -1
  253. mindspore/parallel/_utils.py +1 -15
  254. mindspore/parallel/algo_parameter_config.py +3 -1
  255. mindspore/parallel/checkpoint_transform.py +9 -12
  256. mindspore/parallel/cluster/process_entity/_api.py +29 -28
  257. mindspore/parallel/cluster/process_entity/_utils.py +3 -13
  258. mindspore/parallel/cluster/run.py +16 -13
  259. mindspore/parallel/parameter_broadcast.py +2 -2
  260. mindspore/parallel/shard.py +17 -31
  261. mindspore/profiler/__init__.py +2 -3
  262. mindspore/profiler/common/util.py +2 -107
  263. mindspore/profiler/envprofiling.py +1 -1
  264. mindspore/profiler/parser/ascend_analysis/constant.py +21 -8
  265. mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -82
  266. mindspore/profiler/parser/ascend_analysis/function_event.py +28 -43
  267. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +27 -49
  268. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +10 -15
  269. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +20 -25
  270. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +5 -5
  271. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +1 -10
  272. mindspore/profiler/parser/ascend_hccl_generator.py +1 -4
  273. mindspore/profiler/parser/ascend_msprof_exporter.py +22 -43
  274. mindspore/profiler/parser/ascend_timeline_generator.py +5 -7
  275. mindspore/profiler/parser/minddata_parser.py +3 -72
  276. mindspore/profiler/profiling.py +59 -176
  277. mindspore/rewrite/api/node.py +1 -1
  278. mindspore/rewrite/common/namespace.py +5 -5
  279. mindspore/rewrite/parsers/assign_parser.py +0 -2
  280. mindspore/rewrite/parsers/class_def_parser.py +4 -8
  281. mindspore/run_check/_check_version.py +1 -1
  282. mindspore/scipy/fft.py +3 -1
  283. mindspore/scipy/linalg.py +3 -2
  284. mindspore/scipy/ops.py +3 -5
  285. mindspore/scipy/optimize/__init__.py +2 -2
  286. mindspore/train/__init__.py +4 -4
  287. mindspore/train/anf_ir_pb2.py +2 -8
  288. mindspore/train/callback/__init__.py +2 -5
  289. mindspore/train/callback/_backup_and_restore.py +2 -2
  290. mindspore/train/callback/_checkpoint.py +16 -104
  291. mindspore/train/callback/_landscape.py +1 -1
  292. mindspore/train/callback/_time_monitor.py +1 -1
  293. mindspore/train/data_sink.py +4 -5
  294. mindspore/train/dataset_helper.py +20 -45
  295. mindspore/train/model.py +38 -266
  296. mindspore/train/serialization.py +105 -256
  297. mindspore/train/summary/_summary_adapter.py +1 -1
  298. mindspore/version.py +1 -1
  299. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +2 -2
  300. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +303 -420
  301. mindspore/_extends/pijit/__init__.py +0 -23
  302. mindspore/_extends/pijit/pijit_func_white_list.py +0 -343
  303. mindspore/common/file_system.py +0 -48
  304. mindspore/common/generator.py +0 -260
  305. mindspore/common/no_inline.py +0 -54
  306. mindspore/common/np_dtype.py +0 -25
  307. mindspore/communication/comm_func.py +0 -1140
  308. mindspore/hal/memory.py +0 -326
  309. mindspore/lib/libavcodec.so.59 +0 -0
  310. mindspore/lib/libavdevice.so.59 +0 -0
  311. mindspore/lib/libavfilter.so.8 +0 -0
  312. mindspore/lib/libavformat.so.59 +0 -0
  313. mindspore/lib/libavutil.so.57 +0 -0
  314. mindspore/lib/libmindspore_np_dtype.so +0 -0
  315. mindspore/lib/libswresample.so.4 +0 -0
  316. mindspore/lib/libswscale.so.6 +0 -0
  317. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.cpp +0 -326
  318. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/all_finite.py +0 -180
  319. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.json +0 -58
  320. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_576ceaeef5870c451cab59af55ea46ad.o +0 -0
  321. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.json +0 -58
  322. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_86a73ff6e28d734c96bb8d3054f7dd18.o +0 -0
  323. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.json +0 -58
  324. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/ascend910b/all_finite/AllFinite_f55e0ebaad1f2f572e43677336992fa0.o +0 -0
  325. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/all_finite.json +0 -109
  326. mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/kernel/config/ascend910b/binary_info_config.json +0 -38
  327. mindspore/lib/plugin/ascend/custom_compiler/OWNERS +0 -12
  328. mindspore/lib/plugin/ascend/custom_compiler/setup.py +0 -255
  329. mindspore/lib/plugin/ascend/custom_compiler/start.sh +0 -26
  330. mindspore/lib/plugin/ascend/custom_compiler/template.json +0 -40
  331. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme.h +0 -24
  332. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/acme_op.h +0 -69
  333. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/base_type.h +0 -133
  334. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_creator.h +0 -32
  335. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/op_param.h +0 -35
  336. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/include/tiling_info.h +0 -60
  337. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/kernel_register.h +0 -37
  338. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/platform_configs.h +0 -89
  339. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/core/platform/rt_funcs.h +0 -135
  340. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/add_op.h +0 -34
  341. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_backoff_base.h +0 -62
  342. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_elewise_op.h +0 -33
  343. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_ops.h +0 -88
  344. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/asd_pa_op.h +0 -45
  345. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/cast_op.h +0 -52
  346. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/ops/host_src/matmul_op.h +0 -95
  347. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/asd_utils.h +0 -84
  348. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/acme/src/utils/comm_utils.h +0 -61
  349. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp32.h +0 -224
  350. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/and_impl.h +0 -29
  351. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/div_impl.h +0 -29
  352. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_impl.h +0 -48
  353. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/elewise_binary_tiling.h +0 -25
  354. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/and_kernel.h +0 -46
  355. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/div_kernel.h +0 -46
  356. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_base.h +0 -260
  357. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/elewise_binary_kernel.h +0 -35
  358. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/max_kernel.h +0 -66
  359. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/min_kernel.h +0 -66
  360. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/mul_kernel.h +0 -66
  361. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/kernel/or_kernel.h +0 -46
  362. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/max_impl.h +0 -29
  363. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/min_impl.h +0 -29
  364. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/mul_impl.h +0 -29
  365. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_binary/or_impl.h +0 -29
  366. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/abs_impl.h +0 -29
  367. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_impl.h +0 -47
  368. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/elewise_unary_tiling.h +0 -24
  369. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/exp_impl.h +0 -29
  370. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/abs_kernel.h +0 -45
  371. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_base.h +0 -148
  372. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/elewise_unary_kernel.h +0 -31
  373. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/exp_kernel.h +0 -45
  374. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/ln_kernel.h +0 -45
  375. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/not_kernel.h +0 -45
  376. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/reciprocal_kernel.h +0 -45
  377. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/relu_kernel.h +0 -55
  378. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/rsqrt_kernel.h +0 -45
  379. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/kernel/sqrt_kernel.h +0 -45
  380. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/ln_impl.h +0 -29
  381. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/not_impl.h +0 -29
  382. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/reciprocal_impl.h +0 -29
  383. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/relu_impl.h +0 -29
  384. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/rsqrt_impl.h +0 -29
  385. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/elewise_unary/sqrt_impl.h +0 -29
  386. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_impl.h +0 -45
  387. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/grouped_matmul_tiling.h +0 -187
  388. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul.h +0 -245
  389. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_interface.h +0 -24
  390. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/kernel/grouped_matmul_utils.h +0 -111
  391. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/grouped_matmul/tiling_data.h +0 -54
  392. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/compare_param.h +0 -31
  393. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/elewise_param.h +0 -41
  394. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/grouped_matmul_param.h +0 -40
  395. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/profiling_util.h +0 -364
  396. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_utils.h +0 -69
  397. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_creator.h +0 -39
  398. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/register/kernel_registry.h +0 -114
  399. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/utils.h +0 -98
  400. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.json +0 -19
  401. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix.o +0 -0
  402. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aic_0.o +0 -0
  403. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MatMulPostFusionMixTactic/matmul_postfusion_mix_mix_aiv_0.o +0 -0
  404. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.json +0 -19
  405. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix.o +0 -0
  406. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aic_0.o +0 -0
  407. mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/MultiMatMulPostFusionMixTactic/multi_matmul_postfusion_mix_mix_aiv_0.o +0 -0
  408. mindspore/mint/linalg/__init__.py +0 -22
  409. mindspore/nn/layer/embedding_service.py +0 -531
  410. mindspore/nn/layer/embedding_service_layer.py +0 -393
  411. mindspore/ops/function/reshard_func.py +0 -102
  412. mindspore/ops/operations/_infer_ops.py +0 -19
  413. mindspore/ops/operations/reshard_ops.py +0 -53
  414. mindspore/profiler/common/process_pool.py +0 -41
  415. mindspore/profiler/common/singleton.py +0 -28
  416. mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
  417. mindspore/profiler/parser/ascend_memory_generator.py +0 -185
  418. mindspore/train/callback/_cluster_monitor.py +0 -201
  419. mindspore/train/callback/_flops_collector.py +0 -238
  420. mindspore/train/callback/_mindio_ttp.py +0 -443
  421. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  422. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  423. {mindspore-2.3.0.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2020-2024 Huawei Technologies Co., Ltd
+ # Copyright 2020-2022 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -188,9 +188,6 @@ class Parameter(Tensor_):
  parallel_optimizer (bool): It is used to filter the weight shard operation in `SEMI_AUTO_PARALLEL` or
  `AUTO_PARALLEL` mode. It works only when enable parallel optimizer in
  `mindspore.set_auto_parallel_context()`. Default: ``True`` .
- storage_format (str): Only Ascend device target is supported. It is used to specify the format of the weight
- loaded to the device. By default, the format is not changed. The optional values are ``"FRACTAL_NZ"`` ,
- ``"NC1HWC0"`` , ``"FRACTAL_Z"`` , etc. Default: ``""`` .

  Examples:
  >>> import numpy as np
@@ -226,7 +223,6 @@ class Parameter(Tensor_):
  # it's better to make the Initializer a kind of tensor.
  obj.init_mode = None
  obj.is_default_input_init = init_data_flag
- obj.from_ckpt = False
  if obj.has_init:
  obj.init_mode = default_input
  else:
@@ -243,8 +239,7 @@ class Parameter(Tensor_):
  return (
  Parameter, (data, self.name, self.requires_grad, self.layerwise_parallel))

- def __init__(self, default_input, name=None, requires_grad=True, layerwise_parallel=False, parallel_optimizer=True,
- storage_format=""):
+ def __init__(self, default_input, name=None, requires_grad=True, layerwise_parallel=False, parallel_optimizer=True):
  self.param_info = ParamInfo()
  self.init_in_server = False
  self.name = name
@@ -267,7 +262,6 @@ class Parameter(Tensor_):
  self.is_in_shard = False
  self._pipeline_stage_list = []
  self.slice_num = 1
- self.from_ckpt = False
  if -1 in self.shape:
  raise ValueError(f"All shape elements of the Parameter must be positive. But got None.")
  if isinstance(default_input, (Tensor_, Tensor)):
@@ -295,7 +289,6 @@ class Parameter(Tensor_):
  raise TypeError(f"The type of the argument 'default_input' must be in ['Tensor', 'int', 'float',"
  f" 'numpy.ndarray', 'list']. But got type {type(default_input)}.")
  self.param_info.parameter_shape = self.shape
- self.param_info.storage_format = storage_format

  import mindspore.ops.operations.other_ops as other_ops
  self.load = other_ops.Load()
@@ -338,6 +331,11 @@ class Parameter(Tensor_):
  # in other place, so we can make a Tensor without copy data.
  return (Tensor, data)
  # make a copy of Tensor to init the parameter.
+ if data.dtype == mstype.bfloat16:
+ from mindspore.ops.operations import Cast
+ cpu_cast = Cast().set_device("CPU")
+ data = cpu_cast(data, mstype.float32)
+ return (Tensor, data.asnumpy(), mstype.bfloat16)
  if data.dtype == mstype.qint4x2:
  return (Tensor, data.asnumpy(), mstype.qint4x2)
  return (Tensor, data.asnumpy())
@@ -750,7 +748,6 @@ class Parameter(Tensor_):
  def requires_grad(self, value=True):
  if not isinstance(value, bool):
  raise TypeError("The argument `requires_grad` must be bool type")
- Tensor_.wait_pipeline(self)
  self.param_info.requires_grad = value

  @property
@@ -792,10 +789,14 @@ class Parameter(Tensor_):
  return new_param

  @_LogActionOnce(logger=logger, key='add_pipeline_stage')
+ @deprecated("2.3", "add_pipeline_stage")
  def add_pipeline_stage(self, stage):
  """
  Add a pipeline stage to the parameter.

+ Note:
+ This interface is deprecated in 2.3, and will be deleted in the future.
+
  Args:
  stage(int): The pipeline stage to be added.

@@ -813,9 +814,9 @@ class Parameter(Tensor_):
  f"Use .set_dtype(xxx) to change the dtype.")

  @staticmethod
- def _set_data_check_input_valid(current_shape, data_shape, current_tensor_is_init, incoming_tensor_is_init,
- from_ckpt, slice_shape=False, slice_num=1):
- if not from_ckpt and incoming_tensor_is_init and not current_tensor_is_init:
+ def _set_data_check_input_valid(current_shape, data_shape, current_tensor_is_init,
+ incoming_tensor_is_init, slice_shape=False, slice_num=1):
+ if incoming_tensor_is_init and not current_tensor_is_init:
  raise TypeError("The original tensor data is initialized, but the argument 'data' is not initialized."
  "Please initialize 'data' before call this method.")
  if tuple(current_shape) != tuple(data_shape):
@@ -870,7 +871,7 @@ class Parameter(Tensor_):
  incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
  current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
  Parameter._set_data_check_input_valid(self.shape, data.shape, current_tensor_is_init, incoming_tensor_is_init,
- self.from_ckpt, slice_shape, self.slice_num)
+ slice_shape, self.slice_num)
  if self.dtype != data.dtype:
  if mstype.implicit_conversion_seq.get(self.dtype) < mstype.implicit_conversion_seq.get(data.dtype):
  self._raise_type_error(data.dtype)
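The added branch above casts bfloat16 parameter data to float32 on the CPU before converting it to NumPy, since NumPy has no bfloat16 type. A minimal sketch of that pattern against the public API, assuming a MindSpore build with bfloat16 support (the variable names are illustrative):

import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.ops.operations import Cast

x = Tensor(np.arange(4, dtype=np.float32), dtype=ms.bfloat16)

# NumPy cannot hold bfloat16, so cast to float32 on the host first,
# as the added lines do
cpu_cast = Cast().set_device("CPU")
x_fp32 = cpu_cast(x, ms.float32)

# the float32 copy plus the recorded dtype is enough to rebuild the tensor
rebuilt = Tensor(x_fp32.asnumpy(), dtype=ms.bfloat16)
print(rebuilt.dtype)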
@@ -20,11 +20,10 @@ from mindspore import log as logger
  from mindspore.nn.cell import Cell
  from mindspore import context
  from mindspore.common.tensor import Tensor
- from mindspore import ops
  from mindspore.ops.composite import GradOperation
  from mindspore.common._register_for_recompute import recompute_registry
  from mindspore.common.api import _pynative_executor
- from mindspore.common.generator import get_rng_state, set_rng_state
+ from mindspore.nn.generator import get_rng_state, set_rng_state


  class _WrapCell(Cell):
@@ -52,11 +51,10 @@ class _RecomputeCell(Cell):

  def __init__(self, block):
  """Initialize Recompute cell."""
- super(_RecomputeCell, self).__init__(auto_prefix=False)
+ super(_RecomputeCell, self).__init__()
  self.args = []
  self.kwargs = []
  self.wrap_cell = _WrapCell(block)
-
  self.net = block
  self.internal_params = []
  self.save_rng_state = False
@@ -87,15 +85,13 @@ class _RecomputeCell(Cell):
  self.kwargs.pop()
  if kwargs:
  input_args = list(input_args) + list(kwargs.values())
- # To detach inputs to avoid erasing auto grad meta info of origin inputs.
- input_args = _detach_input(input_args)
  try:
  pre_rng_state = get_rng_state()
- set_rng_state(self.cpu_rng_state)
+ set_rng_state(*self.cpu_rng_state)
  _pynative_executor.set_is_run_recompute(True)
  grads = self.grad(self.net, self.internal_params)(*input_args, grad_input)
  _pynative_executor.set_is_run_recompute(False)
- set_rng_state(pre_rng_state)
+ set_rng_state(*pre_rng_state)
  except Exception as err:
  _pynative_executor.clear_res()
  raise err
@@ -167,17 +163,6 @@ def _padding_input_grads(args, input_grads):
  "but got {}".format(len(args), len(input_grads)))


- def _detach_input(input_arg):
- if isinstance(input_arg, Tensor):
- return ops.stop_gradient(input_arg)
- if isinstance(input_arg, (list, tuple)):
- detach_inputs = []
- for arg in input_arg:
- detach_inputs.append(_detach_input(arg))
- return detach_inputs if isinstance(input_arg, list) else tuple(detach_inputs)
- return input_arg
-
-
  def _check_validation(block):
  if not isinstance(block, Cell):
  raise TypeError("Recompute function now only support block which inherited from Cell!")
@@ -218,7 +203,7 @@ def recompute(block, *args, **kwargs):
  Examples:
  >>> import numpy as np
  >>> import mindspore.nn as nn
- >>> from mindspore import ops
+ >>> import mindspore.ops as ops
  >>> from mindspore import Tensor, recompute
  >>> class MyCell(nn.Cell):
  ... def __init__(self):
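For context, the recompute() helper whose docstring is edited above wraps a sub-cell so that its forward activations are recomputed during backpropagation instead of being stored. A minimal, hypothetical usage sketch (the cell and layer choices are illustrative, not taken from the package docstring), assuming PyNative mode:

import numpy as np
import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, recompute

ms.set_context(mode=ms.PYNATIVE_MODE)

class MyCell(nn.Cell):
    def __init__(self):
        super().__init__()
        self.dense = nn.Dense(4, 4)
        self.relu = ops.ReLU()

    def construct(self, x):
        # activations of self.dense are dropped after the forward pass
        # and recomputed when gradients are taken
        y = recompute(self.dense, x)
        return self.relu(y)

net = MyCell()
x = Tensor(np.ones((2, 4)).astype(np.float32))
print(ms.grad(net)(x).shape)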
@@ -642,29 +642,14 @@ class CSRTensor(CSRTensor_):
  values indicated by `values` and row and column positions indicated by `indptr`
  and `indices`.

- For example, if indptr is [0, 2, 5, 6], indices is [0, 3, 1, 2, 4, 2], values is
- [1., 2., 3., 4., 5., 6.], shape is (3, 5), then the dense representation of the sparse tensor will be:
+ For example, if indptr is [0, 1, 2, 2], indices is [1, 2], values is [1., 2.], shape is
+ (3, 4), then the dense representation of the sparse tensor will be:

  .. code-block::
- [[1., 0., 0., 2., 0.],
- [0., 3., 4., 0., 5.],
- [0., 0., 6., 0., 0.]]
-
- The length of `indptr` should equal to `shape[0]+1`, where the elements should be equal or monotonically
- increasing and the maximum value should be equal to the number of non-zero values in the tensor. The length
- of `indices` and `values` should be equal to the number of non-zero values in the tensor. To be concrete, get
- the query indices of none-zero elements in every line according to `indptr`. Then get the column positions of
- none-zero elements in every line by looking up query indices in `indices`. Finally, get the actual values of
- none-zero elements in every line by looking up query indices in `values`. In the former example, 'indptr' of
- [0, 2, 5, 6] represents that the indices of 0th row of the tensor origins from [0, 2), the indices of
- the 1st row of the tensor origins from [2, 5) and the 2nd row of the tensor origins from [5, 6). For example,
- the column positions of the non-zero elements of the 0th row in the tensor are provided by the [0, 2) elements in
- `indices` (i.e. [0, 3]) and the corresponding values are provided by the [0, 2) elements in `values`
- (i.e. [1., 2.]). The column positions of the non-zero elements of the 1st row in the tensor are provided by the
- [2, 5) elements in `indices` (i.e. [1, 2, 4]) and the corresponding values are provided by the [2, 5) elements in
- `values` (i.e. [3., 4., 5.]). The column positions of the non-zero elements of the 2nd row in the tensor are
- provided by the [5, 6) elements in `indices` (i.e. [2]) and the corresponding values are provided by the [5, 6)
- elements in `values` (i.e. [6.]).
+
+ [[0., 1., 0., 0.],
+ [0., 0., 2., 0.],
+ [0., 0., 0., 0.]]

  Common arithmetic operations include: addition (+), subtraction (-), multiplication (*),
  and division (/). For details about operations supported by `CSRTensor`, see
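The shorter docstring example on the rc2 side can be reproduced directly. A small sketch building exactly that 3 x 4 CSR tensor and materializing it (assumes CSRTensor.to_dense is available, as in current MindSpore releases):

import mindspore as ms
from mindspore import Tensor, CSRTensor

indptr = Tensor([0, 1, 2, 2], dtype=ms.int32)   # row pointers, length shape[0] + 1
indices = Tensor([1, 2], dtype=ms.int32)        # column positions of the non-zeros
values = Tensor([1., 2.], dtype=ms.float32)     # the non-zero entries
shape = (3, 4)

csr = CSRTensor(indptr, indices, values, shape)
print(csr.to_dense())
# [[0. 1. 0. 0.]
#  [0. 0. 2. 0.]
#  [0. 0. 0. 0.]]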
@@ -50,8 +50,7 @@ def _check_input_data_type(input_data):
  valid_dtypes = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
  np.float16, np.float32, np.float64, np.bool_, np.str_, np.complex64, np.complex128)
  if isinstance(input_data, np.ndarray) and input_data.dtype not in valid_dtypes and \
- input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S' and \
- input_data.dtype.kind != 'T': # Support dtype np.str_ and npy_bfloat16
+ input_data.dtype.kind != 'U' and input_data.dtype.kind != 'S': # Support dtype np.str_
  new_line = '\n'
  for index, x in np.ndenumerate(input_data):
  if np.array(x).dtype not in valid_dtypes:
@@ -344,7 +343,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  return out

  def __bool__(self):
- data = self.asnumpy()
+ if self.dtype == mstype.bfloat16:
+ data = self.float().asnumpy()
+ else:
+ data = self.asnumpy()
  if data.shape == ():
  return bool(data)
  if data.shape == (1,):
@@ -360,15 +362,24 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  raise ValueError(message)

  def __int__(self):
- data = self.asnumpy()
+ if self.dtype == mstype.bfloat16:
+ data = self.float().asnumpy()
+ else:
+ data = self.asnumpy()
  return self._convert_scalar_(data, int, "Only one element tensors can be converted to Python scalars")

  def __float__(self):
- data = self.asnumpy()
+ if self.dtype == mstype.bfloat16:
+ data = self.float().asnumpy()
+ else:
+ data = self.asnumpy()
  return self._convert_scalar_(data, float, "Only one element tensors can be converted to Python scalars")

  def __index__(self):
- data = self.asnumpy()
+ if self.dtype == mstype.bfloat16:
+ data = self.float().asnumpy()
+ else:
+ data = self.asnumpy()
  if data.dtype not in ["int8", "int16", "int32", "int64", "bool"]:
  raise ValueError("Only integer tensors of a single element can be converted to an index.")
  return self._convert_scalar_(data, int,
@@ -505,6 +516,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  def __str__(self):
  if self.dtype == mstype.type_none:
  return "Unknown Tensor type!"
+ if self.dtype == mstype.bfloat16:
+ return str(self.float().asnumpy())
  return str(self.asnumpy())

  def __getstate__(self):
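The conversions above all take the same detour for bfloat16: since NumPy has no bfloat16, the tensor is first viewed as float32. A short sketch of what that means for user code, assuming a build with bfloat16 enabled:

import mindspore as ms
from mindspore import Tensor

x = Tensor([3], dtype=ms.bfloat16)

# __int__, __float__ and __bool__ go through a float32 copy internally
print(int(x), float(x), bool(x))

# the same .float().asnumpy() path can be used directly when a NumPy copy is needed
print(x.float().asnumpy())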
@@ -524,13 +537,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  """
  return self._shape

- @shape.setter
- def shape(self, shape_value):
- r"""
- Set the shape value.
- """
- self._shape = shape_value
-
  @property
  def dtype(self):
  """Return the dtype of the tensor (:class:`mindspore.dtype`)."""
@@ -2841,7 +2847,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  if slice_num_of_persistent_data > 1:
  self.assign_value(Tensor_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
  else:
- self.assign_value(Tensor_.from_numpy(data))
+ if self.dtype == mstype.bfloat16:
+ # The dtype of data is np.float32 when mstype is bfloat16,
+ # so we create tensor_ by init func instead of asnumpy
+ self.assign_value(Tensor_(data, self.dtype))
+ else:
+ self.assign_value(Tensor_.from_numpy(data))
  return self

  def resize(self, *new_shape):
@@ -3161,9 +3172,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  location found is given. If 'right', return the last such index. If there is
  no suitable index, return either 0 or N (where N is the length of the tensor).
  Default: ``left`` .
- sorter (Union[int, list, tuple, Tensor]): optional tensor of
- integer indices that sort the tensor into ascending order on the innermost dimension
- and the type must be int64. They are typically the result of argsort. Default: ``None`` .
+ sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional tensor of
+ integer indices that sort the tensor into ascending order. They are typically
+ the result of argsort. Default: ``None`` .

  Returns:
  Tensor, array of insertion points with the same shape as `v`.
@@ -3184,21 +3195,31 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  if side not in ('left', 'right'):
  raise ValueError(f"For 'Tensor.searchsorted', the argument 'side' should be one of in "
  f"['left', 'right'], but got {side}.")
+ a = self.astype(mstype.float32)
  if not isinstance(v, Tensor):
  v = tensor_operator_registry.get('make_tensor')(v)
+ shape = v.shape
  if sorter is not None:
- if not isinstance(sorter, (int, list, tuple, Tensor)):
+ if not isinstance(sorter, (int, float, bool, list, tuple, Tensor)):
  raise TypeError("For Tensor.searchsorted, the type of the argument 'sorter' must be one of 'int', "
- "'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
+ "'float', 'bool', 'list', 'tuple', 'Tensor', but got {}.".format(type(sorter)))
  if not isinstance(sorter, Tensor):
  sorter = tensor_operator_registry.get('make_tensor')(sorter)
- if sorter.size != self.size:
- raise ValueError('The size of sorter must be the same as the Tensor')
-
- dtype = mstype.int32
- right = (side == 'right')
- search_sorted_ = tensor_operator_registry.get('searchsorted')(dtype, right)
- return search_sorted_(self, v, sorter)
+ if sorter.ndim != 1 or sorter.size != a.size:
+ raise ValueError('sorter must be 1-D array with the same size as the Tensor')
+ sorter = sorter.reshape(sorter.shape + (1,))
+ a = tensor_operator_registry.get('gather_nd')(a, sorter)
+ less_op = tensor_operator_registry.get('__le__') if side == 'left' else tensor_operator_registry.get('__lt__')
+ i = tensor_operator_registry.get('fill')(mstype.int32, shape, 0)
+ j = tensor_operator_registry.get('fill')(mstype.int32, shape, a.size)
+
+ sort_range = tuple(range(math.ceil(math.log2(tensor_operator_registry.get('shape_mul')(a.shape) + 1))))
+ for _ in sort_range:
+ mid = (i - -j) // 2
+ mask = less_op(v, tensor_operator_registry.get('gather_nd')(a, mid.reshape(mid.shape + (1,))))
+ i = tensor_operator_registry.get('select')(mask, i, mid)
+ j = tensor_operator_registry.get('select')(mask, mid, j)
+ return j

  def gather_nd(self, indices):
  r"""
@@ -3215,35 +3236,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  validator.check_is_int(batch_dims, "batch_dims")
  return tensor_operator_registry.get('gather')(self, input_indices, axis, batch_dims)

- def uniform(self, from_=0., to=1., generator=None):
- r"""
- Generates random numbers in the half-open interval [from_, to).
-
- Args:
- from_ (number): The lower bound of the interval.
- to (number): The upper bound of the interval.
- generator (Generator, optional): The random seed. Default: None.
-
- Returns:
- Tensor, with the same shape as tensor.
-
- Raises:
- TypeError: If `from_` is larger than `to`.
-
- Supported Platforms:
- ``Ascend``
-
- Examples:
- >>> import mindspore
- >>> x = mindspore.ops.ones((4, 2))
- >>> generator = mindspore.Generator()
- >>> generator.manual_seed(100)
- >>> output = x.uniform(1., 2., generator)
- >>> print(output.shape)
- (4, 2)
- """
- return tensor_operator_registry.get('uniform')(self, from_, to, generator)
-
  def var(self, axis=None, ddof=0, keepdims=False):
  """
  Compute the variance along the specified axis.
@@ -3364,12 +3356,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  [10. 35.]
  """
  if initial is None:
- res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
- else:
- res = tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial
- if dtype is not None and (dtype == mstype.bool_):
- res = res.astype(mstype.bool_)
- return res
+ return tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype)
+ return tensor_operator_registry.get("sum")(self, axis, keepdims, dtype=dtype) + initial

  def sum_to_size(self, *size):
  r"""
@@ -3539,11 +3527,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  validator.check_value_type('index', index, (Tensor, Tensor_,), 'Tensor.gather_elements')
  return tensor_operator_registry.get('gather_elements')(self, dim, index)

- def nonzero(self, as_tuple=False):
+ def nonzero(self):
  """
  For details, please refer to :func:`mindspore.ops.nonzero`.
  """
- return tensor_operator_registry.get('nonzero')(self, as_tuple)
+ return tensor_operator_registry.get('nonzero')(self)

  def svd(self, full_matrices=False, compute_uv=True):
  """
@@ -4725,42 +4713,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
  return _index_put(self, values, indices)


- def move_to(self, to, blocking=True):
- r"""
- Copy Tensor to target device synchronously or asynchronously, default synchronously. only support PyNative mode.
-
- Args:
- to (str): a string type value, one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
- blocking (bool): a bool type value, using synchronous copy or asynchronous copy.
- Default: ``True`` , synchronous copy.
-
- Returns:
- New Tensor, storged on target device which with the same type and shape as the "self Tensor".
-
- Raises:
- ValueError: If the type of `blocking` is not bool type.
- ValueError: If the value of `to` is not one of ``"Ascend"``, ``"GPU"``, ``"CPU"``.
- ValueError: If the run mode is not PyNative mode.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> import mindspore as ms
- >>> from mindspore import Tensor
- >>> x = ms.Tensor([1, 2, 3], ms.int64)
- >>> new_tensor = x.move_to("CPU")
- """
- if not isinstance(blocking, bool):
- raise ValueError(f"The type of 'blocking' must be bool, but got {blocking}")
- if to not in ("Ascend", "GPU", "CPU"):
- raise ValueError(f"The value of 'to' must be one of ['Ascend', 'GPU', 'CPU'], but got {to}")
- mode = context.get_context("mode")
- if mode != context.PYNATIVE_MODE:
- raise ValueError(f"The method of 'move_to' only supported in pynative mode, but got: {mode}.")
- return Tensor(Tensor_.move_to(self, to, blocking))
-
-
  def _offload(self):
  r"""
  Offload tensor parameter to host. Currently, only support for pynative mode.
@@ -4856,4 +4808,4 @@ def _check_astype_and_convert(dtype):
  return dtype


- setattr(tensor_operator_registry, 'vm_compare', _vm_compare)
+ tensor_operator_registry.register('vm_compare', _vm_compare)
@@ -17,21 +17,26 @@ Collective communication interface.

  Note that the APIs in the following list need to preset communication environment variables.

- For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method
- without any third-party or configuration file dependencies.
- Please see the `msrun start up
- <https://www.mindspore.cn/tutorials/experts/zh-CN/master/parallel/msrun_launcher.html>`_
+ For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
+ Please see the `rank table Startup
+ <https://www.mindspore.cn/tutorials/experts/en/master/parallel/rank_table.html>`_
  for more details.
+
+ For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
+ <https://www.mindspore.cn/tutorials/experts/en/master/parallel/mpirun.html>`_ .
+
+ For the CPU device, users need to write a dynamic cluster startup script, please see the `Dynamic Cluster Startup
+ <https://www.mindspore.cn/tutorials/experts/en/master/parallel/dynamic_cluster.html>`_ .
  """

  from mindspore.communication.management import GlobalComm, init, release, get_rank, \
  get_group_size, get_world_rank_from_group_rank, \
  get_group_rank_from_world_rank, create_group, HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP, \
- MCCL_WORLD_COMM_GROUP, get_local_rank, get_local_rank_size, destroy_group, get_process_group_ranks
+ MCCL_WORLD_COMM_GROUP, get_local_rank, get_local_rank_size, destroy_group


  __all__ = [
  "GlobalComm", "init", "release", "get_rank", "get_group_size", "get_world_rank_from_group_rank",
  "get_group_rank_from_world_rank", "create_group", "HCCL_WORLD_COMM_GROUP", "NCCL_WORLD_COMM_GROUP",
- "MCCL_WORLD_COMM_GROUP", "get_local_rank", "get_local_rank_size", "destroy_group", "get_process_group_ranks"
+ "MCCL_WORLD_COMM_GROUP", "get_local_rank", "get_local_rank_size", "destroy_group"
  ]
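Both docstring variants above state the same requirement: the process must be launched by one of the supported startup methods so the communication environment variables are present before init() is called. A minimal sketch using only the names exported above; it is only meaningful inside such a launched distributed job:

from mindspore.communication import init, get_rank, get_group_size, release

init()   # reads the communication environment variables set by the launcher
print(f"rank {get_rank()} of {get_group_size()} processes")
release()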