mindspore 2.0.0a0__cp37-none-any.whl → 2.0.0rc1__cp37-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (693)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Third_Party_Open_Source_Software_Notice +9064 -0
  3. mindspore/__init__.py +4 -2
  4. mindspore/_akg/akg/composite/build_module.py +11 -0
  5. mindspore/_akg/akg/config/repository_cuda.json +11 -0
  6. mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -3
  7. mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
  8. mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
  9. mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
  10. mindspore/_check_jit_forbidden_api.py +102 -0
  11. mindspore/_checkparam.py +1066 -1001
  12. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +4 -3
  13. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -48
  14. mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -4
  15. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -4
  16. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
  17. mindspore/_extends/parse/__init__.py +5 -3
  18. mindspore/_extends/parse/namespace.py +16 -1
  19. mindspore/_extends/parse/parser.py +107 -22
  20. mindspore/_extends/parse/resources.py +0 -7
  21. mindspore/_extends/parse/standard_method.py +885 -413
  22. mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
  23. mindspore/amp.py +52 -57
  24. mindspore/bin/cache_admin +0 -0
  25. mindspore/bin/cache_server +0 -0
  26. mindspore/boost/boost.py +2 -2
  27. mindspore/boost/boost_cell_wrapper.py +38 -20
  28. mindspore/boost/dim_reduce.py +3 -3
  29. mindspore/boost/group_loss_scale_manager.py +1 -1
  30. mindspore/common/__init__.py +4 -6
  31. mindspore/common/_decorator.py +2 -0
  32. mindspore/common/_register_for_adapter.py +55 -0
  33. mindspore/common/_stub_tensor.py +201 -0
  34. mindspore/common/_utils.py +41 -7
  35. mindspore/common/api.py +215 -141
  36. mindspore/common/dtype.py +8 -1
  37. mindspore/common/dump.py +2 -2
  38. mindspore/common/initializer.py +4 -2
  39. mindspore/common/jit_config.py +17 -13
  40. mindspore/common/mutable.py +33 -13
  41. mindspore/common/parameter.py +23 -21
  42. mindspore/common/seed.py +8 -24
  43. mindspore/common/sparse_tensor.py +62 -41
  44. mindspore/common/tensor.py +852 -1154
  45. mindspore/communication/__init__.py +2 -2
  46. mindspore/communication/_comm_helper.py +11 -4
  47. mindspore/communication/management.py +22 -21
  48. mindspore/config/op_info.config +501 -1008
  49. mindspore/config/super_bar_config.json +512 -0
  50. mindspore/context.py +201 -23
  51. mindspore/dataset/__init__.py +6 -6
  52. mindspore/dataset/audio/__init__.py +7 -7
  53. mindspore/dataset/audio/transforms.py +670 -30
  54. mindspore/dataset/audio/utils.py +47 -4
  55. mindspore/dataset/audio/validators.py +223 -1
  56. mindspore/dataset/callback/ds_callback.py +2 -2
  57. mindspore/dataset/core/config.py +210 -14
  58. mindspore/dataset/core/validator_helpers.py +2 -2
  59. mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
  60. mindspore/dataset/debug/debug_hook.py +65 -0
  61. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  62. mindspore/dataset/engine/__init__.py +7 -3
  63. mindspore/dataset/engine/cache_client.py +1 -1
  64. mindspore/dataset/engine/datasets.py +322 -66
  65. mindspore/dataset/engine/datasets_audio.py +80 -76
  66. mindspore/dataset/engine/datasets_standard_format.py +51 -38
  67. mindspore/dataset/engine/datasets_text.py +232 -118
  68. mindspore/dataset/engine/datasets_user_defined.py +41 -17
  69. mindspore/dataset/engine/datasets_vision.py +746 -225
  70. mindspore/dataset/engine/graphdata.py +75 -10
  71. mindspore/dataset/engine/iterators.py +45 -5
  72. mindspore/dataset/engine/offload.py +48 -28
  73. mindspore/dataset/engine/validators.py +117 -8
  74. mindspore/dataset/text/__init__.py +6 -5
  75. mindspore/dataset/text/transforms.py +86 -3
  76. mindspore/dataset/text/utils.py +6 -4
  77. mindspore/dataset/text/validators.py +25 -0
  78. mindspore/dataset/transforms/__init__.py +3 -2
  79. mindspore/dataset/transforms/c_transforms.py +1 -1
  80. mindspore/dataset/transforms/transforms.py +2 -2
  81. mindspore/dataset/utils/__init__.py +2 -1
  82. mindspore/dataset/utils/line_reader.py +121 -0
  83. mindspore/dataset/vision/__init__.py +2 -3
  84. mindspore/dataset/vision/c_transforms.py +9 -9
  85. mindspore/dataset/vision/py_transforms.py +5 -5
  86. mindspore/dataset/vision/py_transforms_util.py +2 -0
  87. mindspore/dataset/vision/transforms.py +160 -161
  88. mindspore/dataset/vision/utils.py +3 -3
  89. mindspore/experimental/map_parameter.py +38 -26
  90. mindspore/include/OWNERS +0 -1
  91. mindspore/include/api/callback/callback.h +9 -13
  92. mindspore/include/api/callback/ckpt_saver.h +2 -2
  93. mindspore/include/api/callback/loss_monitor.h +2 -2
  94. mindspore/include/api/callback/lr_scheduler.h +5 -5
  95. mindspore/include/api/callback/time_monitor.h +2 -2
  96. mindspore/include/api/callback/train_accuracy.h +4 -6
  97. mindspore/include/api/cfg.h +19 -6
  98. mindspore/include/api/context.h +44 -9
  99. mindspore/include/api/delegate.h +1 -1
  100. mindspore/include/api/metrics/accuracy.h +2 -2
  101. mindspore/include/api/metrics/metrics.h +4 -3
  102. mindspore/include/api/model.h +9 -4
  103. mindspore/include/api/model_parallel_runner.h +2 -2
  104. mindspore/include/api/net.h +12 -11
  105. mindspore/include/api/serialization.h +19 -3
  106. mindspore/include/api/types.h +3 -3
  107. mindspore/include/dataset/constants.h +7 -0
  108. mindspore/include/dataset/text.h +59 -0
  109. mindspore/include/mindapi/base/type_id.h +1 -0
  110. mindspore/lib/libdnnl.so.2 +0 -0
  111. mindspore/lib/libicudata.so.69 +0 -0
  112. mindspore/lib/libicui18n.so.69 +0 -0
  113. mindspore/lib/libicuuc.so.69 +0 -0
  114. mindspore/lib/libmindspore.so +0 -0
  115. mindspore/lib/libmindspore_backend.so +0 -0
  116. mindspore/lib/libmindspore_common.so +0 -0
  117. mindspore/lib/libmindspore_core.so +0 -0
  118. mindspore/lib/libmindspore_glog.so.0 +0 -0
  119. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  120. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  121. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  122. mindspore/lib/libmindspore_shared_lib.so +0 -0
  123. mindspore/lib/libmpi_adapter.so +0 -0
  124. mindspore/lib/libmpi_collective.so +0 -0
  125. mindspore/lib/libnnacl.so +0 -0
  126. mindspore/lib/libopencv_core.so.4.5 +0 -0
  127. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  128. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  129. mindspore/lib/libps_cache.so +0 -0
  130. mindspore/lib/plugin/ascend/libakg.so +0 -0
  131. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  132. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  133. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  134. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  135. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  136. mindspore/lib/plugin/cpu/libakg.so +0 -0
  137. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  138. mindspore/lib/plugin/{libmindspore_ascend.so → libmindspore_ascend.so.2} +0 -0
  139. mindspore/log.py +1 -1
  140. mindspore/mindrecord/filereader.py +18 -0
  141. mindspore/mindrecord/filewriter.py +197 -34
  142. mindspore/mindrecord/shardreader.py +9 -0
  143. mindspore/mindrecord/shardwriter.py +1 -1
  144. mindspore/mindrecord/tools/cifar100_to_mr.py +3 -3
  145. mindspore/mindrecord/tools/cifar10_to_mr.py +3 -3
  146. mindspore/mindrecord/tools/csv_to_mr.py +3 -3
  147. mindspore/mindrecord/tools/imagenet_to_mr.py +16 -11
  148. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  149. mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
  150. mindspore/nn/__init__.py +0 -4
  151. mindspore/nn/cell.py +204 -132
  152. mindspore/nn/dynamic_lr.py +1 -1
  153. mindspore/nn/grad/cell_grad.py +7 -6
  154. mindspore/nn/layer/__init__.py +5 -4
  155. mindspore/nn/layer/activation.py +40 -89
  156. mindspore/nn/layer/basic.py +255 -624
  157. mindspore/nn/layer/channel_shuffle.py +7 -6
  158. mindspore/nn/layer/combined.py +1 -1
  159. mindspore/nn/layer/container.py +41 -4
  160. mindspore/nn/layer/conv.py +64 -28
  161. mindspore/nn/layer/dense.py +9 -8
  162. mindspore/nn/layer/embedding.py +27 -25
  163. mindspore/nn/layer/image.py +53 -46
  164. mindspore/nn/layer/math.py +97 -105
  165. mindspore/nn/layer/normalization.py +117 -86
  166. mindspore/nn/layer/padding.py +185 -95
  167. mindspore/nn/layer/pooling.py +817 -414
  168. mindspore/nn/layer/rnn_cells.py +10 -15
  169. mindspore/nn/layer/rnns.py +37 -38
  170. mindspore/nn/layer/thor_layer.py +11 -12
  171. mindspore/nn/layer/timedistributed.py +5 -5
  172. mindspore/nn/layer/transformer.py +701 -0
  173. mindspore/nn/learning_rate_schedule.py +8 -8
  174. mindspore/nn/loss/__init__.py +5 -4
  175. mindspore/nn/loss/loss.py +334 -199
  176. mindspore/nn/optim/ada_grad.py +6 -6
  177. mindspore/nn/optim/adadelta.py +2 -3
  178. mindspore/nn/optim/adafactor.py +4 -5
  179. mindspore/nn/optim/adam.py +126 -62
  180. mindspore/nn/optim/adamax.py +3 -4
  181. mindspore/nn/optim/adasum.py +6 -6
  182. mindspore/nn/optim/asgd.py +2 -2
  183. mindspore/nn/optim/ftrl.py +67 -38
  184. mindspore/nn/optim/lamb.py +4 -5
  185. mindspore/nn/optim/lars.py +2 -2
  186. mindspore/nn/optim/lazyadam.py +43 -4
  187. mindspore/nn/optim/momentum.py +6 -5
  188. mindspore/nn/optim/optimizer.py +3 -1
  189. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  190. mindspore/nn/optim/rmsprop.py +1 -1
  191. mindspore/nn/optim/rprop.py +8 -9
  192. mindspore/nn/optim/sgd.py +19 -13
  193. mindspore/nn/optim/thor.py +10 -15
  194. mindspore/nn/probability/__init__.py +0 -2
  195. mindspore/nn/probability/bijector/bijector.py +4 -4
  196. mindspore/nn/probability/bijector/invert.py +1 -1
  197. mindspore/nn/probability/bijector/softplus.py +2 -2
  198. mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
  199. mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
  200. mindspore/nn/probability/distribution/_utils/utils.py +9 -15
  201. mindspore/nn/probability/distribution/bernoulli.py +3 -3
  202. mindspore/nn/probability/distribution/beta.py +1 -1
  203. mindspore/nn/probability/distribution/categorical.py +5 -7
  204. mindspore/nn/probability/distribution/cauchy.py +3 -3
  205. mindspore/nn/probability/distribution/distribution.py +2 -2
  206. mindspore/nn/probability/distribution/exponential.py +2 -2
  207. mindspore/nn/probability/distribution/gamma.py +3 -3
  208. mindspore/nn/probability/distribution/geometric.py +1 -1
  209. mindspore/nn/probability/distribution/gumbel.py +3 -3
  210. mindspore/nn/probability/distribution/half_normal.py +15 -11
  211. mindspore/nn/probability/distribution/laplace.py +16 -13
  212. mindspore/nn/probability/distribution/logistic.py +2 -2
  213. mindspore/nn/probability/distribution/normal.py +1 -1
  214. mindspore/nn/probability/distribution/poisson.py +1 -1
  215. mindspore/nn/probability/distribution/student_t.py +20 -15
  216. mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
  217. mindspore/nn/probability/distribution/uniform.py +2 -2
  218. mindspore/nn/reinforcement/_tensors_queue.py +3 -3
  219. mindspore/nn/reinforcement/tensor_array.py +2 -2
  220. mindspore/nn/sparse/sparse.py +2 -2
  221. mindspore/nn/wrap/cell_wrapper.py +27 -10
  222. mindspore/nn/wrap/grad_reducer.py +2 -2
  223. mindspore/nn/wrap/loss_scale.py +40 -24
  224. mindspore/numpy/array_creations.py +33 -22
  225. mindspore/numpy/array_ops.py +35 -30
  226. mindspore/numpy/logic_ops.py +6 -27
  227. mindspore/numpy/math_ops.py +22 -19
  228. mindspore/numpy/utils.py +1 -1
  229. mindspore/numpy/utils_const.py +108 -58
  230. mindspore/ops/_constants.py +0 -6
  231. mindspore/ops/_grad/__init__.py +2 -1
  232. mindspore/ops/_grad/grad_array_ops.py +86 -117
  233. mindspore/ops/_grad/grad_base.py +23 -1
  234. mindspore/ops/_grad/grad_clip_ops.py +2 -3
  235. mindspore/ops/_grad/grad_comm_ops.py +34 -24
  236. mindspore/ops/_grad/grad_implementations.py +9 -45
  237. mindspore/ops/_grad/grad_inner_ops.py +47 -4
  238. mindspore/ops/_grad/grad_math_ops.py +142 -117
  239. mindspore/ops/_grad/grad_nn_ops.py +71 -165
  240. mindspore/ops/_grad/grad_sequence_ops.py +296 -0
  241. mindspore/ops/_grad/grad_sparse.py +7 -6
  242. mindspore/ops/_grad_experimental/__init__.py +1 -0
  243. mindspore/ops/_grad_experimental/grad_array_ops.py +150 -15
  244. mindspore/ops/_grad_experimental/grad_image_ops.py +16 -7
  245. mindspore/ops/_grad_experimental/grad_inner_ops.py +1 -22
  246. mindspore/ops/_grad_experimental/grad_linalg_ops.py +4 -11
  247. mindspore/ops/_grad_experimental/grad_math_ops.py +210 -89
  248. mindspore/ops/_grad_experimental/grad_nn_ops.py +26 -22
  249. mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
  250. mindspore/ops/_grad_experimental/grad_sparse_ops.py +49 -8
  251. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
  252. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +2 -2
  253. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
  254. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
  255. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +4 -4
  256. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
  257. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
  258. mindspore/ops/_op_impl/_custom_op/correction_mul.py +2 -2
  259. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
  260. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -5
  261. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
  262. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
  263. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
  264. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
  265. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
  266. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
  267. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
  268. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
  269. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
  270. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
  271. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
  272. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
  273. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
  274. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
  275. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
  276. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
  277. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
  278. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
  279. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -4
  280. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
  281. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
  282. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
  283. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
  284. mindspore/ops/_op_impl/aicpu/__init__.py +236 -4
  285. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  286. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_v1.py → adaptive_avg_pool_2d.py} +6 -5
  287. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  288. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  289. mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
  290. mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
  291. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  292. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -43
  293. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  294. mindspore/{compression/common/__init__.py → ops/_op_impl/aicpu/bessel_i0.py} +15 -8
  295. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  296. mindspore/ops/_op_impl/aicpu/conj.py +11 -0
  297. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +0 -3
  298. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  299. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  300. mindspore/ops/_op_impl/aicpu/{adaptive_avg_pool_2d_grad_v1.py → digamma.py} +7 -9
  301. mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
  302. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  303. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  304. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
  305. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  306. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  307. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  308. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  309. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  310. mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/lgamma.py} +16 -10
  311. mindspore/ops/_op_impl/aicpu/mirror_pad.py +0 -4
  312. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
  313. mindspore/ops/_op_impl/aicpu/mul.py +3 -1
  314. mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
  315. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  316. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  317. mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
  318. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  319. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  320. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  321. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  322. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  323. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  324. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
  325. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
  326. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  327. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  328. mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
  329. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
  330. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  331. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  332. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  333. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  334. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  335. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
  336. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  337. mindspore/ops/_op_impl/aicpu/sparse_slice.py +4 -0
  338. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +6 -0
  339. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  340. mindspore/ops/_op_impl/aicpu/trans_data.py +1 -0
  341. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  342. mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
  343. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
  344. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
  345. mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
  346. mindspore/ops/_op_impl/cpu/sparse_slice.py +4 -0
  347. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +6 -0
  348. mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
  349. mindspore/ops/_op_impl/tbe/__init__.py +27 -611
  350. mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
  351. mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
  352. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
  353. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +1 -0
  354. mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
  355. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
  356. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
  357. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
  358. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
  359. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
  360. mindspore/ops/_op_impl/tbe/cast.py +0 -2
  361. mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
  362. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
  363. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +2 -2
  364. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
  365. mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
  366. mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
  367. mindspore/ops/_op_impl/tbe/matmul_ds.py +2 -0
  368. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
  369. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
  370. mindspore/ops/_op_impl/tbe/scatter_mul.py +2 -0
  371. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -2
  372. mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
  373. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
  374. mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
  375. mindspore/ops/_register_for_op.py +1 -0
  376. mindspore/ops/_utils/__init__.py +1 -2
  377. mindspore/ops/_utils/utils.py +19 -40
  378. mindspore/ops/_vmap/vmap_array_ops.py +116 -38
  379. mindspore/ops/_vmap/vmap_base.py +16 -9
  380. mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
  381. mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
  382. mindspore/ops/_vmap/vmap_grad_nn_ops.py +7 -5
  383. mindspore/ops/_vmap/vmap_image_ops.py +12 -5
  384. mindspore/ops/_vmap/vmap_math_ops.py +46 -5
  385. mindspore/ops/_vmap/vmap_nn_ops.py +15 -21
  386. mindspore/ops/_vmap/vmap_random_ops.py +1 -1
  387. mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
  388. mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
  389. mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
  390. mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
  391. mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
  392. mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
  393. mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
  394. mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
  395. mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +220 -106
  396. mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
  397. mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
  398. mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
  399. mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
  400. mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
  401. mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
  402. mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
  403. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
  404. mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
  405. mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
  406. mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -23
  407. mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -17
  408. mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
  409. mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
  410. mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
  411. mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
  412. mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
  413. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  414. mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +39 -41
  415. mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
  416. mindspore/ops/bprop_mindir/Flatten_bprop.mindir +41 -43
  417. mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +51 -57
  418. mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
  419. mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
  420. mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
  421. mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
  422. mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
  423. mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
  424. mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
  425. mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
  426. mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
  427. mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
  428. mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
  429. mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
  430. mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
  431. mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
  432. mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
  433. mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
  434. mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
  435. mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
  436. mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
  437. mindspore/ops/bprop_mindir/OneHot_bprop.mindir +24 -25
  438. mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
  439. mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
  440. mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
  441. mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
  442. mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
  443. mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
  444. mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
  445. mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +18 -19
  446. mindspore/ops/bprop_mindir/Reshape_bprop.mindir +53 -53
  447. mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
  448. mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +77 -85
  449. mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
  450. mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
  451. mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
  452. mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
  453. mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
  454. mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  455. mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
  456. mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
  457. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  458. mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +37 -39
  459. mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +70 -72
  460. mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
  461. mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
  462. mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
  463. mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
  464. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +17 -17
  465. mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
  466. mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
  467. mindspore/ops/bprop_mindir/generate_mindir.py +2 -0
  468. mindspore/ops/composite/__init__.py +7 -8
  469. mindspore/ops/composite/base.py +101 -47
  470. mindspore/ops/composite/math_ops.py +188 -158
  471. mindspore/ops/composite/multitype_ops/_compile_utils.py +415 -170
  472. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +142 -87
  473. mindspore/ops/composite/multitype_ops/add_impl.py +6 -1
  474. mindspore/ops/composite/multitype_ops/div_impl.py +2 -3
  475. mindspore/ops/composite/multitype_ops/getitem_impl.py +31 -3
  476. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
  477. mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
  478. mindspore/ops/composite/multitype_ops/in_impl.py +9 -0
  479. mindspore/ops/composite/multitype_ops/less_equal_impl.py +31 -0
  480. mindspore/ops/composite/multitype_ops/less_impl.py +31 -0
  481. mindspore/ops/composite/multitype_ops/mul_impl.py +21 -5
  482. mindspore/ops/composite/multitype_ops/not_in_impl.py +9 -0
  483. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
  484. mindspore/ops/composite/multitype_ops/setitem_impl.py +21 -3
  485. mindspore/ops/composite/multitype_ops/sub_impl.py +1 -1
  486. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +35 -4
  487. mindspore/ops/function/__init__.py +152 -8
  488. mindspore/ops/function/array_func.py +2555 -674
  489. mindspore/ops/function/clip_func.py +209 -13
  490. mindspore/ops/function/debug_func.py +2 -2
  491. mindspore/ops/function/grad/__init__.py +2 -1
  492. mindspore/ops/function/grad/grad_func.py +147 -62
  493. mindspore/ops/function/image_func.py +54 -38
  494. mindspore/ops/function/linalg_func.py +167 -16
  495. mindspore/ops/function/math_func.py +4849 -1492
  496. mindspore/ops/function/nn_func.py +2573 -988
  497. mindspore/ops/function/other_func.py +115 -0
  498. mindspore/ops/function/parameter_func.py +3 -3
  499. mindspore/ops/function/random_func.py +790 -73
  500. mindspore/ops/function/sparse_func.py +98 -78
  501. mindspore/ops/function/sparse_unary_func.py +54 -53
  502. mindspore/ops/function/spectral_func.py +27 -24
  503. mindspore/ops/function/vmap_func.py +22 -2
  504. mindspore/ops/functional.py +97 -37
  505. mindspore/ops/op_info_register.py +70 -28
  506. mindspore/ops/operations/__init__.py +47 -14
  507. mindspore/ops/operations/_csr_ops.py +7 -7
  508. mindspore/ops/operations/_embedding_cache_ops.py +5 -5
  509. mindspore/ops/operations/_grad_ops.py +276 -187
  510. mindspore/ops/operations/_inner_ops.py +319 -113
  511. mindspore/ops/operations/_ms_kernel.py +10 -8
  512. mindspore/ops/operations/_ocr_ops.py +9 -9
  513. mindspore/ops/operations/_opaque_predicate_registry.py +4 -0
  514. mindspore/ops/operations/_quant_ops.py +137 -102
  515. mindspore/ops/operations/_rl_inner_ops.py +121 -60
  516. mindspore/ops/operations/_scalar_ops.py +466 -0
  517. mindspore/ops/operations/_sequence_ops.py +1004 -2
  518. mindspore/ops/operations/_tensor_array.py +10 -11
  519. mindspore/ops/operations/_thor_ops.py +1 -1
  520. mindspore/ops/operations/array_ops.py +801 -466
  521. mindspore/ops/operations/comm_ops.py +51 -49
  522. mindspore/ops/operations/control_ops.py +2 -2
  523. mindspore/ops/operations/custom_ops.py +123 -44
  524. mindspore/ops/operations/debug_ops.py +24 -24
  525. mindspore/ops/operations/image_ops.py +240 -153
  526. mindspore/ops/operations/inner_ops.py +34 -50
  527. mindspore/ops/operations/linalg_ops.py +31 -9
  528. mindspore/ops/operations/math_ops.py +988 -757
  529. mindspore/ops/operations/nn_ops.py +965 -819
  530. mindspore/ops/operations/other_ops.py +51 -40
  531. mindspore/ops/operations/random_ops.py +204 -122
  532. mindspore/ops/operations/rl_ops.py +8 -9
  533. mindspore/ops/operations/sparse_ops.py +254 -93
  534. mindspore/ops/operations/spectral_ops.py +35 -3
  535. mindspore/ops/primitive.py +111 -9
  536. mindspore/parallel/_auto_parallel_context.py +189 -83
  537. mindspore/parallel/_offload_context.py +185 -0
  538. mindspore/parallel/_parallel_serialization.py +99 -7
  539. mindspore/parallel/_ps_context.py +9 -5
  540. mindspore/parallel/_recovery_context.py +1 -1
  541. mindspore/parallel/_tensor.py +7 -1
  542. mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
  543. mindspore/{nn/transformer → parallel/_transformer}/layers.py +6 -37
  544. mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
  545. mindspore/{nn/transformer → parallel/_transformer}/moe.py +20 -16
  546. mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
  547. mindspore/{nn/transformer → parallel/_transformer}/transformer.py +48 -111
  548. mindspore/parallel/_utils.py +1 -2
  549. mindspore/parallel/algo_parameter_config.py +1 -1
  550. mindspore/parallel/checkpoint_transform.py +37 -34
  551. mindspore/parallel/shard.py +17 -18
  552. mindspore/profiler/common/validator/validate_path.py +2 -2
  553. mindspore/profiler/envprofiling.py +69 -47
  554. mindspore/profiler/parser/ascend_timeline_generator.py +49 -42
  555. mindspore/profiler/parser/base_timeline_generator.py +49 -56
  556. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +98 -78
  557. mindspore/profiler/parser/hwts_log_parser.py +1 -1
  558. mindspore/profiler/parser/integrator.py +15 -14
  559. mindspore/profiler/parser/minddata_analyzer.py +2 -2
  560. mindspore/profiler/parser/msadvisor_analyzer.py +12 -25
  561. mindspore/profiler/parser/msadvisor_parser.py +2 -4
  562. mindspore/profiler/parser/optime_parser.py +17 -18
  563. mindspore/profiler/parser/profiler_info.py +2 -1
  564. mindspore/profiler/profiling.py +218 -186
  565. mindspore/rewrite/__init__.py +3 -1
  566. mindspore/rewrite/api/node.py +1 -114
  567. mindspore/rewrite/api/node_type.py +3 -0
  568. mindspore/rewrite/api/pattern_engine.py +31 -1
  569. mindspore/rewrite/api/scoped_value.py +4 -4
  570. mindspore/rewrite/api/symbol_tree.py +3 -78
  571. mindspore/rewrite/api/tree_node_helper.py +1 -1
  572. mindspore/rewrite/ast_creator_register.py +1 -0
  573. mindspore/rewrite/ast_helpers/__init__.py +2 -2
  574. mindspore/rewrite/ast_helpers/ast_creator.py +1 -2
  575. mindspore/rewrite/ast_helpers/ast_finder.py +65 -0
  576. mindspore/rewrite/ast_helpers/ast_modifier.py +11 -3
  577. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +18 -2
  578. mindspore/rewrite/namespace.py +0 -2
  579. mindspore/rewrite/node.py +157 -11
  580. mindspore/rewrite/parsers/assign_parser.py +231 -53
  581. mindspore/rewrite/parsers/class_def_parser.py +187 -109
  582. mindspore/rewrite/parsers/for_parser.py +24 -14
  583. mindspore/rewrite/parsers/function_def_parser.py +21 -4
  584. mindspore/rewrite/parsers/if_parser.py +6 -2
  585. mindspore/rewrite/sparsify/__init__.py +0 -0
  586. mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
  587. mindspore/rewrite/sparsify/sparsify.py +109 -0
  588. mindspore/rewrite/sparsify/utils.py +173 -0
  589. mindspore/rewrite/symbol_tree.py +256 -133
  590. mindspore/rewrite/symbol_tree_builder.py +38 -1
  591. mindspore/run_check/_check_version.py +69 -63
  592. mindspore/run_check/run_check.py +2 -1
  593. mindspore/scipy/linalg.py +10 -114
  594. mindspore/scipy/ops.py +2 -2
  595. mindspore/scipy/ops_wrapper.py +1 -1
  596. mindspore/scipy/optimize/_bfgs.py +1 -1
  597. mindspore/scipy/optimize/_lagrange.py +200 -0
  598. mindspore/scipy/optimize/line_search.py +3 -2
  599. mindspore/scipy/optimize/minimize.py +41 -2
  600. mindspore/scipy/sparse/__init__.py +2 -2
  601. mindspore/scipy/sparse/linalg.py +4 -464
  602. mindspore/scipy/utils.py +1 -1
  603. mindspore/scipy/utils_const.py +7 -1
  604. mindspore/train/__init__.py +1 -1
  605. mindspore/train/_utils.py +28 -5
  606. mindspore/train/amp.py +273 -102
  607. mindspore/train/callback/_backup_and_restore.py +5 -5
  608. mindspore/train/callback/_callback.py +2 -2
  609. mindspore/train/callback/_checkpoint.py +3 -3
  610. mindspore/train/callback/_early_stop.py +3 -3
  611. mindspore/train/callback/_lambda_callback.py +2 -2
  612. mindspore/train/callback/_landscape.py +29 -31
  613. mindspore/train/callback/_loss_monitor.py +3 -3
  614. mindspore/train/callback/_on_request_exit.py +3 -3
  615. mindspore/train/callback/_reduce_lr_on_plateau.py +4 -4
  616. mindspore/train/callback/_summary_collector.py +23 -16
  617. mindspore/train/callback/_time_monitor.py +3 -3
  618. mindspore/train/checkpoint_pb2.py +68 -8
  619. mindspore/train/data_sink.py +15 -3
  620. mindspore/train/dataset_helper.py +10 -15
  621. mindspore/train/loss_scale_manager.py +8 -11
  622. mindspore/train/metrics/__init__.py +1 -1
  623. mindspore/train/metrics/bleu_score.py +1 -1
  624. mindspore/train/metrics/confusion_matrix.py +1 -1
  625. mindspore/train/metrics/cosine_similarity.py +1 -1
  626. mindspore/train/metrics/dice.py +2 -2
  627. mindspore/train/metrics/fbeta.py +1 -1
  628. mindspore/train/metrics/hausdorff_distance.py +4 -3
  629. mindspore/train/metrics/mean_surface_distance.py +2 -2
  630. mindspore/train/metrics/occlusion_sensitivity.py +1 -1
  631. mindspore/train/metrics/perplexity.py +1 -1
  632. mindspore/train/metrics/precision.py +1 -1
  633. mindspore/train/metrics/recall.py +1 -1
  634. mindspore/train/metrics/roc.py +2 -2
  635. mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
  636. mindspore/train/mind_ir_pb2.py +116 -37
  637. mindspore/train/model.py +45 -28
  638. mindspore/train/serialization.py +295 -188
  639. mindspore/train/summary/_summary_adapter.py +1 -1
  640. mindspore/train/summary/summary_record.py +43 -13
  641. mindspore/train/train_thor/convert_utils.py +2 -2
  642. mindspore/train/train_thor/dataset_helper.py +3 -3
  643. mindspore/version.py +1 -1
  644. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +3 -2
  645. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +648 -574
  646. mindspore/compression/__init__.py +0 -19
  647. mindspore/compression/common/constant.py +0 -124
  648. mindspore/compression/export/__init__.py +0 -19
  649. mindspore/compression/export/quant_export.py +0 -515
  650. mindspore/compression/quant/__init__.py +0 -28
  651. mindspore/compression/quant/qat.py +0 -634
  652. mindspore/compression/quant/quant_utils.py +0 -462
  653. mindspore/compression/quant/quantizer.py +0 -68
  654. mindspore/nn/layer/quant.py +0 -1868
  655. mindspore/nn/layer/rnn_utils.py +0 -90
  656. mindspore/nn/probability/dpn/__init__.py +0 -22
  657. mindspore/nn/probability/dpn/vae/__init__.py +0 -25
  658. mindspore/nn/probability/dpn/vae/cvae.py +0 -140
  659. mindspore/nn/probability/dpn/vae/vae.py +0 -124
  660. mindspore/nn/probability/infer/__init__.py +0 -22
  661. mindspore/nn/probability/infer/variational/elbo.py +0 -70
  662. mindspore/nn/probability/infer/variational/svi.py +0 -84
  663. mindspore/nn/probability/toolbox/__init__.py +0 -22
  664. mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
  665. mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -364
  666. mindspore/nn/probability/transforms/__init__.py +0 -22
  667. mindspore/nn/probability/transforms/transform_bnn.py +0 -262
  668. mindspore/nn/probability/zhusuan/__init__.py +0 -18
  669. mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
  670. mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
  671. mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
  672. mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
  673. mindspore/ops/_op_impl/aicpu/parallel_concat.py +0 -42
  674. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  675. mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -19
  676. mindspore/ops/bprop_mindir/Cast_bprop.mindir +0 -19
  677. mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -19
  678. mindspore/ops/bprop_mindir/MatMul_bprop.mindir +0 -0
  679. mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -17
  680. mindspore/ops/bprop_mindir/Transpose_bprop.mindir +0 -0
  681. mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -15
  682. mindspore/ops/composite/array_ops.py +0 -241
  683. mindspore/ops/composite/clip_ops.py +0 -134
  684. mindspore/ops/composite/random_ops.py +0 -426
  685. mindspore/ops/composite/vmap_ops.py +0 -38
  686. mindspore/parallel/nn/__init__.py +0 -42
  687. mindspore/parallel/nn/loss.py +0 -22
  688. mindspore/parallel/nn/moe.py +0 -21
  689. mindspore/parallel/nn/op_parallel_config.py +0 -22
  690. mindspore/parallel/nn/transformer.py +0 -31
  691. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
  692. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
  693. {mindspore-2.0.0a0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/ops/operations/comm_ops.py

@@ -1,4 +1,4 @@
- # Copyright 2020 Huawei Technologies Co., Ltd
+ # Copyright 2020-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -19,8 +19,7 @@ from __future__ import absolute_import
  from __future__ import division

  from mindspore.common import Tensor
- from mindspore._checkparam import Validator as validator
- from mindspore._checkparam import Rel
+ from mindspore import _checkparam as validator
  from mindspore.communication.management import get_rank, get_group_size, GlobalComm, _get_group, _host_distribute
  from mindspore.common import dtype as mstype
  from mindspore.ops.primitive import PrimitiveWithInfer, PrimitiveWithCheck, Primitive, prim_attr_register
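
This import hunk is part of a migration that recurs throughout the release: 2.0.0rc1 folds the old Validator class and the Rel comparator enum into the mindspore._checkparam module itself, so call sites replace Rel.LT, Rel.GE, Rel.INC_BOTH and friends with the module-level validator.LT, validator.GE, validator.INC_BOTH. A minimal before/after sketch; the concrete values are illustrative, only the import and comparator spelling come from this diff:

    # 2.0.0a0 spelling (removed above):
    #   from mindspore._checkparam import Validator as validator
    #   from mindspore._checkparam import Rel
    #   validator.check_int(3, 8, Rel.LT, "rank", "AllGather")

    # 2.0.0rc1 spelling: the module is the validator namespace and the
    # comparators are module-level constants.
    from mindspore import _checkparam as validator

    validator.check_int(3, 8, validator.LT, "rank", "AllGather")  # passes: 3 < 8
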
@@ -54,11 +53,11 @@ class ReduceOp:

      For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
      Please see the `Ascend tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_ascend.html#preparations>`_
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_ascend.html#preparations>`_
      for more details.

      For the GPU devices, users need to prepare the host file and mpi, please see the `GPU tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_gpu.html#preparation>`_ .
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_gpu.html#preparation>`_ .

      This example should be run with multiple devices.

@@ -114,11 +113,10 @@ class AllReduce(Primitive):
      Reduces the tensor data across all devices in such a way that all devices will get the same final result.

      Note:
-         The operation of AllReduce does not support "prod" currently.
          The tensors must have the same shape and format in all processes of the collection.

      Args:
-         op (str): Specifies an operation used for element-wise reductions, like sum, max, and min.
+         op (str): Specifies an operation used for element-wise reductions, like sum, prod, max, and min.
              On the CPU, only 'sum' is supported. Default: ReduceOp.SUM.
          group (str): The communication group to work on. Default: "GlobalComm.WORLD_COMM_GROUP".

@@ -132,7 +130,6 @@ class AllReduce(Primitive):
      Raises:
          TypeError: If any of `op` and `group` is not a str,
              or fusion is not an integer, or the input's dtype is bool.
-         ValueError: If the `op` is "prod".

      Supported Platforms:
          ``Ascend`` ``GPU`` ``CPU``
@@ -143,11 +140,11 @@ class AllReduce(Primitive):

      For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
      Please see the `Ascend tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_ascend.html#preparations>`_
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_ascend.html#preparations>`_
      for more details.

      For the GPU devices, users need to prepare the host file and mpi, please see the `GPU tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_gpu.html#preparation>`_ .
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_gpu.html#preparation>`_ .

      This example should be run with multiple devices.
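
The three AllReduce hunks above carry a functional change, not just doc churn: the "prod" reduction is no longer rejected, so ReduceOp.PROD becomes a legal `op` argument (CPU still accepts only 'sum'). A minimal sketch of the newly allowed usage, assuming a multi-device job has been launched (e.g. via mpirun or a rank table) so that init() succeeds:

    import numpy as np
    import mindspore as ms
    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore.communication import init
    from mindspore.ops import ReduceOp

    init()  # requires a correctly launched multi-device job

    class ProdAllReduce(nn.Cell):
        def __init__(self):
            super().__init__()
            # rejected with ValueError in 2.0.0a0, accepted in 2.0.0rc1
            self.all_reduce = ops.AllReduce(ReduceOp.PROD)

        def construct(self, x):
            return self.all_reduce(x)

    x = ms.Tensor(np.full((2, 2), 2.0), ms.float32)
    y = ProdAllReduce()(x)  # across N devices every element becomes 2.0 ** N
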
@@ -196,7 +193,8 @@ class AllGather(PrimitiveWithInfer):
      Gathers tensors from the specified communication group.

      Note:
-         The tensors must have the same shape and format in all processes of the collection.
+         - The tensors must have the same shape and format in all processes of the collection.
+         - Currently only supports GRAPH_MODE and it should be called in Cell.

      Args:
          group (str): The communication group to work on. Default: "GlobalComm.WORLD_COMM_GROUP".
@@ -222,11 +220,11 @@ class AllGather(PrimitiveWithInfer):

      For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
      Please see the `Ascend tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_ascend.html#preparations>`_
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_ascend.html#preparations>`_
      for more details.

      For the GPU devices, users need to prepare the host file and mpi, please see the `GPU tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_gpu.html#preparation>`_ .
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_gpu.html#preparation>`_ .

      This example should be run with 2 devices.

@@ -263,7 +261,7 @@ class AllGather(PrimitiveWithInfer):
          validator.check_value_type('group', _get_group(group), (str,), self.name)
          self.rank = get_rank(_get_group(group))
          self.rank_size = get_group_size(_get_group(group))
-         validator.check('rank', self.rank, 'rank_size', self.rank_size, Rel.LT, self.name)
+         validator.check('rank', self.rank, 'rank_size', self.rank_size, validator.LT, self.name)
          self.add_prim_attr('rank_size', self.rank_size)
          self.add_prim_attr('group', _get_group(group))
          self.add_prim_attr('fusion', 0)
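
The new Note pins down how AllGather is meant to be used: in GRAPH_MODE, from inside a Cell. A minimal sketch under those constraints (2-device launch assumed; shapes are illustrative):

    import numpy as np
    import mindspore as ms
    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore.communication import init

    ms.set_context(mode=ms.GRAPH_MODE)  # per the new note
    init()

    class Gather(nn.Cell):
        def __init__(self):
            super().__init__()
            self.all_gather = ops.AllGather()

        def construct(self, x):
            return self.all_gather(x)

    x = ms.Tensor(np.ones((2, 4), dtype=np.float32))
    y = Gather()(x)  # gathered along dim 0: with N devices, y.shape == (2 * N, 4)
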
@@ -300,13 +298,14 @@ class _MiniStepAllGather(PrimitiveWithInfer):
          validator.check_value_type('group', _get_group(group), (str,), self.name)
          self.rank = get_rank(_get_group(group))
          self.rank_size = get_group_size(_get_group(group))
-         validator.check('rank', self.rank, 'rank_size', self.rank_size, Rel.LT, self.name)
+         validator.check('rank', self.rank, 'rank_size', self.rank_size, validator.LT, self.name)
          self.add_prim_attr('rank_size', self.rank_size)
          self.add_prim_attr('group', _get_group(group))
          self.add_prim_attr('fusion', 1)
          self.grad_accumulation_step = grad_accumulation_step
          self.mean_flag = mean_flag
          self.add_prim_attr('order_enforce_skip', True)
+         self.add_prim_attr('side_effect_backprop_mem', True)

      def infer_shape(self, x_shape, z_shape):
          validator.check_positive_int(len(x_shape), "x shape", self.name)
@@ -331,15 +330,17 @@ class _MicroStepAllGather(PrimitiveWithInfer):
      @prim_attr_register
      def __init__(self, group=GlobalComm.WORLD_COMM_GROUP, mean_flag=None):
          validator.check_value_type('group', _get_group(group), (str,), self.name)
-         self.rank = get_rank(_get_group(group))
-         self.rank_size = get_group_size(_get_group(group))
-         validator.check('rank', self.rank, 'rank_size', self.rank_size, Rel.LT, self.name)
-         self.add_prim_attr('rank_size', self.rank_size)
-         self.add_prim_attr('group', _get_group(group))
-         self.add_prim_attr('fusion', 1)
-         self.add_prim_attr('do_mirror', False)
-         self.mean_flag = mean_flag
-         self.add_prim_attr('order_enforce_skip', True)
+         self.rank_size = 1
+         if group != "":
+             self.rank = get_rank(_get_group(group))
+             self.rank_size = get_group_size(_get_group(group))
+             validator.check('rank', self.rank, 'rank_size', self.rank_size, validator.LT, self.name)
+             self.add_prim_attr('rank_size', self.rank_size)
+             self.add_prim_attr('group', _get_group(group))
+             self.add_prim_attr('fusion', 1)
+             self.add_prim_attr('do_mirror', False)
+             self.mean_flag = mean_flag
+             self.add_prim_attr('order_enforce_skip', True)

      def infer_shape(self, x_shape, z_shape):
          validator.check_positive_int(len(x_shape), "x shape", self.name)
@@ -383,9 +384,9 @@ class _HostAllGather(PrimitiveWithInfer):
          if group is None:
              raise ValueError(f"For '{self.name}', the 'group' cannot be None, but got {group}.")
          validator.check_value_type('group', group, (tuple, list), self.name)
-         validator.check_int(len(group), 2, Rel.GE, "group size", self.name)
+         validator.check_int(len(group), 2, validator.GE, "group size", self.name)
          for r in group:
-             validator.check_int_range(r, 0, 7, Rel.INC_BOTH, "rank_id", self.name)
+             validator.check_int_range(r, 0, 7, validator.INC_BOTH, "rank_id", self.name)
              validator.check_value_type("rank_id", r, (int,), self.name)
          self.group_size = len(group)
          self.add_prim_attr('group', group)
@@ -410,7 +411,7 @@ class ReduceScatter(Primitive):
      r"""
      Reduces and scatters tensors from the specified communication group.
      For more details about it, please refer to `Distributed Set Communication Primitives - ReduceScatter \
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/communicate_ops.html#reducescatter>`_ .
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/communicate_ops.html#reducescatter>`_ .

      Note:
          The tensors must have the same shape and format in all processes of the collection.
@@ -441,11 +442,11 @@ class ReduceScatter(Primitive):

      For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
      Please see the `Ascend tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_ascend.html#preparations>`_
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_ascend.html#preparations>`_
      for more details.

      For the GPU devices, users need to prepare the host file and mpi, please see the `GPU tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_gpu.html#preparation>`_ .
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_gpu.html#preparation>`_ .

      This example should be run with 2 devices.

@@ -522,9 +523,9 @@ class _HostReduceScatter(PrimitiveWithInfer):
              raise ValueError(f"For '{self.name}', the 'group' cannot be None, but got {group}.")
          validator.check_value_type('op', op, (type(ReduceOp.SUM),), self.name)
          validator.check_value_type('group', group, (tuple, list), self.name)
-         validator.check_int(len(group), 2, Rel.GE, "group size", self.name)
+         validator.check_int(len(group), 2, validator.GE, "group size", self.name)
          for r in group:
-             validator.check_int_range(r, 0, 7, Rel.INC_BOTH, "rank_id", self.name)
+             validator.check_int_range(r, 0, 7, validator.INC_BOTH, "rank_id", self.name)
              validator.check_value_type("rank_id", r, (int,), self.name)
          self.op = op
          self.group_size = len(group)
@@ -563,7 +564,7 @@ class Broadcast(PrimitiveWithInfer):
          - **input_x** (tuple[Tensor]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

      Outputs:
-         Tensor, has the same shape of the input, i.e., :math:`(x_1, x_2, ..., x_R)`.
+         tuple[Tensor], Tensor has the same shape of the input, i.e., :math:`(x_1, x_2, ..., x_R)`.
          The contents depend on the data of the `root_rank` device.

      Raises:
@@ -578,11 +579,11 @@ class Broadcast(PrimitiveWithInfer):

      For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
      Please see the `Ascend tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_ascend.html#preparations>`_
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_ascend.html#preparations>`_
      for more details.

      For the GPU devices, users need to prepare the host file and mpi, please see the `GPU tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_gpu.html#preparation>`_ .
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_gpu.html#preparation>`_ .

      This example should be run with multiple devices.

@@ -628,7 +629,7 @@ class Broadcast(PrimitiveWithInfer):
          if not isinstance(x_dtype, tuple):
              raise TypeError(f"For '{self.name}', the 'input_x' must be a tuple, but got {type(x_dtype).__name__}!")
          for _ele in x_dtype:
-             check_collective_target_dtype('x', _ele, self.name)
+             check_collective_target_dtype('input_x', _ele, self.name)
          return x_dtype
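
The corrected Outputs entry matters to callers: Broadcast consumes and returns a tuple of tensors, not a bare Tensor. A minimal sketch of the tuple-in/tuple-out contract (multi-device launch assumed):

    import numpy as np
    import mindspore as ms
    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore.communication import init

    init()

    class Bcast(nn.Cell):
        def __init__(self):
            super().__init__()
            self.broadcast = ops.Broadcast(root_rank=0)

        def construct(self, x):
            return self.broadcast((x,))  # input is a tuple[Tensor] ...

    out, = Bcast()(ms.Tensor(np.arange(4, dtype=np.float32)))  # ... and so is the output
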
@@ -698,11 +699,11 @@ class NeighborExchange(Primitive):
      The user needs to preset
      communication environment variables before running the following example, please check the details on the
      official website of `MindSpore \
-     <https://www.mindspore.cn/docs/en/r2.0.0-alpha/api_python/mindspore.ops.html#communication-operator>`_.
+     <https://www.mindspore.cn/docs/en/r2.0/api_python/mindspore.ops.html#communication-operator>`_.

      This operator requires a full-mesh network topology, each device has the same vlan id, and the ip & mask are
      in the same subnet, please check the `details \
-     <https://www.mindspore.cn/tutorials/experts/zh-CN/r2.0.0-alpha/parallel/communicate_ops.html#注意事项>`_.
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/communicate_ops.html#notes>`_.

      Args:
          send_rank_ids (list(int)): Ranks which the data is sent to.
@@ -778,7 +779,7 @@ class AlltoAll(PrimitiveWithInfer):
      Note:
          This operator requires a full-mesh network topology, each device has the same vlan id, and the ip & mask are
          in the same subnet, please check the `details \
-         <https://www.mindspore.cn/tutorials/experts/zh-CN/r2.0.0-alpha/parallel/communicate_ops.html#注意事项>`_.
+         <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/communicate_ops.html#notes>`_.

      Args:
          split_count (int): On each process, divide blocks into split_count number.
@@ -793,11 +794,9 @@ class AlltoAll(PrimitiveWithInfer):
          Tensor. If the shape of input tensor is :math:`(x_1, x_2, ..., x_R)`, then the shape of output tensor is
          :math:`(y_1, y_2, ..., y_R)`, where:

-         :math:`y_{split\_dim} = x_{split\_dim} / split\_count`
-
-         :math:`y_{concat\_dim} = x_{concat\_dim} * split\_count`
-
-         :math:`y_{other} = x_{other}`.
+         - :math:`y_{split\_dim} = x_{split\_dim} / split\_count`
+         - :math:`y_{concat\_dim} = x_{concat\_dim} * split\_count`
+         - :math:`y_{other} = x_{other}`.

      Raises:
          TypeError: If group is not a string.
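
The shape rules above read most easily through a concrete case (numbers illustrative): with split_count=8, split_dim=2, concat_dim=0 and an input of shape (8, 32, 64), the split dimension shrinks to 64 / 8 = 8, the concat dimension grows to 8 * 8 = 64, and the untouched dimension stays 32, so the output shape is (64, 32, 8). A sketch of the corresponding op construction (8-device launch assumed):

    import numpy as np
    import mindspore as ms
    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore.communication import init

    init()

    class Swap(nn.Cell):
        def __init__(self):
            super().__init__()
            self.alltoall = ops.AlltoAll(split_count=8, split_dim=2, concat_dim=0)

        def construct(self, x):
            return self.alltoall(x)

    x = ms.Tensor(np.ones((8, 32, 64), dtype=np.float32))
    y = Swap()(x)  # y.shape == (64, 32, 8) by the three rules above
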
@@ -811,11 +810,11 @@ class AlltoAll(PrimitiveWithInfer):
811
810
 
812
811
  For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
813
812
  Please see the `Ascend tutorial
814
- <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_ascend.html#preparations>`_
813
+ <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_ascend.html#preparations>`_
815
814
  for more details.
816
815
 
817
816
  For the GPU devices, users need to prepare the host file and mpi, please see the `GPU tutorial
818
- <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_gpu.html#preparation>`_ .
817
+ <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_gpu.html#preparation>`_ .
819
818
 
820
819
  This example should be run with 8 devices.
821
820
 
@@ -885,13 +884,13 @@ class NeighborExchangeV2(Primitive):
885
884
  NeighborExchangeV2 sends data from the local rank to ranks in the `send_rank_ids`,
886
885
  as while receive data from `recv_rank_ids`. Please refer to
887
886
  `Distributed Set Communication Primitives - NeighborExchangeV2 \
888
- <https://www.mindspore.cn/tutorials/experts/zh-CN/r2.0.0-alpha/parallel/communicate_ops.html#neighborexchangev2>`_
887
+ <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/communicate_ops.html#neighborexchangev2>`_
889
888
  to learn about how the data is exchanged between neighborhood devices.
890
889
 
891
890
  Note:
892
891
  This operator requires a full-mesh network topology, each device has the same vlan id, and the ip & mask are
893
892
  in the same subnet, please check the `details \
894
- <https://www.mindspore.cn/tutorials/experts/zh-CN/r2.0.0-alpha/parallel/communicate_ops.html#注意事项>`_.
893
+ <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/communicate_ops.html#notes>`_.
895
894
 
896
895
  Args:
897
896
  send_rank_ids (list(int)): Ranks which the data is sent to. 8 rank_ids represents 8 directions, if one
@@ -929,11 +928,11 @@ class NeighborExchangeV2(Primitive):

  For the Ascend devices, users need to prepare the rank table and set rank_id and device_id.
  Please see the `Ascend tutorial
- <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_ascend.html#preparations>`_
+ <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_ascend.html#preparations>`_
  for more details.

  For the GPU devices, users need to prepare the host file and mpi; please see the `GPU tutorial
- <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/parallel/train_gpu.html#preparation>`_ .
+ <https://www.mindspore.cn/tutorials/experts/en/r2.0/parallel/train_gpu.html#preparation>`_ .

  This example should be run with 2 devices.

@@ -1032,6 +1031,7 @@ class _MirrorMiniStepOperator(PrimitiveWithInfer):
          self.mean_flag = mean_flag
          self.grad_accumulation_step = grad_accumulation_step
          self.add_prim_attr('order_enforce_skip', True)
+         self.add_prim_attr('side_effect_backprop_mem', True)

      def infer_shape(self, x_shape, z_shape):
          return x_shape
@@ -1137,6 +1137,7 @@ class _VirtualAssignAdd(PrimitiveWithInfer):
      def __init__(self):
          """Initialize _VirtualAssignAdd."""
          self.add_prim_attr('order_enforce_skip', True)
+         self.add_prim_attr('side_effect_backprop_mem', True)

      def infer_shape(self, x_shape, y_shape):
          return x_shape
@@ -1187,6 +1188,7 @@ class _MirrorMicroStepOperator(PrimitiveWithInfer):
          self.dev_num = dev_num
          self.mean_flag = mean_flag
          self.add_prim_attr('order_enforce_skip', True)
+         self.add_prim_attr('side_effect_backprop_mem', True)

      def infer_shape(self, x_shape, z_shape):
          return x_shape
@@ -17,7 +17,7 @@

  from __future__ import absolute_import
  from mindspore.ops.primitive import PrimitiveWithInfer, prim_attr_register
- from mindspore._checkparam import Validator as validator, Rel
+ from mindspore import _checkparam as validator
  from mindspore.common import dtype as mstype


@@ -76,7 +76,7 @@ class GeSwitch(PrimitiveWithInfer):
          raise NotImplementedError

      def infer_shape(self, data, pred):
-         validator.check_int_range(len(pred), 0, 1, Rel.INC_BOTH, "pred rank", self.name)
+         validator.check_int_range(len(pred), 0, 1, validator.INC_BOTH, "pred rank", self.name)
          return data, data

      def infer_dtype(self, data_type, pred_type):
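The same rename applies wherever the old `Validator`/`Rel` pair was used; a minimal sketch of the new module-level API, mirroring the call changed above:

```python
from mindspore import _checkparam as validator

# INC_BOTH is now a module-level constant of _checkparam rather than a
# member of the removed Rel enum; check_int_range is a module function.
# This call passes because 1 lies within [0, 1] inclusive.
validator.check_int_range(1, 0, 1, validator.INC_BOTH, "pred rank", "GeSwitch")
```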
@@ -26,6 +26,7 @@ import importlib
  import platform
  import subprocess
  import numpy as np
+ import mindspore as ms
  from mindspore._c_expression import Oplib, typing
  from mindspore import context
  from mindspore.common import Tensor
@@ -61,6 +62,27 @@ def _get_cache_path():
      return cache_path


+ def _get_cuda_bare_metal_version():
+     """
+     Automatically get the cuda version.
+
+     Returns:
+         tuple(int), the CUDA version of the platform.
+     """
+     raw_output = subprocess.check_output(["nvcc", "-V"],
+                                          universal_newlines=True)
+     output = raw_output.split()
+     release_idx = output.index("release") + 1
+     release = output[release_idx].split(".")
+     version_major = release[0]
+     version_idx = release_idx + 1
+     version = output[version_idx].split(".")
+     version_middle = version[1] if len(version) > 1 else 0
+     version_minor = version[2] if len(version) > 2 else 0
+
+     return int(version_major), int(version_middle), int(version_minor)
+
+
  def _compile_aot(file):
      """
      Automatically compile the source file for custom aot
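As a quick sanity check of the parsing logic, the helper can be exercised against a canned banner (the text below is hypothetical and only mimics the usual `nvcc -V` format; it assumes `_get_cuda_bare_metal_version` from the hunk above is in scope):

```python
import subprocess
from unittest import mock

# A typical-looking `nvcc -V` banner. The helper splits it on whitespace,
# takes the token after "release" for the major version, and the following
# "Vx.y.z" token for the middle/minor components.
banner = ("nvcc: NVIDIA (R) Cuda compiler driver\n"
          "Copyright (c) 2005-2019 NVIDIA Corporation\n"
          "Cuda compilation tools, release 10.1, V10.1.243\n")

with mock.patch.object(subprocess, "check_output", return_value=banner):
    print(_get_cuda_bare_metal_version())  # (10, 1, 243)
```

With this result, the `elif` branch added in `_compile_aot` below stays silent, since V10.1.243 is newer than the V10.1.168 threshold.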
@@ -105,18 +127,13 @@ def _compile_aot(file):
          cmd += ["--use_fast_math", "--expt-relaxed-constexpr"]
          cmd += ["-D_GLIBCXX_USE_CXX11_ABI=0"]

-         def _get_cuda_bare_metal_version():
-             raw_output = subprocess.check_output(["nvcc", "-V"],
-                                                  universal_newlines=True)
-             output = raw_output.split()
-             release_idx = output.index("release") + 1
-             release = output[release_idx].split(".")
-             version_major = release[0]
-
-             return int(version_major)
-
-         if _get_cuda_bare_metal_version() >= 11:
+         v_major, v_mid, v_minor = _get_cuda_bare_metal_version()
+         if v_major >= 11:
              cmd += ["-gencode", "arch=compute_80,code=sm_80", "--expt-extended-lambda"]
+         elif v_major == 10 and not (v_mid >= 1 and v_minor >= 168):
+             logger.warning("The current version of nvcc, V{}.{}.{}, might have unfixed issues with std string, "
+                            "which will lead to errors in aot custom ops with attrs. "
+                            "A version higher than V10.1.168 is recommended.".format(v_major, v_mid, v_minor))
          cmd += [include_file, "-o", func_path, file]
      else:
          raise ValueError("The source file must be a cc/cpp/cu file, but got: {}".format(file))
@@ -142,10 +159,10 @@ class Custom(ops.PrimitiveWithInfer):
      function if needed. Then these `Custom` objects can be directly used in neural networks.
      For a detailed description of user-defined operators, including how to write the parameters correctly,
      please refer to the `Custom Operators Tutorial
-     <https://www.mindspore.cn/tutorials/experts/en/r2.0.0-alpha/operation/op_custom.html>`_ .
+     <https://www.mindspore.cn/tutorials/experts/en/r2.0/operation/op_custom.html>`_ .

      .. warning::
-         This is an experimental prototype that is subject to change.
+         This is an experimental API that is subject to change.

      .. note::
          The supported platforms are determined by the input `func_type`. The supported platforms are as follows:
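For orientation, a minimal sketch of a `pyfunc`-type `Custom` op (the function and shapes are hypothetical, and this assumes a CPU build where the pyfunc backend is available):

```python
import numpy as np
import mindspore as ms
from mindspore import ops, Tensor

ms.set_context(device_target="CPU")

# The Python function computes on host data; out_shape and out_dtype are
# given as functions of the input shape/dtype, so the output mirrors the input.
def add_one(x):
    return x + 1.0

add_one_op = ops.Custom(add_one, lambda x: x, lambda x: x, func_type="pyfunc")
print(add_one_op(Tensor(np.ones((2, 3), np.float32))))
```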
@@ -437,6 +454,9 @@ class Custom(ops.PrimitiveWithInfer):
      registered_func = {}
      attr_dict = {}  # Save input_names and attr_names for func.
      compiled_bin = []  # Save names for compiled bin.
+     tbe_path_checked = []  # Save paths of tbe functions which are safe to import as modules.
+     tbe_path_failed = []  # Save paths of tbe functions which failed to import as modules.
+     op_path_in_cache = []  # Save paths of op functions created in the cache.

      def __init__(self, func, out_shape=None, out_dtype=None, func_type="hybrid", bprop=None, reg_info=None):
          ops.PrimitiveWithInfer.__init__(self, "Custom")
@@ -469,15 +489,7 @@ class Custom(ops.PrimitiveWithInfer):
          self.out_shape = out_shape
          self.out_dtype = out_dtype
          self.bprop = bprop
-         self.fake_output = False
-         self.single_scalar_output = False
-         if not self.out_dtype:
-             self.fake_output = True
-         elif not self.out_shape:
-             self.single_scalar_output = True
-         self.add_prim_attr("fake_output", self.fake_output)
-         self.add_prim_attr("single_scalar_output", self.single_scalar_output)
-
+         self._update_op_attr()
          # Register info
          self._register_info(reg_info)

@@ -498,6 +510,7 @@ class Custom(ops.PrimitiveWithInfer):
          self._update_attr()

      def __infer__(self, *args):
+         """Infer function of the custom op."""
          if callable(self.out_shape):
              infer_shape = self.out_shape(*(x["shape"] for x in args))
          else:
@@ -534,7 +547,12 @@ class Custom(ops.PrimitiveWithInfer):
              logger.warning("{}, 'out_dtype' is an empty tuple. Add a placeholder instead. "
                             "Not recommended to use it as it could be any uninitialized data.".format(self.log_prefix))
              infer_dtype = mstype.int32
-
+         if self.func_type == "aot":
+             if infer_shape is None:
+                 logger.warning("{}, 'out_shape' is None. Add a placeholder instead. "
+                                "A CPP version of the infer shape function is required "
+                                "in this case.".format(self.log_prefix))
+                 infer_shape = (1,)
          # after all automatic infer information fulfillment, throw an error if infer_shape/infer_dtype is still None
          if not isinstance(infer_shape, (tuple, list)):
              raise TypeError("{}, 'out_shape' must be one of [tuple, list, function], but got {}"
@@ -552,8 +570,22 @@ class Custom(ops.PrimitiveWithInfer):
          return out

      def get_bprop(self):
+         """Get the bprop of the custom op."""
          return self.bprop

+     def _update_op_attr(self):
+         """Update the attrs of the custom op."""
+         if self.out_shape is None and self.func_type == "aot":
+             self.add_prim_attr("cpp_infer_shape", True)
+         self.fake_output = False
+         self.single_scalar_output = False
+         if not self.out_dtype:
+             self.fake_output = True
+         elif not self.out_shape:
+             self.single_scalar_output = True
+         self.add_prim_attr("fake_output", self.fake_output)
+         self.add_prim_attr("single_scalar_output", self.single_scalar_output)
+
      def _check_julia_func(self):
          """Check the validity of julia func"""
          if not isinstance(self.func, str):
@@ -612,6 +644,63 @@ class Custom(ops.PrimitiveWithInfer):
          raise TypeError("{}, 'func' must be of type function, but got {}"
                          .format(self.log_prefix, type(self.func)))

+     def _update_func_imply_path(self):
+         """Update op_imply_path of func."""
+         file_path = os.path.realpath(inspect.getfile(self.func))
+
+         if not self.func_type == "tbe":
+             # Custom ops of types other than tbe don't need to import from the path;
+             # use the file path directly.
+             return file_path
+         # For a custom op of type tbe, the kernel compiler will import the module from the file path,
+         # so we try the import during initialization.
+         if file_path in Custom.tbe_path_checked:
+             logger.info("The file of {} has already been checked as safe to import.".format(self.func_name))
+             return file_path
+
+         if file_path not in Custom.tbe_path_failed:
+             # As a single file might include multiple functions,
+             # we do not retry a file path which already failed in previous trials.
+             mod_spec = importlib.util.spec_from_file_location(
+                 self.func_name, file_path)
+             custom_mod = importlib.util.module_from_spec(mod_spec)
+             try:
+                 mod_spec.loader.exec_module(custom_mod)
+             except (ImportError, RecursionError):
+                 Custom.tbe_path_failed.append(file_path)
+             else:
+                 Custom.tbe_path_checked.append(file_path)
+                 return file_path
+
+         # Create a new file for each tbe function
+         op_imply_path = os.path.realpath(_get_cache_path() + self.func_name + ".py")
+         if op_imply_path in Custom.op_path_in_cache:
+             logger.info("The new file of {} has already been created.".format(self.func_name))
+             return op_imply_path
+
+         logger.warning("Failed to import the original source file. Creating a new source file for {}. "
+                        "The new file will not include the dependencies of the op function. "
+                        "Check the definition of the function {} "
+                        "in the file: {}".format(self.func_name, self.func_name, op_imply_path))
+
+         Custom.op_path_in_cache.append(op_imply_path)
+
+         if os.path.exists(op_imply_path):
+             try:
+                 os.remove(op_imply_path)
+             except FileNotFoundError:
+                 logger.warning("Failed to remove the existing file. Check the definition of the function {} "
+                                "in the file: {}".format(self.func_name, op_imply_path))
+
+         with open(op_imply_path, 'at') as file:
+             if platform.system() != "Windows":
+                 fcntl.flock(file.fileno(), fcntl.LOCK_EX)
+             file.seek(0, 2)
+             if file.tell() == 0:
+                 file.write(self.func_source_str)
+             os.chmod(op_imply_path, stat.S_IRUSR | stat.S_IWUSR)
+         return op_imply_path
+
      def _update_func_info(self, reg_info):
          """Update information of func"""
          if callable(self.func):
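The import probe added above follows the standard importlib pattern; a standalone sketch of the same idea (the function name below is illustrative, not part of the diff):

```python
import importlib.util

def can_exec_as_module(path, name="probe"):
    """Return True if the file at `path` executes cleanly as a module."""
    # Build a module spec from the file path and attempt to execute it,
    # mirroring the try/except in _update_func_imply_path above.
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    try:
        spec.loader.exec_module(module)
    except (ImportError, RecursionError):
        return False
    return True
```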
@@ -627,19 +716,8 @@ class Custom(ops.PrimitiveWithInfer):
          if index != -1:
              self.func_source_str = self.func_source_str[index:]

-         op_imply_path = os.path.realpath(_get_cache_path() + self.func_name + ".py")
-         if os.path.exists(op_imply_path):
-             os.remove(op_imply_path)
-         with open(op_imply_path, 'at') as file:
-             if platform.system() != "Windows":
-                 fcntl.flock(file.fileno(), fcntl.LOCK_EX)
-             file.seek(0, 2)
-             if file.tell() == 0:
-                 file.write(self.func_source_str)
-             os.chmod(op_imply_path, stat.S_IRUSR | stat.S_IWUSR)
-
-         # path of func
-         self.imply_path = op_imply_path
+         # update the path of func for the TBE type of custom op
+         self.imply_path = self._update_func_imply_path()
          if self._is_ms_kernel:
              # static check for the Hybrid DSL in hybrid
              root = ast.parse(self.func_source_str)
@@ -670,7 +748,7 @@ class Custom(ops.PrimitiveWithInfer):
              continue
          if isinstance(reg_info_item, str):
              reg_info_item = json.loads(reg_info_item)
-         prefix = prefix + "_" + reg_info_item.get("op_name", "")
+         prefix = "_".join([prefix, reg_info_item.get("op_name", "")])
          self.uniq_name = prefix + "_" + self.func_name
      else:
          raise TypeError("For '{}', 'func' must be of type function or str, but got {}"
@@ -798,16 +876,15 @@ class Custom(ops.PrimitiveWithInfer):
              if isinstance(item, dict) and item.get("value") is None:
                  reg_info["attr"][i]["value"] = "all"
          reg_info["async_flag"] = reg_info.get("async_flag", False)
-         reg_info["binfile_name"] = "%s.so" % self.func_name
+         reg_info["binfile"] = "%s.so" % self.func_name
          reg_info["compute_cost"] = reg_info.get("compute_cost", 10)
-         reg_info["kernel_name"] = self.func_name
+         reg_info["kernel"] = self.func_name
          reg_info["partial_flag"] = reg_info.get("partial_flag", True)
-         reg_info["need_check_supported"] = reg_info.get("need_check_supported", False)
+         reg_info["needCheckSupport"] = reg_info.get("need_check_supported", False)
          # Supplement necessary info for AKG if this information is missing in reg_info
          if reg_info["imply_type"] == "AKG":
              target_to_processor = {"Ascend": "AiCore", "GPU": "CUDA", "CPU": "CPU"}
              reg_info["processor"] = reg_info.get("processor", target_to_processor.get(target))
-
          return reg_info

      def _get_target(self, reg_info):
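After this normalization step, a TBE-style `reg_info` carries the renamed keys; a hypothetical fragment for a function named `my_func` (values illustrative):

```python
# Hypothetical reg_info after normalization: note "binfile", "kernel" and
# "needCheckSupport", which replace the old "binfile_name", "kernel_name"
# and "need_check_supported" keys.
reg_info = {
    "imply_type": "TBE",
    "async_flag": False,
    "binfile": "my_func.so",
    "compute_cost": 10,
    "kernel": "my_func",
    "partial_flag": True,
    "needCheckSupport": False,
}
```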
@@ -841,8 +918,8 @@ class Custom(ops.PrimitiveWithInfer):
              reg_info["imply_type"].strip():
              return reg_info["imply_type"]
          # Infer imply_type from func_type
-         func_type_to_imply_type = {"hybrid": "AKG", "akg": "AKG", "tbe": "TBE", "aicpu": "AiCPU", "aot": target,
-                                    "pyfunc": target, "julia": target}
+         func_type_to_imply_type = {"hybrid": "AKG", "akg": "AKG", "tbe": "TBE", "aicpu": "AiCPU", "pyfunc": target,
+                                    "julia": target, "aot": "BiSheng" if target == "Ascend" else target}
          return func_type_to_imply_type.get(self.func_type, "AKG")

      def _save_attr(self, reg_info):
@@ -862,9 +939,11 @@ class Custom(ops.PrimitiveWithInfer):
          for item in tensor_inputs:
              if isinstance(item, dict) and item.get("name") is not None:
                  input_names.append(item["name"])
+         has_input_name = bool(input_names)
          for item in attr:
              if isinstance(item, dict) and item.get("name") is not None:
-                 input_names.append(item["name"])
+                 if has_input_name or context.get_context("mode") != ms.PYNATIVE_MODE:
+                     input_names.append(item["name"])
                  attr_names.append(item["name"])
          cur_attr = {"input_names": input_names, "attr_names": attr_names}
          # If func does not have attr, save current attr.