mindspore-2.2.14-cp39-cp39-win_amd64.whl → mindspore-2.4.0-cp39-cp39-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (1217)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +8 -5
  5. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +124 -25
  9. mindspore/_extends/builtin_operations.py +2 -1
  10. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  11. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  12. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  13. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  14. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  15. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  16. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  17. mindspore/_extends/parse/__init__.py +18 -14
  18. mindspore/_extends/parse/compile_config.py +299 -0
  19. mindspore/_extends/parse/namespace.py +2 -2
  20. mindspore/_extends/parse/parser.py +182 -68
  21. mindspore/_extends/parse/resources.py +45 -14
  22. mindspore/_extends/parse/standard_method.py +192 -252
  23. mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
  24. mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
  25. mindspore/_extends/remote/kernel_build_server.py +2 -0
  26. mindspore/_profiler.py +30 -0
  27. mindspore/amp.py +67 -26
  28. mindspore/atlprov.dll +0 -0
  29. mindspore/avcodec-59.dll +0 -0
  30. mindspore/avdevice-59.dll +0 -0
  31. mindspore/avfilter-8.dll +0 -0
  32. mindspore/avformat-59.dll +0 -0
  33. mindspore/avutil-57.dll +0 -0
  34. mindspore/boost/adasum.py +1 -1
  35. mindspore/boost/base.py +1 -1
  36. mindspore/boost/boost_cell_wrapper.py +2 -2
  37. mindspore/boost/grad_freeze.py +2 -2
  38. mindspore/boost/group_loss_scale_manager.py +1 -1
  39. mindspore/boost/less_batch_normalization.py +9 -6
  40. mindspore/c1.dll +0 -0
  41. mindspore/c1xx.dll +0 -0
  42. mindspore/c2.dll +0 -0
  43. mindspore/common/__init__.py +20 -7
  44. mindspore/common/_jit_fallback_utils.py +2 -3
  45. mindspore/common/_pijit_context.py +190 -0
  46. mindspore/common/_register_for_adapter.py +7 -0
  47. mindspore/common/_register_for_recompute.py +48 -0
  48. mindspore/common/_register_for_tensor.py +10 -10
  49. mindspore/common/_stub_tensor.py +7 -1
  50. mindspore/common/_tensor_overload.py +139 -0
  51. mindspore/common/_utils.py +5 -17
  52. mindspore/common/api.py +449 -129
  53. mindspore/common/auto_dynamic_shape.py +27 -14
  54. mindspore/common/dtype.py +17 -10
  55. mindspore/common/dump.py +8 -11
  56. mindspore/common/file_system.py +48 -0
  57. mindspore/common/generator.py +254 -0
  58. mindspore/common/hook_handle.py +65 -30
  59. mindspore/common/initializer.py +1 -1
  60. mindspore/common/jit_config.py +34 -14
  61. mindspore/common/lazy_inline.py +72 -19
  62. mindspore/common/mindir_util.py +12 -2
  63. mindspore/common/mutable.py +79 -14
  64. mindspore/common/no_inline.py +54 -0
  65. mindspore/common/np_dtype.py +25 -0
  66. mindspore/common/parameter.py +73 -21
  67. mindspore/common/recompute.py +292 -0
  68. mindspore/common/seed.py +9 -9
  69. mindspore/common/sparse_tensor.py +276 -24
  70. mindspore/common/symbol.py +122 -0
  71. mindspore/common/tensor.py +668 -514
  72. mindspore/communication/__init__.py +6 -11
  73. mindspore/communication/_comm_helper.py +43 -3
  74. mindspore/communication/comm_func.py +1395 -0
  75. mindspore/communication/management.py +117 -104
  76. mindspore/config/op_info.config +22 -54
  77. mindspore/context.py +455 -71
  78. mindspore/dataset/__init__.py +5 -5
  79. mindspore/dataset/audio/__init__.py +6 -6
  80. mindspore/dataset/audio/transforms.py +711 -158
  81. mindspore/dataset/callback/ds_callback.py +2 -2
  82. mindspore/dataset/core/config.py +7 -0
  83. mindspore/dataset/core/validator_helpers.py +7 -0
  84. mindspore/dataset/engine/cache_client.py +2 -2
  85. mindspore/dataset/engine/datasets.py +201 -116
  86. mindspore/dataset/engine/datasets_audio.py +14 -14
  87. mindspore/dataset/engine/datasets_standard_format.py +83 -3
  88. mindspore/dataset/engine/datasets_text.py +39 -39
  89. mindspore/dataset/engine/datasets_user_defined.py +230 -141
  90. mindspore/dataset/engine/datasets_vision.py +78 -74
  91. mindspore/dataset/engine/iterators.py +29 -0
  92. mindspore/dataset/engine/obs/util.py +7 -0
  93. mindspore/dataset/engine/offload.py +5 -7
  94. mindspore/dataset/engine/queue.py +138 -66
  95. mindspore/dataset/engine/serializer_deserializer.py +2 -2
  96. mindspore/dataset/engine/validators.py +41 -15
  97. mindspore/dataset/text/__init__.py +2 -5
  98. mindspore/dataset/text/transforms.py +408 -121
  99. mindspore/dataset/text/utils.py +9 -9
  100. mindspore/dataset/transforms/__init__.py +0 -3
  101. mindspore/dataset/transforms/transforms.py +261 -76
  102. mindspore/dataset/utils/browse_dataset.py +9 -9
  103. mindspore/dataset/utils/line_reader.py +2 -0
  104. mindspore/dataset/vision/__init__.py +7 -10
  105. mindspore/dataset/vision/c_transforms.py +10 -10
  106. mindspore/dataset/vision/py_transforms_util.py +1 -1
  107. mindspore/dataset/vision/transforms.py +2844 -549
  108. mindspore/dataset/vision/utils.py +161 -10
  109. mindspore/dataset/vision/validators.py +16 -3
  110. mindspore/dnnl.dll +0 -0
  111. mindspore/dpcmi.dll +0 -0
  112. mindspore/{rewrite/ast_creator_register.py → experimental/es/__init__.py} +5 -20
  113. mindspore/experimental/es/embedding_service.py +883 -0
  114. mindspore/experimental/es/embedding_service_layer.py +581 -0
  115. mindspore/experimental/llm_boost/__init__.py +21 -0
  116. mindspore/experimental/llm_boost/atb/__init__.py +23 -0
  117. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  118. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  119. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  120. mindspore/experimental/llm_boost/register.py +129 -0
  121. mindspore/experimental/llm_boost/utils.py +31 -0
  122. mindspore/experimental/optim/__init__.py +12 -2
  123. mindspore/experimental/optim/adadelta.py +161 -0
  124. mindspore/experimental/optim/adagrad.py +168 -0
  125. mindspore/experimental/optim/adam.py +35 -34
  126. mindspore/experimental/optim/adamax.py +170 -0
  127. mindspore/experimental/optim/adamw.py +124 -15
  128. mindspore/experimental/optim/asgd.py +153 -0
  129. mindspore/experimental/optim/lr_scheduler.py +66 -121
  130. mindspore/experimental/optim/nadam.py +157 -0
  131. mindspore/experimental/optim/optimizer.py +18 -8
  132. mindspore/experimental/optim/radam.py +194 -0
  133. mindspore/experimental/optim/rmsprop.py +154 -0
  134. mindspore/experimental/optim/rprop.py +164 -0
  135. mindspore/experimental/optim/sgd.py +28 -19
  136. mindspore/hal/__init__.py +40 -0
  137. mindspore/hal/_ascend.py +57 -0
  138. mindspore/hal/_base.py +57 -0
  139. mindspore/hal/_cpu.py +56 -0
  140. mindspore/hal/_gpu.py +57 -0
  141. mindspore/hal/contiguous_tensors_handle.py +175 -0
  142. mindspore/hal/device.py +356 -0
  143. mindspore/hal/event.py +179 -0
  144. mindspore/hal/memory.py +326 -0
  145. mindspore/hal/stream.py +357 -0
  146. mindspore/include/api/data_type.h +2 -2
  147. mindspore/include/api/dual_abi_helper.h +16 -3
  148. mindspore/include/api/model.h +4 -3
  149. mindspore/include/api/model_group.h +13 -1
  150. mindspore/include/api/status.h +14 -0
  151. mindspore/include/api/types.h +10 -10
  152. mindspore/include/c_api/model_c.h +173 -0
  153. mindspore/include/c_api/types_c.h +19 -0
  154. mindspore/include/dataset/config.h +2 -2
  155. mindspore/include/dataset/constants.h +2 -2
  156. mindspore/include/dataset/execute.h +3 -5
  157. mindspore/include/dataset/vision.h +58 -2
  158. mindspore/jpeg62.dll +0 -0
  159. mindspore/log.py +3 -3
  160. mindspore/mindrecord/__init__.py +5 -1
  161. mindspore/mindrecord/config.py +809 -0
  162. mindspore/mindrecord/filereader.py +25 -0
  163. mindspore/mindrecord/filewriter.py +138 -103
  164. mindspore/mindrecord/mindpage.py +40 -6
  165. mindspore/mindrecord/shardutils.py +3 -2
  166. mindspore/mindrecord/shardwriter.py +7 -0
  167. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  168. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  169. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  170. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  171. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  172. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  173. mindspore/mindspore_backend.dll +0 -0
  174. mindspore/mindspore_common.dll +0 -0
  175. mindspore/mindspore_core.dll +0 -0
  176. mindspore/mindspore_glog.dll +0 -0
  177. mindspore/mindspore_np_dtype.dll +0 -0
  178. mindspore/mindspore_ops.dll +0 -0
  179. mindspore/mint/__init__.py +1586 -0
  180. mindspore/mint/distributed/__init__.py +31 -0
  181. mindspore/mint/distributed/distributed.py +254 -0
  182. mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
  183. mindspore/mint/nn/__init__.py +757 -0
  184. mindspore/mint/nn/functional.py +679 -0
  185. mindspore/mint/nn/layer/__init__.py +39 -0
  186. mindspore/mint/nn/layer/activation.py +133 -0
  187. mindspore/mint/nn/layer/normalization.py +477 -0
  188. mindspore/mint/nn/layer/pooling.py +110 -0
  189. mindspore/mint/optim/__init__.py +24 -0
  190. mindspore/mint/optim/adamw.py +206 -0
  191. mindspore/mint/special/__init__.py +63 -0
  192. mindspore/msobj140.dll +0 -0
  193. mindspore/mspdb140.dll +0 -0
  194. mindspore/mspdbcore.dll +0 -0
  195. mindspore/mspdbst.dll +0 -0
  196. mindspore/mspft140.dll +0 -0
  197. mindspore/msvcdis140.dll +0 -0
  198. mindspore/msvcp140_1.dll +0 -0
  199. mindspore/msvcp140_2.dll +0 -0
  200. mindspore/msvcp140_atomic_wait.dll +0 -0
  201. mindspore/msvcp140_codecvt_ids.dll +0 -0
  202. mindspore/multiprocessing/__init__.py +73 -0
  203. mindspore/nn/cell.py +461 -323
  204. mindspore/nn/dynamic_lr.py +2 -2
  205. mindspore/nn/layer/activation.py +292 -135
  206. mindspore/nn/layer/basic.py +288 -83
  207. mindspore/nn/layer/channel_shuffle.py +3 -16
  208. mindspore/nn/layer/container.py +3 -3
  209. mindspore/nn/layer/conv.py +75 -66
  210. mindspore/nn/layer/embedding.py +221 -45
  211. mindspore/nn/layer/image.py +4 -7
  212. mindspore/nn/layer/math.py +1 -1
  213. mindspore/nn/layer/normalization.py +150 -68
  214. mindspore/nn/layer/padding.py +64 -87
  215. mindspore/nn/layer/pooling.py +175 -12
  216. mindspore/nn/layer/rnn_cells.py +6 -16
  217. mindspore/nn/layer/rnns.py +6 -5
  218. mindspore/nn/layer/thor_layer.py +1 -2
  219. mindspore/nn/layer/timedistributed.py +1 -1
  220. mindspore/nn/layer/transformer.py +55 -53
  221. mindspore/nn/learning_rate_schedule.py +6 -5
  222. mindspore/nn/loss/__init__.py +2 -2
  223. mindspore/nn/loss/loss.py +145 -88
  224. mindspore/nn/optim/__init__.py +2 -1
  225. mindspore/nn/optim/ada_grad.py +4 -2
  226. mindspore/nn/optim/adadelta.py +4 -2
  227. mindspore/nn/optim/adafactor.py +1 -1
  228. mindspore/nn/optim/adam.py +102 -181
  229. mindspore/nn/optim/adamax.py +4 -2
  230. mindspore/nn/optim/adasum.py +3 -3
  231. mindspore/nn/optim/asgd.py +4 -2
  232. mindspore/nn/optim/ftrl.py +31 -61
  233. mindspore/nn/optim/lamb.py +5 -3
  234. mindspore/nn/optim/lars.py +2 -2
  235. mindspore/nn/optim/lazyadam.py +6 -4
  236. mindspore/nn/optim/momentum.py +13 -25
  237. mindspore/nn/optim/optimizer.py +6 -3
  238. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  239. mindspore/nn/optim/rmsprop.py +9 -3
  240. mindspore/nn/optim/rprop.py +4 -2
  241. mindspore/nn/optim/sgd.py +5 -3
  242. mindspore/nn/optim/tft_wrapper.py +127 -0
  243. mindspore/nn/optim/thor.py +2 -2
  244. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  245. mindspore/nn/probability/distribution/beta.py +2 -2
  246. mindspore/nn/probability/distribution/categorical.py +4 -6
  247. mindspore/nn/probability/distribution/cauchy.py +2 -2
  248. mindspore/nn/probability/distribution/exponential.py +2 -2
  249. mindspore/nn/probability/distribution/geometric.py +1 -1
  250. mindspore/nn/probability/distribution/gumbel.py +2 -2
  251. mindspore/nn/probability/distribution/logistic.py +1 -1
  252. mindspore/nn/probability/distribution/poisson.py +2 -2
  253. mindspore/nn/probability/distribution/uniform.py +2 -2
  254. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  255. mindspore/nn/wrap/__init__.py +2 -1
  256. mindspore/nn/wrap/cell_wrapper.py +46 -12
  257. mindspore/nn/wrap/grad_reducer.py +148 -8
  258. mindspore/nn/wrap/loss_scale.py +44 -7
  259. mindspore/numpy/__init__.py +2 -0
  260. mindspore/numpy/array_creations.py +67 -68
  261. mindspore/numpy/array_ops.py +70 -66
  262. mindspore/numpy/dtypes.py +3 -3
  263. mindspore/numpy/fft.py +966 -0
  264. mindspore/numpy/logic_ops.py +11 -10
  265. mindspore/numpy/math_ops.py +147 -152
  266. mindspore/numpy/utils.py +3 -0
  267. mindspore/numpy/utils_const.py +4 -4
  268. mindspore/opencv_core452.dll +0 -0
  269. mindspore/opencv_imgcodecs452.dll +0 -0
  270. mindspore/opencv_imgproc452.dll +0 -0
  271. mindspore/ops/__init__.py +9 -6
  272. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  273. mindspore/ops/_grad_experimental/grad_comm_ops.py +135 -36
  274. mindspore/ops/_grad_experimental/grad_math_ops.py +61 -298
  275. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  276. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  277. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  278. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  279. mindspore/ops/_op_impl/__init__.py +0 -1
  280. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  281. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  282. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  283. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  284. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  285. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  286. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  287. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  288. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  289. mindspore/ops/_vmap/vmap_array_ops.py +162 -101
  290. mindspore/ops/_vmap/vmap_base.py +8 -1
  291. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  292. mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
  293. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  294. mindspore/ops/_vmap/vmap_math_ops.py +147 -59
  295. mindspore/ops/_vmap/vmap_nn_ops.py +292 -117
  296. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  297. mindspore/ops/auto_generate/__init__.py +31 -0
  298. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
  299. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
  300. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  301. mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
  302. mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
  303. mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
  304. mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
  305. mindspore/ops/composite/__init__.py +5 -2
  306. mindspore/ops/composite/base.py +201 -66
  307. mindspore/ops/composite/math_ops.py +10 -49
  308. mindspore/ops/composite/multitype_ops/_compile_utils.py +192 -618
  309. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
  310. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  311. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  312. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  313. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  314. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  315. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  316. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  317. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  318. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  319. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  320. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  321. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  322. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  323. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  324. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  325. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  326. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  327. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  328. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  329. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  330. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  331. mindspore/ops/composite/multitype_ops/not_in_impl.py +8 -3
  332. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  333. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  334. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  335. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  336. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  337. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  338. mindspore/ops/deprecated.py +14 -3
  339. mindspore/ops/function/__init__.py +53 -11
  340. mindspore/ops/function/array_func.py +1269 -1821
  341. mindspore/ops/function/clip_func.py +19 -31
  342. mindspore/ops/function/debug_func.py +114 -5
  343. mindspore/ops/function/fft_func.py +44 -0
  344. mindspore/ops/function/grad/grad_func.py +30 -22
  345. mindspore/ops/function/image_func.py +27 -21
  346. mindspore/ops/function/linalg_func.py +35 -68
  347. mindspore/ops/function/math_func.py +1170 -2697
  348. mindspore/ops/function/nn_func.py +2116 -1128
  349. mindspore/ops/function/other_func.py +8 -8
  350. mindspore/ops/function/parameter_func.py +5 -93
  351. mindspore/ops/function/random_func.py +435 -113
  352. mindspore/ops/function/reshard_func.py +104 -0
  353. mindspore/ops/function/sparse_func.py +4 -4
  354. mindspore/ops/function/sparse_unary_func.py +9 -16
  355. mindspore/ops/function/spectral_func.py +1 -1
  356. mindspore/ops/function/vmap_func.py +16 -15
  357. mindspore/ops/functional.py +355 -346
  358. mindspore/ops/op_info_register.py +18 -45
  359. mindspore/ops/operations/__init__.py +38 -24
  360. mindspore/ops/operations/_grad_ops.py +21 -927
  361. mindspore/ops/operations/_infer_ops.py +19 -0
  362. mindspore/ops/operations/_inner_ops.py +173 -607
  363. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  364. mindspore/ops/operations/_scalar_ops.py +5 -480
  365. mindspore/ops/operations/_sequence_ops.py +6 -36
  366. mindspore/ops/operations/_tensor_array.py +8 -8
  367. mindspore/ops/operations/array_ops.py +106 -2837
  368. mindspore/ops/operations/comm_ops.py +799 -127
  369. mindspore/ops/operations/custom_ops.py +124 -119
  370. mindspore/ops/operations/debug_ops.py +142 -41
  371. mindspore/ops/operations/image_ops.py +1 -217
  372. mindspore/ops/operations/inner_ops.py +5 -40
  373. mindspore/ops/operations/linalg_ops.py +1 -49
  374. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  375. mindspore/ops/operations/manually_defined/_inner.py +73 -0
  376. mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
  377. mindspore/ops/operations/math_ops.py +666 -4972
  378. mindspore/ops/operations/nn_ops.py +205 -2213
  379. mindspore/ops/operations/other_ops.py +60 -49
  380. mindspore/ops/operations/random_ops.py +50 -54
  381. mindspore/ops/operations/reshard_ops.py +53 -0
  382. mindspore/ops/operations/sparse_ops.py +4 -4
  383. mindspore/ops/primitive.py +216 -103
  384. mindspore/ops_generate/__init__.py +27 -0
  385. mindspore/ops_generate/arg_dtype_cast.py +252 -0
  386. mindspore/ops_generate/arg_handler.py +197 -0
  387. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  388. mindspore/ops_generate/gen_constants.py +36 -0
  389. mindspore/ops_generate/gen_ops.py +1099 -0
  390. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  391. mindspore/ops_generate/gen_pyboost_func.py +1052 -0
  392. mindspore/ops_generate/gen_utils.py +209 -0
  393. mindspore/ops_generate/op_proto.py +145 -0
  394. mindspore/ops_generate/pyboost_utils.py +367 -0
  395. mindspore/ops_generate/template.py +261 -0
  396. mindspore/parallel/__init__.py +8 -4
  397. mindspore/parallel/_auto_parallel_context.py +100 -10
  398. mindspore/parallel/_cell_wrapper.py +99 -9
  399. mindspore/parallel/_cost_model_context.py +1 -1
  400. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  401. mindspore/parallel/_parallel_serialization.py +67 -23
  402. mindspore/parallel/_ps_context.py +1 -1
  403. mindspore/parallel/_recovery_context.py +1 -1
  404. mindspore/parallel/_tensor.py +99 -22
  405. mindspore/parallel/_transformer/__init__.py +1 -1
  406. mindspore/parallel/_transformer/layers.py +1 -1
  407. mindspore/parallel/_transformer/loss.py +1 -1
  408. mindspore/parallel/_transformer/moe.py +1 -1
  409. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  410. mindspore/parallel/_transformer/transformer.py +2 -2
  411. mindspore/parallel/_utils.py +173 -6
  412. mindspore/parallel/algo_parameter_config.py +8 -10
  413. mindspore/parallel/checkpoint_transform.py +204 -38
  414. mindspore/parallel/cluster/__init__.py +15 -0
  415. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  416. mindspore/parallel/cluster/process_entity/_api.py +352 -0
  417. mindspore/parallel/cluster/process_entity/_utils.py +101 -0
  418. mindspore/parallel/cluster/run.py +136 -0
  419. mindspore/parallel/mpi/__init__.py +1 -1
  420. mindspore/parallel/mpi/_mpi_config.py +1 -1
  421. mindspore/parallel/parameter_broadcast.py +151 -0
  422. mindspore/parallel/shard.py +279 -37
  423. mindspore/parallel/transform_safetensors.py +993 -0
  424. mindspore/pgodb140.dll +0 -0
  425. mindspore/pgort140.dll +0 -0
  426. mindspore/profiler/__init__.py +4 -2
  427. mindspore/profiler/common/constant.py +29 -0
  428. mindspore/profiler/common/process_pool.py +41 -0
  429. mindspore/profiler/common/registry.py +47 -0
  430. mindspore/profiler/common/singleton.py +28 -0
  431. mindspore/profiler/common/util.py +153 -0
  432. mindspore/profiler/dynamic_profiler.py +694 -0
  433. mindspore/profiler/envprofiling.py +18 -20
  434. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  435. mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
  436. mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
  437. mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
  438. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
  439. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
  440. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
  441. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  442. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
  443. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  444. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
  445. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  446. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  447. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  448. mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
  449. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  450. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  451. mindspore/profiler/parser/ascend_msprof_exporter.py +148 -146
  452. mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
  453. mindspore/profiler/parser/ascend_op_generator.py +92 -42
  454. mindspore/profiler/parser/ascend_timeline_generator.py +298 -133
  455. mindspore/profiler/parser/base_timeline_generator.py +25 -25
  456. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
  457. mindspore/profiler/parser/framework_parser.py +4 -393
  458. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  459. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  460. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  461. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  462. mindspore/profiler/parser/integrator.py +3 -1
  463. mindspore/profiler/parser/memory_usage_parser.py +0 -154
  464. mindspore/profiler/parser/minddata_parser.py +72 -3
  465. mindspore/profiler/parser/profiler_info.py +94 -7
  466. mindspore/profiler/profiler.py +153 -0
  467. mindspore/profiler/profiling.py +631 -508
  468. mindspore/rewrite/__init__.py +2 -14
  469. mindspore/rewrite/api/node.py +122 -36
  470. mindspore/rewrite/api/pattern_engine.py +2 -3
  471. mindspore/rewrite/api/scoped_value.py +16 -15
  472. mindspore/rewrite/api/symbol_tree.py +45 -29
  473. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  474. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  475. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  476. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  477. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  478. mindspore/rewrite/common/__init__.py +1 -2
  479. mindspore/rewrite/common/config.py +24 -0
  480. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  481. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  482. mindspore/rewrite/common/namespace.py +118 -0
  483. mindspore/rewrite/node/__init__.py +5 -5
  484. mindspore/rewrite/node/call_function.py +23 -7
  485. mindspore/rewrite/node/cell_container.py +7 -3
  486. mindspore/rewrite/node/control_flow.py +53 -28
  487. mindspore/rewrite/node/node.py +212 -196
  488. mindspore/rewrite/node/node_manager.py +51 -22
  489. mindspore/rewrite/node/node_topological_manager.py +3 -23
  490. mindspore/rewrite/parsers/__init__.py +12 -0
  491. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  492. mindspore/rewrite/parsers/assign_parser.py +637 -413
  493. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  494. mindspore/rewrite/parsers/class_def_parser.py +115 -148
  495. mindspore/rewrite/parsers/constant_parser.py +5 -5
  496. mindspore/rewrite/parsers/container_parser.py +4 -6
  497. mindspore/rewrite/parsers/expr_parser.py +55 -0
  498. mindspore/rewrite/parsers/for_parser.py +31 -98
  499. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  500. mindspore/rewrite/parsers/if_parser.py +28 -10
  501. mindspore/rewrite/parsers/module_parser.py +8 -182
  502. mindspore/rewrite/parsers/parser.py +1 -5
  503. mindspore/rewrite/parsers/parser_register.py +1 -1
  504. mindspore/rewrite/parsers/return_parser.py +5 -10
  505. mindspore/rewrite/parsers/while_parser.py +59 -0
  506. mindspore/rewrite/sparsify/utils.py +1 -1
  507. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  508. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +705 -186
  509. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  510. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  511. mindspore/run_check/_check_version.py +40 -115
  512. mindspore/run_check/run_check.py +1 -1
  513. mindspore/safeguard/rewrite_obfuscation.py +597 -263
  514. mindspore/swresample-4.dll +0 -0
  515. mindspore/swscale-6.dll +0 -0
  516. mindspore/tbbmalloc.dll +0 -0
  517. mindspore/tinyxml2.dll +0 -0
  518. mindspore/train/__init__.py +7 -5
  519. mindspore/train/_utils.py +204 -4
  520. mindspore/train/amp.py +335 -295
  521. mindspore/train/anf_ir_pb2.py +14 -2
  522. mindspore/train/callback/__init__.py +5 -2
  523. mindspore/train/callback/_backup_and_restore.py +5 -5
  524. mindspore/train/callback/_callback.py +4 -4
  525. mindspore/train/callback/_checkpoint.py +220 -43
  526. mindspore/train/callback/_cluster_monitor.py +201 -0
  527. mindspore/train/callback/_early_stop.py +2 -2
  528. mindspore/train/callback/_flops_collector.py +239 -0
  529. mindspore/train/callback/_landscape.py +15 -9
  530. mindspore/train/callback/_loss_monitor.py +5 -5
  531. mindspore/train/callback/_on_request_exit.py +136 -33
  532. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  533. mindspore/train/callback/_summary_collector.py +12 -12
  534. mindspore/train/callback/_tft_register.py +352 -0
  535. mindspore/train/callback/_time_monitor.py +3 -3
  536. mindspore/train/data_sink.py +6 -5
  537. mindspore/train/dataset_helper.py +66 -23
  538. mindspore/train/loss_scale_manager.py +2 -2
  539. mindspore/train/metrics/accuracy.py +7 -7
  540. mindspore/train/metrics/confusion_matrix.py +8 -6
  541. mindspore/train/metrics/cosine_similarity.py +6 -4
  542. mindspore/train/metrics/error.py +2 -2
  543. mindspore/train/metrics/metric.py +3 -3
  544. mindspore/train/metrics/perplexity.py +2 -1
  545. mindspore/train/metrics/roc.py +4 -4
  546. mindspore/train/metrics/topk.py +2 -2
  547. mindspore/train/mind_ir_pb2.py +116 -37
  548. mindspore/train/model.py +382 -76
  549. mindspore/train/serialization.py +787 -288
  550. mindspore/train/summary/_summary_adapter.py +1 -1
  551. mindspore/train/summary/summary_record.py +51 -28
  552. mindspore/train/train_thor/convert_utils.py +3 -3
  553. mindspore/turbojpeg.dll +0 -0
  554. mindspore/utils/__init__.py +21 -0
  555. mindspore/utils/utils.py +60 -0
  556. mindspore/vcmeta.dll +0 -0
  557. mindspore/vcruntime140.dll +0 -0
  558. mindspore/vcruntime140_1.dll +0 -0
  559. mindspore/version.py +1 -1
  560. {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/METADATA +8 -4
  561. mindspore-2.4.0.dist-info/RECORD +1406 -0
  562. {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +1 -0
  563. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  564. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  565. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  566. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  567. mindspore/gen_ops.py +0 -273
  568. mindspore/include/c_api/ms/abstract.h +0 -67
  569. mindspore/include/c_api/ms/attribute.h +0 -197
  570. mindspore/include/c_api/ms/base/handle_types.h +0 -43
  571. mindspore/include/c_api/ms/base/macros.h +0 -32
  572. mindspore/include/c_api/ms/base/status.h +0 -33
  573. mindspore/include/c_api/ms/base/types.h +0 -282
  574. mindspore/include/c_api/ms/context.h +0 -102
  575. mindspore/include/c_api/ms/graph.h +0 -160
  576. mindspore/include/c_api/ms/node.h +0 -606
  577. mindspore/include/c_api/ms/tensor.h +0 -161
  578. mindspore/include/c_api/ms/value.h +0 -84
  579. mindspore/mindspore_shared_lib.dll +0 -0
  580. mindspore/nn/layer/flash_attention.py +0 -189
  581. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
  582. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
  583. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  584. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  585. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  586. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  587. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  588. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  589. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  590. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  591. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  592. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  593. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  594. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  595. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  596. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  597. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  598. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  599. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  600. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  601. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  602. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  603. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  604. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  605. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  606. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  607. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  608. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  609. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  610. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  611. mindspore/ops/_op_impl/tbe/add.py +0 -42
  612. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  613. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  614. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  615. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  616. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  617. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  618. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  619. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  620. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  621. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  622. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  623. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  624. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  625. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  626. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  627. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  628. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  629. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  630. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  631. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  632. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  633. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  634. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  635. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  636. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  637. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  638. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  639. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  640. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  641. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  642. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  643. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  644. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  645. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  646. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  647. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  648. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  649. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  650. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  651. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  652. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  653. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  654. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  655. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  656. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  657. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  658. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  659. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  660. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  661. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  662. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  663. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  664. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  665. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  666. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  667. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  668. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  669. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  670. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  671. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  672. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  673. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  674. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  675. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  676. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  677. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  678. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  679. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  680. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  681. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  682. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  683. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  684. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  685. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  686. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  687. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  688. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  689. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  690. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  691. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  692. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  693. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  694. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  695. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  696. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  697. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  698. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  699. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  700. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  701. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  702. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  703. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  704. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  705. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  706. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  707. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  708. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  709. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  710. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  711. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  712. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  713. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  714. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  715. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  716. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  717. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  718. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  719. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  720. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  721. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  722. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  723. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  724. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  725. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  726. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  727. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  728. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  729. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  730. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  731. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  732. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  733. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  734. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  735. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  736. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  737. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  738. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  739. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  740. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  741. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  742. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  743. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  744. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  745. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  746. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  747. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  748. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  749. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  750. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  751. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  752. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  753. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  754. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  755. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  756. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  757. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  758. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  759. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  760. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  761. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  762. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  763. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  764. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  765. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  766. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  767. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  768. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  769. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  770. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  771. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  772. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  773. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  774. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  775. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  776. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  777. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  778. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  779. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  780. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  781. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  782. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  783. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  784. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  785. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  786. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  787. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  788. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  789. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  790. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  791. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  792. mindspore/ops/_op_impl/tbe/div.py +0 -41
  793. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  794. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  795. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  796. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  797. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  798. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  799. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  800. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  801. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  802. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  803. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  804. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  805. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  806. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  807. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  808. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  809. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  810. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  811. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  812. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  813. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  814. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  815. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  816. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  817. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  818. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  819. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  820. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  821. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  822. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  823. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  824. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  825. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  826. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  827. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  828. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  829. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  830. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  831. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  832. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  833. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  834. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  835. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  836. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  837. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  838. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  839. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  840. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  841. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  842. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  843. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  844. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  845. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  846. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  847. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  848. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  849. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  850. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  851. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  852. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  853. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  854. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  855. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  856. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  857. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  858. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  859. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  860. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  861. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  862. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  863. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  864. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  865. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  866. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  867. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  868. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  869. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  870. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  871. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  872. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  873. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  874. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  875. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  876. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  877. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  878. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  879. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  880. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  881. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  882. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  883. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  884. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  885. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  886. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  887. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  888. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  889. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  890. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  891. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  892. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  893. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  894. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  895. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  896. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  897. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  898. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  899. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  900. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  901. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  902. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  903. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  904. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  905. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  906. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  907. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  908. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  909. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  910. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  911. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  912. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  913. mindspore/ops/_op_impl/tbe/less.py +0 -41
  914. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  915. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  916. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  917. mindspore/ops/_op_impl/tbe/log.py +0 -40
  918. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  919. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  920. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  921. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  922. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  923. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  924. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  925. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  926. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  927. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  928. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  929. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  930. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  931. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  932. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  933. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  934. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  935. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  936. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  937. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  938. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  939. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  940. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  941. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  942. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  943. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  944. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  945. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  946. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  947. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  948. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  949. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  950. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  951. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  952. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  953. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  954. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  955. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  956. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  957. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  958. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  959. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  960. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  961. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  962. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  963. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  964. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  965. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  966. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  967. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  968. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  969. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  970. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  971. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  972. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  973. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  974. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  975. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  976. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  977. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  978. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  979. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  980. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  981. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  982. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  983. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  984. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  985. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  986. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  987. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  988. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  989. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  990. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  991. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  992. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  993. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  994. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  995. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  996. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  997. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  998. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  999. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  1000. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  1001. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  1002. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  1003. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  1004. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  1005. mindspore/ops/_op_impl/tbe/range.py +0 -39
  1006. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  1007. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  1008. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  1009. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  1010. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  1011. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  1012. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  1013. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  1014. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  1015. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  1016. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  1017. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  1018. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  1019. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  1020. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  1021. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  1022. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  1023. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  1024. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  1025. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  1026. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  1027. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  1028. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  1029. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  1030. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  1031. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  1032. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  1033. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  1034. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  1035. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  1036. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  1037. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  1038. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  1039. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  1040. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  1041. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  1042. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  1043. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  1044. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  1045. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  1046. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  1047. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  1048. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  1049. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  1050. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  1051. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  1052. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  1053. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  1054. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  1055. mindspore/ops/_op_impl/tbe/round.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  1057. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  1058. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  1060. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1061. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1062. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1063. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1064. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1065. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1066. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1067. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1068. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1069. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1070. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1071. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1072. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1073. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1074. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1075. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1076. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1077. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1078. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1079. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1080. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1081. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1082. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1083. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1084. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1085. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1086. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1087. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1088. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1089. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1090. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1091. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1092. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1093. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1094. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1095. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1096. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1097. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1098. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1099. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1100. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1101. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1102. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1103. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1104. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1105. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1106. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1107. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1108. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1109. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1110. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1111. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1112. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1113. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1114. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1115. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1116. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1117. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1118. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1119. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1120. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1121. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1122. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1123. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1124. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1125. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1126. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1127. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1128. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1129. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1130. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1131. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1132. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1133. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1134. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1135. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1136. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1137. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1138. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1139. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1140. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1141. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1142. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1143. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1144. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1145. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1146. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1147. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1148. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1149. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1150. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1151. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1152. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1153. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1154. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1155. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1156. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1157. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1158. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1159. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1160. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1161. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1162. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1163. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1164. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1165. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1166. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1167. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1168. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1169. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1170. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1171. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1172. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1173. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1174. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1175. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1176. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1177. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1178. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1179. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1180. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1181. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1182. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1183. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1184. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1185. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1186. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1187. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1188. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1189. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1190. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1191. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1192. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1193. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1194. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1195. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1196. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1197. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1198. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1199. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1200. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1201. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1202. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1203. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1204. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1205. mindspore/ops/_tracefunc.py +0 -241
  1206. mindspore/ops/arg_dtype_cast.py +0 -54
  1207. mindspore/ops/silent_check.py +0 -162
  1208. mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
  1209. mindspore/profiler/parser/msadvisor_parser.py +0 -240
  1210. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1211. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1212. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1213. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1214. mindspore/rewrite/namespace.py +0 -53
  1215. mindspore-2.2.14.dist-info/RECORD +0 -1924
  1216. {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
  1217. {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
mindspore/train/model.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-2023 Huawei Technologies Co., Ltd
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@ from __future__ import absolute_import
 from collections.abc import Iterable
 from functools import wraps
 
+import sys
 import os
 import math
 import copy
@@ -34,8 +35,10 @@ from mindspore.common.tensor import Tensor
 from mindspore.train.metrics import get_metrics, get_metric_fn
 from mindspore._checkparam import check_input_data, check_output_data
 from mindspore import _checkparam as Validator
-from mindspore.train.callback import _InternalCallbackParam, RunContext, _CallbackManager, Callback, TimeMonitor
+from mindspore.train.callback import _InternalCallbackParam, RunContext, _CallbackManager, Callback, TimeMonitor,\
+    FlopsUtilizationCollector, TFTRegister
 from mindspore.train.callback import __all__ as internal_cb_names
+from mindspore.train.callback._cluster_monitor import ClusterMonitor
 from mindspore import context
 from mindspore.parallel._utils import _get_parallel_mode, _get_device_num, _get_parameter_broadcast, \
     _device_number_check, _parameter_broadcast_check, _parallel_predict_check, \
@@ -48,7 +51,7 @@ from mindspore.boost import AutoBoost
 from mindspore.context import ParallelMode
 from mindspore.parallel._recovery_context import _set_recovery_context, _get_recovery_context
 from mindspore.train.dataset_helper import DatasetHelper, connect_network_with_dataset
-from mindspore.common.api import _pynative_executor
+from mindspore.common.api import _pynative_executor, ARG_SPECIFIED, TOTAL_ARG_LEN
 from mindspore.dataset.core.config import get_debug_mode
 from mindspore.dataset.engine.datasets import _set_training_dataset, _reset_training_dataset
 from mindspore.train import amp
@@ -75,6 +78,7 @@ class _FrameworkProfilerCallback(Callback):
     """
     Profiler callback of framework for training.
     """
+
     def step_begin(self, run_context):
         _framework_profiler_step_start()
 
@@ -115,6 +119,236 @@ def _save_final_ckpt(func):
             func(self, *args, **kwargs)
     return wrapper
 
+def _handle_tft(func):
+    """
+    Decorator function, which starts uce handle process when an exception occurs during training.
+    """
+    @wraps(func)
+    def wrapper(self, *args, **kwargs):
+        obj = None
+        if kwargs.get('callbacks') and isinstance(kwargs.get('callbacks'), TFTRegister):
+            obj = kwargs.get('callbacks')
+        if kwargs.get('callbacks') and isinstance(kwargs.get('callbacks'), list):
+            for item in kwargs.get('callbacks'):
+                if isinstance(item, TFTRegister):
+                    obj = item
+        if obj:
+            tft = obj.tft
+            tft_env = os.getenv("MS_ENABLE_TFT", "")
+            uce_env = "UCE:1" in tft_env
+            while True:
+                try:
+                    return func(self, *args, **kwargs)
+                except RuntimeError as e:
+                    logger.info("uce wrapper caught RuntimeError")
+                    if not uce_env:
+                        logger.info("uce wrapper caught RuntimeError uce not enable")
+                        tft.tft_report_error(tft.ReportState.RS_UNKNOWN.value)
+                        raise e
+                    e_str = str(e)
+                    logger.info("uce wrapper caught RuntimeError e_str:{}".format(e_str))
+                    if "UCEError" in e_str:
+                        logger.info("uce wrapper report UCEError")
+                        tft.tft_report_error(tft.ReportState.RS_UCE.value)
+                    elif "ForceStopError" in e_str:
+                        logger.info("uce wrapper caught RuntimeError ForceStopError")
+                        force_stop_err = tft.ReportState.RS_NORMAL.value
+                        tft.tft_report_error(force_stop_err)
+                    else:
+                        logger.info("uce wrapper caught RuntimeError rankid: {} OTHER ERROR")
+                        tft.tft_report_error(tft.ReportState.RS_UNKNOWN.value)
+                        raise e
+                    ret = tft.tft_wait_next_action()
+                    if ret == tft.Action.EXIT.value:
+                        raise e
+                    repair_step = tft.tft_get_repair_step()
+                    logger.info("uce wrapper caught repair finish REPAIR STEP: {} batch_num: \
+                        {}".format(repair_step, self.batch_num))
+                    initial_epoch = int(repair_step/self.batch_num)
+                    initial_step = repair_step % self.batch_num
+                    kwargs["initial_epoch"] = initial_epoch
+
+                    train_dataset = args[1]
+                    dataset_sink_mode = args[3] if len(args) > 3 else kwargs.get('dataset_sink_mode', True)
+                    sink_size = args[4] if len(args) > 4 else kwargs.get('sink_size', -1)
+
+                    cb_initial_step = 0
+                    if dataset_sink_mode:
+                        train_dataset.set_init_step(initial_epoch)
+                        dataset_size = train_dataset.get_dataset_size()
+                        if sink_size != -1:
+                            cb_initial_step = initial_epoch * sink_size + initial_step
+                        else:
+                            cb_initial_step = initial_epoch * dataset_size + initial_step
+                    else:
+                        train_dataset.set_init_step(initial_step)
+                        cb_initial_step = initial_step
+
+                    kwargs["initial_step"] = cb_initial_step
+
+                    logger.info("uce wrapper repair complete \
+                        initial_epoch: {}, cb_initial_step: {} ".format(initial_epoch, cb_initial_step))
+                    continue
+                except BaseException as e:
+                    logger.info("uce wrapper caught BaseException error")
+                    tft.tft_report_error(tft.ReportState.RS_UNKNOWN.value)
+                    raise e
+        else:
+            return func(self, *args, **kwargs)
+    return wrapper
+
+
+def _check_tft():
+    """Check if TFT is supported"""
+    tft_env = os.getenv("MS_ENABLE_TFT")
+    device_target = context.get_context("device_target")
+    if tft_env and device_target == "Ascend":
+        from mindspore._c_expression import MSContext
+        ascend_target = MSContext.get_instance().get_ascend_soc_version()
+        if ascend_target == 'ascend910':
+            raise ValueError("TFT is not supported when using ascend910")
+        ms_mode = context.get_context("mode")
+        if ms_mode != mindspore.GRAPH_MODE:
+            raise ValueError("TFT is only supported in GRAPH_MODE")
+        jit_level = context.get_context("jit_level")
+        if jit_level == "O2" and "UCE:1" in tft_env:
+            raise ValueError("TFT is not supported when using jit_level == O2")
+
+
+def _append_ccae(callbacks):
+    """Add cluster monitoring when CCAE is enabled."""
+    perf_config = os.getenv("PERF_DUMP_CONFIG")
+    if perf_config is None:
+        return callbacks
+    pairs = perf_config.split(',')
+    perf_config_dict = {}
+    for pair in pairs:
+        key, value = pair.split(':')
+        if value.lower() == 'true':
+            perf_config_dict[key] = True
+        elif value.lower() == 'false':
+            perf_config_dict[key] = False
+        elif value.isdigit():
+            perf_config_dict[key] = int(value)
+        else:
+            perf_config_dict[key] = value
+    if perf_config_dict.get("enable", False):
+        if callbacks is None:
+            callbacks = ClusterMonitor()
+        elif isinstance(callbacks, list):
+            callbacks.append(ClusterMonitor())
+        else:
+            callbacks = [callbacks, ClusterMonitor()]
+    return callbacks
+
+
+def _get_arg_infos(inputs):
+    """Get compile argument information from inputs.
+
+    Args:
+        inputs (Union[list, tuple, dict]): Argument got from cell which is set by `set_inputs`.
+
+    Raises:
+        RuntimeError: inputs is not a list, tuple or dict.
+        RuntimeError: inputs is a dict without necessary keys and values.
+
+    Returns:
+        _type_: _description_
+    """
+    if isinstance(inputs, (list, tuple)):
+        arg_specified = [[idx, arg] for idx, arg in enumerate(inputs)]
+        arg_len = len(inputs)
+    elif isinstance(inputs, dict):
+        arg_specified = inputs.get(ARG_SPECIFIED, None)
+        arg_len = inputs.get(TOTAL_ARG_LEN, None)
+        if arg_specified is None or arg_len is None:
+            raise RuntimeError(
+                "The incremental inputs should be processed(with \"%s\" and \"%s\"), but got %s." %
+                (ARG_SPECIFIED, TOTAL_ARG_LEN, str(inputs)))
+    else:
+        raise RuntimeError("inputs should be a list/tuple or a dict, but got %s!" % str(inputs))
+
+    return arg_len, arg_specified
+
+
+def _merge_inputs(inputs1, inputs2):
+    """Merge two processed inputs to a new inputs for latter setting cell's inputs."""
+    is_fullmode1 = isinstance(inputs1, (list, tuple))
+    is_fullmode2 = isinstance(inputs2, (list, tuple))
+
+    if is_fullmode1 and is_fullmode2:
+        return [*inputs1, *inputs2]
+
+    arg_len1, arg_specified1 = _get_arg_infos(inputs1)
+    arg_len2, arg_specified2 = _get_arg_infos(inputs2)
+
+    res_arg_len = arg_len1 + arg_len2
+    res_arg_specified = []
+    res_arg_specified.extend(arg_specified1)
+    # The second inputs should add offset before merging.
+    for idx, arg in arg_specified2:
+        res_arg_specified.append([idx + arg_len1, arg])
+
+    return {ARG_SPECIFIED: res_arg_specified, TOTAL_ARG_LEN: res_arg_len}
+
+
+def _process_loss_inputs(loss_inputs):
+    """Process loss's inputs whose first input should be dropped for train or eval.
+
+    Args:
+        loss_inputs (Union[list, tuple, dict]): Arguments save by `set_inputs` or `jit`.
+
+    Raises:
+        RuntimeError: inputs is not a list, tuple or dict.
+        RuntimeError: inputs is a dict without necessary keys and values.
+
+    Returns:
+        list, tuple or dict: Arguments for latter setting.
+    """
+    # For train or eval, the first input of loss is the inner-tensor, so drop it.
+    res = None
+    if isinstance(loss_inputs, (list, tuple)):
+        res = [*loss_inputs]
+        res.pop(0)
+    elif isinstance(loss_inputs, dict):
+        loss_arg_specified = loss_inputs.get(ARG_SPECIFIED, None)
+        loss_arg_len = loss_inputs.get(TOTAL_ARG_LEN, None)
+        if loss_arg_specified is None or loss_arg_len is None:
+            raise RuntimeError(
+                "The loss incremental inputs should be processed(with \"%s\" and \"%s\"), but got %s." %
+                (ARG_SPECIFIED, TOTAL_ARG_LEN, str(loss_inputs)))
+        res_loss_arg_specified = []
+        for idx, arg in loss_arg_specified:
+            if idx == 0:
+                continue
+            res_loss_arg_specified.append([idx, arg])
+        res = {ARG_SPECIFIED: res_loss_arg_specified, TOTAL_ARG_LEN: loss_arg_len - 1}
+    else:
+        raise RuntimeError("loss_inputs should be a list/tuple or a dict, but got %s!" % str(loss_inputs))
+
+    return res
+
+
+def _set_with_processed_inputs(network, inputs):
+    """Save set inputs for computation graph with processed inputs.
+
+    Args:
+        network (nn.Cell): Target cell.
+        inputs (Union[list, tuple, dict]): Inputs argument got from other cell.
+
+    Raises:
+        RuntimeError: network is not a nn.Cell.
+        RuntimeError: inputs is not a list, tuple or dict.
+    """
+    Validator.check_value_type('network', network, nn.Cell)
+    if isinstance(inputs, (list, tuple)):
+        network.set_inputs(*inputs)
+    elif isinstance(inputs, dict):
+        network.set_inputs(**inputs)
+    else:
+        raise RuntimeError(
+            "Reset inputs from a process inputs, should be a list/tuple or a dict, but got %s!" % str(inputs))
+
 
 class Model:
     """
@@ -133,8 +367,8 @@ class Model:
133
367
 
134
368
  Args:
135
369
  network (Cell): A training or testing network.
136
- loss_fn (Cell): Objective function. If `loss_fn` is None, the `network` should contain the calculation of loss
137
- and parallel if needed. Default: ``None`` .
370
+ loss_fn (Cell): Objective function. If `loss_fn` is None, the `network` should contain the calculation of loss.
371
+ Default: ``None`` .
138
372
  optimizer (Cell): Optimizer for updating the weights. If `optimizer` is None, the `network` needs to
139
373
  do backpropagation and update weights. Default: ``None`` .
140
374
  metrics (Union[dict, set]): A Dictionary or a set of metrics for model evaluation.
@@ -151,21 +385,11 @@
         amp_level (str): Option for argument `level` in :func:`mindspore.amp.build_train_network`, level for mixed
             precision training. Supports ["O0", "O1", "O2", "O3", "auto"]. Default: ``"O0"`` .
 
-            - "O0": Do not change.
-            - "O1": Cast the operators in white_list to float16, the remaining operators are kept in float32.
-              The operators in the whitelist: [Conv1d, Conv2d, Conv3d, Conv1dTranspose, Conv2dTranspose,
-              Conv3dTranspose, Dense, LSTMCell, RNNCell, GRUCell, MatMul, BatchMatMul, PReLU, ReLU, Ger].
-            - "O2": Cast network to float16, keep BatchNorm run in float32, using dynamic loss scale.
-            - "O3": Cast network to float16, the BatchNorm is also cast to float16, loss scale will not be used.
-            - "auto": Set level to recommended level in different devices. Set level to "O2" on GPU, set
-              level to "O3" on Ascend. The recommended level is chosen by the expert experience, not applicable to all
-              scenarios. User should specify the level for special network.
-
-            "O2" is recommended on GPU, "O3" is recommended on Ascend.
+            For details on `amp_level` , refer to :func:`mindspore.amp.auto_mixed_precision`.
+
             The BatchNorm strategy can be changed by `keep_batchnorm_fp32` settings in `kwargs`. `keep_batchnorm_fp32`
             must be a bool. The loss scale strategy can be changed by `loss_scale_manager` setting in `kwargs`.
             `loss_scale_manager` should be a subclass of :class:`mindspore.amp.LossScaleManager`.
-            The more detailed explanation of `amp_level` setting can be found at `mindspore.amp.build_train_network`.
 
         boost_level (str): Option for argument `level` in `mindspore.boost`, level for boost mode
             training. Supports ["O0", "O1", "O2"]. Default: ``"O0"`` .
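
Since the level-by-level description now lives with :func:`mindspore.amp.auto_mixed_precision`, here is a compact, hedged illustration of the knobs that remain documented on this class: `amp_level` plus the `loss_scale_manager` and `keep_batchnorm_fp32` kwargs. The tiny `nn.Dense` network is only a stand-in for a real model.

    import mindspore.nn as nn
    from mindspore.amp import FixedLossScaleManager
    from mindspore.train import Model

    net = nn.Dense(16, 10)                     # stand-in backbone, not a real model
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)

    # "O2" casts the network to float16 while BatchNorm stays in float32; the loss
    # scale strategy is overridden through the loss_scale_manager kwarg, as described above.
    model = Model(net, loss_fn=loss, optimizer=optim, amp_level="O2",
                  loss_scale_manager=FixedLossScaleManager(1024., False),
                  keep_batchnorm_fp32=True)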
@@ -190,7 +414,7 @@ class Model:
190
414
  >>> from mindspore.train import Model
191
415
  >>>
192
416
  >>> # Define the network structure of LeNet5. Refer to
193
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
417
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
194
418
  >>> net = LeNet5()
195
419
  >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
196
420
  >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
@@ -199,7 +423,7 @@ class Model:
199
423
  >>> model.predict_network
200
424
  >>> model.eval_network
201
425
  >>> # Create the dataset taking MNIST as an example. Refer to
202
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/mnist.py
426
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/mnist.py
203
427
  >>> dataset = create_dataset()
204
428
  >>> model.train(2, dataset)
205
429
  """
@@ -240,7 +464,7 @@ class Model:
240
464
  self._mindspore_lite = None
241
465
  self._lite_infer = True # if backend lite infer fails, set False
242
466
  self._mindspore_lite_model_group_id = id(self) & 0xFFFF
243
-
467
+ self.batch_num = -1
244
468
 
245
469
  def _check_for_graph_cell(self, kwargs):
246
470
  """Check for graph cell"""
@@ -312,13 +536,10 @@
             raise ValueError("The argument 'optimizer' can not be None when set 'loss_scale_manager'.")
 
         net_inputs = network.get_inputs()
-        loss_inputs = [None]
         if self._loss_fn:
-            if self._loss_fn.get_inputs():
-                loss_inputs = [*self._loss_fn.get_inputs()]
-                loss_inputs.pop(0)
-            if net_inputs:
-                net_inputs = [*net_inputs, *loss_inputs]
+            if self._loss_fn.get_inputs() and net_inputs:
+                loss_inputs = _process_loss_inputs(self._loss_fn.get_inputs())
+                net_inputs = _merge_inputs(net_inputs, loss_inputs)
         if self._optimizer:
             amp_config = {}
             if self._loss_scale_manager_set:
@@ -336,7 +557,7 @@
         # If need to check if loss_fn is not None, but optimizer is None
 
         if net_inputs is not None:
-            network.set_inputs(*net_inputs)
+            _set_with_processed_inputs(network, net_inputs)
         return network
 
     def _build_eval_network(self, metrics, eval_network, eval_indexes):
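
The reworked `_build_train_network` above now merges the backbone's and the loss function's `set_inputs` information through `_process_loss_inputs` and `_merge_inputs` instead of splicing lists by hand. A hedged sketch of the user-facing pattern this supports (dynamic batch dimension on both cells; the `nn.Dense`/`nn.MAELoss` pair and the shapes are placeholders, not taken from this diff):

    import mindspore
    from mindspore import Tensor, nn
    from mindspore.train import Model

    net = nn.Dense(16, 4)     # stand-in backbone
    loss = nn.MAELoss()       # stand-in loss function

    dyn_data = Tensor(shape=[None, 16], dtype=mindspore.float32)    # dynamic batch dim
    dyn_logits = Tensor(shape=[None, 4], dtype=mindspore.float32)
    dyn_label = Tensor(shape=[None, 4], dtype=mindspore.float32)

    net.set_inputs(dyn_data)
    # The first loss input (the prediction) is dropped by _process_loss_inputs,
    # so only the label placeholder ends up appended to the merged compile arguments.
    loss.set_inputs(dyn_logits, dyn_label)

    model = Model(net, loss_fn=loss,
                  optimizer=nn.Momentum(net.trainable_params(), 0.01, 0.9))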
@@ -362,17 +583,13 @@ class Model:
362
583
  f" optional, and then you can set `eval_network` or `loss_fn`. For the latter case,"
363
584
  f" framework will automatically build an evaluation network with `network` and"
364
585
  f" `loss_fn`.")
365
-
366
586
  net_inputs = self._network.get_inputs()
367
- loss_inputs = [None]
368
- if self._loss_fn.get_inputs():
369
- loss_inputs = [*self._loss_fn.get_inputs()]
370
- loss_inputs.pop(0)
371
- if net_inputs:
372
- net_inputs = [*net_inputs, *loss_inputs]
587
+ if self._loss_fn.get_inputs() and net_inputs:
588
+ loss_inputs = _process_loss_inputs(self._loss_fn.get_inputs())
589
+ net_inputs = _merge_inputs(net_inputs, loss_inputs)
373
590
  self._eval_network = nn.WithEvalCell(self._network, self._loss_fn, self._amp_level in ["O2", "O3", "auto"])
374
591
  if net_inputs is not None:
375
- self._eval_network.set_inputs(*net_inputs)
592
+ _set_with_processed_inputs(self._eval_network, net_inputs)
376
593
  self._eval_indexes = [0, 1, 2]
377
594
 
378
595
  def _build_predict_network(self):
@@ -437,15 +654,16 @@ class Model:
437
654
  dataset.__loop_size__ = 1
438
655
 
439
656
  if dataset_helper is None:
657
+ logger.info("Begin to create DatasetHelper.")
440
658
  dataset_helper = DatasetHelper(dataset, dataset_sink_mode, sink_size, epoch_num)
441
659
 
442
660
  if dataset_sink_mode:
661
+ logger.info("Begin to connect network with dataset.")
443
662
  network = connect_network_with_dataset(network, dataset_helper)
444
663
 
445
664
  if _get_recovery_context("enable_recovery") and is_train:
446
665
  _set_training_dataset(dataset_helper)
447
666
 
448
-
449
667
  network.set_train(is_train)
450
668
  network.phase = phase
451
669
  self._backbone_is_train = is_train
@@ -459,8 +677,46 @@ class Model:
459
677
  if self._backbone_is_train != is_train:
460
678
  network.set_train(is_train)
461
679
  self._backbone_is_train = is_train
680
+ # Mode train and eval are the same net, network will be set_grad in _build_train_network.
681
+ # But if mode just want to do predict or eval, must set network set_grad False
682
+ if not is_train:
683
+ network.set_grad(False)
462
684
  return network
463
685
 
686
+ def _check_need_ckpt(self, callbacks):
687
+ """Check callback list contain ckpt"""
688
+ need_ckpt = False
689
+ save_ckpt_steps = 1
690
+ last_triggered_step = 0
691
+ for cb in callbacks:
692
+ if isinstance(cb, ModelCheckpoint):
693
+ need_ckpt = True
694
+ cfg_size = cb._get_save_checkpoint_steps
695
+ save_ckpt_steps = save_ckpt_steps if (cfg_size is None or cfg_size >= sys.maxsize) else cfg_size
696
+ last_triggered_step = cb._get_last_trigger_step
697
+ break
698
+ return need_ckpt, save_ckpt_steps, last_triggered_step
699
+
700
+ def _store_training_step_info(self, cb_params):
701
+ """
702
+ cache train step info
703
+ :param cb_params: callback params
704
+ :return: none
705
+ """
706
+ if os.environ.get("MS_ENABLE_CKPT_D2H_ASYNC") != "1":
707
+ return
708
+ if (context.get_context("mode") == context.GRAPH_MODE) and (context.get_context("device_target") == "Ascend"):
709
+ cb_params.need_ckpt, cb_params.save_checkpoint_steps, \
710
+ cb_params.last_triggered_step = self._check_need_ckpt(cb_params.list_callback)
711
+ logger.info(f"need_ckpt:{cb_params.need_ckpt},"
712
+ f"save_checkpoint_steps:{cb_params.save_checkpoint_steps},"
713
+ f"cur_step_num:{cb_params.cur_step_num},"
714
+ f"last_triggered_step:{cb_params.last_triggered_step}")
715
+ context.set_context(ascend_config={"need_ckpt": cb_params.need_ckpt,
716
+ "save_checkpoint_steps": cb_params.save_checkpoint_steps,
717
+ "cur_step_num": cb_params.cur_step_num,
718
+ "last_triggered_step": cb_params.last_triggered_step})
719
+
464
720
  def _warmup_dataset(self, epoch, train_dataset, sink_size=-1):
465
721
  """
466
722
  Trigger dataset pipeline running before graph compiling.
@@ -486,6 +742,22 @@ class Model:
486
742
  train_dataset._dataset_helper = dataset_helper
487
743
  train_dataset._warmup_epoch = epoch
488
744
 
745
+ def _waiting_for_dataset_warmup_ready(self, train_dataset):
746
+ """
747
+ Wait for the dataset to warmup until there is a batch of data available for training on the device side.
748
+
749
+ Args:
750
+ train_dataset (Dataset): A training dataset iterator. If `train_dataset` is defined, training graphs will be
751
+ initialized. Default: ``None``.
752
+ """
753
+ mbuf_size = train_dataset.__transfer_dataset__.get_mbuf_queue_size()
754
+ while mbuf_size == 0:
755
+ time.sleep(10)
756
+ mbuf_size = train_dataset.__transfer_dataset__.get_mbuf_queue_size()
757
+ if mbuf_size != 0:
758
+ break
759
+ logger.warning(f"Waiting for the dataset warmup, current device queue size: {mbuf_size}")
760
+
489
761
  def _init(self, train_dataset=None, valid_dataset=None, sink_size=-1, epoch=1):
490
762
  """
491
763
  Initialize compute graphs and data graphs with the sink mode.
@@ -507,6 +779,7 @@ class Model:
507
779
  if not train_dataset and not valid_dataset:
508
780
  raise ValueError("The argument 'train_dataset' and 'valid_dataset' can not both be None or empty.")
509
781
 
782
+ logger.info("Begin to check device number in model.build() procedure.")
510
783
  _device_number_check(self._parallel_mode, self._device_number)
511
784
 
512
785
  if train_dataset:
@@ -514,30 +787,34 @@ class Model:
514
787
  raise TypeError("The type of 'train_dataset' must be `Dataset`, "
515
788
  "but got {}.".format(type(train_dataset)))
516
789
 
790
+ logger.info("Begin to check parameter broadcast in model.build() procedure.")
517
791
  _parameter_broadcast_check(self._parallel_mode, self._parameter_broadcast)
518
792
  if self._parameter_broadcast:
519
793
  self._train_network.set_broadcast_flag()
520
794
 
795
+ logger.info("Begin to exec preprocess in model.build() procedure.")
521
796
  train_dataset.__no_send__ = True
522
797
  train_dataset_helper, train_network = self._exec_preprocess(is_train=True,
523
798
  dataset=train_dataset,
524
799
  dataset_sink_mode=True,
525
800
  sink_size=sink_size)
801
+ logger.info("Begin to warmup dataset in model.build() procedure.")
526
802
  self._warmup_dataset(epoch, train_dataset, sink_size)
527
- if train_dataset.get_init_step() > 0:
528
- mbuf_size = train_dataset.__transfer_dataset__.get_mbuf_queue_size()
529
- while mbuf_size == 0:
530
- time.sleep(10)
531
- mbuf_size = train_dataset.__transfer_dataset__.get_mbuf_queue_size()
532
- if mbuf_size != 0:
533
- break
534
- logger.warning(f"Failover mode, waiting for dataset recover to specify step, "
535
- f"current device queue size: {mbuf_size}")
803
+
804
+ # Since dataset pipeline has been triggered, delete flag
805
+ delattr(train_dataset, "__no_send__")
806
+
807
+ # Waiting for the dataset warmup ready
808
+ logger.info("Begin waiting for dataset warmup in model.build() procedure.")
809
+ self._waiting_for_dataset_warmup_ready(train_dataset)
810
+ logger.info("The dataset warmup was successful in model.build() procedure.")
536
811
 
537
812
  if context.get_auto_parallel_context("pipeline_stages") > 1 and valid_dataset:
538
813
  train_network.add_flags_recursive(is_first_iteration=True)
539
814
  for inputs in train_dataset_helper:
815
+ logger.info("Begin to compile train network in model.build() procedure.")
540
816
  train_network.compile(*inputs)
817
+ self._train_network.parameter_layout_dict = train_network.parameter_layout_dict
541
818
  break
542
819
 
543
820
  if valid_dataset:
@@ -555,6 +832,7 @@ class Model:
555
832
  if context.get_auto_parallel_context("pipeline_stages") > 1:
556
833
  eval_network.add_flags_recursive(is_first_iteration=False)
557
834
  for inputs in valid_dataset_helper:
835
+ logger.info("Begin to compile eval network in model.build() procedure.")
558
836
  eval_network.compile(*inputs)
559
837
  break
560
838
 
@@ -569,9 +847,10 @@
 
         return [callbacks]
 
+    @_handle_tft
     @_save_final_ckpt
     def _train(self, epoch, train_dataset, callbacks=None, dataset_sink_mode=True, sink_size=-1, initial_epoch=0,
-               valid_dataset=None, valid_frequency=1, valid_dataset_sink_mode=True):
+               valid_dataset=None, valid_frequency=1, valid_dataset_sink_mode=True, initial_step=0):
         """
         Training.
 
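The `@_handle_tft` decorator attached to `_train` here is the consumer of the recovery bookkeeping introduced earlier: when the TFT service reports a repaired global step, the wrapper splits it into an epoch offset and an in-epoch step before re-entering `_train`. A standalone sketch of that arithmetic (plain Python with illustrative numbers only, independent of the MindSpore runtime):

    # Illustrative values; in the wrapper they come from tft_get_repair_step()
    # and cb_params.batch_num.
    batch_num = 100
    repair_step = 342

    initial_epoch = repair_step // batch_num    # int(repair_step / batch_num) in the wrapper -> 3
    initial_step = repair_step % batch_num      # -> 42

    # In sink mode with one full dataset per sink (sink_size == -1), the callback
    # step counter is rebased to a global step, matching cb_initial_step in the wrapper:
    cb_initial_step = initial_epoch * batch_num + initial_step
    assert cb_initial_step == repair_step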
@@ -595,12 +874,14 @@ class Model:
595
874
  self._train_network.set_broadcast_flag()
596
875
 
597
876
  cb_params = _InternalCallbackParam()
877
+ cb_params.cur_step_num = initial_step
598
878
  cb_params.train_network = self._train_network
599
879
  cb_params.epoch_num = epoch - initial_epoch
600
880
  if dataset_sink_mode and sink_size > 0:
601
881
  cb_params.batch_num = sink_size
602
882
  else:
603
883
  cb_params.batch_num = train_dataset.get_dataset_size()
884
+ self.batch_num = cb_params.batch_num
604
885
  cb_params.mode = "train"
605
886
  cb_params.loss_fn = self._loss_fn
606
887
  cb_params.optimizer = self._optimizer
@@ -610,6 +891,10 @@
         cb_params.list_callback = self._transform_callbacks(callbacks)
         valid_infos = (valid_dataset, valid_frequency, valid_dataset_sink_mode)
         cb_params.list_callback.insert(0, _FrameworkProfilerCallback())
+        if os.environ.get("ENABLE_FLOPS_UTILIZATION_COLLECTOR") == "1" and \
+                FlopsUtilizationCollector not in cb_params.list_callback:
+            cb_params.list_callback.insert(0, FlopsUtilizationCollector(
+                cb_params.batch_num, full_flops=False))
         if context.get_context("mode") == context.PYNATIVE_MODE:
             cb_params.list_callback.insert(0, _StepSync())
         callbacks = cb_params.list_callback
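
The hunk above auto-registers a `FlopsUtilizationCollector(cb_params.batch_num, full_flops=False)` whenever the `ENABLE_FLOPS_UTILIZATION_COLLECTOR` environment variable is "1". A minimal opt-in sketch; the callback can of course also be passed explicitly in `callbacks`, and `model`/`dataset` are assumed to be built as in the docstring examples, so the training call is left commented.

    import os

    # Must be set before Model.train()/Model.eval() assembles its callback list.
    os.environ["ENABLE_FLOPS_UTILIZATION_COLLECTOR"] = "1"

    # model.train(2, dataset)   # FlopsUtilizationCollector is now inserted automatically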
@@ -625,11 +910,13 @@ class Model:
625
910
  with _CallbackManager(callbacks) as list_callback:
626
911
  self._check_reuse_dataset(train_dataset)
627
912
  if not dataset_sink_mode:
628
- self._train_process(epoch, train_dataset, list_callback, cb_params, initial_epoch, valid_infos)
913
+ self._train_process(epoch, train_dataset, list_callback, cb_params, initial_epoch,
914
+ valid_infos)
629
915
  elif context.get_context("device_target") == "CPU":
630
916
  logger.info("The CPU cannot support dataset sink mode currently."
631
917
  "So the training process will be performed with dataset not sink.")
632
- self._train_process(epoch, train_dataset, list_callback, cb_params, initial_epoch, valid_infos)
918
+ self._train_process(epoch, train_dataset, list_callback, cb_params, initial_epoch,
919
+ valid_infos)
633
920
  else:
634
921
  self._train_dataset_sink_process(epoch, train_dataset, list_callback,
635
922
  cb_params, sink_size, initial_epoch, valid_infos)
@@ -659,18 +946,17 @@ class Model:
659
946
  is_graph = (context.get_context("mode") == context.GRAPH_MODE)
660
947
  dataset_size = train_dataset.get_dataset_size()
661
948
  if dataset_size % sink_size != 0:
662
- logger.warning("In dataset_sink mode (dataset_size % sink_size) should equal to 0, "
663
- "it is suggested to pad/drop data or adjust sink_size. "
664
- "But got 'dataset_size': {}, 'sink_size': {}.".format(dataset_size, sink_size))
949
+ logger.info("In dataset_sink mode (dataset_size % sink_size) should equal to 0, "
950
+ "it is suggested to pad/drop data or adjust sink_size. "
951
+ "But got 'dataset_size': {}, 'sink_size': {}.".format(dataset_size, sink_size))
665
952
  if sink_size == -1:
666
953
  dataset_sink_num = epoch
667
954
  else:
668
955
  dataset_sink_num = math.ceil(epoch * sink_size / dataset_size)
669
956
  train_dataset.__total_batch__ = epoch * sink_size
670
957
 
671
- cb_params.cur_step_num = 0
958
+ cb_params.sink_size = sink_size
672
959
  cb_params.dataset_sink_mode = True
673
-
674
960
  run_context = RunContext(cb_params)
675
961
  list_callback.on_train_begin(run_context)
676
962
  # used to stop training for early stop, such as stopAtTIme or stopATStep
@@ -679,7 +965,6 @@ class Model:
679
965
  dataset_helper = train_dataset._dataset_helper
680
966
 
681
967
  self.epoch_iter = 0
682
-
683
968
  self._check_enable_recovery()
684
969
  # Used to check whether need perform recovery for process which is restarted.
685
970
  self._check_need_load_ckpt(cb_params, dataset_size, sink_size)
@@ -713,6 +998,7 @@ class Model:
713
998
  else:
714
999
  cb_params.cur_step_num += 1
715
1000
  self._current_step_num = int((cb_params.cur_step_num - 1) % cb_params.batch_num + 1)
1001
+ self._store_training_step_info(cb_params)
716
1002
  cb_params.train_dataset_element = inputs
717
1003
  list_callback.on_train_step_begin(run_context)
718
1004
  train_network = self._check_network_mode(train_network, True)
@@ -814,7 +1100,6 @@ class Model:
814
1100
  dataset_size (int): The number of batches in a dataset.
815
1101
  sink_size (int): Control the amount of data in each sink. Default: -1.
816
1102
  """
817
-
818
1103
  if not self.enable_recovery:
819
1104
  self.need_load_ckpt = False
820
1105
 
@@ -901,7 +1186,6 @@ class Model:
901
1186
  dataset=train_dataset,
902
1187
  dataset_sink_mode=False,
903
1188
  epoch_num=epoch)
904
- cb_params.cur_step_num = 0
905
1189
  cb_params.dataset_sink_mode = False
906
1190
  run_context = RunContext(cb_params)
907
1191
  list_callback.on_train_begin(run_context)
@@ -923,7 +1207,6 @@ class Model:
923
1207
  "returned by 'train_dataset'".format(len_element))
924
1208
  cb_params.cur_step_num += 1
925
1209
  self._current_step_num = int((cb_params.cur_step_num - 1) % cb_params.batch_num + 1)
926
-
927
1210
  cb_params.train_dataset_element = next_element
928
1211
  list_callback.on_train_step_begin(run_context)
929
1212
  self._check_network_mode(self._train_network, True)
@@ -1020,10 +1303,10 @@ class Model:
1020
1303
  >>> from mindspore.train import Model
1021
1304
  >>>
1022
1305
  >>> # Create the dataset taking MNIST as an example. Refer to
1023
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/mnist.py
1306
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/mnist.py
1024
1307
  >>> dataset = create_dataset()
1025
1308
  >>> # Define the network structure of LeNet5. Refer to
1026
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
1309
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
1027
1310
  >>> net = LeNet5()
1028
1311
  >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
1029
1312
  >>> loss_scale_manager = ms.FixedLossScaleManager(1024., False)
@@ -1032,9 +1315,10 @@ class Model:
1032
1315
  ... loss_scale_manager=loss_scale_manager)
1033
1316
  >>> model.train(2, dataset)
1034
1317
  """
1318
+ _check_tft()
1319
+ device_target = context.get_context("device_target")
1035
1320
  # prepare dataset for obfuscated model
1036
1321
  train_dataset = self._prepare_obf_dataset(train_dataset)
1037
- device_target = context.get_context("device_target")
1038
1322
  if _is_ps_mode() and not _cache_enable() and (device_target in ["Ascend", "CPU"]) and dataset_sink_mode:
1039
1323
  logger.info("For PS mode, reset datasink mode to False when using Ascend or CPU backend.")
1040
1324
  dataset_sink_mode = False
@@ -1074,9 +1358,9 @@ class Model:
1074
1358
 
1075
1359
  _device_number_check(self._parallel_mode, self._device_number)
1076
1360
 
1361
+ callbacks = _append_ccae(callbacks)
1077
1362
  if callbacks:
1078
1363
  self._check_methods_for_custom_callbacks(callbacks, "train")
1079
-
1080
1364
  self._train(epoch,
1081
1365
  train_dataset,
1082
1366
  callbacks=callbacks,
@@ -1110,7 +1394,7 @@ class Model:
1110
1394
  callbacks = [callbacks]
1111
1395
  for cb in callbacks:
1112
1396
  cb_name = cb.__class__.__name__
1113
- if cb_name not in internal_cb_names:
1397
+ if cb_name not in internal_cb_names:
1114
1398
  cb_methods_names = set(cb.__class__.__dict__.keys())
1115
1399
  invalid_methods_names = cb_methods_names & old_version_methods_names
1116
1400
  if invalid_methods_names:
@@ -1173,11 +1457,11 @@ class Model:
1173
1457
  >>> from mindspore.train import Model
1174
1458
  >>>
1175
1459
  >>> # Create the dataset taking MNIST as an example. Refer to
1176
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/mnist.py
1460
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/mnist.py
1177
1461
  >>> train_dataset = create_dataset("train")
1178
1462
  >>> valid_dataset = create_dataset("test")
1179
1463
  >>> # Define the network structure of LeNet5. Refer to
1180
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
1464
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
1181
1465
  >>> net = LeNet5()
1182
1466
  >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
1183
1467
  >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
@@ -1186,7 +1470,7 @@ class Model:
1186
1470
 
1187
1471
  Tutorial Examples:
1188
1472
  - `Advanced Encapsulation: Model - Train and Save Model
1189
- <https://www.mindspore.cn/tutorials/en/r2.2/advanced/model.html#training-and-saving-model>`_
1473
+ <https://www.mindspore.cn/docs/en/master/model_train/train_process/model.html#training-and-saving-model>`_
1190
1474
  """
1191
1475
  device_target = context.get_context("device_target")
1192
1476
  if _is_ps_mode() and not _cache_enable() and (device_target in ["Ascend", "CPU"]) and dataset_sink_mode:
@@ -1266,10 +1550,10 @@ class Model:
1266
1550
  >>> from mindspore.amp import FixedLossScaleManager
1267
1551
  >>>
1268
1552
  >>> # Create the dataset taking MNIST as an example. Refer to
1269
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/mnist.py
1553
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/mnist.py
1270
1554
  >>> dataset = create_dataset()
1271
1555
  >>> # Define the network structure of LeNet5. Refer to
1272
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
1556
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
1273
1557
  >>> net = LeNet5()
1274
1558
  >>> loss = nn.SoftmaxCrossEntropyWithLogits()
1275
1559
  >>> loss_scale_manager = FixedLossScaleManager()
@@ -1283,7 +1567,9 @@ class Model:
1283
1567
  if hasattr(self._train_network, '_is_check_and_refresh') and not self._train_network._is_check_and_refresh:
1284
1568
  self._train_network.check_names_and_refresh_name()
1285
1569
  self._train_network._is_check_and_refresh = True
1570
+ logger.info("Begin to init dataset in model.build() procedure.")
1286
1571
  self._init(train_dataset, valid_dataset, sink_size, epoch)
1572
+ logger.info("The model.build() which contains dataset warmup and network compile is success.")
1287
1573
 
1288
1574
  def _eval_in_fit(self, valid_dataset, callbacks=None, dataset_sink_mode=True, cb_params=None):
1289
1575
  """
@@ -1442,10 +1728,10 @@ class Model:
1442
1728
  >>> from mindspore.train import Model
1443
1729
  >>>
1444
1730
  >>> # Create the dataset taking MNIST as an example. Refer to
1445
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/mnist.py
1731
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/mnist.py
1446
1732
  >>> dataset = create_dataset()
1447
1733
  >>> # Define the network structure of LeNet5. Refer to
1448
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
1734
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
1449
1735
  >>> net = LeNet5()
1450
1736
  >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
1451
1737
  >>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'})
@@ -1453,7 +1739,7 @@ class Model:
1453
1739
 
1454
1740
  Tutorial Examples:
1455
1741
  - `Advanced Encapsulation: Model - Train and Save Model
1456
- <https://www.mindspore.cn/tutorials/en/r2.2/advanced/model.html#training-and-saving-model>`_
1742
+ <https://www.mindspore.cn/docs/en/master/model_train/train_process/model.html#training-and-saving-model>`_
1457
1743
  """
1458
1744
  valid_dataset = self._prepare_obf_dataset(valid_dataset)
1459
1745
  dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
@@ -1473,6 +1759,10 @@ class Model:
1473
1759
  cb_params.mode = "eval"
1474
1760
  cb_params.cur_step_num = 0
1475
1761
  cb_params.list_callback = self._transform_callbacks(callbacks)
1762
+ if os.environ.get("ENABLE_FLOPS_UTILIZATION_COLLECTOR") == "1" and \
1763
+ FlopsUtilizationCollector not in cb_params.list_callback:
1764
+ cb_params.list_callback.insert(0, FlopsUtilizationCollector(
1765
+ cb_params.batch_num, full_flops=False))
1476
1766
  cb_params.network = self._network
1477
1767
 
1478
1768
  self._clear_metrics()
@@ -1686,7 +1976,7 @@ class Model:
1686
1976
  "execution_plan" : {"op_name3" : "data_type:float16", "op_name4" : "data_type:float32"}}
1687
1977
 
1688
1978
  Note that both the "configPath" is configured in the config_dict and the config_item,
1689
- in this case, the path_b in the config_dict takes precedence.
1979
+ in this case, the path_b in the config_dict takes precedence.
1690
1980
 
1691
1981
  Returns:
1692
1982
  Tensor, array(s) of predictions.
@@ -1699,7 +1989,7 @@ class Model:
1699
1989
  >>>
1700
1990
  >>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), mindspore.float32)
1701
1991
  >>> # Define the network structure of LeNet5. Refer to
1702
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
1992
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
1703
1993
  >>> model = Model(LeNet5())
1704
1994
  >>> result = model.predict(input_data)
1705
1995
  """
@@ -1719,8 +2009,25 @@
                 self._lite_infer = False
                 logger.warning(f"Lite inference failed, {e.__str__()}, fallback to original inference!")
 
+        def _check_input_data():
+            """Input data check."""
+            for item in predict_data:
+                if item is None:
+                    continue
+                if isinstance(item, Tensor):
+                    if item.size == 0:
+                        msg = "The input data can not be empty."
+                        logger.critical(msg)
+                        raise ValueError(msg)
+                    continue
+                if not isinstance(item, (int, float, str)):
+                    data_class_str = "Tensor, None, int, float, str"
+                    raise TypeError(f'The types of input data must be in the Union({data_class_str}, ' \
+                                    f'tuple[{data_class_str}], list[{data_class_str}], dict[{data_class_str}]), ' \
+                                    f'but got type {item if item is None else type(item).__name__}.')
+
         self._check_network_mode(self._predict_network, False)
-        check_input_data(*predict_data, data_class=(int, float, str, None, Tensor))
+        _check_input_data()
         _parallel_predict_check()
         result = self._predict_network(*predict_data)
 
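With the inline `_check_input_data` above, `predict` now rejects empty tensors explicitly instead of funnelling everything through the generic `check_input_data` helper. A small illustration that mirrors the docstring example; `model` is assumed to be an already-built `Model`, so the calls are left commented.

    import numpy as np
    import mindspore
    from mindspore import Tensor

    valid = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), mindspore.float32)
    empty = Tensor(np.zeros((0, 1, 32, 32)), mindspore.float32)   # item.size == 0

    # result = model.predict(valid)   # unchanged behaviour
    # model.predict(empty)            # now raises ValueError("The input data can not be empty.")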
@@ -1807,10 +2114,10 @@ class Model:
1807
2114
  >>> ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.SEMI_AUTO_PARALLEL)
1808
2115
  >>>
1809
2116
  >>> # Create the dataset taking MNIST as an example. Refer to
1810
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/mnist.py
2117
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/mnist.py
1811
2118
  >>> dataset = create_dataset()
1812
2119
  >>> # Define the network structure of LeNet5. Refer to
1813
- >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
2120
+ >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
1814
2121
  >>> net = LeNet5()
1815
2122
  >>> loss = nn.SoftmaxCrossEntropyWithLogits()
1816
2123
  >>> loss_scale_manager = ms.FixedLossScaleManager()
@@ -1832,7 +2139,6 @@ class Model:
1832
2139
  train_dataset.__model_hash__ = hash(self)
1833
2140
  return train_network.parameter_layout_dict
1834
2141
 
1835
-
1836
2142
  def infer_predict_layout(self, *predict_data, skip_backend_compile=False):
1837
2143
  """
1838
2144
  Generate parameter layout for the predict network in 'AUTO_PARALLEL' or 'SEMI_AUTO_PARALLEL' mode.