mindspore 2.1.0__cp39-none-any.whl → 2.2.10__cp39-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (569)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -1
  3. mindspore/_akg/akg/build_module.py +5 -6
  4. mindspore/_akg/akg/composite/build_module.py +46 -19
  5. mindspore/_akg/akg/composite/split_stitch.py +10 -11
  6. mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
  7. mindspore/_akg/akg/tvm/api.py +4 -3
  8. mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
  9. mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
  10. mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
  11. mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
  12. mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
  13. mindspore/_akg/akg/tvm/build_module.py +16 -1
  14. mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
  15. mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
  16. mindspore/_akg/akg/tvm/ir_builder.py +1 -1
  17. mindspore/_akg/akg/tvm/module.py +1 -2
  18. mindspore/_akg/akg/tvm/stmt.py +2 -2
  19. mindspore/_akg/akg/utils/ascend_profilier/__init__.py +0 -0
  20. mindspore/_akg/akg/utils/ascend_profilier/cann_file_parser.py +76 -0
  21. mindspore/_akg/akg/utils/ascend_profilier/file_manager.py +56 -0
  22. mindspore/_akg/akg/utils/ascend_profilier/op_summary_bean.py +23 -0
  23. mindspore/_akg/akg/utils/ascend_profilier/op_summary_headers.py +8 -0
  24. mindspore/_akg/akg/utils/ascend_profilier/op_summary_parser.py +42 -0
  25. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +65 -0
  26. mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
  27. mindspore/_akg/akg/utils/kernel_exec.py +98 -274
  28. mindspore/_akg/akg/utils/result_analysis.py +4 -24
  29. mindspore/_akg/akg/utils/tbe_codegen_utils.py +219 -0
  30. mindspore/_akg/akg/utils/util.py +38 -0
  31. mindspore/_c_dataengine.cpython-39-aarch64-linux-gnu.so +0 -0
  32. mindspore/_c_expression.cpython-39-aarch64-linux-gnu.so +0 -0
  33. mindspore/_c_mindrecord.cpython-39-aarch64-linux-gnu.so +0 -0
  34. mindspore/_check_jit_forbidden_api.py +3 -1
  35. mindspore/_checkparam.py +23 -29
  36. mindspore/_extends/graph_kernel/__init__.py +0 -1
  37. mindspore/_extends/graph_kernel/model/graph_split.py +84 -76
  38. mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
  39. mindspore/_extends/graph_kernel/splitter.py +4 -11
  40. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +122 -15
  41. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +84 -67
  42. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
  43. mindspore/_extends/parallel_compile/akg_compiler/util.py +10 -7
  44. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +2 -2
  45. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +6 -5
  46. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
  47. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
  48. mindspore/_extends/parse/__init__.py +12 -15
  49. mindspore/_extends/parse/namespace.py +7 -33
  50. mindspore/_extends/parse/parser.py +61 -71
  51. mindspore/_extends/parse/resources.py +1 -1
  52. mindspore/_extends/parse/standard_method.py +74 -104
  53. mindspore/_extends/parse/trope.py +1 -1
  54. mindspore/_extends/remote/kernel_build_server.py +25 -7
  55. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  56. mindspore/_install_custom.py +43 -0
  57. mindspore/_mindspore_offline_debug.cpython-39-aarch64-linux-gnu.so +0 -0
  58. mindspore/amp.py +47 -11
  59. mindspore/bin/cache_admin +0 -0
  60. mindspore/bin/cache_server +0 -0
  61. mindspore/boost/boost.py +1 -8
  62. mindspore/boost/boost_cell_wrapper.py +3 -2
  63. mindspore/boost/grad_accumulation.py +1 -1
  64. mindspore/boost/group_loss_scale_manager.py +8 -7
  65. mindspore/common/__init__.py +5 -3
  66. mindspore/common/_jit_fallback_utils.py +6 -0
  67. mindspore/common/_register_for_adapter.py +2 -0
  68. mindspore/common/_register_for_tensor.py +2 -2
  69. mindspore/common/_stub_tensor.py +13 -0
  70. mindspore/common/_utils.py +13 -0
  71. mindspore/common/api.py +174 -259
  72. mindspore/common/auto_dynamic_shape.py +494 -0
  73. mindspore/common/dtype.py +18 -11
  74. mindspore/common/dump.py +6 -4
  75. mindspore/common/initializer.py +14 -14
  76. mindspore/common/jit_config.py +33 -15
  77. mindspore/common/lazy_inline.py +126 -7
  78. mindspore/common/mindir_util.py +101 -0
  79. mindspore/common/parameter.py +51 -41
  80. mindspore/common/seed.py +4 -4
  81. mindspore/common/sparse_tensor.py +13 -14
  82. mindspore/common/tensor.py +243 -165
  83. mindspore/communication/__init__.py +7 -4
  84. mindspore/communication/_comm_helper.py +83 -4
  85. mindspore/communication/management.py +152 -84
  86. mindspore/config/op_info.config +14 -3
  87. mindspore/config/super_bar_config.json +4 -2
  88. mindspore/context.py +152 -61
  89. mindspore/dataset/__init__.py +5 -5
  90. mindspore/dataset/audio/__init__.py +2 -2
  91. mindspore/dataset/audio/transforms.py +52 -52
  92. mindspore/dataset/callback/ds_callback.py +16 -2
  93. mindspore/dataset/core/config.py +68 -51
  94. mindspore/dataset/engine/cache_client.py +28 -5
  95. mindspore/dataset/engine/datasets.py +250 -112
  96. mindspore/dataset/engine/datasets_audio.py +43 -211
  97. mindspore/dataset/engine/datasets_standard_format.py +16 -35
  98. mindspore/dataset/engine/datasets_text.py +43 -67
  99. mindspore/dataset/engine/datasets_user_defined.py +86 -100
  100. mindspore/dataset/engine/datasets_vision.py +219 -1029
  101. mindspore/dataset/engine/iterators.py +11 -4
  102. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +4 -0
  103. mindspore/dataset/engine/obs/util.py +3 -0
  104. mindspore/dataset/engine/samplers.py +1 -1
  105. mindspore/dataset/engine/validators.py +19 -5
  106. mindspore/dataset/text/__init__.py +3 -3
  107. mindspore/dataset/text/transforms.py +101 -127
  108. mindspore/dataset/text/utils.py +205 -138
  109. mindspore/dataset/transforms/__init__.py +1 -1
  110. mindspore/dataset/transforms/py_transforms_util.py +40 -12
  111. mindspore/dataset/transforms/transforms.py +95 -40
  112. mindspore/dataset/utils/browse_dataset.py +8 -2
  113. mindspore/dataset/utils/line_reader.py +17 -19
  114. mindspore/dataset/vision/__init__.py +3 -3
  115. mindspore/dataset/vision/c_transforms.py +6 -3
  116. mindspore/dataset/vision/transforms.py +409 -287
  117. mindspore/dataset/vision/utils.py +13 -14
  118. mindspore/dataset/vision/validators.py +11 -1
  119. mindspore/experimental/map_parameter.py +14 -0
  120. mindspore/{nn/optim_ex → experimental/optim}/__init__.py +30 -29
  121. mindspore/{nn/optim_ex → experimental/optim}/adam.py +60 -67
  122. mindspore/{nn/optim_ex → experimental/optim}/adamw.py +181 -203
  123. mindspore/experimental/optim/lr_scheduler.py +1427 -0
  124. mindspore/{nn/optim_ex → experimental/optim}/optimizer.py +252 -259
  125. mindspore/{nn/optim_ex → experimental/optim}/sgd.py +147 -152
  126. mindspore/gen_ops.py +273 -0
  127. mindspore/include/OWNERS +0 -1
  128. mindspore/include/api/data_type.h +2 -1
  129. mindspore/include/api/graph.h +0 -15
  130. mindspore/include/api/kernel.h +2 -0
  131. mindspore/include/api/kernel_api.h +37 -12
  132. mindspore/include/api/model.h +17 -14
  133. mindspore/include/api/status.h +8 -3
  134. mindspore/include/api/types.h +37 -4
  135. mindspore/include/c_api/ms/abstract.h +67 -0
  136. mindspore/include/c_api/ms/attribute.h +197 -0
  137. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  138. mindspore/include/c_api/ms/base/macros.h +32 -0
  139. mindspore/include/c_api/ms/base/status.h +33 -0
  140. mindspore/include/c_api/ms/base/types.h +282 -0
  141. mindspore/include/c_api/ms/context.h +102 -0
  142. mindspore/include/c_api/ms/graph.h +160 -0
  143. mindspore/include/c_api/ms/node.h +606 -0
  144. mindspore/include/c_api/ms/tensor.h +161 -0
  145. mindspore/include/c_api/ms/value.h +84 -0
  146. mindspore/include/dataset/constants.h +6 -5
  147. mindspore/include/dataset/execute.h +23 -13
  148. mindspore/include/dataset/text.h +26 -26
  149. mindspore/include/dataset/transforms.h +13 -13
  150. mindspore/include/dataset/vision.h +60 -60
  151. mindspore/include/dataset/vision_ascend.h +5 -6
  152. mindspore/include/dataset/vision_lite.h +17 -17
  153. mindspore/include/mindapi/base/type_id.h +1 -0
  154. mindspore/include/mindapi/base/types.h +1 -0
  155. mindspore/lib/libdnnl.so.2 +0 -0
  156. mindspore/lib/libjemalloc.so.2 +0 -0
  157. mindspore/lib/libmindspore.so +0 -0
  158. mindspore/lib/libmindspore_backend.so +0 -0
  159. mindspore/lib/libmindspore_common.so +0 -0
  160. mindspore/lib/libmindspore_core.so +0 -0
  161. mindspore/lib/libmindspore_glog.so.0 +0 -0
  162. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  163. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  164. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  165. mindspore/lib/libmindspore_shared_lib.so +0 -0
  166. mindspore/lib/libnnacl.so +0 -0
  167. mindspore/lib/libopencv_core.so.4.5 +0 -0
  168. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  169. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  170. mindspore/lib/libps_cache.so +0 -0
  171. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310/aic-ascend310-ops-info.json +123 -0
  172. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +123 -0
  173. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +158 -0
  174. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +37 -0
  175. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/add_dsl.py +46 -0
  176. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/add_tik.py +51 -0
  177. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +241 -0
  178. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/matmul_tik.py +212 -0
  179. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/add_dsl.py +46 -0
  180. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/add_tik.py +51 -0
  181. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +241 -0
  182. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/matmul_tik.py +212 -0
  183. mindspore/lib/plugin/ascend/custom_aicore_ops/op_proto/libop_proto.so +0 -0
  184. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  185. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  186. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +8928 -0
  187. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  188. mindspore/lib/plugin/ascend/libakg.so +0 -0
  189. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  190. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  191. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  192. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  193. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  194. mindspore/lib/plugin/cpu/libakg.so +0 -0
  195. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  196. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  197. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  198. mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
  199. mindspore/nn/__init__.py +0 -2
  200. mindspore/nn/cell.py +313 -74
  201. mindspore/nn/dynamic_lr.py +21 -21
  202. mindspore/nn/layer/activation.py +22 -30
  203. mindspore/nn/layer/basic.py +15 -13
  204. mindspore/nn/layer/channel_shuffle.py +1 -1
  205. mindspore/nn/layer/container.py +271 -9
  206. mindspore/nn/layer/conv.py +323 -204
  207. mindspore/nn/layer/dense.py +8 -5
  208. mindspore/nn/layer/embedding.py +33 -27
  209. mindspore/nn/layer/flash_attention.py +141 -88
  210. mindspore/nn/layer/image.py +8 -6
  211. mindspore/nn/layer/math.py +16 -25
  212. mindspore/nn/layer/normalization.py +107 -66
  213. mindspore/nn/layer/padding.py +1 -1
  214. mindspore/nn/layer/pooling.py +131 -109
  215. mindspore/nn/layer/rnn_cells.py +27 -22
  216. mindspore/nn/layer/rnns.py +13 -16
  217. mindspore/nn/layer/thor_layer.py +1 -1
  218. mindspore/nn/layer/transformer.py +221 -154
  219. mindspore/nn/learning_rate_schedule.py +9 -1
  220. mindspore/nn/loss/loss.py +235 -174
  221. mindspore/nn/optim/ada_grad.py +2 -1
  222. mindspore/nn/optim/adadelta.py +1 -0
  223. mindspore/nn/optim/adafactor.py +2 -1
  224. mindspore/nn/optim/adam.py +7 -4
  225. mindspore/nn/optim/adamax.py +3 -2
  226. mindspore/nn/optim/adasum.py +2 -2
  227. mindspore/nn/optim/asgd.py +2 -3
  228. mindspore/nn/optim/ftrl.py +6 -5
  229. mindspore/nn/optim/lamb.py +7 -4
  230. mindspore/nn/optim/lars.py +1 -1
  231. mindspore/nn/optim/lazyadam.py +5 -3
  232. mindspore/nn/optim/momentum.py +2 -1
  233. mindspore/nn/optim/optimizer.py +53 -4
  234. mindspore/nn/optim/proximal_ada_grad.py +3 -4
  235. mindspore/nn/optim/rmsprop.py +4 -3
  236. mindspore/nn/optim/rprop.py +23 -12
  237. mindspore/nn/optim/sgd.py +26 -11
  238. mindspore/nn/optim/thor.py +9 -7
  239. mindspore/nn/probability/bijector/bijector.py +5 -5
  240. mindspore/nn/probability/bijector/power_transform.py +27 -27
  241. mindspore/nn/probability/bijector/softplus.py +3 -3
  242. mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -3
  243. mindspore/nn/probability/distribution/bernoulli.py +5 -5
  244. mindspore/nn/probability/distribution/beta.py +3 -3
  245. mindspore/nn/probability/distribution/categorical.py +7 -7
  246. mindspore/nn/probability/distribution/cauchy.py +0 -1
  247. mindspore/nn/probability/distribution/distribution.py +3 -3
  248. mindspore/nn/probability/distribution/gamma.py +3 -3
  249. mindspore/nn/probability/distribution/geometric.py +4 -4
  250. mindspore/nn/probability/distribution/gumbel.py +4 -4
  251. mindspore/nn/probability/distribution/log_normal.py +2 -2
  252. mindspore/nn/probability/distribution/logistic.py +2 -2
  253. mindspore/nn/probability/distribution/poisson.py +4 -4
  254. mindspore/nn/probability/distribution/transformed_distribution.py +3 -3
  255. mindspore/nn/probability/distribution/uniform.py +6 -6
  256. mindspore/nn/wrap/cell_wrapper.py +84 -34
  257. mindspore/nn/wrap/grad_reducer.py +8 -5
  258. mindspore/nn/wrap/loss_scale.py +105 -42
  259. mindspore/numpy/array_creations.py +1 -2
  260. mindspore/numpy/array_ops.py +3 -2
  261. mindspore/numpy/utils_const.py +5 -5
  262. mindspore/offline_debug/convert_async.py +2 -2
  263. mindspore/ops/_grad_experimental/__init__.py +0 -5
  264. mindspore/ops/_grad_experimental/grad_array_ops.py +2 -3
  265. mindspore/ops/_grad_experimental/grad_comm_ops.py +15 -2
  266. mindspore/ops/_grad_experimental/grad_debug_ops.py +0 -37
  267. mindspore/ops/_grad_experimental/grad_implementations.py +11 -1
  268. mindspore/ops/_grad_experimental/grad_inner_ops.py +2 -216
  269. mindspore/ops/_grad_experimental/grad_math_ops.py +19 -199
  270. mindspore/ops/_grad_experimental/grad_sparse.py +15 -0
  271. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  272. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
  273. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +165 -109
  274. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +144 -86
  275. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +172 -187
  276. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +51 -57
  277. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +6 -17
  278. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +1 -1
  279. mindspore/ops/_op_impl/aicpu/__init__.py +14 -2
  280. mindspore/ops/_op_impl/aicpu/add.py +3 -3
  281. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
  282. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  283. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  284. mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
  285. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
  286. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
  287. mindspore/ops/_op_impl/aicpu/multinomial.py +3 -3
  288. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
  289. mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
  290. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
  291. mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
  292. mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
  293. mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
  294. mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
  295. mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -5
  296. mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -5
  297. mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
  298. mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
  299. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
  300. mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
  301. mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
  302. mindspore/ops/_op_impl/tbe/__init__.py +4 -4
  303. mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
  304. mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
  305. mindspore/ops/_primitive_cache.py +1 -1
  306. mindspore/ops/_tracefunc.py +45 -13
  307. mindspore/ops/_utils/utils.py +6 -1
  308. mindspore/ops/_vmap/vmap_array_ops.py +3 -3
  309. mindspore/ops/_vmap/vmap_base.py +3 -3
  310. mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
  311. mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
  312. mindspore/ops/_vmap/vmap_math_ops.py +5 -2
  313. mindspore/ops/_vmap/vmap_nn_ops.py +61 -7
  314. mindspore/ops/arg_dtype_cast.py +54 -0
  315. mindspore/ops/composite/base.py +37 -10
  316. mindspore/ops/composite/math_ops.py +5 -4
  317. mindspore/ops/composite/multitype_ops/_compile_utils.py +275 -73
  318. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +16 -9
  319. mindspore/ops/composite/multitype_ops/add_impl.py +43 -4
  320. mindspore/ops/composite/multitype_ops/getitem_impl.py +42 -4
  321. mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
  322. mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
  323. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
  324. mindspore/ops/deprecated.py +304 -0
  325. mindspore/ops/function/__init__.py +4 -1
  326. mindspore/ops/function/array_func.py +174 -193
  327. mindspore/ops/function/clip_func.py +81 -13
  328. mindspore/ops/function/debug_func.py +1 -1
  329. mindspore/ops/function/grad/grad_func.py +18 -9
  330. mindspore/ops/function/image_func.py +10 -4
  331. mindspore/ops/function/linalg_func.py +5 -5
  332. mindspore/ops/function/math_func.py +575 -386
  333. mindspore/ops/function/nn_func.py +568 -260
  334. mindspore/ops/function/random_func.py +88 -57
  335. mindspore/ops/function/sparse_func.py +1 -1
  336. mindspore/ops/function/sparse_unary_func.py +14 -12
  337. mindspore/ops/function/vmap_func.py +6 -5
  338. mindspore/ops/functional.py +15 -10
  339. mindspore/ops/op_info_register.py +244 -25
  340. mindspore/ops/operations/__init__.py +28 -19
  341. mindspore/ops/operations/_grad_ops.py +72 -7
  342. mindspore/ops/operations/_inner_ops.py +350 -17
  343. mindspore/ops/operations/_quant_ops.py +4 -8
  344. mindspore/ops/operations/_sequence_ops.py +42 -0
  345. mindspore/ops/operations/array_ops.py +68 -282
  346. mindspore/ops/operations/comm_ops.py +107 -59
  347. mindspore/ops/operations/custom_ops.py +94 -70
  348. mindspore/ops/operations/debug_ops.py +8 -4
  349. mindspore/ops/operations/image_ops.py +18 -12
  350. mindspore/ops/operations/inner_ops.py +26 -3
  351. mindspore/ops/operations/math_ops.py +189 -141
  352. mindspore/ops/operations/nn_ops.py +794 -489
  353. mindspore/ops/operations/other_ops.py +0 -22
  354. mindspore/ops/operations/random_ops.py +53 -111
  355. mindspore/ops/operations/sparse_ops.py +3 -1
  356. mindspore/ops/primitive.py +24 -18
  357. mindspore/parallel/_auto_parallel_context.py +68 -8
  358. mindspore/parallel/_cost_model_context.py +2 -2
  359. mindspore/parallel/_offload_context.py +17 -3
  360. mindspore/parallel/_parallel_serialization.py +12 -5
  361. mindspore/parallel/_ps_context.py +12 -0
  362. mindspore/parallel/_tensor.py +18 -13
  363. mindspore/parallel/_transformer/layers.py +5 -3
  364. mindspore/parallel/_transformer/loss.py +1 -0
  365. mindspore/parallel/_transformer/moe.py +2 -2
  366. mindspore/parallel/_transformer/op_parallel_config.py +12 -1
  367. mindspore/parallel/_transformer/transformer.py +23 -3
  368. mindspore/parallel/_utils.py +11 -7
  369. mindspore/parallel/algo_parameter_config.py +85 -5
  370. mindspore/parallel/checkpoint_transform.py +19 -12
  371. mindspore/parallel/shard.py +21 -14
  372. mindspore/profiler/common/struct_type.py +3 -3
  373. mindspore/profiler/common/util.py +4 -2
  374. mindspore/profiler/envprofiling.py +1 -1
  375. mindspore/profiler/parser/aicpu_data_parser.py +5 -3
  376. mindspore/profiler/parser/ascend_flops_generator.py +2 -2
  377. mindspore/profiler/parser/ascend_fpbp_generator.py +1 -1
  378. mindspore/profiler/parser/ascend_hccl_generator.py +249 -12
  379. mindspore/profiler/parser/ascend_msprof_exporter.py +150 -255
  380. mindspore/profiler/parser/ascend_msprof_generator.py +204 -17
  381. mindspore/profiler/parser/ascend_op_generator.py +6 -6
  382. mindspore/profiler/parser/ascend_steptrace_generator.py +6 -4
  383. mindspore/profiler/parser/ascend_timeline_generator.py +14 -187
  384. mindspore/profiler/parser/base_timeline_generator.py +10 -8
  385. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +16 -12
  386. mindspore/profiler/parser/flops_parser.py +15 -11
  387. mindspore/profiler/parser/framework_parser.py +38 -22
  388. mindspore/profiler/parser/hccl_parser.py +16 -12
  389. mindspore/profiler/parser/integrator.py +22 -11
  390. mindspore/profiler/parser/memory_usage_parser.py +2 -2
  391. mindspore/profiler/parser/minddata_analyzer.py +12 -14
  392. mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
  393. mindspore/profiler/parser/msadvisor_parser.py +8 -4
  394. mindspore/profiler/parser/op_intermediate_parser.py +5 -2
  395. mindspore/profiler/parser/optime_parser.py +1 -1
  396. mindspore/profiler/parser/profiler_info.py +21 -2
  397. mindspore/profiler/parser/step_trace_parser.py +11 -14
  398. mindspore/profiler/profiling.py +179 -89
  399. mindspore/rewrite/api/node.py +102 -19
  400. mindspore/rewrite/api/node_type.py +5 -1
  401. mindspore/rewrite/api/pattern_engine.py +1 -1
  402. mindspore/rewrite/api/scoped_value.py +9 -17
  403. mindspore/rewrite/api/symbol_tree.py +131 -47
  404. mindspore/rewrite/ast_helpers/__init__.py +2 -1
  405. mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
  406. mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
  407. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +93 -46
  408. mindspore/rewrite/common/rewrite_elog.py +5 -1
  409. mindspore/rewrite/namer.py +33 -24
  410. mindspore/rewrite/namespace.py +14 -5
  411. mindspore/{_extends/graph_kernel/expanders/complex → rewrite/node}/__init__.py +9 -9
  412. mindspore/rewrite/node/call_function.py +79 -0
  413. mindspore/rewrite/node/cell_container.py +135 -0
  414. mindspore/rewrite/node/control_flow.py +88 -0
  415. mindspore/rewrite/{node.py → node/node.py} +273 -234
  416. mindspore/rewrite/node/node_manager.py +254 -0
  417. mindspore/rewrite/{topological_manager.py → node/node_topological_manager.py} +13 -46
  418. mindspore/rewrite/parsers/arguments_parser.py +22 -21
  419. mindspore/rewrite/parsers/assign_parser.py +216 -221
  420. mindspore/rewrite/parsers/attribute_parser.py +9 -7
  421. mindspore/rewrite/parsers/class_def_parser.py +174 -113
  422. mindspore/rewrite/parsers/constant_parser.py +9 -6
  423. mindspore/rewrite/parsers/container_parser.py +9 -7
  424. mindspore/rewrite/parsers/for_parser.py +36 -15
  425. mindspore/rewrite/parsers/function_def_parser.py +24 -16
  426. mindspore/rewrite/parsers/if_parser.py +28 -24
  427. mindspore/rewrite/parsers/module_parser.py +196 -25
  428. mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
  429. mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
  430. mindspore/rewrite/parsers/return_parser.py +6 -6
  431. mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
  432. mindspore/rewrite/sparsify/utils.py +1 -1
  433. mindspore/rewrite/symbol_tree.py +523 -578
  434. mindspore/rewrite/symbol_tree_builder.py +9 -193
  435. mindspore/rewrite/symbol_tree_dumper.py +2 -2
  436. mindspore/run_check/_check_version.py +6 -4
  437. mindspore/{ops/bprop_mindir → safeguard}/__init__.py +4 -3
  438. mindspore/safeguard/rewrite_obfuscation.py +541 -0
  439. mindspore/scipy/linalg.py +1 -1
  440. mindspore/scipy/optimize/minimize.py +7 -3
  441. mindspore/train/_utils.py +7 -3
  442. mindspore/train/amp.py +323 -123
  443. mindspore/train/anf_ir_pb2.py +14 -2
  444. mindspore/train/callback/_backup_and_restore.py +2 -12
  445. mindspore/train/callback/_callback.py +29 -4
  446. mindspore/train/callback/_checkpoint.py +23 -8
  447. mindspore/train/callback/_early_stop.py +2 -2
  448. mindspore/train/callback/_landscape.py +4 -4
  449. mindspore/train/callback/_loss_monitor.py +2 -2
  450. mindspore/train/callback/_on_request_exit.py +2 -2
  451. mindspore/train/callback/_reduce_lr_on_plateau.py +3 -4
  452. mindspore/train/callback/_summary_collector.py +15 -8
  453. mindspore/train/callback/_time_monitor.py +58 -5
  454. mindspore/train/data_sink.py +5 -11
  455. mindspore/train/dataset_helper.py +84 -57
  456. mindspore/train/loss_scale_manager.py +2 -2
  457. mindspore/train/metrics/__init__.py +3 -3
  458. mindspore/train/metrics/cosine_similarity.py +1 -1
  459. mindspore/train/metrics/hausdorff_distance.py +3 -2
  460. mindspore/train/metrics/mean_surface_distance.py +3 -2
  461. mindspore/train/metrics/metric.py +39 -19
  462. mindspore/train/metrics/roc.py +2 -2
  463. mindspore/train/metrics/root_mean_square_surface_distance.py +4 -3
  464. mindspore/train/mind_ir_pb2.py +85 -36
  465. mindspore/train/model.py +187 -47
  466. mindspore/train/serialization.py +487 -161
  467. mindspore/train/summary/_summary_adapter.py +1 -1
  468. mindspore/train/summary/_writer_pool.py +3 -2
  469. mindspore/train/summary/summary_record.py +37 -17
  470. mindspore/train/train_thor/convert_utils.py +3 -3
  471. mindspore/train/train_thor/dataset_helper.py +1 -1
  472. mindspore/version.py +1 -1
  473. {mindspore-2.1.0.dist-info → mindspore-2.2.10.dist-info}/METADATA +6 -7
  474. {mindspore-2.1.0.dist-info → mindspore-2.2.10.dist-info}/RECORD +477 -517
  475. {mindspore-2.1.0.dist-info → mindspore-2.2.10.dist-info}/entry_points.txt +0 -1
  476. mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
  477. mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
  478. mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
  479. mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
  480. mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
  481. mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
  482. mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
  483. mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
  484. mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
  485. mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
  486. mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
  487. mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
  488. mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
  489. mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
  490. mindspore/_akg/akg/tvm/rpc/base.py +0 -182
  491. mindspore/_akg/akg/tvm/rpc/client.py +0 -436
  492. mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
  493. mindspore/_akg/akg/tvm/rpc/server.py +0 -413
  494. mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
  495. mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
  496. mindspore/_extends/graph_kernel/expander.py +0 -80
  497. mindspore/_extends/graph_kernel/expanders/__init__.py +0 -54
  498. mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
  499. mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
  500. mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
  501. mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
  502. mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
  503. mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
  504. mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
  505. mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
  506. mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
  507. mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
  508. mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
  509. mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
  510. mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
  511. mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
  512. mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
  513. mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
  514. mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
  515. mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
  516. mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
  517. mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
  518. mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
  519. mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
  520. mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
  521. mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
  522. mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
  523. mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
  524. mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
  525. mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
  526. mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
  527. mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
  528. mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
  529. mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
  530. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
  531. mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
  532. mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
  533. mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
  534. mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
  535. mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
  536. mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
  537. mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
  538. mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
  539. mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
  540. mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
  541. mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
  542. mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
  543. mindspore/dataset/datapreprocess/__init__.py +0 -20
  544. mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
  545. mindspore/include/api/net.h +0 -142
  546. mindspore/nn/lr_scheduler.py +0 -262
  547. mindspore/ops/_grad_experimental/grad_image_ops.py +0 -248
  548. mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -181
  549. mindspore/ops/_grad_experimental/grad_other_ops.py +0 -72
  550. mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
  551. mindspore/ops/_grad_experimental/grad_sequence_ops.py +0 -351
  552. mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -0
  553. mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -0
  554. mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -0
  555. mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
  556. mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
  557. mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -0
  558. mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -0
  559. mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
  560. mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
  561. mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -0
  562. mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -0
  563. mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -0
  564. mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -0
  565. mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -0
  566. mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
  567. mindspore/rewrite/node_visitor.py +0 -44
  568. {mindspore-2.1.0.dist-info → mindspore-2.2.10.dist-info}/WHEEL +0 -0
  569. {mindspore-2.1.0.dist-info → mindspore-2.2.10.dist-info}/top_level.txt +0 -0
mindspore/nn/loss/loss.py CHANGED
@@ -43,14 +43,51 @@ class LossBase(Cell):
43
43
  to apply reduction to loss values.
44
44
 
45
45
  Args:
46
- reduction (str): Type of reduction to be applied to loss. The optional values are ``"mean"`` , ``"sum"`` , and
47
- ``"none"`` . Default: ``"mean"`` .
46
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
47
+ ``'sum'`` . Default: ``'mean'`` .
48
+
49
+ - ``'none'``: no reduction will be applied.
50
+ - ``'mean'``: compute and return the (weighted) mean of elements in the output.
51
+ - ``'sum'``: the output elements will be summed.
48
52
 
49
53
  Raises:
50
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
54
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
51
55
 
52
56
  Supported Platforms:
53
57
  ``Ascend`` ``GPU`` ``CPU``
58
+
59
+ Examples:
60
+ >>> import mindspore
61
+ >>> from mindspore import ops, Tensor, nn
62
+ >>> import numpy as np
63
+ >>>
64
+ >>> class Net(nn.LossBase):
65
+ ... def __init__(self, reduction='mean'):
66
+ ... super(Net, self).__init__(reduction)
67
+ ... self.abs = ops.Abs()
68
+ ...
69
+ ... def construct(self, logits, labels):
70
+ ... x = self.abs(logits - labels)
71
+ ... output = self.get_loss(x)
72
+ ... axis = self.get_axis(x)
73
+ ... return output, axis
74
+ >>> net = Net()
75
+ >>> # Case 1: logits.shape = labels.shape = (3,)
76
+ >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
77
+ >>> labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
78
+ >>> output, axis = net(logits, labels)
79
+ >>> print(output)
80
+ 0.33333334
81
+ >>> print(axis)
82
+ (0,)
83
+ >>> # Case 2: logits.shape = labels.shape = (3, 3)
84
+ >>> logits = Tensor(np.array([[1, 2, 3],[1, 2, 3],[1, 2, 3]]), mindspore.float32)
85
+ >>> labels = Tensor(np.array([[1, 2, 2],[1, 2, 3],[1, 2, 3]]), mindspore.float32)
86
+ >>> output, axis = net(logits, labels)
87
+ >>> print(output)
88
+ 0.11111111
89
+ >>> print(axis)
90
+ (0, 1)
54
91
  """
55
92
 
56
93
  def __init__(self, reduction='mean'):
@@ -79,34 +116,6 @@ class LossBase(Cell):
79
116
 
80
117
  Args:
81
118
  x (Tensor): Tensor of any shape.
82
-
83
- Examples:
84
- >>> import mindspore
85
- >>> from mindspore import ops, Tensor, nn
86
- >>> import numpy as np
87
- >>>
88
- >>> class Net(nn.LossBase):
89
- ... def __init__(self, reduction='mean'):
90
- ... super(Net, self).__init__(reduction)
91
- ... self.abs = ops.Abs()
92
- ...
93
- ... def construct(self, logits, labels):
94
- ... x = self.abs(logits - labels)
95
- ... axis = self.get_axis(x)
96
- ... return axis
97
- >>> net = Net()
98
- >>> # Case 1: logits.shape = labels.shape = (3,)
99
- >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
100
- >>> labels = Tensor(np.array([1, 2, 3]), mindspore.float32)
101
- >>> output = net(logits, labels)
102
- >>> print(output)
103
- (0,)
104
- >>> # Case 2: logits.shape = labels.shape = (3, 3)
105
- >>> logits = Tensor(np.array([[1, 2, 3],[1, 2, 3],[1, 2, 3]]), mindspore.float32)
106
- >>> labels = Tensor(np.array([[1, 2, 3],[1, 2, 3],[1, 2, 3]]), mindspore.float32)
107
- >>> output = net(logits, labels)
108
- >>> print(output)
109
- (0, 1)
110
119
  """
111
120
  shape = F.shape(x)
112
121
  length = F.tuple_len(shape)
@@ -126,34 +135,6 @@ class LossBase(Cell):
126
135
 
127
136
  Returns:
128
137
  Return the weighted loss.
129
-
130
- Examples:
131
- >>> import mindspore
132
- >>> from mindspore import ops, Tensor, nn
133
- >>> import numpy as np
134
- >>>
135
- >>> class Net(nn.LossBase):
136
- ... def __init__(self, reduction='mean'):
137
- ... super(Net, self).__init__(reduction)
138
- ... self.abs = ops.Abs()
139
- ...
140
- ... def construct(self, logits, labels):
141
- ... x = self.abs(logits - labels)
142
- ... output = self.get_loss(x)
143
- ... return output
144
- >>> net = Net()
145
- >>> # Case 1: logits.shape = labels.shape = (3,)
146
- >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
147
- >>> labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
148
- >>> output = net(logits, labels)
149
- >>> print(output)
150
- 0.33333334
151
- >>> # Case 2: logits.shape = labels.shape = (3, 3)
152
- >>> logits = Tensor(np.array([[1, 2, 3],[1, 2, 3],[1, 2, 3]]), mindspore.float32)
153
- >>> labels = Tensor(np.array([[1, 2, 2],[1, 2, 3],[1, 2, 3]]), mindspore.float32)
154
- >>> output = net(logits, labels)
155
- >>> print(output)
156
- 0.11111111
157
138
  """
158
139
  input_dtype = x.dtype
159
140
  x = self.cast(x, mstype.float32)
@@ -203,7 +184,7 @@ class L1Loss(LossBase):
203
184
  .. math::
204
185
  \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,
205
186
 
206
- where :math:`N` is the batch size. If `reduction` is not 'none', then:
187
+ where :math:`N` is the batch size. If `reduction` is not ``'none'``, then:
207
188
 
208
189
  .. math::
209
190
  \ell(x, y) =
@@ -213,9 +194,12 @@ class L1Loss(LossBase):
213
194
  \end{cases}
214
195
 
215
196
  Args:
216
- reduction (str): Type of reduction to be applied to loss. The optional values are ``"mean"`` , ``"sum"`` , and
217
- ``"none"`` . Default: ``"mean"`` . If `reduction` is ``"mean"`` or ``"sum"`` , then output a scalar Tensor,
218
- if `reduction` is ``"none"`` , the shape of the output Tensor is the broadcasted shape.
197
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
198
+ ``'sum'`` . Default: ``'mean'`` .
199
+
200
+ - ``'none'``: no reduction will be applied.
201
+ - ``'mean'``: compute and return the mean of elements in the output.
202
+ - ``'sum'``: the output elements will be summed.
219
203
 
220
204
  Inputs:
221
205
  - **logits** (Tensor) - Predicted value, Tensor of any dimension.
@@ -227,7 +211,7 @@ class L1Loss(LossBase):
227
211
  Tensor, data type is float.
228
212
 
229
213
  Raises:
230
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
214
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
231
215
  ValueError: If `logits` and `labels` have different shapes and cannot be broadcasted to each other.
232
216
 
233
217
  Supported Platforms:
@@ -273,7 +257,7 @@ class MSELoss(LossBase):
273
257
  .. math::
274
258
  \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with} \quad l_n = (x_n - y_n)^2.
275
259
 
276
- where :math:`N` is the batch size. If `reduction` is not 'none', then:
260
+ where :math:`N` is the batch size. If `reduction` is not ``'none'``, then:
277
261
 
278
262
  .. math::
279
263
  \ell(x, y) =
@@ -283,8 +267,12 @@ class MSELoss(LossBase):
283
267
  \end{cases}
284
268
 
285
269
  Args:
286
- reduction (str): Type of reduction to be applied to loss. The optional values are ``"mean"`` , ``"sum"`` , and
287
- ``"none"`` . Default: ``'mean'`` .
270
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
271
+ ``'sum'`` . Default: ``'mean'`` .
272
+
273
+ - ``'none'``: no reduction will be applied.
274
+ - ``'mean'``: compute and return the mean of elements in the output.
275
+ - ``'sum'``: the output elements will be summed.
288
276
 
289
277
  Inputs:
290
278
  - **logits** (Tensor) - The predicted value of the input. Tensor of any dimension.
@@ -293,11 +281,11 @@ class MSELoss(LossBase):
293
281
  and they should be broadcasted to each other.
294
282
 
295
283
  Outputs:
296
- Tensor, loss of type float, the shape is zero if `reduction` is 'mean' or 'sum',
284
+ Tensor, loss of type float, the shape is zero if `reduction` is ``'mean'`` or ``'sum'`` .,
297
285
  while the shape of output is the broadcasted shape if `reduction` is 'none'.
298
286
 
299
287
  Raises:
300
- ValueError: If `reduction` is not one of 'none', 'mean' or 'sum'.
288
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``.
301
289
  ValueError: If `logits` and `labels` have different shapes and cannot be broadcasted.
302
290
 
303
291
  Supported Platforms:
@@ -412,7 +400,7 @@ class MAELoss(LossBase):
412
400
  .. math::
413
401
  \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,
414
402
 
415
- where :math:`N` is the batch size. If `reduction` is not 'none', then:
403
+ where :math:`N` is the batch size. If `reduction` is not ``'none'``, then:
416
404
 
417
405
  .. math::
418
406
  \ell(x, y) =
@@ -422,8 +410,12 @@ class MAELoss(LossBase):
422
410
  \end{cases}
423
411
 
424
412
  Args:
425
- reduction (str): Type of reduction to be applied to loss. The optional values are ``"mean"`` , ``"sum"`` , and
426
- ``"none"`` . Default: ``"mean"`` .
413
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
414
+ ``'sum'`` . Default: ``'mean'`` .
415
+
416
+ - ``'none'``: no reduction will be applied.
417
+ - ``'mean'``: compute and return the mean of elements in the output.
418
+ - ``'sum'``: the output elements will be summed.
427
419
 
428
420
  Inputs:
429
421
  - **logits** (Tensor) - Tensor of shape :math:`(M, *)` where :math:`*` means, any number of
@@ -433,11 +425,11 @@ class MAELoss(LossBase):
433
425
  and they should be broadcasted to each other.
434
426
 
435
427
  Outputs:
436
- Tensor, weighted loss float tensor, the shape is zero if `reduction` is 'mean' or 'sum',
428
+ Tensor, weighted loss float tensor, the shape is zero if `reduction` is ``'mean'`` or ``'sum'`` .,
437
429
  while the shape of output is the broadcasted shape if `reduction` is 'none'.
438
430
 
439
431
  Raises:
440
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
432
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
441
433
 
442
434
  Supported Platforms:
443
435
  ``Ascend`` ``GPU`` ``CPU``
@@ -487,9 +479,12 @@ class MarginRankingLoss(LossBase):
487
479
 
488
480
  Args:
489
481
  margin (float, optional): Specify the adjustment factor of the operation. Default: ``0.0`` .
490
- reduction (str, optional): Specifies which reduction to be applied to the output. It must be one of
491
- ``"none"`` , ``"mean"`` , and ``"sum"`` , meaning no reduction, reduce mean and sum on output,
492
- respectively. Default: ``"mean"`` .
482
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
483
+ ``'sum'`` . Default: ``'mean'`` .
484
+
485
+ - ``'none'``: no reduction will be applied.
486
+ - ``'mean'``: compute and return the mean of elements in the output.
487
+ - ``'sum'``: the output elements will be summed.
493
488
 
494
489
  Inputs:
495
490
  - **input1** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number
@@ -499,7 +494,7 @@ class MarginRankingLoss(LossBase):
499
494
  :math:`(x_1, x_2, x_3, ..., x_R)`, then the shape of `target` must be :math:`(x_1, x_2, x_3, ..., x_R)`.
500
495
 
501
496
  Outputs:
502
- Tensor or Scalar. if `reduction` is "none", its shape is the same as `labels`.
497
+ Tensor or Scalar. if `reduction` is ``"none"``, its shape is the same as `labels`.
503
498
  Otherwise, a scalar value will be returned.
504
499
 
505
500
  Raises:
@@ -509,7 +504,7 @@ class MarginRankingLoss(LossBase):
509
504
  TypeError: If the types of `input1` and `target` are inconsistent.
510
505
  ValueError: If the shape of `input1` and `input2` are inconsistent.
511
506
  ValueError: If the shape of `input1` and `target` are inconsistent.
512
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
507
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` , ``'sum'``.
513
508
 
514
509
  Supported Platforms:
515
510
  ``Ascend`` ``GPU`` ``CPU``
@@ -582,8 +577,12 @@ class SmoothL1Loss(LossBase):
582
577
  Args:
583
578
  beta (float): The loss function calculates the threshold of the transformation between L1Loss and L2Loss.
584
579
  Default: ``1.0`` .
585
- reduction (str): Type of reduction to be applied to loss. The optional values are ``"mean"`` , ``"sum"`` , and
586
- ``"none"`` . Default: ``"none"`` .
580
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
581
+ ``'sum'`` . Default: ``'none'`` .
582
+
583
+ - ``'none'``: no reduction will be applied.
584
+ - ``'mean'``: compute and return the mean of elements in the output.
585
+ - ``'sum'``: the output elements will be summed.
587
586
 
588
587
  Inputs:
589
588
  - **logits** (Tensor) - Predictive value. Tensor of any dimension. Data type must be one of float16,
@@ -591,12 +590,12 @@ class SmoothL1Loss(LossBase):
591
590
  - **labels** (Tensor) - Ground truth data, same shape and dtype as the `logits`.
592
591
 
593
592
  Outputs:
594
- Tensor, if `reduction` is 'none', then output is a tensor with the same shape as `logits`.
593
+ Tensor, if `reduction` is ``'none'``, then output is a tensor with the same shape as `logits`.
595
594
  Otherwise the shape of output tensor is :math:`()`.
596
595
 
597
596
  Raises:
598
597
  TypeError: If `beta` is not a float.
599
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
598
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
600
599
  TypeError: If `logits` or `labels` are not Tensor.
601
600
  TypeError: If dtype of `logits` or `labels` is neither float16 not float32.
602
601
  TypeError: If dtype of `logits` is not the same as `labels`.
@@ -638,27 +637,31 @@ class SoftMarginLoss(LossBase):
638
637
  (containing 1 or -1).
639
638
 
640
639
  .. math::
641
- \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}
640
+ \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{x.nelement()}
642
641
 
643
642
  :math:`x.nelement()` represents the number of element of `x` .
644
643
 
645
644
  Args:
646
- reduction (str): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` , ``'sum'`` .
647
- Default: ``"mean"`` .
645
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
646
+ ``'sum'`` . Default: ``'mean'`` .
647
+
648
+ - ``'none'``: no reduction will be applied.
649
+ - ``'mean'``: compute and return the mean of elements in the output.
650
+ - ``'sum'``: the output elements will be summed.
648
651
 
649
652
  Inputs:
650
653
  - **logits** (Tensor) - Predict data. Data type must be float16 or float32.
651
654
  - **labels** (Tensor) - Ground truth data, with the same type and shape as `logits`.
652
655
 
653
656
  Outputs:
654
- Tensor or Scalar, if `reduction` is "none", its shape is the same as `logits`.
657
+ Tensor or Scalar, if `reduction` is ``"none"``, its shape is the same as `logits`.
655
658
  Otherwise, a scalar value will be returned.
656
659
 
657
660
  Raises:
658
661
  TypeError: If `logits` or `labels` is not a Tensor.
659
662
  TypeError: If dtype of `logits` or `labels` is neither float16 nor float32.
660
663
  ValueError: If shape of `logits` is not the same as `labels`.
661
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
664
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
662
665
 
663
666
  Supported Platforms:
664
667
  ``Ascend`` ``GPU``
@@ -710,9 +713,13 @@ class SoftmaxCrossEntropyWithLogits(LossBase):
710
713
  of entry is a valid one.
711
714
 
712
715
  Args:
713
- sparse (bool): Specifies whether labels use sparse format or not. Default: ``False`` .
714
- reduction (str): Type of reduction to be applied to loss. The optional values are ``"mean"`` , ``"sum"`` , and
715
- ``"none"`` . If ``"none"`` , do not perform reduction. Default: ``"none"`` .
716
+ sparse (bool, optional): Specifies whether labels use sparse format or not. Default: ``False`` .
717
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
718
+ ``'sum'`` . Default: ``'none'`` .
719
+
720
+ - ``'none'``: no reduction will be applied.
721
+ - ``'mean'``: compute and return the mean of elements in the output.
722
+ - ``'sum'``: the output elements will be summed.
716
723
 
717
724
  Inputs:
718
725
  - **logits** (Tensor) - Tensor of shape :math:`(N, C)` . Data type must be float16 or float32.
@@ -726,7 +733,7 @@ class SoftmaxCrossEntropyWithLogits(LossBase):
726
733
  TypeError: If `sparse` is not a bool.
727
734
  TypeError: If `sparse` is True and dtype of `labels` is neither int32 not int64.
728
735
  TypeError: If `sparse` is False and dtype of `labels` is neither float16 not float32.
729
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
736
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
730
737
 
731
738
  Supported Platforms:
732
739
  ``Ascend`` ``GPU`` ``CPU``
@@ -984,8 +991,12 @@ class SampledSoftmaxLoss(LossBase):
984
991
  remove_accidental_hits (bool): Whether to remove "accidental hits"
985
992
  where a sampled class equals to one of the labels classes. Default: ``True`` .
986
993
  seed (int): Random seed for candidate sampling. Default: 0
987
- reduction (str): Type of reduction to be applied to loss. The optional values are ``"mean"`` , ``"sum"`` , and
988
- ``"none"`` . If ``"none"`` , do not perform reduction. Default: ``"none"`` .
994
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
995
+ ``'sum'`` . Default: ``'none'`` .
996
+
997
+ - ``'none'``: no reduction will be applied.
998
+ - ``'mean'``: compute and return the mean of elements in the output.
999
+ - ``'sum'``: the output elements will be summed.
989
1000
 
990
1001
  Inputs:
991
1002
  - **weights** (Tensor) - Tensor of shape :math:`(C, dim)`.
@@ -994,13 +1005,13 @@ class SampledSoftmaxLoss(LossBase):
994
1005
  - **logits** (Tensor) - Tensor of shape :math:`(N, dim)`. The forward activations of the input network.
995
1006
 
996
1007
  Outputs:
997
- Tensor or Scalar, if `reduction` is 'none', then output is a tensor with shape :math:`(N,)`.
1008
+ Tensor or Scalar, if `reduction` is ``'none'``, then output is a tensor with shape :math:`(N,)`.
998
1009
  Otherwise, the output is a scalar.
999
1010
 
1000
1011
  Raises:
1001
1012
  TypeError: If `sampled_values` is not a list or tuple.
1002
1013
  TypeError: If dtype of `labels` is neither int32 not int64.
1003
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
1014
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
1004
1015
  ValueError: If `num_sampled` or `num_true` is greater than `num_classes`.
1005
1016
  ValueError: If length of `sampled_values` is not equal to 3.
1006
1017
 
@@ -1228,8 +1239,13 @@ class TripletMarginWithDistanceLoss(LossBase):
1228
1239
  swap (bool): The distance swap is described in detail in the paper
1229
1240
  `Learning shallow convolutional feature descriptors with triplet losses` by
1230
1241
  V. Balntas, E. Riba et al. Default: ``False`` .
1231
- reduction (str): Apply specific reduction method to the output:
1232
- ``'none'`` , ``'mean'`` , ``'sum'`` . Default: ``'mean'`` .
1242
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
1243
+ ``'sum'`` . Default: ``'mean'`` .
1244
+
1245
+ - ``'none'``: no reduction will be applied.
1246
+ - ``'mean'``: compute and return the mean of elements in the output.
1247
+ - ``'sum'``: the output elements will be summed.
1248
+
1233
1249
  margin (float): Make a margin between the positive pair and the negative pair. Default: ``1.0`` .
1234
1250
 
1235
1251
  Inputs:
@@ -1241,7 +1257,7 @@ class TripletMarginWithDistanceLoss(LossBase):
1241
1257
  with the same type and shape as `x`.
1242
1258
 
1243
1259
  Outputs:
1244
- Union[Tensor, Scalar], if `reduction` is 'none', its shape is :math:`(N)`.
1260
+ Union[Tensor, Scalar], if `reduction` is ``'none'``, its shape is :math:`(N)`.
1245
1261
  Otherwise, a scalar value will be returned.
1246
1262
 
1247
1263
  Raises:
@@ -1250,7 +1266,7 @@ class TripletMarginWithDistanceLoss(LossBase):
1250
1266
  ValueError: If dimensions of input `x`, `positive` and `negative` are less than or equal to 1 at the same time.
1251
1267
  ValueError: If length of shape of `margin` is not 0.
1252
1268
  ValueError: If shape of `x`, `positive` and `negative` cannot broadcast.
1253
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
1269
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
1254
1270
 
1255
1271
  Supported Platforms:
1256
1272
  ``Ascend`` ``GPU`` ``CPU``
@@ -1333,15 +1349,19 @@ class PoissonNLLLoss(LossBase):
1333
1349
  full (bool, optional): Whether include the Stirling approximation term in the loss calculation.
1334
1350
  Default: ``False`` .
1335
1351
  eps (float, optional): Lower bound of `input` when calculating logarithms. Default: ``1e-08`` .
1336
- reduction (str, optional): Apply specific reduction method to the output:
1337
- ``'none'`` , ``'mean'`` , ``'sum'`` . Default: ``'mean'`` .
1352
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
1353
+ ``'sum'`` . Default: ``'mean'`` .
1354
+
1355
+ - ``'none'``: no reduction will be applied.
1356
+ - ``'mean'``: compute and return the mean of elements in the output.
1357
+ - ``'sum'``: the output elements will be summed.
1338
1358
 
1339
1359
  Inputs:
1340
1360
  - **input** (Tensor) - The input Tensor. The shape can be any number of dimensions.
1341
1361
  - **target** (Tensor) - The label Tensor which has the same shape as `input`.
1342
1362
 
1343
1363
  Outputs:
1344
- Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input`.
1364
+ Tensor or Scalar, if `reduction` is ``'none'``, then output is a tensor and has the same shape as `input`.
1345
1365
  Otherwise it is a scalar.
1346
1366
 
1347
1367
  Raises:
@@ -1411,25 +1431,28 @@ class MultiLabelSoftMarginLoss(LossBase):
  \sum_{j = 1}^{C}\left(y_{ij}\log\frac{1}{1 + e^{- x_{ij}}} + \left( 1 - y_{ij}
  \right)\log\frac{e^{-x_{ij}}}{1 + e^{-x_{ij}}} \right)

- where :math:`x{ij}` represents the predicted score of sample :math:`i` for class :math:`j`. :math:`y{ij}`
+ where :math:`x_{ij}` represents the predicted score of sample :math:`i` for class :math:`j`. :math:`y_{ij}`
  represents the binary label of sample :math:`i` for class :math:`j`, where sample :math:`i` belongs to
- class :math:`j` if :math:`y{ij}=1` , and sample :math:`i` does not belong to class :math:`j` if :math:`y{ij}=0`.
+ class :math:`j` if :math:`y_{ij}=1` , and sample :math:`i` does not belong to class :math:`j` if :math:`y_{ij}=0`.
  For a multi-label classification task, each sample may have multiple labels with a value of 1 in the binary
  label :math:`y`. If given, `weight` is multiplied with the loss of each class.

  Args:
  weight (Union[Tensor, int, float]): The manual rescaling weight given to each class. Default: ``None`` .
- reduction (str): Specifies which reduction to be applied to the output. It must be one of
- ``'none'`` , ``'mean'`` , and ``'sum'`` , meaning no reduction, reduce mean and sum on output,
- respectively. Default: ``'mean'`` .
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the weighted mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.

  Inputs:
- - **x** (Tensor) - A tensor of shape (N, C), where N is batch size and C is number
+ - **x** (Tensor) - A tensor of shape :math:`(N, C)`, where N is batch size and C is number
  of classes.
  - **target** (Tensor) - The label target Tensor which has the same shape as `x`.

  Outputs:
- Tensor, the data type is the same as x, if the reduction is 'none', its shape is (N), otherwise it is zero.
+ Tensor, the data type is the same as x. If the reduction is ``'none'``, its shape is (N); otherwise it is a scalar.

  Raises:
  ValueError: If the rank of `x` or `target` is not 2.
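Since MultiLabelSoftMarginLoss targets are multi-hot rather than single class indices, a small usage sketch assuming the :math:`(N, C)` shapes documented above:

    # Sketch: multi-label targets are multi-hot vectors of shape (N, C).
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    x = Tensor(np.array([[0.3, 0.6, 0.6], [0.9, 0.4, 0.2]]), ms.float32)        # scores, N=2, C=3
    target = Tensor(np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 1.0]]), ms.float32)   # multi-hot labels

    loss_fn = nn.MultiLabelSoftMarginLoss(reduction='mean')
    print(loss_fn(x, target))  # scalar: mean over the N per-sample losses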
@@ -1481,8 +1504,8 @@ class MultiMarginLoss(LossBase):
  ``'sum'`` . Default: ``'mean'`` .

  - ``'none'``: no reduction will be applied.
- - ``'mean'``: the sum of the output will be divided by the number of elements in the output.
- - ``'sum'``: the output will be summed.
+ - ``'mean'``: compute and return the weighted mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.

  weight (Tensor, optional): The rescaling weight to each class with shape :math:`(C,)`. Data type only
  supports float32, float16 or float64. Default: ``None`` , all classes are weighted equally.
@@ -1494,7 +1517,7 @@ class MultiMarginLoss(LossBase):
  value of target should be non-negative, less than C. `target` is :math:`y` in the above formula.

  Outputs:
- Tensor, When `reduction` is 'none', the shape is :math:`(N,)`.
+ Tensor, when `reduction` is ``'none'``, the shape is :math:`(N,)`.
  Otherwise, it is a scalar. Has the same data type as `x`.

  Raises:
@@ -1504,7 +1527,7 @@ class MultiMarginLoss(LossBase):
  TypeError: If dtype of `x` is not float16, float or float64.
  TypeError: If dtype of `weight` and `x` is not the same.
  ValueError: If `p` is not 1 or 2.
- ValueError: If 'reduction' is not one of {'none','sum','mean'}.
+ ValueError: If `reduction` is not one of { ``'none'`` , ``'sum'`` , ``'mean'`` }.
  ValueError: If shape[0] of `x` is not equal to shape[0] of `target`.
  ValueError: If shape[1] of `x` is not equal to shape[0] of `weight`.
  ValueError: If rank of `weight` is not 1.
@@ -1529,7 +1552,6 @@ class MultiMarginLoss(LossBase):
          """Initialize MultiMarginLoss."""
          super(MultiMarginLoss, self).__init__()
          self.multi_margin_loss = MultiMarginLossOp(p=p, margin=margin, reduction=reduction)
-         self.generate_ones = ops.Fill()
          self.weight = weight

      def construct(self, x, target, weight=None):
@@ -1541,7 +1563,7 @@ class MultiMarginLoss(LossBase):
          if not weight_one:
              _check_is_tensor('weight', weight, self.cls_name)
          else:
-             weight = self.generate_ones(x.dtype, x.astype('float32')[0].shape, 1)
+             weight = F.fill(x.dtype, x.astype('float32')[0].shape, 1)
          loss = self.multi_margin_loss(x, target, weight)
          return loss

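The hunk above replaces the `ops.Fill()` primitive with the functional `F.fill` (i.e. `mindspore.ops.fill`), called with the same (dtype, shape, value) arguments. A minimal sketch of that call in isolation:

    # Sketch of the functional fill used above: ops.fill(dtype, shape, value)
    import mindspore as ms
    from mindspore import ops

    ones = ops.fill(ms.float32, (3,), 1)  # Tensor [1. 1. 1.], like the default class weights
    print(ones)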
@@ -1575,8 +1597,12 @@ class BCELoss(LossBase):
  Args:
  weight (Tensor, optional): A rescaling weight applied to the loss of each batch element.
  And it must have the same shape and data type as `inputs`. Default: ``None`` .
- reduction (str): Specifies the reduction to be applied to the output.
- Its value must be one of ``'none'`` , ``'mean'`` , ``'sum'`` . Default: ``'mean'`` .
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the weighted mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.

  Inputs:
  - **logits** (Tensor) - The input tensor with shape :math:`(N, *)` where :math:`*` means any number
@@ -1585,12 +1611,12 @@ class BCELoss(LossBase):
  of additional dimensions. The same shape and data type as `logits`.

  Outputs:
- Tensor, has the same dtype as `logits`. if `reduction` is 'none', then it has the same shape as `logits`.
+ Tensor, has the same dtype as `logits`. If `reduction` is ``'none'``, then it has the same shape as `logits`.
  Otherwise, it is a scalar Tensor.

  Raises:
  TypeError: If dtype of `logits`, `labels` or `weight` (if given) is neither float16 nor float32.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
  ValueError: If shape of `logits` is not the same as `labels` or `weight` (if given).

  Supported Platforms:
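BCELoss expects probabilities that have already been squashed into [0, 1], unlike BCEWithLogitsLoss further down. A short sketch under the interface documented above:

    # Sketch: BCELoss consumes probabilities (already passed through sigmoid).
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    probs = Tensor(np.array([[0.1, 0.9], [0.8, 0.3]]), ms.float32)   # values in [0, 1]
    labels = Tensor(np.array([[0.0, 1.0], [1.0, 0.0]]), ms.float32)  # same shape as probs

    loss_fn = nn.BCELoss(reduction='mean')
    print(loss_fn(probs, labels))  # scalar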
@@ -1650,9 +1676,12 @@ class CosineEmbeddingLoss(LossBase):

  Args:
  margin (float): Should be in [-1.0, 1.0]. Default: ``0.0`` .
- reduction (str): Specifies which reduction to be applied to the output. It must be one of
- ``"none"`` , ``"mean"`` , and ``"sum"`` , meaning no reduction,
- reduce mean and sum on output, respectively. Default: ``"mean"`` .
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.

  Inputs:
  - **logits_x1** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means any number
@@ -1662,12 +1691,12 @@ class CosineEmbeddingLoss(LossBase):
  :math:`(x_1, x_2, x_3, ..., x_R)`, then the shape of `labels` must be :math:`(x_1, x_3, x_4, ..., x_R)`.

  Outputs:
- Tensor or Scalar, if `reduction` is "none", its shape is the same as `labels`.
+ Tensor or Scalar, if `reduction` is ``"none"``, its shape is the same as `labels`.
  Otherwise, a scalar value will be returned.

  Raises:
  TypeError: If `margin` is not a float.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
  ValueError: If `margin` is not in range [-1, 1].

  Supported Platforms:
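A usage sketch for CosineEmbeddingLoss, assuming the documented inputs: two batches of embeddings plus a +1/-1 label per pair:

    # Sketch: labels are +1 (pull embeddings together) or -1 (push apart).
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    x1 = Tensor(np.random.randn(3, 16), ms.float32)   # N=3 embedding pairs
    x2 = Tensor(np.random.randn(3, 16), ms.float32)
    labels = Tensor(np.array([1, -1, 1]), ms.int32)   # one +1/-1 label per pair

    loss_fn = nn.CosineEmbeddingLoss(margin=0.0, reduction='mean')
    print(loss_fn(x1, x2, labels))  # scalar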
@@ -1744,7 +1773,11 @@ class MultilabelMarginLoss(LossBase):

  Args:
  reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
- ``'sum'`` . Default: ``"mean"`` .
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.

  Inputs:
  - **x** (Tensor) - Predict data. Tensor of shape :math:`(C)` or :math:`(N, C)`, where :math:`N`
@@ -1753,7 +1786,7 @@ class MultilabelMarginLoss(LossBase):
  label targets padded by -1.

  Outputs:
- - **y** (Union[Tensor, Scalar]) - The loss of MultilabelMarginLoss. If `reduction` is "none", its shape
+ - **y** (Union[Tensor, Scalar]) - The loss of MultilabelMarginLoss. If `reduction` is ``"none"``, its shape
  is :math:`(N)`. Otherwise, a scalar value will be returned.

  Raises:
@@ -1762,7 +1795,7 @@ class MultilabelMarginLoss(LossBase):
  TypeError: If dtype of `target` is not int32.
  ValueError: If length of shape of `x` is neither 1 nor 2.
  ValueError: If shape of `x` is not the same as `target`.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.

  Supported Platforms:
  ``Ascend`` ``GPU``
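A sketch of the -1 padding convention for MultilabelMarginLoss targets, assuming the documented int32 target dtype (Ascend/GPU only per the platforms above):

    # Sketch: relevant class indices come first in each target row; the rest
    # of the row is padded with -1.
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    x = Tensor(np.array([[0.1, 0.2, 0.4, 0.8]]), ms.float32)  # N=1, C=4
    target = Tensor(np.array([[3, 0, -1, -1]]), ms.int32)     # classes 3 and 0 are relevant

    loss_fn = nn.MultilabelMarginLoss(reduction='mean')
    print(loss_fn(x, target))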
@@ -1811,8 +1844,13 @@ class BCEWithLogitsLoss(LossBase):
  \end{cases}

  Args:
- reduction (str): Type of reduction to be applied to loss. The optional values are ``'mean'`` , ``'sum'`` , and
- ``'none'`` . If ``'none'`` , do not perform reduction. Default: ``'mean'`` .
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the weighted mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.
+
  weight (Tensor, optional): A rescaling weight applied to the loss of each batch element.
  If not None, it can be broadcast to a tensor with shape of `logits`,
  data type must be float16 or float32. Default: ``None`` .
@@ -1827,7 +1865,7 @@ class BCEWithLogitsLoss(LossBase):
  of additional dimensions. The same shape and data type as `logits`.

  Outputs:
- Tensor or Scalar, if `reduction` is 'none', its shape is the same as `logits`.
+ Tensor or Scalar, if `reduction` is ``'none'``, its shape is the same as `logits`.
  Otherwise, a scalar value will be returned.

  Raises:
@@ -1837,7 +1875,7 @@ class BCEWithLogitsLoss(LossBase):
  TypeError: If data type of `weight` or `pos_weight` is neither float16 nor float32.
  TypeError: If data type of `reduction` is not a string.
  ValueError: If `weight` or `pos_weight` cannot be broadcast to a tensor with shape of `logits`.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
@@ -1858,28 +1896,17 @@ class BCEWithLogitsLoss(LossBase):
          """Initialize BCEWithLogitsLoss."""
          super(BCEWithLogitsLoss, self).__init__()
          self.reduction = reduction
-         self.bce_with_logits_loss = P.BCEWithLogitsLoss(reduction=reduction)
          if isinstance(weight, Parameter):
              raise TypeError(f"For '{self.cls_name}', the 'weight' can not be a Parameter.")
          if isinstance(pos_weight, Parameter):
              raise TypeError(f"For '{self.cls_name}', the 'pos_weight' can not be a Parameter.")
          self.weight = weight
          self.pos_weight = pos_weight
-         self.ones = P.OnesLike()

      def construct(self, logits, labels):
          _check_is_tensor('logits', logits, self.cls_name)
          _check_is_tensor('labels', labels, self.cls_name)
-         ones_input = self.ones(logits)
-         if self.weight is not None:
-             weight = self.weight
-         else:
-             weight = ones_input
-         if self.pos_weight is not None:
-             pos_weight = self.pos_weight
-         else:
-             pos_weight = ones_input
-         loss = self.bce_with_logits_loss(logits, labels, weight, pos_weight)
+         loss = ops.binary_cross_entropy_with_logits(logits, labels, self.weight, self.pos_weight, self.reduction)
          return loss

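The refactor above drops the `P.BCEWithLogitsLoss` primitive and the `OnesLike` default-weight plumbing in favour of one functional call, which implies `ops.binary_cross_entropy_with_logits` now accepts ``None`` for `weight` and `pos_weight`. A minimal sketch of that call under this assumption:

    # Sketch of the functional call the refactored construct() delegates to.
    import numpy as np
    import mindspore as ms
    from mindspore import ops, Tensor

    logits = Tensor(np.array([[-1.2, 0.3], [0.8, -0.5]]), ms.float32)  # raw scores
    labels = Tensor(np.array([[0.0, 1.0], [1.0, 0.0]]), ms.float32)

    loss = ops.binary_cross_entropy_with_logits(logits, labels, None, None, 'mean')
    print(loss)  # scalar; the sigmoid is fused into the op for numerical stability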
@@ -1932,9 +1959,12 @@ class FocalLoss(LossBase):
  gamma (float): Gamma is used to adjust the steepness of the weight curve in focal loss. Default: ``2.0`` .
  weight (Union[Tensor, None]): A rescaling weight applied to the loss of each batch element. The dimension of
  weight should be 1. If None, no weight is applied. Default: ``None`` .
- reduction (str): Type of reduction to be applied to loss. The optional values
- are ``"mean"`` , ``"sum"`` , and ``"none"``.
- If "none", do not perform reduction. Default: ``"mean"`` .
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the weighted mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.

  Inputs:
  - **logits** (Tensor) - Tensor whose shape should be :math:`(N, C)` or :math:`(N, C, H)` or :math:`(N, C, H, W)`.
@@ -1948,7 +1978,7 @@ class FocalLoss(LossBase):
  range [-:math:`C`, :math:`C`), where :math:`C` is the number of classes in logits.

  Outputs:
- Tensor or Scalar, if `reduction` is "none", its shape is the same as `logits`.
+ Tensor or Scalar, if `reduction` is ``"none"``, its shape is the same as `logits`.
  Otherwise, a scalar value will be returned.

  Raises:
@@ -1956,7 +1986,7 @@ class FocalLoss(LossBase):
  TypeError: If `weight` is not a Tensor.
  ValueError: If `labels` dim is different from `logits`.
  ValueError: If `labels` channel is not 1 and `labels` shape is different from `logits`.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.

  Supported Platforms:
  ``Ascend``
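A usage sketch for FocalLoss, assuming the documented shapes (logits :math:`(N, C)`, labels :math:`(N, 1)` holding class indices; Ascend only):

    # Sketch: gamma down-weights well-classified (easy) examples.
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    logits = Tensor(np.random.randn(2, 3), ms.float32)  # N=2 samples, C=3 classes
    labels = Tensor(np.array([[0], [2]]), ms.int32)     # one class index per sample

    loss_fn = nn.FocalLoss(gamma=2.0, reduction='mean')
    print(loss_fn(logits, labels))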
@@ -2050,7 +2080,7 @@ class HuberLoss(LossBase):
  delta * (|x_n - y_n| - 0.5 * delta), & \text{otherwise. }
  \end{cases}

- where :math:`N` is the batch size. If `reduction` is not "none", then:
+ where :math:`N` is the batch size. If `reduction` is not ``"none"``, then:

  .. math::
  \ell(x, y) =
@@ -2060,9 +2090,13 @@ class HuberLoss(LossBase):
  \end{cases}

  Args:
- reduction (str): Type of reduction to be applied to loss. The optional values are ``"mean"`` , ``"sum"`` , and
- ``"none"`` . If `reduction` is ``"mean"`` or ``"sum"`` , then output a scalar Tensor, if `reduction` is
- ``"none"`` , the shape of the output Tensor is the broadcasted shape. Default: ``"mean"``.
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.
+
  delta (Union[int, float]): The threshold to change between two types of loss.
  The value must be positive. Default: ``1.0`` .

@@ -2073,7 +2107,7 @@ class HuberLoss(LossBase):
  and they should be broadcastable to each other.

  Outputs:
- Tensor or Scalar, if `reduction` is "none", return a Tensor with same shape and dtype as `logits`.
+ Tensor or Scalar, if `reduction` is ``"none"``, return a Tensor with the same shape and dtype as `logits`.
  Otherwise, a scalar value will be returned.

  Raises:
@@ -2081,7 +2115,7 @@ class HuberLoss(LossBase):
  TypeError: If data types of `logits` and `labels` are not the same.
  TypeError: If dtype of `delta` is neither float nor int.
  ValueError: If `delta` is less than or equal to 0.
- ValueError: If `reduction` is not one of "none", "mean", "sum".
+ ValueError: If `reduction` is not one of ``"none"``, ``"mean"``, ``"sum"``.
  ValueError: If `logits` and `labels` have different shapes and cannot be broadcast to each other.

  Supported Platforms:
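The `delta` threshold documented above splits the loss into a squared region and a linear region. A small worked sketch, with the expected per-element values derived from the formula:

    # Sketch: errors below delta are squared (L2-like), errors at or above
    # delta are linear (L1-like).
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    logits = Tensor(np.array([0.0, 0.0]), ms.float32)
    labels = Tensor(np.array([0.5, 3.0]), ms.float32)  # one small, one large error

    loss_fn = nn.HuberLoss(reduction='none', delta=1.0)
    # |0.5| < delta -> 0.5 * 0.5**2 = 0.125 ; |3.0| >= delta -> 1.0 * (3.0 - 0.5) = 2.5
    print(loss_fn(logits, labels))  # [0.125, 2.5]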
@@ -2150,7 +2184,12 @@ class TripletMarginLoss(LossBase):
  swap (bool, optional): The distance swap changes the negative distance to the distance between the positive
  sample and the negative sample. Default: ``False`` .
  reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
- ``'sum'`` . Default: ``"mean"`` .
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.
+
  margin (Union[Tensor, float]): Make a margin between the positive pair and the negative pair.
  Default: ``1.0`` .

@@ -2165,7 +2204,7 @@ class TripletMarginLoss(LossBase):
  Default: ``1.0`` .

  Outputs:
- Tensor. If `reduction` is "none", its shape is :math:`(N)`. Otherwise, a scalar value will be returned.
+ Tensor. If `reduction` is ``"none"``, its shape is :math:`(N)`. Otherwise, a scalar value will be returned.

  Raises:
  TypeError: If `x`, `positive` or `negative` is not a Tensor.
@@ -2177,7 +2216,7 @@ class TripletMarginLoss(LossBase):
  ValueError: If the dimension of input `x`, `positive` or `negative` is greater than or equal to 8.
  ValueError: If length of shape of `margin` is not 0.
  ValueError: If shape of `x`, `positive` and `negative` cannot broadcast.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.

  Supported Platforms:
  ``GPU``
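A usage sketch for TripletMarginLoss showing the `swap` option, assuming `margin` is passed to the constructor as the Args section above suggests (GPU only):

    # Sketch: swap=True substitutes the positive-to-negative distance when it
    # is smaller than the anchor-to-negative distance.
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    x = Tensor(np.random.randn(4, 8), ms.float32)  # anchors
    positive = Tensor(np.random.randn(4, 8), ms.float32)
    negative = Tensor(np.random.randn(4, 8), ms.float32)

    loss_fn = nn.TripletMarginLoss(p=2, swap=True, reduction='mean', margin=1.0)
    print(loss_fn(x, positive, negative))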
@@ -2239,7 +2278,7 @@ class NLLLoss(LossBase):
  :math:`N` is the batch size, :math:`c` belonging to :math:`[0, C-1]` is the class index,
  where :math:`C` is the number of classes.

- If `reduction` is not 'none' (default 'mean'), then
+ If `reduction` is not ``'none'`` (default ``'mean'``), then

  .. math::

@@ -2253,8 +2292,12 @@ class NLLLoss(LossBase):
  The data type only supports float32 or float16. Default: ``None`` .
  ignore_index (int): Specifies a target value that is ignored (typically for padding value)
  and does not contribute to the gradient. Default: ``-100`` .
- reduction (str): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` , or ``'sum'`` .
- Default: ``'mean'`` .
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the weighted mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.

  Inputs:
  - **logits** (Tensor) - Tensor of shape :math:`(N, C)`
@@ -2270,7 +2313,7 @@ class NLLLoss(LossBase):
  TypeError: If `weight` is not a Tensor.
  TypeError: If `ignore_index` is not an int.
  TypeError: If the data type of `weight` is not float16 or float32.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
  TypeError: If `logits` is not a Tensor.
  TypeError: If `labels` is not a Tensor.

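NLLLoss consumes log-probabilities rather than raw scores, so it is typically paired with a log-softmax. A minimal sketch under the documented interface:

    # Sketch: logits must be log-probabilities of shape (N, C); labels are
    # int32 class indices of shape (N,).
    import numpy as np
    import mindspore as ms
    from mindspore import nn, ops, Tensor

    raw = Tensor(np.random.randn(3, 5), ms.float32)   # N=3, C=5 raw scores
    log_probs = ops.log_softmax(raw, axis=1)
    labels = Tensor(np.array([1, 0, 4]), ms.int32)

    loss_fn = nn.NLLLoss(ignore_index=-100, reduction='mean')
    print(loss_fn(log_probs, labels))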
@@ -2356,7 +2399,7 @@ class CrossEntropyLoss(LossBase):
  where :math:`x` is the inputs, :math:`t` is the target, :math:`w` is the weight,
  N is the batch size, :math:`c` belonging to [0, C-1] is the class index, where :math:`C` is the number of classes.

- If reduction is not 'none' (default 'mean'), then
+ If reduction is not ``'none'`` (default ``'mean'``), then

  .. math::

@@ -2378,7 +2421,7 @@ class CrossEntropyLoss(LossBase):
  where :math:`x` is the inputs, :math:`t` is the target, :math:`w` is the weight,
  N is the batch size, :math:`c` belonging to [0, C-1] is the class index, where :math:`C` is the number of classes.

- If reduction is not 'none' (default 'mean'), then
+ If reduction is not ``'none'`` (default ``'mean'``), then

  .. math::

@@ -2394,8 +2437,13 @@ class CrossEntropyLoss(LossBase):
  The data type only supports float32 or float16. Default: ``None`` .
  ignore_index (int): Specifies a target value that is ignored (typically for padding value)
  and does not contribute to the gradient. Default: ``-100`` .
- reduction (str): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` , or ``'sum'`` .
- Default: ``'mean'`` .
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the weighted mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.
+
  label_smoothing (float): Label smoothing value, a regularization tool used to prevent the model
  from overfitting when calculating the loss. The value range is [0.0, 1.0]. Default value: ``0.0`` .

@@ -2414,7 +2462,7 @@ class CrossEntropyLoss(LossBase):
  TypeError: If `weight` is not a Tensor.
  TypeError: If `ignore_index` is not an int.
  TypeError: If the data type of `weight` is not float16 or float32.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
  TypeError: If `label_smoothing` is not a float.
  TypeError: If `logits` is not a Tensor.
  TypeError: If `labels` is not a Tensor.
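CrossEntropyLoss, by contrast, takes raw logits and applies the softmax internally; `label_smoothing` softens the one-hot targets. A short sketch:

    # Sketch: raw logits in, class indices as labels, smoothing of 0.1.
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    logits = Tensor(np.random.randn(3, 5), ms.float32)  # N=3, C=5 raw scores
    labels = Tensor(np.array([1, 0, 4]), ms.int32)      # class index per sample

    loss_fn = nn.CrossEntropyLoss(reduction='mean', label_smoothing=0.1)
    print(loss_fn(logits, labels))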
@@ -2562,8 +2610,13 @@ class CTCLoss(LossBase):

  Args:
  blank (int, optional): The blank label. Default: ``0`` .
- reduction (str, optional): Implements the reduction method to the output with
- ``'none'`` , ``'mean'`` , or ``'sum'`` . Default: ``'mean'`` .
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
+ ``'sum'`` . Default: ``'mean'`` .
+
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.
+
  zero_infinity (bool, optional): If loss is infinite, this parameter determines whether to set that loss
  and its corresponding gradient to zero. Default: ``False`` .

@@ -2583,7 +2636,7 @@ class CTCLoss(LossBase):
  TypeError: If `zero_infinity` is not a bool, `reduction` is not a string.
  TypeError: If the dtype of `log_probs` is not float or double.
  TypeError: If the dtype of `targets`, `input_lengths` or `target_lengths` is not int32 or int64.
- ValueError: If `reduction` is not "none", "mean" or "sum".
+ ValueError: If `reduction` is not ``"none"``, ``"mean"`` or ``"sum"``.
  ValueError: If the value of `blank` is not in range [0, C). C is the number of classes of `log_probs` .
  ValueError: If the shape of `log_probs` is :math:`(T, C)`, the dimension of `targets` is not 1 or 2.
  ValueError: If the shape of `log_probs` is :math:`(T, C)`, the first dimension of 2-D `target` is not 1.
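A usage sketch of the CTC input layout, assuming the common :math:`(T, N, C)` `log_probs` / :math:`(N, S)` `targets` arrangement with per-sequence lengths:

    # Sketch: T time steps, N batch, C classes (0 reserved for blank here),
    # S max target length.
    import numpy as np
    import mindspore as ms
    from mindspore import nn, ops, Tensor

    T, N, C, S = 10, 2, 4, 5
    log_probs = ops.log_softmax(Tensor(np.random.randn(T, N, C), ms.float32), axis=2)
    targets = Tensor(np.random.randint(1, C, (N, S)), ms.int32)  # labels exclude the blank
    input_lengths = Tensor(np.array([T, T]), ms.int32)
    target_lengths = Tensor(np.array([S, S]), ms.int32)

    loss_fn = nn.CTCLoss(blank=0, reduction='mean', zero_infinity=False)
    print(loss_fn(log_probs, targets, input_lengths, target_lengths))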
@@ -2669,9 +2722,13 @@ class GaussianNLLLoss(LossBase):
  full (bool, optional): Whether to include the constant term in the loss calculation. When :math:`full=True`,
  the constant term `const.` will be :math:`0.5 * log(2\pi)`. Default: ``False`` .
  eps (float, optional): Used to improve the stability of the log function. Default: ``1e-6`` .
- reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` , or
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
  ``'sum'`` . Default: ``'mean'`` .

+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.
+
  Inputs:
  - **logits** (Tensor) - Tensor of shape :math:`(N, *)` or :math:`(*)` where :math:`*` means any number of
  additional dimensions.
@@ -2690,7 +2747,7 @@ class GaussianNLLLoss(LossBase):
  TypeError: If `full` is not a bool.
  TypeError: If `eps` is not a float.
  ValueError: If `eps` is not a float within (0, inf).
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
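A usage sketch for GaussianNLLLoss, assuming the documented triple of inputs (predicted mean, target, per-element variance):

    # Sketch: the model predicts a mean and a positive variance per element.
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    mean = Tensor(np.random.randn(4, 2), ms.float32)    # predicted means
    target = Tensor(np.random.randn(4, 2), ms.float32)
    var = Tensor(np.ones((4, 2)), ms.float32)           # predicted variances, must be positive

    loss_fn = nn.GaussianNLLLoss(full=False, eps=1e-6, reduction='mean')
    print(loss_fn(mean, target, var))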
@@ -2758,9 +2815,13 @@ class HingeEmbeddingLoss(LossBase):
  Args:
  margin (float, int): Threshold defined by Hinge Embedding Loss :math:`margin`.
  Represented as :math:`\Delta` in the formula. Default: ``1.0`` .
- reduction (str): Specify the computing method to be applied to the outputs: ``'none'`` , ``'mean'`` , or
+ reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
  ``'sum'`` . Default: ``'mean'`` .

+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.
+
  Inputs:
  - **logits** (Tensor) - The predicted value, expressed as :math:`x` in the equation.
  Tensor of shape :math:`(*)` where :math:`*` means any number of dimensions.
@@ -2775,7 +2836,7 @@ class HingeEmbeddingLoss(LossBase):
  TypeError: If `labels` is not a Tensor.
  TypeError: If `margin` is not a float or int.
  ValueError: If `labels` does not have the same shape as `logits`, or they cannot be broadcast to each other.
- ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+ ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.

  Supported Platforms:
  ``Ascend`` ``GPU`` ``CPU``
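HingeEmbeddingLoss labels must be +1 or -1; for :math:`y = -1` the loss is :math:`\max(0, margin - x)`. A small worked sketch with the per-element values spelled out:

    # Sketch: for y = 1 the loss is x itself; for y = -1 it is max(0, margin - x).
    import numpy as np
    import mindspore as ms
    from mindspore import nn, Tensor

    logits = Tensor(np.array([0.3, 0.7, 1.5]), ms.float32)
    labels = Tensor(np.array([1, -1, -1]), ms.float32)  # only +1 / -1 are valid

    loss_fn = nn.HingeEmbeddingLoss(margin=1.0, reduction='none')
    # per-element: [0.3, max(0, 1.0 - 0.7), max(0, 1.0 - 1.5)] = [0.3, 0.3, 0.0]
    print(loss_fn(logits, labels))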