mindspore-2.7.0-cp311-cp311-win_amd64.whl → mindspore-2.7.1-cp311-cp311-win_amd64.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (290)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -1
  3. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  6. mindspore/_extends/parse/compile_config.py +24 -1
  7. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -2
  8. mindspore/_extends/parse/resources.py +1 -1
  9. mindspore/_extends/parse/standard_method.py +8 -1
  10. mindspore/_extends/parse/trope.py +2 -1
  11. mindspore/_extends/pijit/pijit_func_white_list.py +7 -22
  12. mindspore/avcodec-59.dll +0 -0
  13. mindspore/avdevice-59.dll +0 -0
  14. mindspore/avfilter-8.dll +0 -0
  15. mindspore/avformat-59.dll +0 -0
  16. mindspore/avutil-57.dll +0 -0
  17. mindspore/boost/base.py +29 -2
  18. mindspore/common/_decorator.py +3 -2
  19. mindspore/common/_grad_function.py +3 -1
  20. mindspore/common/_tensor_cpp_method.py +1 -1
  21. mindspore/common/_tensor_docs.py +275 -64
  22. mindspore/common/_utils.py +0 -44
  23. mindspore/common/api.py +285 -35
  24. mindspore/common/dump.py +7 -108
  25. mindspore/common/dynamic_shape/auto_dynamic_shape.py +1 -3
  26. mindspore/common/hook_handle.py +60 -0
  27. mindspore/common/jit_config.py +5 -1
  28. mindspore/common/jit_trace.py +27 -12
  29. mindspore/common/lazy_inline.py +5 -3
  30. mindspore/common/parameter.py +13 -107
  31. mindspore/common/recompute.py +4 -11
  32. mindspore/common/tensor.py +16 -169
  33. mindspore/communication/_comm_helper.py +11 -1
  34. mindspore/communication/comm_func.py +138 -4
  35. mindspore/communication/management.py +85 -1
  36. mindspore/config/op_info.config +0 -15
  37. mindspore/context.py +5 -85
  38. mindspore/dataset/engine/datasets.py +8 -4
  39. mindspore/dataset/engine/datasets_vision.py +1 -1
  40. mindspore/dataset/engine/validators.py +1 -15
  41. mindspore/dnnl.dll +0 -0
  42. mindspore/{experimental/llm_boost/ascend_native → graph}/__init__.py +7 -7
  43. mindspore/graph/custom_pass.py +55 -0
  44. mindspore/include/dataset/execute.h +2 -2
  45. mindspore/jpeg62.dll +0 -0
  46. mindspore/mindrecord/__init__.py +3 -3
  47. mindspore/mindrecord/common/exceptions.py +1 -0
  48. mindspore/mindrecord/config.py +1 -1
  49. mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
  50. mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
  51. mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
  52. mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
  53. mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
  54. mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
  55. mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
  56. mindspore/mindrecord/filereader.py +4 -4
  57. mindspore/mindrecord/filewriter.py +5 -5
  58. mindspore/mindrecord/mindpage.py +2 -2
  59. mindspore/mindrecord/tools/cifar10.py +1 -1
  60. mindspore/mindrecord/tools/cifar100.py +1 -1
  61. mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
  62. mindspore/mindrecord/tools/cifar10_to_mr.py +1 -1
  63. mindspore/mindrecord/tools/csv_to_mr.py +1 -1
  64. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  65. mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
  66. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
  67. mindspore/mindspore_backend_common.dll +0 -0
  68. mindspore/mindspore_backend_manager.dll +0 -0
  69. mindspore/mindspore_cluster.dll +0 -0
  70. mindspore/mindspore_common.dll +0 -0
  71. mindspore/mindspore_core.dll +0 -0
  72. mindspore/mindspore_cpu.dll +0 -0
  73. mindspore/mindspore_dump.dll +0 -0
  74. mindspore/mindspore_frontend.dll +0 -0
  75. mindspore/mindspore_glog.dll +0 -0
  76. mindspore/mindspore_hardware_abstract.dll +0 -0
  77. mindspore/mindspore_memory_pool.dll +0 -0
  78. mindspore/mindspore_ms_backend.dll +0 -0
  79. mindspore/mindspore_ops.dll +0 -0
  80. mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
  81. mindspore/mindspore_profiler.dll +0 -0
  82. mindspore/mindspore_pyboost.dll +0 -0
  83. mindspore/mindspore_pynative.dll +0 -0
  84. mindspore/mindspore_runtime_pipeline.dll +0 -0
  85. mindspore/mindspore_runtime_utils.dll +0 -0
  86. mindspore/mindspore_tools.dll +0 -0
  87. mindspore/mint/__init__.py +15 -10
  88. mindspore/mint/distributed/distributed.py +182 -62
  89. mindspore/mint/nn/__init__.py +2 -16
  90. mindspore/mint/nn/functional.py +4 -110
  91. mindspore/mint/nn/layer/__init__.py +0 -2
  92. mindspore/mint/nn/layer/activation.py +0 -6
  93. mindspore/mint/nn/layer/basic.py +0 -47
  94. mindspore/mint/nn/layer/conv.py +4 -4
  95. mindspore/mint/nn/layer/normalization.py +8 -13
  96. mindspore/mint/nn/layer/pooling.py +0 -4
  97. mindspore/nn/__init__.py +1 -3
  98. mindspore/nn/cell.py +16 -66
  99. mindspore/nn/layer/basic.py +49 -1
  100. mindspore/nn/layer/container.py +16 -0
  101. mindspore/nn/layer/embedding.py +4 -169
  102. mindspore/nn/layer/normalization.py +2 -1
  103. mindspore/nn/layer/thor_layer.py +4 -85
  104. mindspore/nn/optim/ada_grad.py +0 -1
  105. mindspore/nn/optim/adafactor.py +0 -1
  106. mindspore/nn/optim/adam.py +31 -124
  107. mindspore/nn/optim/adamax.py +0 -1
  108. mindspore/nn/optim/asgd.py +0 -1
  109. mindspore/nn/optim/ftrl.py +8 -102
  110. mindspore/nn/optim/lamb.py +0 -1
  111. mindspore/nn/optim/lars.py +0 -3
  112. mindspore/nn/optim/lazyadam.py +25 -218
  113. mindspore/nn/optim/momentum.py +5 -43
  114. mindspore/nn/optim/optimizer.py +6 -55
  115. mindspore/nn/optim/proximal_ada_grad.py +0 -1
  116. mindspore/nn/optim/rmsprop.py +0 -1
  117. mindspore/nn/optim/rprop.py +0 -1
  118. mindspore/nn/optim/sgd.py +0 -1
  119. mindspore/nn/optim/tft_wrapper.py +0 -1
  120. mindspore/nn/optim/thor.py +0 -2
  121. mindspore/nn/probability/bijector/bijector.py +7 -8
  122. mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
  123. mindspore/nn/probability/bijector/power_transform.py +20 -21
  124. mindspore/nn/probability/bijector/scalar_affine.py +5 -5
  125. mindspore/nn/probability/bijector/softplus.py +13 -14
  126. mindspore/nn/wrap/grad_reducer.py +4 -74
  127. mindspore/numpy/array_creations.py +2 -2
  128. mindspore/numpy/fft.py +9 -9
  129. mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
  130. mindspore/onnx/onnx_export.py +137 -0
  131. mindspore/opencv_core4110.dll +0 -0
  132. mindspore/opencv_imgcodecs4110.dll +0 -0
  133. mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
  134. mindspore/ops/__init__.py +2 -0
  135. mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
  136. mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
  137. mindspore/ops/_op_impl/cpu/__init__.py +0 -5
  138. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +16 -22
  139. mindspore/ops/auto_generate/gen_extend_func.py +2 -7
  140. mindspore/ops/auto_generate/gen_ops_def.py +98 -141
  141. mindspore/ops/auto_generate/gen_ops_prim.py +12708 -12686
  142. mindspore/ops/communication.py +97 -0
  143. mindspore/ops/composite/__init__.py +5 -2
  144. mindspore/ops/composite/base.py +15 -1
  145. mindspore/ops/composite/multitype_ops/__init__.py +3 -1
  146. mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
  147. mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
  148. mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
  149. mindspore/ops/function/__init__.py +1 -0
  150. mindspore/ops/function/array_func.py +14 -12
  151. mindspore/ops/function/comm_func.py +3883 -0
  152. mindspore/ops/function/debug_func.py +3 -4
  153. mindspore/ops/function/math_func.py +45 -54
  154. mindspore/ops/function/nn_func.py +75 -294
  155. mindspore/ops/function/random_func.py +9 -18
  156. mindspore/ops/functional.py +2 -0
  157. mindspore/ops/functional_overload.py +354 -18
  158. mindspore/ops/operations/__init__.py +2 -5
  159. mindspore/ops/operations/_custom_ops_utils.py +7 -9
  160. mindspore/ops/operations/_inner_ops.py +1 -38
  161. mindspore/ops/operations/_rl_inner_ops.py +0 -933
  162. mindspore/ops/operations/array_ops.py +1 -0
  163. mindspore/ops/operations/comm_ops.py +94 -2
  164. mindspore/ops/operations/custom_ops.py +228 -19
  165. mindspore/ops/operations/debug_ops.py +27 -29
  166. mindspore/ops/operations/manually_defined/ops_def.py +27 -306
  167. mindspore/ops/operations/nn_ops.py +2 -2
  168. mindspore/ops/operations/sparse_ops.py +0 -83
  169. mindspore/ops/primitive.py +1 -17
  170. mindspore/ops/tensor_method.py +72 -3
  171. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
  172. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
  173. mindspore/ops_generate/api/functions_cc_generator.py +53 -4
  174. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
  175. mindspore/ops_generate/common/gen_constants.py +11 -10
  176. mindspore/ops_generate/common/op_proto.py +18 -1
  177. mindspore/ops_generate/common/template.py +102 -245
  178. mindspore/ops_generate/common/template_utils.py +212 -0
  179. mindspore/ops_generate/gen_custom_ops.py +69 -0
  180. mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
  181. mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
  182. mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
  183. mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
  184. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
  185. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
  186. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
  187. mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
  188. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
  189. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
  190. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
  191. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
  192. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
  193. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
  194. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
  195. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
  196. mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
  197. mindspore/ops_generate/resources/yaml_loader.py +13 -0
  198. mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
  199. mindspore/parallel/_cell_wrapper.py +1 -1
  200. mindspore/parallel/_parallel_serialization.py +1 -4
  201. mindspore/parallel/_utils.py +29 -6
  202. mindspore/parallel/checkpoint_transform.py +18 -2
  203. mindspore/parallel/cluster/process_entity/_api.py +24 -32
  204. mindspore/parallel/cluster/process_entity/_utils.py +9 -5
  205. mindspore/{experimental/llm_boost/atb → parallel/distributed}/__init__.py +21 -23
  206. mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
  207. mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
  208. mindspore/parallel/strategy.py +336 -0
  209. mindspore/parallel/transform_safetensors.py +117 -16
  210. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +3 -0
  211. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
  212. mindspore/profiler/common/constant.py +5 -0
  213. mindspore/profiler/common/file_manager.py +9 -0
  214. mindspore/profiler/common/msprof_cmd_tool.py +38 -2
  215. mindspore/profiler/common/path_manager.py +56 -24
  216. mindspore/profiler/common/profiler_context.py +2 -12
  217. mindspore/profiler/common/profiler_info.py +3 -3
  218. mindspore/profiler/common/profiler_path_manager.py +13 -0
  219. mindspore/profiler/common/util.py +30 -3
  220. mindspore/profiler/experimental_config.py +2 -1
  221. mindspore/profiler/platform/npu_profiler.py +33 -6
  222. mindspore/run_check/_check_version.py +108 -24
  223. mindspore/runtime/__init__.py +3 -2
  224. mindspore/runtime/executor.py +11 -3
  225. mindspore/runtime/memory.py +112 -0
  226. mindspore/swresample-4.dll +0 -0
  227. mindspore/swscale-6.dll +0 -0
  228. mindspore/tinyxml2.dll +0 -0
  229. mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
  230. mindspore/tools/data_dump.py +130 -0
  231. mindspore/tools/sdc_detect.py +91 -0
  232. mindspore/tools/stress_detect.py +63 -0
  233. mindspore/train/__init__.py +6 -6
  234. mindspore/train/_utils.py +5 -18
  235. mindspore/train/amp.py +6 -4
  236. mindspore/train/callback/_checkpoint.py +0 -9
  237. mindspore/train/callback/_train_fault_tolerance.py +69 -18
  238. mindspore/train/data_sink.py +1 -5
  239. mindspore/train/model.py +38 -211
  240. mindspore/train/serialization.py +126 -387
  241. mindspore/turbojpeg.dll +0 -0
  242. mindspore/utils/__init__.py +6 -3
  243. mindspore/utils/dlpack.py +92 -0
  244. mindspore/utils/dryrun.py +1 -1
  245. mindspore/utils/runtime_execution_order_check.py +10 -0
  246. mindspore/utils/sdc_detect.py +14 -12
  247. mindspore/utils/stress_detect.py +43 -0
  248. mindspore/utils/utils.py +144 -8
  249. mindspore/version.py +1 -1
  250. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
  251. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/RECORD +254 -267
  252. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -210
  253. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
  254. mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
  255. mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
  256. mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
  257. mindspore/experimental/llm_boost/register.py +0 -130
  258. mindspore/experimental/llm_boost/utils.py +0 -31
  259. mindspore/include/OWNERS +0 -7
  260. mindspore/mindspore_cpu_res_manager.dll +0 -0
  261. mindspore/mindspore_ops_kernel_common.dll +0 -0
  262. mindspore/mindspore_res_manager.dll +0 -0
  263. mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
  264. mindspore/nn/reinforcement/_batch_read_write.py +0 -142
  265. mindspore/nn/reinforcement/_tensors_queue.py +0 -152
  266. mindspore/nn/reinforcement/tensor_array.py +0 -145
  267. mindspore/opencv_core452.dll +0 -0
  268. mindspore/opencv_imgcodecs452.dll +0 -0
  269. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
  270. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
  271. mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
  272. mindspore/ops/_op_impl/cpu/buffer_append.py +0 -28
  273. mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
  274. mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
  275. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
  276. mindspore/ops/operations/_tensor_array.py +0 -359
  277. mindspore/ops/operations/rl_ops.py +0 -288
  278. mindspore/parallel/_offload_context.py +0 -275
  279. mindspore/parallel/_recovery_context.py +0 -115
  280. mindspore/parallel/_transformer/__init__.py +0 -35
  281. mindspore/parallel/_transformer/layers.py +0 -765
  282. mindspore/parallel/_transformer/loss.py +0 -251
  283. mindspore/parallel/_transformer/moe.py +0 -693
  284. mindspore/parallel/_transformer/op_parallel_config.py +0 -222
  285. mindspore/parallel/_transformer/transformer.py +0 -3124
  286. mindspore/parallel/mpi/_mpi_config.py +0 -116
  287. mindspore/train/memory_profiling_pb2.py +0 -298
  288. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
  289. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
  290. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
@@ -1294,308 +1294,6 @@ class TypeAs(Primitive):
         return pyboost_type_as(self, [input, other])
 
 
-def to_sequence(val):
-    """
-    to_sequence
-    """
-    if isinstance(val, (tuple, list)):
-        return tuple(val)
-    return (val,)
-
-
-class EmbeddingTableExport(Primitive):
-    """
-    EmbeddingTableExport
-    """
-
-    @prim_attr_register
-    def __init__(self, embedding_dim, value_total_len, export_mode="all",
-                 only_var_flag=False, file_type="bin", table_name=(),
-                 filter_export_flag=False, steps_to_live_list=()):
-        """Initialize EmbeddingTableExport"""
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingTableImport(Primitive):
-    """
-    EmbeddingTableImport
-    """
-
-    @prim_attr_register
-    def __init__(self, embedding_dim, value_total_len,
-                 only_var_flag=False, file_type="bin", table_name=()):
-        """Initialize EmbeddingTableImport"""
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingComputeVarImport(Primitive):
-    """
-    EmbeddingComputeVarImport
-    """
-
-    @prim_attr_register
-    def __init__(self, table_name=()):
-        """Initialize EmbeddingComputeVarImport"""
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingComputeVarExport(Primitive):
-    """
-    EmbeddingComputeVarExport
-    """
-
-    @prim_attr_register
-    def __init__(self, table_name=()):
-        """Initialize EmbeddingComputeVarExport"""
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class InitEmbeddingHashmap(Primitive):
-    """
-    InitEmbeddingHashmap
-    """
-    @prim_attr_register
-    def __init__(self, value_total_len, embedding_dim, _table_id,
-                 bucket_size=0, dtype=mstype.float32, initializer_mode="",
-                 constant_valu=0., min=-2., max=2., mu=0., sigma=1., seed=0,
-                 seed2=0, filter_mode="no_filter", optimizer_mode="",
-                 optimizer_params=()):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-def init_embedding_hashmap(table_id, value_total_len, embedding_dim, _table_id,
-                           bucket_size=0, dtype=mstype.float32, initializer_mode='',
-                           constant_value=0.0, min=-2.0, max=2.0, mu=0.0, sigma=1.0,
-                           seed=0, seed2=0, filter_mode='no_filter',
-                           optimizer_mode='', optimizer_params=()):
-    """
-    init_embedding_hashmap
-    """
-    op = _get_cache_prim(InitEmbeddingHashmap)(value_total_len, embedding_dim, _table_id,
-                                               bucket_size, dtype, initializer_mode,
-                                               constant_value, min, max, mu, sigma, seed,
-                                               seed2, filter_mode, optimizer_mode, optimizer_params)
-    return op(table_id)
-
-
-class InitPartitionMap(Primitive):
-    """
-    InitPartitionMap
-    """
-    @prim_attr_register
-    def __init__(self, _embedding_dim, _max_key_num,
-                 _ps_num=1, partition_num=65537):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-def init_partition_map(ps_num, ps_ids, _embedding_dim, _max_key_num,
-                       _ps_num=1, partition_num=65537):
-    """
-    init_partition_map
-    """
-    op = _get_cache_prim(InitPartitionMap)(_embedding_dim, _max_key_num, _ps_num, partition_num)
-    return op(ps_num, ps_ids)
-
-
-class EmbeddingApplyAdam(Primitive):
-    """
-    EmbeddingApplyAdam
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _max_key_num, mask_zero=(0,),
-                 padding_key=(0,), padding_key_mask=(1,),
-                 completion_key=(0,), completion_key_mask=(1,)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingApplyAdamW(Primitive):
-    """
-    EmbeddingApplyAdam
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _max_key_num, amsgrad=(0,),
-                 maximize=(0,), mask_zero=(0,), padding_key=(0,),
-                 padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingApplyAdaGrad(Primitive):
-    """
-    EmbeddingApplyAdaGrad
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _max_key_num, mask_zero=(0,),
-                 padding_key=(0,), padding_key_mask=(1,),
-                 completion_key=(0,), completion_key_mask=(1,)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingApplyFtrl(Primitive):
-    """
-    EmbeddingApplyFtrl
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _max_key_num, mask_zero=(0,),
-                 padding_key=(0,), padding_key_mask=(1,),
-                 completion_key=(0,), completion_key_mask=(1,)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingTableFind(Primitive):
-    """
-    EmbeddingTableFind
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _embedding_dim, _max_key_num,
-                 _table_id, default_value=(-1.), _use_counter_filter=0):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-        self.add_prim_attr("_execute_times", 2)
-
-
-def embedding_table_find(table_id, keys, embedding_dim, _max_key_num,
-                         _table_id, default_value=(-1.0,), _use_counter_filter=0):
-    r"""
-    embedding_table_find
-    """
-    _embedding_dim = embedding_dim if isinstance(embedding_dim, int) else embedding_dim[_table_id]
-    op = _get_cache_prim(EmbeddingTableFind)(to_sequence(embedding_dim), _embedding_dim,
-                                             _max_key_num, _table_id,
-                                             to_sequence(default_value),
-                                             _use_counter_filter)
-    return op(table_id, keys)
-
-
-class EmbeddingTableFindAndInit(Primitive):
-    """
-    EmbeddingTableFindAndInit
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, value_total_len, _embedding_dim, _table_id,
-                 _max_key_num, initializer_mode=("random_uniform",),
-                 constant_value=(0.,), min=(-2.,), max=(2.,), mu=(0.,),
-                 sigma=(1.,), seed=(0,), seed2=(0,),
-                 filter_mode=("no_filter",), filter_freq=(0,),
-                 default_key_or_value=(0,), default_key=(0,),
-                 default_value=(0.,), completion_key=(0,),
-                 completion_key_mask=(1,), optimizer_mode=(),
-                 optimizer_params=(), _use_counter_filter=0,
-                 backward_mode="adam",
-                 backward_int_params=((0,), (0,), (0,), (1,)),
-                 backward_float_params=(0.9, 0.99, 0.001, 0.9, 0.999, 1e-08)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-        self.add_prim_attr("_execute_times", 2)
-
-
-def embedding_table_find_and_init(table_id, keys, max_grad_norm, parameter, embedding_dim,
-                                  value_total_len, _table_id, _max_key_num,
-                                  initializer_mode=('random_uniform',), constant_value=(0.,),
-                                  min=(-2.,), max=(2.,), mu=(0.,), sigma=(1.,), seed=(0,),
-                                  seed2=(0,), filter_mode=("no_filter",),
-                                  filter_freq=(0,), default_key_or_value=(0,),
-                                  default_key=(0,), default_value=(0.,),
-                                  completion_key=(0,), completion_key_mask=(1,),
-                                  optimizer_mode=(), optimizer_params=(), _use_counter_filter=0,
-                                  backward_mode="adam", backward_int_params=((0,), (0,), (0,), (1,)),
-                                  backward_float_params=(0.9, 0.99, 0.001, 0.9, 0.999, 1e-08)):
-    """
-    embedding_table_find_and_init
-
-    backward_int_params (Union[tuple[tuple[int]], list[list[int]]]):
-        - when the backward_mode is 'adam', 'ftrl' or 'adagrad',
-          it means [[global_step], mask_zero, padding_key, padding_key_mask]
-        - when the backward_mode is 'adamw', it means:
-          [[global_step], amsgrad, maximize, mask_zero, padding_key, padding_key_mask]
-    backward_float_params (Union[tuple[float], list[float]]):
-        - when the backward_mode is 'adam', it means:
-          [beta1_power, beta2_power, lr, beta1, beta2, epsilon]
-        - when the backward_mode is 'ftrl', it means:
-          [lr, lr_power, lambda1, lambda2]
-        - when the backward_mode is 'adamw', it means:
-          [beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon]
-        - when the backward_mode is 'adagrad', it means [lr,]
-    """
-    _embedding_dim = embedding_dim if isinstance(embedding_dim, int) else embedding_dim[_table_id]
-    op = _get_cache_prim(EmbeddingTableFindAndInit)(to_sequence(embedding_dim), to_sequence(value_total_len),
-                                                    _embedding_dim, _table_id, _max_key_num,
-                                                    to_sequence(initializer_mode),
-                                                    to_sequence(constant_value), to_sequence(min),
-                                                    to_sequence(max), to_sequence(mu),
-                                                    to_sequence(sigma), to_sequence(seed),
-                                                    to_sequence(seed2), to_sequence(filter_mode),
-                                                    to_sequence(filter_freq), to_sequence(default_key_or_value),
-                                                    to_sequence(default_key), to_sequence(default_value),
-                                                    to_sequence(completion_key), to_sequence(completion_key_mask),
-                                                    to_sequence(optimizer_mode), to_sequence(optimizer_params),
-                                                    _use_counter_filter,
-                                                    backward_mode, backward_int_params, backward_float_params)
-    return op(table_id, keys, max_grad_norm, parameter)
-
-
-class FakeRemoteLookupUniqued(Primitive):
-
-    """
-    FakeRemoteLookupUniqued
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, value_total_len, _embedding_dim, _table_id,
-                 _max_key_num, initializer_mode=('random_uniform',), constant_value=(0.,),
-                 min=(-2.,), max=(2.,), mu=(0.,), sigma=(1.,), seed=(0,), seed2=(0,),
-                 filter_mode=("no_filter",), filter_freq=(0,),
-                 default_key_or_value=(0,), default_key=(0,), default_value=(0.,),
-                 completion_key=(0,), completion_key_mask=(1,),
-                 optimizer_mode=(), optimizer_params=(), _use_counter_filter=0,
-                 backward_mode="adam", backward_int_params=((0,), (0,), (0,), (1,)),
-                 backward_float_params=(0.9, 0.99, 0.001, 0.9, 0.999, 1e-08)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-        self.add_prim_attr("_execute_times", 2)
-
-
-def fake_remote_lookup_uniqued(table_id, keys, actual_keys_num, unique_indices,
-                               key_count, max_grad_norm, parameter,
-                               embedding_dim, value_total_len, _table_id, _max_key_num,
-                               initializer_mode=('random_uniform',), constant_value=(0.,),
-                               min=(-2.,), max=(2.,), mu=(0.,), sigma=(1.,), seed=(0,),
-                               seed2=(0,), filter_mode=("no_filter",),
-                               filter_freq=(0,), default_key_or_value=(0,),
-                               default_key=(0,), default_value=(0.,),
-                               completion_key=(0,), completion_key_mask=(1,),
-                               optimizer_mode=(), optimizer_params=(), _use_counter_filter=0,
-                               backward_mode='adam', backward_int_params=((0,), (0,), (0,), (1,)),
-                               backward_float_params=(0.9, 0.99, 0.001, 0.9, 0.999, 1e-08)):
-    """
-    fake_remote_lookup_uniqued
-
-    backward_mode (str): determine the optimizer used by backpropagation,
-        valid values are ["adam", "adamw", "adagrad", "ftrl"]
-    backward_int_params (Union[tuple[tuple[int]], list[list[int]]]):
-        - when the backward_mode is 'adam', 'ftrl' or 'adagrad',
-          it means [[global_step], mask_zero, padding_key, padding_key_mask]
-        - when the backward_mode is 'adamw', it means:
-          [[global_step], amsgrad, maximize, mask_zero, padding_key, padding_key_mask]
-    backward_float_params (Union[tuple[float], list[float]]):
-        - when the backward_mode is 'adam', it means:
-          [beta1_power, beta2_power, lr, beta1, beta2, epsilon]
-        - when the backward_mode is 'ftrl', it means:
-          [lr, lr_power, lambda1, lambda2]
-        - when the backward_mode is 'adamw', it means:
-          [beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon]
-        - when the backward_mode is 'adagrad', it means [lr,]
-    """
-    _embedding_dim = embedding_dim if isinstance(embedding_dim, int) else embedding_dim[_table_id]
-    op = _get_cache_prim(FakeRemoteLookupUniqued)(to_sequence(embedding_dim), to_sequence(value_total_len),
-                                                  _embedding_dim, _table_id, _max_key_num,
-                                                  to_sequence(initializer_mode), to_sequence(constant_value),
-                                                  to_sequence(min), to_sequence(max), to_sequence(mu),
-                                                  to_sequence(sigma), to_sequence(seed), to_sequence(seed2),
-                                                  to_sequence(filter_mode), to_sequence(filter_freq),
-                                                  to_sequence(default_key_or_value), to_sequence(default_key),
-                                                  to_sequence(default_value), to_sequence(completion_key),
-                                                  to_sequence(completion_key_mask), to_sequence(optimizer_mode),
-                                                  to_sequence(optimizer_params), _use_counter_filter,
-                                                  backward_mode, backward_int_params, backward_float_params)
-    return op(table_id, keys, actual_keys_num, unique_indices, key_count, max_grad_norm, parameter)
-
-
 # Following is Python Infer Value.
 # A valid infer value function should be:
 #
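The deleted lookup helpers above thread optimizer state through two flat tuples whose layout depends on backward_mode, as described in the removed docstrings. For readers of this hunk, a sketch of the documented 'adam' layout, using the defaults from the deleted signatures (global_step is a placeholder, not a value from the diff):

# Hypothetical packing for backward_mode="adam", following the removed docstring.
global_step = 0
backward_int_params = (
    (global_step,),  # [global_step]
    (0,),            # mask_zero
    (0,),            # padding_key
    (1,),            # padding_key_mask
)
backward_float_params = (
    0.9,    # beta1_power
    0.99,   # beta2_power
    0.001,  # lr
    0.9,    # beta1
    0.999,  # beta2
    1e-08,  # epsilon
)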
@@ -1628,7 +1326,13 @@ def infer_value_for_Concat(tensors, axis):
         return None
 
     tensor_to_concat = [x.asnumpy() for x in tensors]
-    return Tensor(np.concatenate(tensor_to_concat, axis), dtype=tensors[0].dtype)
+    out = np.concatenate(tensor_to_concat, axis)
+    if out.dtype != np.float32:
+        return Tensor(out)
+    for x in tensors:
+        if x.dtype in [mstype.float16, mstype.float32]:
+            return Tensor(out)
+    return Tensor(out, dtype=mstype.bfloat16)
 
 
 def infer_value_for_GatherD(input, dim, index):
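The replacement logic compensates for NumPy's lack of a bfloat16 type: asnumpy() widens bfloat16 inputs to float32, so a float32 concatenation result whose inputs include neither float16 nor float32 must have started out as bfloat16 and is re-stamped accordingly. A pure-Python restatement of the decision rule (dtype names as strings, for illustration only):

def concat_result_dtype(numpy_result_dtype, input_dtypes):
    """Sketch of the dtype choice made by the patched infer function."""
    if numpy_result_dtype != "float32":
        return numpy_result_dtype  # trust NumPy's own type promotion
    if any(d in ("float16", "float32") for d in input_dtypes):
        return "float32"           # a genuine float32/float16 input exists
    return "bfloat16"              # the float32 came from widening bfloat16

assert concat_result_dtype("float32", ["bfloat16", "bfloat16"]) == "bfloat16"
assert concat_result_dtype("float32", ["float32", "bfloat16"]) == "float32"
assert concat_result_dtype("int64", ["int64", "int64"]) == "int64"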
@@ -1959,11 +1663,27 @@ def infer_value_for_BroadcastTo(x, shape):
     validator.check_value_type("shape", shape, [tuple], "BroadcastTo")
     shape = list(shape)
 
-    np_data = np.broadcast_to(x.asnumpy(), shape)
-    if 0 in shape:
+    # Resolve -1 entries and support input rank < target rank.
+    input_shape = list(x.shape)
+    target_shape = list(shape)
+    in_rank = len(input_shape)
+    out_rank = len(target_shape)
+    for k in range(1, out_rank + 1):
+        t = target_shape[-k]
+        if t == -1:
+            if k <= in_rank:
+                target_shape[-k] = input_shape[-k]
+            else:
+                pass
+
+    resolved_shape = target_shape
+
+    np_data = np.broadcast_to(x.asnumpy(), resolved_shape)
+    if 0 in resolved_shape:
         init_func = Zero()
         init_func.__enable_zero_dim__ = True
-        out = Tensor(shape=shape, dtype=x.dtype, init=init_func)
+        out = Tensor(shape=resolved_shape, dtype=x.dtype, init=init_func)
+        out.init_data()
         return out
     return Tensor(np_data)
 
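The resolution step added here aligns the target shape with the input shape from the right and substitutes the matching input dimension for each -1 that overlaps the input's rank; a -1 to the left of that overlap is deliberately left in place for np.broadcast_to to reject. A standalone restatement of the same rule:

def resolve_broadcast_shape(input_shape, target_shape):
    """Resolve -1 entries the way the patched infer function does (sketch)."""
    target = list(target_shape)
    for k in range(1, len(target) + 1):
        if target[-k] == -1 and k <= len(input_shape):
            target[-k] = input_shape[-k]
    return target

assert resolve_broadcast_shape((2, 3), (4, -1, -1)) == [4, 2, 3]
assert resolve_broadcast_shape((3,), (2, 3)) == [2, 3]  # output rank may grow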
@@ -2014,6 +1734,7 @@ def infer_value_for_Reshape(x, shape):
         init_func = Zero()
         init_func.__enable_zero_dim__ = True
         out = Tensor(shape=shape, dtype=x.dtype, init=init_func)
+        out.init_data()
     else:
         out = Tensor(x.asnumpy().reshape(shape))
     return out
@@ -6868,8 +6868,8 @@ class CTCLossV2(Primitive):
         >>> print(neg_log_hood)
         [-2.2986124]
         >>> print(log_alpha)
-        [[[0.3 0.3 -inf -inf -inf]
-          [1.2 1.8931472 1.2 -inf -inf]]]
+        [[[0.3 0.3 -inf -inf 1.8931472 1.2 0. 0. ]
+          [0. 0. 0. 0. 0. 0. 0. 0. ]]]
         """
 
     @prim_attr_register
@@ -2606,89 +2606,6 @@ class RaggedTensorToTensor(Primitive):
         self.add_prim_attr("num_row_partition_tensors", self.num_row_partition_tensors)
 
 
-class SparseCross(Primitive):
-    """
-    Generates sparse cross from a list of sparse and dense tensors.
-
-    Args:
-        hashed_output (bool): If true, returns the hash of the cross instead of the string. This will allow us
-            avoiding string manipulations.
-        num_buckets (int): An int that is >= 0. It is used if "hashed_output" is true.
-            output = hashed_value % num_buckets if num_buckets > 0 else "hashed_value".
-        hash_key (int): Specify the hash_key that will be used by the "FingerprintCat64" function to combine the
-            crosses fingerprints.
-        out_type (mindspore.dtype): The output data type. Defaults to "int64".
-        internal_type (mindspore.dtype): An type int64.
-
-    Inputs:
-        - **indices** (list(Tensor)) - A list of Tensor objects with type int64. 2-D.
-          Indices of each input SparseTensor.
-        - **values** (list(Tensor)) - A list of Tensor objects with types from: int64.
-          1-D. values of each SparseTensor.
-        - **shapes** (list(Tensor)) - A list with the same length as indices of Tensor objects with type int64.
-          1-D. Shapes of each SparseTensor.
-        - **dense_inputs** (list(Tensor)) - A list of Tensor objects with types from: int64.
-          2-D. Columns represented by dense Tensor.
-
-    Outputs:
-        - **output_indices** (Tensor) - A Tensor of type int64. 2-D. Indices of the concatenated SparseTensor.
-        - **output_values** (Tensor) - A Tensor of type "out_type". 1-D.
-          Non-empty values of the concatenated or hashed SparseTensor.
-        - **output_shape** (Tensor) - A Tensor of type int64. 1-D. Shape of the concatenated SparseTensor.
-
-    Raises:
-        TypeError: The indices shape rank is not equal to the shape rank.
-        TypeError: The indices element number is not equal to the value element number.
-        TypeError: The indices shape rank should be 2.
-        TypeError: The denses shape rank should be 2.
-        TypeError: The shapes rank should be 2.
-
-    Supported Platforms:
-        ``CPU``
-
-    Examples:
-        >>> from mindspore.ops.operations.sparse_ops import SparseCross
-        >>> indice1 = Tensor([[0,0],[1,0],[1,1]], dtype=mstype.int64)
-        >>> value1 = Tensor([1, 2, 3], dtype=mstype.int64)
-        >>> shape1 = Tensor([2, 2], dtype=mstype.int64)
-        >>> dense1 = Tensor([[1],[2]], dtype=mstype.int64)
-        >>> indice2 = Tensor([[0,0],[1,0],[1,1]], dtype=mstype.int64)
-        >>> value2 = Tensor([1, 2, 3], dtype=mstype.int64)
-        >>> shape2 = Tensor([2, 2], dtype=mstype.int64)
-        >>> dense2 = Tensor([[1],[2]], dtype=mstype.int64)
-        >>> indices = [indice1, indice2]
-        >>> values = [value1, value2]
-        >>> shapes = [shape1, shape2]
-        >>> dense_inputs = [dense1, dense2]
-        >>> hashed_output = True
-        >>> hash_key = 2
-        >>> out_type = mstype.int64
-        >>> internal_type = mstype.int64
-        >>> num_buckets = 0
-        >>> sparse_cross = SparseCross(hashed_output, hash_key, out_type, internal_type, num_buckets)
-        >>> out = sparse_cross(indices, values, shapes, dense_inputs)
-        >>> print(out)
-        (Tensor(shape=[5, 2], dtype=Int64, value=
-        [[0, 0],
-         [1, 0],
-         [1, 1],
-         [1, 2],
-         [1, 3]]), Tensor(shape=[5], dtype=Int64, value= [1350190460805457680, 6319552725219729347,
-        4652439303631496997, 7670687697825594049, 174086171018132662]), Tensor(shape=[2], dtype=Int64, value= [2, 4]))
-    """
-
-    @prim_attr_register
-    def __init__(self, hashed_output, hash_key, out_type, internal_type, num_buckets=0):
-        """Initialize SparseCross."""
-        self.init_prim_io_names(inputs=["indices", "values", "shapes", "dense_inputs"],
-                                outputs=["output_indices", "output_values", "output_shape"])
-        validator.check_value_type("hashed_output", hashed_output, [bool], self.name)
-        validator.check_value_type("hash_key", hash_key, [int], self.name)
-        validator.check_value_type("out_type", out_type, [mstype.Type], self.name)
-        validator.check_value_type("internal_type", internal_type, [mstype.Type], self.name)
-        validator.check_value_type("num_buckets", num_buckets, [int], self.name)
-
-
 class RaggedTensorToSparse(Primitive):
     r"""
     Converts a RaggedTensor into a SparseTensor with the same values.
@@ -20,7 +20,7 @@ import copy
 import numpy as np
 from mindspore.common.api import _wrap_func
 from mindspore.log import _LogActionOnce
-from mindspore import context, log as logger
+from mindspore import log as logger
 from mindspore.parallel._utils import _is_in_auto_parallel_mode, _is_in_data_parallel_mode, \
     _is_in_hybrid_parallel_mode, SUPPORTED_TUPLE_IN_TUPLE_STRATEGY
 from mindspore.parallel._ps_context import _is_ps_mode, _is_role_sched
@@ -214,22 +214,6 @@ class Primitive(Primitive_):
         if in_strategy is None and out_strategy is not None:
             raise ValueError(f'The out_strategy of {self.name} is {out_strategy}, need to set in_strategy,'
                              f' but got none')
-        if not _is_in_auto_parallel_mode():
-            mode = context.get_auto_parallel_context("parallel_mode")
-            if in_strategy is not None:
-                logger.warning(f"The in_strategy/in_layout of the operator in your network "
-                               f"will not take effect in {mode} mode. "
-                               f"This means the the shard function called in the network is ignored. \n"
-                               f"If you want to enable it, please use semi auto or auto parallel mode by "
-                               f"context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL "
-                               f"or context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL)")
-            if out_strategy is not None:
-                logger.warning(f"The out_strategy/out_layout of the operator in your network "
-                               f"will not take effect in {mode} mode."
-                               f" This means the the shard function called in the network is ignored. \n"
-                               f"If you want to enable it, please use semi auto or auto parallel mode by "
-                               f"context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL "
-                               f"or context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL)")
 
     def del_prim_attr(self, name):
         """
@@ -239,6 +239,7 @@ from mindspore.ops.function.array_func import tensor_scatter_add
 from mindspore.ops.auto_generate import select, select_ext_view
 # 94 sigmoid
 from mindspore.ops.auto_generate import sigmoid
+from mindspore.ops.auto_generate import inplace_sigmoid as sigmoid_
 # 95 sin
 from mindspore.ops.auto_generate import sin
 # 96 size
@@ -366,7 +367,7 @@ from mindspore.ops.auto_generate import acos_ext, acosh_ext, asin_ext, asinh_ext
 from mindspore.ops.function.math_func import median
 
 # 156
-
+from mindspore.ops.function.math_func import permute
 # 157
 from mindspore.ops.auto_generate import xlogy_op
 
@@ -1040,6 +1041,10 @@ def tensor_sigmoid(input):
     return sigmoid(input)
 
 
+def tensor_sigmoid_(input):
+    return sigmoid_(input)
+
+
 # 95 sin
 def tensor_sin(input):
     return sin(input)
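Together with the inplace_sigmoid import added earlier in this file, this wrapper exposes the in-place variant as a tensor method. A hedged usage sketch, assuming the method surfaces as Tensor.sigmoid_ on a backend that implements the in-place kernel:

import mindspore as ms
from mindspore import Tensor, ops

x = Tensor([0.0, 1.0], ms.float32)
y = ops.sigmoid(x)  # out-of-place: x is unchanged
x.sigmoid_()        # in-place: x now holds the sigmoid of its old values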
@@ -1137,6 +1142,7 @@ def deprecated_tensor_sum(input, axis=None, dtype=None, keepdims=False, initial=
 
 # 105 swapaxes
 
+
 # 106 t
 def tensor_t(input):
     return t(input)
@@ -1165,6 +1171,7 @@ def deprecated_tensor_tile(input, reps):
 
 # 109 tolist
 
+
 # 110 topk
 def tensor_topk(input, k, dim=-1, largest=True, sorted=True):
     return topk(input, k, dim, largest, sorted)
@@ -1184,6 +1191,11 @@ def deprecated_tensor_transpose(input, *axes):
     return transpose(input, perm)
 
 
+def deprecated_tensor_permute(input, *axis):
+    perm = validator.check_transpose_axis(axis, input.ndim)
+    return permute(input, perm)
+
+
 # 112 tril
 def deprecated_tensor_tril(input, diagonal=0):
     return tril(input, diagonal)
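Like deprecated_tensor_transpose directly above it, the new permute wrapper normalizes its axes through check_transpose_axis, which accepts both calling styles. A small illustrative sketch (shapes are hypothetical):

import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.zeros((2, 3, 4)), ms.float32)
print(x.permute(2, 0, 1).shape)    # (4, 2, 3), axes unpacked
print(x.permute((2, 0, 1)).shape)  # (4, 2, 3), axes as a single tuple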
@@ -1196,6 +1208,7 @@ def tensor_trunc(input):
 
 # 114 type
 
+
 # 115 type_as
 def deprecated_tensor_type_as(input, other):
     return input.astype(other.dtype)
@@ -1511,12 +1524,18 @@ def deprecated_tensor_logaddexp2(input, other):
 
 
 # 157
-def tensor_empty(*size, dtype=None, device=None):
+def tensor_empty(*size, dtype=None, device=None, pin_memory=False):
+    r"""
+    For details, please refer to :func:`mindspore.mint.empty`.
+    """
     logger.error(
         "This is a function for empty not should be called. Please check the implementation.")
 
 
-def tensor_empty_like(input, *, dtype=None, device=None):
+def tensor_empty_like(input, *, dtype=None, device=None, pin_memory=False):
+    """
+    For details, please refer to :func:`mindspore.mint.empty_like`.
+    """
     raise NotImplementedError(
         "This is a function for empty_like should not be called. Please check the implementation.")
 
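Both placeholders gain a pin_memory keyword to stay in sync with the mint APIs their docstrings point to. A usage sketch, on the assumption that mindspore.mint.empty and mint.empty_like accept the same keyword in 2.7.1:

import mindspore as ms
from mindspore import mint

x = mint.empty(2, 3, dtype=ms.float32, pin_memory=True)  # pinned host memory, if supported
y = mint.empty_like(x, pin_memory=True)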
@@ -1811,10 +1830,19 @@ def deprecated_tensor_var(input, axis=None, ddof=0, keepdims=False):
     return _tensor_div(x_sum, nums - ddof)
 
 
+# 1222
+def tensor_index_fill_(input, dim, index, value):
+    raise NotImplementedError('Tensor.index_fill_ only supports Ascend.')
+
+
 def tensor_kthvalue(input, k, dim=-1, keepdim=False):
     raise ValueError("should not come here for kthvalue py_method.")
 
 
+def tensor_index_copy_(input, dim, index, tensor):
+    raise NotImplementedError('Tensor.index_copy_ only supports Ascend.')
+
+
 def tensor_sub_empty_(input, other, alpha=1):
     raise ValueError("should not come here for sub_ method.")
 
@@ -1824,9 +1852,11 @@ def tensor_inplace_sub(input, other, *, alpha=1):
         return sub(input, other)
     return sub_ext(input, other, alpha=alpha)
 
+
 def tensor_new_full(input, size, fill_value, *, dtype=None):
     raise NotImplementedError("new_full method support Ascend only")
 
+
 def tensor_div_empty_(input, other, rounding_mode=None):
     raise ValueError("should not come here for div_ method.")
 
@@ -1858,6 +1888,10 @@ def all_gather_matmul(
     raise NotImplementedError('all_gather_matmul only supports Ascend.')
 
 
+def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv1d only supports Ascend.')
+
+
 def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
     raise NotImplementedError('conv3d only supports Ascend.')
 
@@ -1926,11 +1960,46 @@ def tensor_quant_matmul(x1, x2, scale, *, offset=None, pertoken_scale=None, bias
     raise NotImplementedError('quant_matmul only supports Ascend.')
 
 
+def tensor_index(input, value):
+    raise NotImplementedError("index only supports Ascend.")
+
+
 def tensor_gmm(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
     raise NotImplementedError("gmm has not been implemented by python.")
 
+
 def raise_func(*args, **kwargs):
     raise NotImplementedError("this func has not been implemented.")
 
+
 def tensor_masked_scatter(input, mask, source):
     return masked_scatter(input, mask, source)
+
+
+def tensor_inplace_masked_scatter(input, mask, source):
+    return F.inplace_masked_scatter(input, mask, source)
+
+
+def tensor_broadcast_to(x, shape):
+    return F.broadcast_to(x, shape)
+
+def tensor_squeeze(input, axis=None):
+    return F.squeeze(input, axis)
+
+
+def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv2d only supports Ascend.')
+
+
+def tensor_real(input):
+    r"""
+    For details, please refer to :func:`mindspore.ops.real`.
+    """
+    return ops.real(input)
+
+
+def tensor_imag(input):
+    r"""
+    For details, please refer to :func:`mindspore.ops.imag`.
+    """
+    return ops.imag(input)
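The two final wrappers simply delegate to the existing functional ops; for completeness, a minimal example of what that delegation returns:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

z = Tensor(np.asarray(complex(1.3, 0.4)), ms.complex64)
print(ops.real(z))  # 1.3
print(ops.imag(z))  # 0.4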