mindspore-2.7.0-cp310-cp310-win_amd64.whl → mindspore-2.7.1-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -1
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_extends/parse/compile_config.py +24 -1
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -2
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +8 -1
- mindspore/_extends/parse/trope.py +2 -1
- mindspore/_extends/pijit/pijit_func_white_list.py +7 -22
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/base.py +29 -2
- mindspore/common/_decorator.py +3 -2
- mindspore/common/_grad_function.py +3 -1
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +275 -64
- mindspore/common/_utils.py +0 -44
- mindspore/common/api.py +285 -35
- mindspore/common/dump.py +7 -108
- mindspore/common/dynamic_shape/auto_dynamic_shape.py +1 -3
- mindspore/common/hook_handle.py +60 -0
- mindspore/common/jit_config.py +5 -1
- mindspore/common/jit_trace.py +27 -12
- mindspore/common/lazy_inline.py +5 -3
- mindspore/common/parameter.py +13 -107
- mindspore/common/recompute.py +4 -11
- mindspore/common/tensor.py +16 -169
- mindspore/communication/_comm_helper.py +11 -1
- mindspore/communication/comm_func.py +138 -4
- mindspore/communication/management.py +85 -1
- mindspore/config/op_info.config +0 -15
- mindspore/context.py +5 -85
- mindspore/dataset/engine/datasets.py +8 -4
- mindspore/dataset/engine/datasets_vision.py +1 -1
- mindspore/dataset/engine/validators.py +1 -15
- mindspore/dnnl.dll +0 -0
- mindspore/{experimental/llm_boost/ascend_native → graph}/__init__.py +7 -7
- mindspore/graph/custom_pass.py +55 -0
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/__init__.py +3 -3
- mindspore/mindrecord/common/exceptions.py +1 -0
- mindspore/mindrecord/config.py +1 -1
- mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
- mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
- mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
- mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
- mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
- mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
- mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
- mindspore/mindrecord/filereader.py +4 -4
- mindspore/mindrecord/filewriter.py +5 -5
- mindspore/mindrecord/mindpage.py +2 -2
- mindspore/mindrecord/tools/cifar10.py +1 -1
- mindspore/mindrecord/tools/cifar100.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
- mindspore/mindrecord/tools/cifar10_to_mr.py +1 -1
- mindspore/mindrecord/tools/csv_to_mr.py +1 -1
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_cluster.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_hardware_abstract.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mindspore_runtime_utils.dll +0 -0
- mindspore/mindspore_tools.dll +0 -0
- mindspore/mint/__init__.py +15 -10
- mindspore/mint/distributed/distributed.py +182 -62
- mindspore/mint/nn/__init__.py +2 -16
- mindspore/mint/nn/functional.py +4 -110
- mindspore/mint/nn/layer/__init__.py +0 -2
- mindspore/mint/nn/layer/activation.py +0 -6
- mindspore/mint/nn/layer/basic.py +0 -47
- mindspore/mint/nn/layer/conv.py +4 -4
- mindspore/mint/nn/layer/normalization.py +8 -13
- mindspore/mint/nn/layer/pooling.py +0 -4
- mindspore/nn/__init__.py +1 -3
- mindspore/nn/cell.py +16 -66
- mindspore/nn/layer/basic.py +49 -1
- mindspore/nn/layer/container.py +16 -0
- mindspore/nn/layer/embedding.py +4 -169
- mindspore/nn/layer/normalization.py +2 -1
- mindspore/nn/layer/thor_layer.py +4 -85
- mindspore/nn/optim/ada_grad.py +0 -1
- mindspore/nn/optim/adafactor.py +0 -1
- mindspore/nn/optim/adam.py +31 -124
- mindspore/nn/optim/adamax.py +0 -1
- mindspore/nn/optim/asgd.py +0 -1
- mindspore/nn/optim/ftrl.py +8 -102
- mindspore/nn/optim/lamb.py +0 -1
- mindspore/nn/optim/lars.py +0 -3
- mindspore/nn/optim/lazyadam.py +25 -218
- mindspore/nn/optim/momentum.py +5 -43
- mindspore/nn/optim/optimizer.py +6 -55
- mindspore/nn/optim/proximal_ada_grad.py +0 -1
- mindspore/nn/optim/rmsprop.py +0 -1
- mindspore/nn/optim/rprop.py +0 -1
- mindspore/nn/optim/sgd.py +0 -1
- mindspore/nn/optim/tft_wrapper.py +0 -1
- mindspore/nn/optim/thor.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -8
- mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
- mindspore/nn/probability/bijector/power_transform.py +20 -21
- mindspore/nn/probability/bijector/scalar_affine.py +5 -5
- mindspore/nn/probability/bijector/softplus.py +13 -14
- mindspore/nn/wrap/grad_reducer.py +4 -74
- mindspore/numpy/array_creations.py +2 -2
- mindspore/numpy/fft.py +9 -9
- mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
- mindspore/onnx/onnx_export.py +137 -0
- mindspore/opencv_core4110.dll +0 -0
- mindspore/opencv_imgcodecs4110.dll +0 -0
- mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
- mindspore/ops/__init__.py +2 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
- mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
- mindspore/ops/_op_impl/cpu/__init__.py +0 -5
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +16 -22
- mindspore/ops/auto_generate/gen_extend_func.py +2 -7
- mindspore/ops/auto_generate/gen_ops_def.py +98 -141
- mindspore/ops/auto_generate/gen_ops_prim.py +12708 -12686
- mindspore/ops/communication.py +97 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +15 -1
- mindspore/ops/composite/multitype_ops/__init__.py +3 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
- mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
- mindspore/ops/function/__init__.py +1 -0
- mindspore/ops/function/array_func.py +14 -12
- mindspore/ops/function/comm_func.py +3883 -0
- mindspore/ops/function/debug_func.py +3 -4
- mindspore/ops/function/math_func.py +45 -54
- mindspore/ops/function/nn_func.py +75 -294
- mindspore/ops/function/random_func.py +9 -18
- mindspore/ops/functional.py +2 -0
- mindspore/ops/functional_overload.py +354 -18
- mindspore/ops/operations/__init__.py +2 -5
- mindspore/ops/operations/_custom_ops_utils.py +7 -9
- mindspore/ops/operations/_inner_ops.py +1 -38
- mindspore/ops/operations/_rl_inner_ops.py +0 -933
- mindspore/ops/operations/array_ops.py +1 -0
- mindspore/ops/operations/comm_ops.py +94 -2
- mindspore/ops/operations/custom_ops.py +228 -19
- mindspore/ops/operations/debug_ops.py +27 -29
- mindspore/ops/operations/manually_defined/ops_def.py +27 -306
- mindspore/ops/operations/nn_ops.py +2 -2
- mindspore/ops/operations/sparse_ops.py +0 -83
- mindspore/ops/primitive.py +1 -17
- mindspore/ops/tensor_method.py +72 -3
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
- mindspore/ops_generate/api/functions_cc_generator.py +53 -4
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
- mindspore/ops_generate/common/gen_constants.py +11 -10
- mindspore/ops_generate/common/op_proto.py +18 -1
- mindspore/ops_generate/common/template.py +102 -245
- mindspore/ops_generate/common/template_utils.py +212 -0
- mindspore/ops_generate/gen_custom_ops.py +69 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
- mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
- mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
- mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
- mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
- mindspore/ops_generate/resources/yaml_loader.py +13 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
- mindspore/parallel/_cell_wrapper.py +1 -1
- mindspore/parallel/_parallel_serialization.py +1 -4
- mindspore/parallel/_utils.py +29 -6
- mindspore/parallel/checkpoint_transform.py +18 -2
- mindspore/parallel/cluster/process_entity/_api.py +24 -32
- mindspore/parallel/cluster/process_entity/_utils.py +9 -5
- mindspore/{experimental/llm_boost/atb → parallel/distributed}/__init__.py +21 -23
- mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
- mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
- mindspore/parallel/strategy.py +336 -0
- mindspore/parallel/transform_safetensors.py +117 -16
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +3 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
- mindspore/profiler/common/constant.py +5 -0
- mindspore/profiler/common/file_manager.py +9 -0
- mindspore/profiler/common/msprof_cmd_tool.py +38 -2
- mindspore/profiler/common/path_manager.py +56 -24
- mindspore/profiler/common/profiler_context.py +2 -12
- mindspore/profiler/common/profiler_info.py +3 -3
- mindspore/profiler/common/profiler_path_manager.py +13 -0
- mindspore/profiler/common/util.py +30 -3
- mindspore/profiler/experimental_config.py +2 -1
- mindspore/profiler/platform/npu_profiler.py +33 -6
- mindspore/run_check/_check_version.py +108 -24
- mindspore/runtime/__init__.py +3 -2
- mindspore/runtime/executor.py +11 -3
- mindspore/runtime/memory.py +112 -0
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
- mindspore/tools/data_dump.py +130 -0
- mindspore/tools/sdc_detect.py +91 -0
- mindspore/tools/stress_detect.py +63 -0
- mindspore/train/__init__.py +6 -6
- mindspore/train/_utils.py +5 -18
- mindspore/train/amp.py +6 -4
- mindspore/train/callback/_checkpoint.py +0 -9
- mindspore/train/callback/_train_fault_tolerance.py +69 -18
- mindspore/train/data_sink.py +1 -5
- mindspore/train/model.py +38 -211
- mindspore/train/serialization.py +126 -387
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dlpack.py +92 -0
- mindspore/utils/dryrun.py +1 -1
- mindspore/utils/runtime_execution_order_check.py +10 -0
- mindspore/utils/sdc_detect.py +14 -12
- mindspore/utils/stress_detect.py +43 -0
- mindspore/utils/utils.py +144 -8
- mindspore/version.py +1 -1
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/RECORD +254 -267
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -210
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
- mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
- mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
- mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
- mindspore/experimental/llm_boost/register.py +0 -130
- mindspore/experimental/llm_boost/utils.py +0 -31
- mindspore/include/OWNERS +0 -7
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
- mindspore/nn/reinforcement/_batch_read_write.py +0 -142
- mindspore/nn/reinforcement/_tensors_queue.py +0 -152
- mindspore/nn/reinforcement/tensor_array.py +0 -145
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
- mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
- mindspore/ops/_op_impl/cpu/buffer_append.py +0 -28
- mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
- mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
- mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
- mindspore/ops/operations/_tensor_array.py +0 -359
- mindspore/ops/operations/rl_ops.py +0 -288
- mindspore/parallel/_offload_context.py +0 -275
- mindspore/parallel/_recovery_context.py +0 -115
- mindspore/parallel/_transformer/__init__.py +0 -35
- mindspore/parallel/_transformer/layers.py +0 -765
- mindspore/parallel/_transformer/loss.py +0 -251
- mindspore/parallel/_transformer/moe.py +0 -693
- mindspore/parallel/_transformer/op_parallel_config.py +0 -222
- mindspore/parallel/_transformer/transformer.py +0 -3124
- mindspore/parallel/mpi/_mpi_config.py +0 -116
- mindspore/train/memory_profiling_pb2.py +0 -298
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
@@ -1294,308 +1294,6 @@ class TypeAs(Primitive):
         return pyboost_type_as(self, [input, other])
 
 
-def to_sequence(val):
-    """
-    to_sequence
-    """
-    if isinstance(val, (tuple, list)):
-        return tuple(val)
-    return (val,)
-
-
-class EmbeddingTableExport(Primitive):
-    """
-    EmbeddingTableExport
-    """
-
-    @prim_attr_register
-    def __init__(self, embedding_dim, value_total_len, export_mode="all",
-                 only_var_flag=False, file_type="bin", table_name=(),
-                 filter_export_flag=False, steps_to_live_list=()):
-        """Initialize EmbeddingTableExport"""
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingTableImport(Primitive):
-    """
-    EmbeddingTableImport
-    """
-
-    @prim_attr_register
-    def __init__(self, embedding_dim, value_total_len,
-                 only_var_flag=False, file_type="bin", table_name=()):
-        """Initialize EmbeddingTableImport"""
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingComputeVarImport(Primitive):
-    """
-    EmbeddingComputeVarImport
-    """
-
-    @prim_attr_register
-    def __init__(self, table_name=()):
-        """Initialize EmbeddingComputeVarImport"""
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingComputeVarExport(Primitive):
-    """
-    EmbeddingComputeVarExport
-    """
-
-    @prim_attr_register
-    def __init__(self, table_name=()):
-        """Initialize EmbeddingComputeVarExport"""
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class InitEmbeddingHashmap(Primitive):
-    """
-    InitEmbeddingHashmap
-    """
-    @prim_attr_register
-    def __init__(self, value_total_len, embedding_dim, _table_id,
-                 bucket_size=0, dtype=mstype.float32, initializer_mode="",
-                 constant_valu=0., min=-2., max=2., mu=0., sigma=1., seed=0,
-                 seed2=0, filter_mode="no_filter", optimizer_mode="",
-                 optimizer_params=()):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-def init_embedding_hashmap(table_id, value_total_len, embedding_dim, _table_id,
-                           bucket_size=0, dtype=mstype.float32, initializer_mode='',
-                           constant_value=0.0, min=-2.0, max=2.0, mu=0.0, sigma=1.0,
-                           seed=0, seed2=0, filter_mode='no_filter',
-                           optimizer_mode='', optimizer_params=()):
-    """
-    init_embedding_hashmap
-    """
-    op = _get_cache_prim(InitEmbeddingHashmap)(value_total_len, embedding_dim, _table_id,
-                                               bucket_size, dtype, initializer_mode,
-                                               constant_value, min, max, mu, sigma, seed,
-                                               seed2, filter_mode, optimizer_mode, optimizer_params)
-    return op(table_id)
-
-
-class InitPartitionMap(Primitive):
-    """
-    InitPartitionMap
-    """
-    @prim_attr_register
-    def __init__(self, _embedding_dim, _max_key_num,
-                 _ps_num=1, partition_num=65537):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-def init_partition_map(ps_num, ps_ids, _embedding_dim, _max_key_num,
-                       _ps_num=1, partition_num=65537):
-    """
-    init_partition_map
-    """
-    op = _get_cache_prim(InitPartitionMap)(_embedding_dim, _max_key_num, _ps_num, partition_num)
-    return op(ps_num, ps_ids)
-
-
-class EmbeddingApplyAdam(Primitive):
-    """
-    EmbeddingApplyAdam
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _max_key_num, mask_zero=(0,),
-                 padding_key=(0,), padding_key_mask=(1,),
-                 completion_key=(0,), completion_key_mask=(1,)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingApplyAdamW(Primitive):
-    """
-    EmbeddingApplyAdam
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _max_key_num, amsgrad=(0,),
-                 maximize=(0,), mask_zero=(0,), padding_key=(0,),
-                 padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingApplyAdaGrad(Primitive):
-    """
-    EmbeddingApplyAdaGrad
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _max_key_num, mask_zero=(0,),
-                 padding_key=(0,), padding_key_mask=(1,),
-                 completion_key=(0,), completion_key_mask=(1,)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingApplyFtrl(Primitive):
-    """
-    EmbeddingApplyFtrl
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _max_key_num, mask_zero=(0,),
-                 padding_key=(0,), padding_key_mask=(1,),
-                 completion_key=(0,), completion_key_mask=(1,)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-
-
-class EmbeddingTableFind(Primitive):
-    """
-    EmbeddingTableFind
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, _embedding_dim, _max_key_num,
-                 _table_id, default_value=(-1.), _use_counter_filter=0):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-        self.add_prim_attr("_execute_times", 2)
-
-
-def embedding_table_find(table_id, keys, embedding_dim, _max_key_num,
-                         _table_id, default_value=(-1.0,), _use_counter_filter=0):
-    r"""
-    embedding_table_find
-    """
-    _embedding_dim = embedding_dim if isinstance(embedding_dim, int) else embedding_dim[_table_id]
-    op = _get_cache_prim(EmbeddingTableFind)(to_sequence(embedding_dim), _embedding_dim,
-                                             _max_key_num, _table_id,
-                                             to_sequence(default_value),
-                                             _use_counter_filter)
-    return op(table_id, keys)
-
-
-class EmbeddingTableFindAndInit(Primitive):
-    """
-    EmbeddingTableFindAndInit
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, value_total_len, _embedding_dim, _table_id,
-                 _max_key_num, initializer_mode=("random_uniform",),
-                 constant_value=(0.,), min=(-2.,), max=(2.,), mu=(0.,),
-                 sigma=(1.,), seed=(0,), seed2=(0,),
-                 filter_mode=("no_filter",), filter_freq=(0,),
-                 default_key_or_value=(0,), default_key=(0,),
-                 default_value=(0.,), completion_key=(0,),
-                 completion_key_mask=(1,), optimizer_mode=(),
-                 optimizer_params=(), _use_counter_filter=0,
-                 backward_mode="adam",
-                 backward_int_params=((0,), (0,), (0,), (1,)),
-                 backward_float_params=(0.9, 0.99, 0.001, 0.9, 0.999, 1e-08)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-        self.add_prim_attr("_execute_times", 2)
-
-
-def embedding_table_find_and_init(table_id, keys, max_grad_norm, parameter, embedding_dim,
-                                  value_total_len, _table_id, _max_key_num,
-                                  initializer_mode=('random_uniform',), constant_value=(0.,),
-                                  min=(-2.,), max=(2.,), mu=(0.,), sigma=(1.,), seed=(0,),
-                                  seed2=(0,), filter_mode=("no_filter",),
-                                  filter_freq=(0,), default_key_or_value=(0,),
-                                  default_key=(0,), default_value=(0.,),
-                                  completion_key=(0,), completion_key_mask=(1,),
-                                  optimizer_mode=(), optimizer_params=(), _use_counter_filter=0,
-                                  backward_mode="adam", backward_int_params=((0,), (0,), (0,), (1,)),
-                                  backward_float_params=(0.9, 0.99, 0.001, 0.9, 0.999, 1e-08)):
-    """
-    embedding_table_find_and_init
-
-    backward_int_params (Union[tuple[tuple[int]], list[list[int]]]):
-        - when the backward_mode is 'adam', 'ftrl' or 'adagrad',
-          it means [[global_step], mask_zero, padding_key, padding_key_mask]
-        - when the backward_mode is 'adamw', it means:
-          [[global_step], amsgrad, maximize, mask_zero, padding_key, padding_key_mask]
-    backward_float_params (Union[tuple[float], list[float]]):
-        - when the backward_mode is 'adam', it means:
-          [beta1_power, beta2_power, lr, beta1, beta2, epsilon]
-        - when the backward_mode is 'ftrl', it means:
-          [lr, lr_power, lambda1, lambda2]
-        - when the backward_mode is 'adamw', it means:
-          [beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon]
-        - when the backward_mode is 'adagrad', it means [lr,]
-    """
-    _embedding_dim = embedding_dim if isinstance(embedding_dim, int) else embedding_dim[_table_id]
-    op = _get_cache_prim(EmbeddingTableFindAndInit)(to_sequence(embedding_dim), to_sequence(value_total_len),
-                                                    _embedding_dim, _table_id, _max_key_num,
-                                                    to_sequence(initializer_mode),
-                                                    to_sequence(constant_value), to_sequence(min),
-                                                    to_sequence(max), to_sequence(mu),
-                                                    to_sequence(sigma), to_sequence(seed),
-                                                    to_sequence(seed2), to_sequence(filter_mode),
-                                                    to_sequence(filter_freq), to_sequence(default_key_or_value),
-                                                    to_sequence(default_key), to_sequence(default_value),
-                                                    to_sequence(completion_key), to_sequence(completion_key_mask),
-                                                    to_sequence(optimizer_mode), to_sequence(optimizer_params),
-                                                    _use_counter_filter,
-                                                    backward_mode, backward_int_params, backward_float_params)
-    return op(table_id, keys, max_grad_norm, parameter)
-
-
-class FakeRemoteLookupUniqued(Primitive):
-
-    """
-    FakeRemoteLookupUniqued
-    """
-    @prim_attr_register
-    def __init__(self, embedding_dim, value_total_len, _embedding_dim, _table_id,
-                 _max_key_num, initializer_mode=('random_uniform',), constant_value=(0.,),
-                 min=(-2.,), max=(2.,), mu=(0.,), sigma=(1.,), seed=(0,), seed2=(0,),
-                 filter_mode=("no_filter",), filter_freq=(0,),
-                 default_key_or_value=(0,), default_key=(0,), default_value=(0.,),
-                 completion_key=(0,), completion_key_mask=(1,),
-                 optimizer_mode=(), optimizer_params=(), _use_counter_filter=0,
-                 backward_mode="adam", backward_int_params=((0,), (0,), (0,), (1,)),
-                 backward_float_params=(0.9, 0.99, 0.001, 0.9, 0.999, 1e-08)):
-        self.add_prim_attr("_process_node_engine_id", "PS")
-        self.add_prim_attr("_execute_times", 2)
-
-
-def fake_remote_lookup_uniqued(table_id, keys, actual_keys_num, unique_indices,
-                               key_count, max_grad_norm, parameter,
-                               embedding_dim, value_total_len, _table_id, _max_key_num,
-                               initializer_mode=('random_uniform',), constant_value=(0.,),
-                               min=(-2.,), max=(2.,), mu=(0.,), sigma=(1.,), seed=(0,),
-                               seed2=(0,), filter_mode=("no_filter",),
-                               filter_freq=(0,), default_key_or_value=(0,),
-                               default_key=(0,), default_value=(0.,),
-                               completion_key=(0,), completion_key_mask=(1,),
-                               optimizer_mode=(), optimizer_params=(), _use_counter_filter=0,
-                               backward_mode='adam', backward_int_params=((0,), (0,), (0,), (1,)),
-                               backward_float_params=(0.9, 0.99, 0.001, 0.9, 0.999, 1e-08)):
-    """
-    fake_remote_lookup_uniqued
-
-    backward_mode (str): determine the optimizer used by backpropagation,
-        valid values are ["adam", "adamw", "adagrad", "ftrl"]
-    backward_int_params (Union[tuple[tuple[int]], list[list[int]]]):
-        - when the backward_mode is 'adam', 'ftrl' or 'adagrad',
-          it means [[global_step], mask_zero, padding_key, padding_key_mask]
-        - when the backward_mode is 'adamw', it means:
-          [[global_step], amsgrad, maximize, mask_zero, padding_key, padding_key_mask]
-    backward_float_params (Union[tuple[float], list[float]]):
-        - when the backward_mode is 'adam', it means:
-          [beta1_power, beta2_power, lr, beta1, beta2, epsilon]
-        - when the backward_mode is 'ftrl', it means:
-          [lr, lr_power, lambda1, lambda2]
-        - when the backward_mode is 'adamw', it means:
-          [beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon]
-        - when the backward_mode is 'adagrad', it means [lr,]
-    """
-    _embedding_dim = embedding_dim if isinstance(embedding_dim, int) else embedding_dim[_table_id]
-    op = _get_cache_prim(FakeRemoteLookupUniqued)(to_sequence(embedding_dim), to_sequence(value_total_len),
-                                                  _embedding_dim, _table_id, _max_key_num,
-                                                  to_sequence(initializer_mode), to_sequence(constant_value),
-                                                  to_sequence(min), to_sequence(max), to_sequence(mu),
-                                                  to_sequence(sigma), to_sequence(seed), to_sequence(seed2),
-                                                  to_sequence(filter_mode), to_sequence(filter_freq),
-                                                  to_sequence(default_key_or_value), to_sequence(default_key),
-                                                  to_sequence(default_value), to_sequence(completion_key),
-                                                  to_sequence(completion_key_mask), to_sequence(optimizer_mode),
-                                                  to_sequence(optimizer_params), _use_counter_filter,
-                                                  backward_mode, backward_int_params, backward_float_params)
-    return op(table_id, keys, actual_keys_num, unique_indices, key_count, max_grad_norm, parameter)
-
-
 # Following is Python Infer Value.
 # A valid infer value function should be:
 #
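Note: the `to_sequence` helper removed above only normalized scalar primitive attributes into 1-tuples before handing them to the parameter-server embedding primitives. A standalone restatement of that behavior (taken directly from the removed code), for readers tracking call sites such as `to_sequence(default_value)`:

def to_sequence(val):
    # Tuples/lists pass through as tuples; any scalar is wrapped into a 1-tuple.
    if isinstance(val, (tuple, list)):
        return tuple(val)
    return (val,)

assert to_sequence(-1.0) == (-1.0,)
assert to_sequence([0, 1]) == (0, 1)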
@@ -1628,7 +1326,13 @@ def infer_value_for_Concat(tensors, axis):
         return None
 
     tensor_to_concat = [x.asnumpy() for x in tensors]
-    return Tensor(np.concatenate(tensor_to_concat, axis))
+    out = np.concatenate(tensor_to_concat, axis)
+    if out.dtype != np.float32:
+        return Tensor(out)
+    for x in tensors:
+        if x.dtype in [mstype.float16, mstype.float32]:
+            return Tensor(out)
+    return Tensor(out, dtype=mstype.bfloat16)
 
 
 def infer_value_for_GatherD(input, dim, index):
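The new branch above is needed because `asnumpy()` on bfloat16 tensors yields float32 data, so a float32 numpy result should only stay float32 when at least one input really was float16/float32; otherwise it is re-tagged as bfloat16. A minimal sketch of that dtype rule with plain numpy stand-ins (the helper name and the string dtype tags are illustrative, not MindSpore API):

import numpy as np

def concat_result_dtype(np_out_dtype, input_dtypes):
    # Mirrors the new infer_value_for_Concat branch: a float32 numpy result
    # keeps float32 only if some input was float16/float32; otherwise the
    # inputs must have been bfloat16 (numpy has no bfloat16), so cast back.
    if np_out_dtype != np.float32:
        return np_out_dtype
    if any(dt in ("float16", "float32") for dt in input_dtypes):
        return np.float32
    return "bfloat16"

assert concat_result_dtype(np.float32, ["bfloat16", "bfloat16"]) == "bfloat16"
assert concat_result_dtype(np.float32, ["float32", "bfloat16"]) == np.float32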
@@ -1959,11 +1663,27 @@ def infer_value_for_BroadcastTo(x, shape):
     validator.check_value_type("shape", shape, [tuple], "BroadcastTo")
     shape = list(shape)
 
-    np_data = np.broadcast_to(x.asnumpy(), shape)
-    if 0 in shape:
+    # Resolve -1 entries and support input rank < target rank.
+    input_shape = list(x.shape)
+    target_shape = list(shape)
+    in_rank = len(input_shape)
+    out_rank = len(target_shape)
+    for k in range(1, out_rank + 1):
+        t = target_shape[-k]
+        if t == -1:
+            if k <= in_rank:
+                target_shape[-k] = input_shape[-k]
+            else:
+                pass
+
+    resolved_shape = target_shape
+
+    np_data = np.broadcast_to(x.asnumpy(), resolved_shape)
+    if 0 in resolved_shape:
         init_func = Zero()
         init_func.__enable_zero_dim__ = True
-        out = Tensor(shape=
+        out = Tensor(shape=resolved_shape, dtype=x.dtype, init=init_func)
+        out.init_data()
         return out
     return Tensor(np_data)
 
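The added loop resolves `-1` entries in the requested broadcast shape against the right-aligned input dimensions before `np.broadcast_to` is called. A self-contained sketch of that resolution step (the helper name is illustrative; only numpy is assumed):

import numpy as np

def resolve_broadcast_shape(input_shape, target_shape):
    # Mirrors the -1 resolution added above: a -1 in the target shape takes
    # the size of the aligned (right-most) input dimension when one exists.
    resolved = list(target_shape)
    in_rank = len(input_shape)
    for k in range(1, len(resolved) + 1):
        if resolved[-k] == -1 and k <= in_rank:
            resolved[-k] = input_shape[-k]
    return resolved

shape = resolve_broadcast_shape((3, 1), (2, -1, 4))
assert shape == [2, 3, 4]
assert np.broadcast_to(np.zeros((3, 1)), shape).shape == (2, 3, 4)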
@@ -2014,6 +1734,7 @@ def infer_value_for_Reshape(x, shape):
         init_func = Zero()
         init_func.__enable_zero_dim__ = True
         out = Tensor(shape=shape, dtype=x.dtype, init=init_func)
+        out.init_data()
     else:
         out = Tensor(x.asnumpy().reshape(shape))
     return out
@@ -6868,8 +6868,8 @@ class CTCLossV2(Primitive):
         >>> print(neg_log_hood)
         [-2.2986124]
         >>> print(log_alpha)
-        [[[0.3 0.3 -inf -inf
-        [
+        [[[0.3 0.3 -inf -inf 1.8931472 1.2 0. 0. ]
+          [0. 0. 0. 0. 0. 0. 0. 0. ]]]
     """
 
     @prim_attr_register
@@ -2606,89 +2606,6 @@ class RaggedTensorToTensor(Primitive):
         self.add_prim_attr("num_row_partition_tensors", self.num_row_partition_tensors)
 
 
-class SparseCross(Primitive):
-    """
-    Generates sparse cross from a list of sparse and dense tensors.
-
-    Args:
-        hashed_output (bool): If true, returns the hash of the cross instead of the string. This will allow us
-            avoiding string manipulations.
-        num_buckets (int): An int that is >= 0. It is used if "hashed_output" is true.output = hashed_value%num_buckets
-            if num_buckets > 0 else "hashed_value".
-        hash_key (int): Specify the hash_key that will be used by the "FingerprintCat64" function to combine the
-            crosses fingerprints.
-        out_type (mindspore.dtype): The output data type. Defaults to "int64".
-        internal_type (mindspore.dtype): An type int64.
-
-    Inputs:
-        - **indices** (list(Tensor)) - A list of Tensor objects with type int64. 2-D.
-          Indices of each input SparseTensor.
-        - **values** (list(Tensor)) - A list of Tensor objects with types from: int64.
-          1-D. values of each SparseTensor.
-        - **shapes** (list(Tensor)) - A list with the same length as indices of Tensor objects with type int64.
-          1-D. Shapes of each SparseTensor.
-        - **dense_inputs** (list(Tensor)) - A list of Tensor objects with types from: int64.
-          2-D. Columns represented by dense Tensor.
-
-    Outputs:
-        - **output_indices** (Tensor) - A Tensor of type int64. 2-D. Indices of the concatenated SparseTensor.
-        - **output_values** (Tensor) - A Tensor of type "out_type". 1-D.
-          Non-empty values of the concatenated or hashed SparseTensor.
-        - **output_shape** (Tensor) - A Tensor of type int64. 1-D. Shape of the concatenated SparseTensor.
-
-    Raises:
-        TypeError: The indices shape rank is not equal to the shape rank.
-        TypeError: The indices element number is not equal to the value element number.
-        TypeError: The indices shape rank should be 2.
-        TypeError: The denses shape rank should be 2.
-        TypeError: The shapes rank should be 2.
-
-    Supported Platforms:
-        ``CPU``
-
-    Examples:
-        >>> from mindspore.ops.operations.sparse_ops import SparseCross
-        >>> indice1 = Tensor([[0,0],[1,0],[1,1]], dtype=mstype.int64)
-        >>> value1 = Tensor([1, 2, 3], dtype=mstype.int64)
-        >>> shape1 = Tensor([2, 2], dtype=mstype.int64)
-        >>> dense1 = Tensor([[1],[2]], dtype=mstype.int64)
-        >>> indice2 = Tensor([[0,0],[1,0],[1,1]], dtype=mstype.int64)
-        >>> value2 = Tensor([1, 2, 3], dtype=mstype.int64)
-        >>> shape2 = Tensor([2, 2], dtype=mstype.int64)
-        >>> dense2 = Tensor([[1],[2]], dtype=mstype.int64)
-        >>> indices = [indice1, indice2]
-        >>> values = [value1, value2]
-        >>> shapes = [shape1, shape2]
-        >>> dense_inputs = [dense1, dense2]
-        >>> hashed_output=True
-        >>> hash_key= 2
-        >>> out_type= mstype.int64
-        >>> internal_type = mstype.int64
-        >>> num_buckets=0
-        >>> sparse_cross = SparseCross(hashed_output, hash_key, out_type, internal_type, num_buckets)
-        >>> out = sparse_cross(indices, values, shapes, dense_inputs)
-        >>> print(out)
-        (Tensor(shape=[5, 2], dtype=Int64, value=
-        [[0, 0],
-        [1, 0],
-        [1, 1],
-        [1, 2],
-        [1, 3]]), Tensor(shape=[5], dtype=Int64, value= [1350190460805457680, 6319552725219729347,
-        4652439303631496997, 7670687697825594049, 174086171018132662]), Tensor(shape=[2], dtype=Int64, value= [2, 4]))
-    """
-
-    @prim_attr_register
-    def __init__(self, hashed_output, hash_key, out_type, internal_type, num_buckets=0):
-        """Initialize SparseCross."""
-        self.init_prim_io_names(inputs=["indices", "values", "shapes", "dense_inputs"],
-                                outputs=["output_indices", "output_values", "output_shape"])
-        validator.check_value_type("hashed_output", hashed_output, [bool], self.name)
-        validator.check_value_type("hash_key", hash_key, [int], self.name)
-        validator.check_value_type("out_type", out_type, [mstype.Type], self.name)
-        validator.check_value_type("internal_type", internal_type, [mstype.Type], self.name)
-        validator.check_value_type("num_buckets", num_buckets, [int], self.name)
-
-
 class RaggedTensorToSparse(Primitive):
     r"""
     Converts a RaggedTensor into a SparseTensor with the same values.
mindspore/ops/primitive.py CHANGED
@@ -20,7 +20,7 @@ import copy
 import numpy as np
 from mindspore.common.api import _wrap_func
 from mindspore.log import _LogActionOnce
-from mindspore import
+from mindspore import log as logger
 from mindspore.parallel._utils import _is_in_auto_parallel_mode, _is_in_data_parallel_mode, \
     _is_in_hybrid_parallel_mode, SUPPORTED_TUPLE_IN_TUPLE_STRATEGY
 from mindspore.parallel._ps_context import _is_ps_mode, _is_role_sched
@@ -214,22 +214,6 @@ class Primitive(Primitive_):
         if in_strategy is None and out_strategy is not None:
             raise ValueError(f'The out_strategy of {self.name} is {out_strategy}, need to set in_strategy,'
                              f' but got none')
-        if not _is_in_auto_parallel_mode():
-            mode = context.get_auto_parallel_context("parallel_mode")
-            if in_strategy is not None:
-                logger.warning(f"The in_strategy/in_layout of the operator in your network "
-                               f"will not take effect in {mode} mode. "
-                               f"This means the the shard function called in the network is ignored. \n"
-                               f"If you want to enable it, please use semi auto or auto parallel mode by "
-                               f"context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL "
-                               f"or context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL)")
-            if out_strategy is not None:
-                logger.warning(f"The out_strategy/out_layout of the operator in your network "
-                               f"will not take effect in {mode} mode."
-                               f" This means the the shard function called in the network is ignored. \n"
-                               f"If you want to enable it, please use semi auto or auto parallel mode by "
-                               f"context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL "
-                               f"or context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL)")
 
     def del_prim_attr(self, name):
         """
mindspore/ops/tensor_method.py CHANGED
@@ -239,6 +239,7 @@ from mindspore.ops.function.array_func import tensor_scatter_add
 from mindspore.ops.auto_generate import select, select_ext_view
 # 94 sigmoid
 from mindspore.ops.auto_generate import sigmoid
+from mindspore.ops.auto_generate import inplace_sigmoid as sigmoid_
 # 95 sin
 from mindspore.ops.auto_generate import sin
 # 96 size
@@ -366,7 +367,7 @@ from mindspore.ops.auto_generate import acos_ext, acosh_ext, asin_ext, asinh_ext
 from mindspore.ops.function.math_func import median
 
 # 156
-
+from mindspore.ops.function.math_func import permute
 # 157
 from mindspore.ops.auto_generate import xlogy_op
 
@@ -1040,6 +1041,10 @@ def tensor_sigmoid(input):
     return sigmoid(input)
 
 
+def tensor_sigmoid_(input):
+    return sigmoid_(input)
+
+
 # 95 sin
 def tensor_sin(input):
     return sin(input)
@@ -1137,6 +1142,7 @@ def deprecated_tensor_sum(input, axis=None, dtype=None, keepdims=False, initial=
 
 # 105 swapaxes
 
+
 # 106 t
 def tensor_t(input):
     return t(input)
@@ -1165,6 +1171,7 @@ def deprecated_tensor_tile(input, reps):
 
 # 109 tolist
 
+
 # 110 topk
 def tensor_topk(input, k, dim=-1, largest=True, sorted=True):
     return topk(input, k, dim, largest, sorted)
@@ -1184,6 +1191,11 @@ def deprecated_tensor_transpose(input, *axes):
     return transpose(input, perm)
 
 
+def deprecated_tensor_permute(input, *axis):
+    perm = validator.check_transpose_axis(axis, input.ndim)
+    return permute(input, perm)
+
+
 # 112 tril
 def deprecated_tensor_tril(input, diagonal=0):
     return tril(input, diagonal)
@@ -1196,6 +1208,7 @@ def tensor_trunc(input):
 
 # 114 type
 
+
 # 115 type_as
 def deprecated_tensor_type_as(input, other):
     return input.astype(other.dtype)
@@ -1511,12 +1524,18 @@ def deprecated_tensor_logaddexp2(input, other):
 
 
 # 157
-def tensor_empty(*size, dtype=None, device=None):
+def tensor_empty(*size, dtype=None, device=None, pin_memory=False):
+    r"""
+    For details, please refer to :func:`mindspore.mint.empty`.
+    """
     logger.error(
         "This is a function for empty not should be called. Please check the implementation.")
 
 
-def tensor_empty_like(input, *, dtype=None, device=None):
+def tensor_empty_like(input, *, dtype=None, device=None, pin_memory=False):
+    """
+    For details, please refer to :func:`mindspore.mint.empty_like`.
+    """
     raise NotImplementedError(
         "This is a function for empty_like should not be called. Please check the implementation.")
 
@@ -1811,10 +1830,19 @@ def deprecated_tensor_var(input, axis=None, ddof=0, keepdims=False):
     return _tensor_div(x_sum, nums - ddof)
 
 
+# 1222
+def tensor_index_fill_(input, dim, index, value):
+    raise NotImplementedError('Tensor.index_fill_ only supports Ascend.')
+
+
 def tensor_kthvalue(input, k, dim=-1, keepdim=False):
     raise ValueError("should not come here for kthvalue py_method.")
 
 
+def tensor_index_copy_(input, dim, index, tensor):
+    raise NotImplementedError('Tensor.index_copy_ only supports Ascend.')
+
+
 def tensor_sub_empty_(input, other, alpha=1):
     raise ValueError("should not come here for sub_ method.")
 
@@ -1824,9 +1852,11 @@ def tensor_inplace_sub(input, other, *, alpha=1):
         return sub(input, other)
     return sub_ext(input, other, alpha=alpha)
 
+
 def tensor_new_full(input, size, fill_value, *, dtype=None):
     raise NotImplementedError("new_full method support Ascend only")
 
+
 def tensor_div_empty_(input, other, rounding_mode=None):
     raise ValueError("should not come here for div_ method.")
 
@@ -1858,6 +1888,10 @@ def all_gather_matmul(
     raise NotImplementedError('all_gather_matmul only supports Ascend.')
 
 
+def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv1d only supports Ascend.')
+
+
 def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
     raise NotImplementedError('conv3d only supports Ascend.')
 
@@ -1926,11 +1960,46 @@ def tensor_quant_matmul(x1, x2, scale, *, offset=None, pertoken_scale=None, bias
     raise NotImplementedError('quant_matmul only supports Ascend.')
 
 
+def tensor_index(input, value):
+    raise NotImplementedError("index only supports Ascend.")
+
+
 def tensor_gmm(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
     raise NotImplementedError("gmm has not been implemented by python.")
 
+
 def raise_func(*args, **kwargs):
     raise NotImplementedError("this func has not been implemented.")
 
+
 def tensor_masked_scatter(input, mask, source):
     return masked_scatter(input, mask, source)
+
+
+def tensor_inplace_masked_scatter(input, mask, source):
+    return F.inplace_masked_scatter(input, mask, source)
+
+
+def tensor_broadcast_to(x, shape):
+    return F.broadcast_to(x, shape)
+
+def tensor_squeeze(input, axis=None):
+    return F.squeeze(input, axis)
+
+
+def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv2d only supports Ascend.')
+
+
+def tensor_real(input):
+    r"""
+    For details, please refer to :func:`mindspore.ops.real`.
+    """
+    return ops.real(input)
+
+
+def tensor_imag(input):
+    r"""
+    For details, please refer to :func:`mindspore.ops.imag`.
+    """
+    return ops.imag(input)
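The new `deprecated_tensor_permute` wrapper above delegates axis checking to `validator.check_transpose_axis` before calling `permute`. As a rough, standalone illustration of the kind of normalization such a check performs (assuming numpy-style axes, including acceptance of a single tuple and of negative indices; details of the real validator may differ):

def normalize_permute_axes(axes, ndim):
    # Sketch of the axis handling behind Tensor.permute / transpose wrappers:
    # accept either permute(2, 0, 1) or permute((2, 0, 1)), resolve negative
    # axes, and require a full permutation of range(ndim).
    if len(axes) == 1 and isinstance(axes[0], (tuple, list)):
        axes = tuple(axes[0])
    perm = tuple(ax + ndim if ax < 0 else ax for ax in axes)
    if sorted(perm) != list(range(ndim)):
        raise ValueError(f"'axes' must be a permutation of {ndim} dimensions, got {axes}")
    return perm

assert normalize_permute_axes((2, 0, 1), 3) == (2, 0, 1)
assert normalize_permute_axes(((2, 0, -2),), 3) == (2, 0, 1)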