mindspore-2.4.0-cp310-cp310-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic. Click here for more details.
- mindspore/.commit_id +1 -0
- mindspore/__init__.py +53 -0
- mindspore/_c_dataengine.cpython-310-darwin.so +0 -0
- mindspore/_c_expression.cpython-310-darwin.so +0 -0
- mindspore/_c_mindrecord.cpython-310-darwin.so +0 -0
- mindspore/_check_jit_forbidden_api.py +106 -0
- mindspore/_checkparam.py +1419 -0
- mindspore/_extends/__init__.py +23 -0
- mindspore/_extends/builtin_operations.py +224 -0
- mindspore/_extends/graph_kernel/__init__.py +17 -0
- mindspore/_extends/graph_kernel/model/__init__.py +19 -0
- mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
- mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
- mindspore/_extends/graph_kernel/model/model.py +553 -0
- mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
- mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
- mindspore/_extends/graph_kernel/splitter.py +140 -0
- mindspore/_extends/graph_kernel/utils.py +28 -0
- mindspore/_extends/parallel_compile/__init__.py +19 -0
- mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
- mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
- mindspore/_extends/parse/__init__.py +49 -0
- mindspore/_extends/parse/compile_config.py +299 -0
- mindspore/_extends/parse/namespace.py +136 -0
- mindspore/_extends/parse/parser.py +1448 -0
- mindspore/_extends/parse/resources.py +213 -0
- mindspore/_extends/parse/standard_method.py +4475 -0
- mindspore/_extends/parse/trope.py +97 -0
- mindspore/_extends/pijit/__init__.py +23 -0
- mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
- mindspore/_extends/remote/__init__.py +19 -0
- mindspore/_extends/remote/kernel_build_server.py +199 -0
- mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
- mindspore/_extends/utils.py +68 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +433 -0
- mindspore/boost/__init__.py +42 -0
- mindspore/boost/adasum.py +319 -0
- mindspore/boost/base.py +535 -0
- mindspore/boost/boost.py +400 -0
- mindspore/boost/boost_cell_wrapper.py +790 -0
- mindspore/boost/dim_reduce.py +323 -0
- mindspore/boost/grad_accumulation.py +79 -0
- mindspore/boost/grad_freeze.py +382 -0
- mindspore/boost/group_loss_scale_manager.py +166 -0
- mindspore/boost/less_batch_normalization.py +174 -0
- mindspore/common/__init__.py +86 -0
- mindspore/common/_auto_dynamic.py +68 -0
- mindspore/common/_decorator.py +50 -0
- mindspore/common/_jit_fallback_utils.py +110 -0
- mindspore/common/_monad.py +25 -0
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_adapter.py +74 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +46 -0
- mindspore/common/_stub_tensor.py +210 -0
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/_utils.py +122 -0
- mindspore/common/api.py +2064 -0
- mindspore/common/auto_dynamic_shape.py +507 -0
- mindspore/common/dtype.py +422 -0
- mindspore/common/dump.py +130 -0
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +254 -0
- mindspore/common/hook_handle.py +143 -0
- mindspore/common/initializer.py +880 -0
- mindspore/common/jit_config.py +98 -0
- mindspore/common/lazy_inline.py +240 -0
- mindspore/common/mindir_util.py +111 -0
- mindspore/common/mutable.py +234 -0
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +1081 -0
- mindspore/common/recompute.py +292 -0
- mindspore/common/seed.py +260 -0
- mindspore/common/sparse_tensor.py +1175 -0
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +5039 -0
- mindspore/communication/__init__.py +37 -0
- mindspore/communication/_comm_helper.py +501 -0
- mindspore/communication/_hccl_management.py +297 -0
- mindspore/communication/comm_func.py +1395 -0
- mindspore/communication/management.py +673 -0
- mindspore/config/op_info.config +533 -0
- mindspore/context.py +2077 -0
- mindspore/dataset/__init__.py +90 -0
- mindspore/dataset/audio/__init__.py +61 -0
- mindspore/dataset/audio/transforms.py +3690 -0
- mindspore/dataset/audio/utils.py +386 -0
- mindspore/dataset/audio/validators.py +1172 -0
- mindspore/dataset/callback/__init__.py +20 -0
- mindspore/dataset/callback/ds_callback.py +368 -0
- mindspore/dataset/callback/validators.py +32 -0
- mindspore/dataset/core/__init__.py +13 -0
- mindspore/dataset/core/config.py +1095 -0
- mindspore/dataset/core/datatypes.py +101 -0
- mindspore/dataset/core/py_util_helpers.py +65 -0
- mindspore/dataset/core/validator_helpers.py +781 -0
- mindspore/dataset/debug/__init__.py +21 -0
- mindspore/dataset/debug/debug_hook.py +97 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +124 -0
- mindspore/dataset/engine/cache_admin.py +47 -0
- mindspore/dataset/engine/cache_client.py +129 -0
- mindspore/dataset/engine/datasets.py +4582 -0
- mindspore/dataset/engine/datasets_audio.py +911 -0
- mindspore/dataset/engine/datasets_standard_format.py +543 -0
- mindspore/dataset/engine/datasets_text.py +2161 -0
- mindspore/dataset/engine/datasets_user_defined.py +1184 -0
- mindspore/dataset/engine/datasets_vision.py +4816 -0
- mindspore/dataset/engine/iterators.py +371 -0
- mindspore/dataset/engine/obs/__init__.py +23 -0
- mindspore/dataset/engine/obs/config_loader.py +68 -0
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
- mindspore/dataset/engine/obs/util.py +482 -0
- mindspore/dataset/engine/offload.py +596 -0
- mindspore/dataset/engine/queue.py +304 -0
- mindspore/dataset/engine/samplers.py +895 -0
- mindspore/dataset/engine/serializer_deserializer.py +159 -0
- mindspore/dataset/engine/validators.py +2895 -0
- mindspore/dataset/text/__init__.py +51 -0
- mindspore/dataset/text/transforms.py +1703 -0
- mindspore/dataset/text/utils.py +715 -0
- mindspore/dataset/text/validators.py +642 -0
- mindspore/dataset/transforms/__init__.py +45 -0
- mindspore/dataset/transforms/c_transforms.py +638 -0
- mindspore/dataset/transforms/py_transforms.py +393 -0
- mindspore/dataset/transforms/py_transforms_util.py +255 -0
- mindspore/dataset/transforms/transforms.py +1260 -0
- mindspore/dataset/transforms/validators.py +410 -0
- mindspore/dataset/utils/__init__.py +19 -0
- mindspore/dataset/utils/browse_dataset.py +190 -0
- mindspore/dataset/utils/line_reader.py +126 -0
- mindspore/dataset/vision/__init__.py +65 -0
- mindspore/dataset/vision/c_transforms.py +2641 -0
- mindspore/dataset/vision/py_transforms.py +2120 -0
- mindspore/dataset/vision/py_transforms_util.py +1660 -0
- mindspore/dataset/vision/transforms.py +7295 -0
- mindspore/dataset/vision/utils.py +863 -0
- mindspore/dataset/vision/validators.py +1483 -0
- mindspore/default_config.py +2 -0
- mindspore/experimental/__init__.py +20 -0
- mindspore/experimental/es/__init__.py +22 -0
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/experimental/es/embedding_service_layer.py +581 -0
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/experimental/llm_boost/atb/__init__.py +23 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/map_parameter.py +309 -0
- mindspore/experimental/optim/__init__.py +40 -0
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +193 -0
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +290 -0
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +1371 -0
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +262 -0
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +156 -0
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +357 -0
- mindspore/include/OWNERS +7 -0
- mindspore/include/api/allocator.h +97 -0
- mindspore/include/api/callback/callback.h +93 -0
- mindspore/include/api/callback/ckpt_saver.h +41 -0
- mindspore/include/api/callback/loss_monitor.h +33 -0
- mindspore/include/api/callback/lr_scheduler.h +51 -0
- mindspore/include/api/callback/time_monitor.h +34 -0
- mindspore/include/api/callback/train_accuracy.h +37 -0
- mindspore/include/api/cell.h +90 -0
- mindspore/include/api/cfg.h +82 -0
- mindspore/include/api/context.h +602 -0
- mindspore/include/api/data_type.h +47 -0
- mindspore/include/api/delegate.h +178 -0
- mindspore/include/api/delegate_api.h +75 -0
- mindspore/include/api/dual_abi_helper.h +208 -0
- mindspore/include/api/format.h +28 -0
- mindspore/include/api/graph.h +46 -0
- mindspore/include/api/kernel.h +58 -0
- mindspore/include/api/kernel_api.h +168 -0
- mindspore/include/api/metrics/accuracy.h +36 -0
- mindspore/include/api/metrics/metrics.h +41 -0
- mindspore/include/api/model.h +438 -0
- mindspore/include/api/model_group.h +91 -0
- mindspore/include/api/model_parallel_runner.h +168 -0
- mindspore/include/api/serialization.h +185 -0
- mindspore/include/api/status.h +192 -0
- mindspore/include/api/types.h +431 -0
- mindspore/include/api/visible.h +41 -0
- mindspore/include/c_api/context_c.h +179 -0
- mindspore/include/c_api/data_type_c.h +52 -0
- mindspore/include/c_api/format_c.h +46 -0
- mindspore/include/c_api/model_c.h +347 -0
- mindspore/include/c_api/status_c.h +79 -0
- mindspore/include/c_api/tensor_c.h +146 -0
- mindspore/include/c_api/types_c.h +67 -0
- mindspore/include/dataset/config.h +163 -0
- mindspore/include/dataset/constants.h +363 -0
- mindspore/include/dataset/execute.h +196 -0
- mindspore/include/dataset/text.h +1092 -0
- mindspore/include/dataset/transforms.h +638 -0
- mindspore/include/dataset/vision.h +2129 -0
- mindspore/include/dataset/vision_ascend.h +206 -0
- mindspore/include/dataset/vision_lite.h +625 -0
- mindspore/lib/libavcodec.59.dylib +0 -0
- mindspore/lib/libavdevice.59.dylib +0 -0
- mindspore/lib/libavfilter.8.dylib +0 -0
- mindspore/lib/libavformat.59.dylib +0 -0
- mindspore/lib/libavutil.57.dylib +0 -0
- mindspore/lib/libdnnl.2.dylib +0 -0
- mindspore/lib/libicudata.69.dylib +0 -0
- mindspore/lib/libicui18n.69.dylib +0 -0
- mindspore/lib/libicuuc.69.dylib +0 -0
- mindspore/lib/libmindspore_address_sorting.15.dylib +0 -0
- mindspore/lib/libmindspore_backend.dylib +0 -0
- mindspore/lib/libmindspore_common.dylib +0 -0
- mindspore/lib/libmindspore_core.dylib +0 -0
- mindspore/lib/libmindspore_glog.0.dylib +0 -0
- mindspore/lib/libmindspore_gpr.15.dylib +0 -0
- mindspore/lib/libmindspore_grpc++.1.dylib +0 -0
- mindspore/lib/libmindspore_grpc.15.dylib +0 -0
- mindspore/lib/libmindspore_np_dtype.dylib +0 -0
- mindspore/lib/libmindspore_ops.dylib +0 -0
- mindspore/lib/libmindspore_upb.15.dylib +0 -0
- mindspore/lib/libnnacl.dylib +0 -0
- mindspore/lib/libopencv_core.4.5.dylib +0 -0
- mindspore/lib/libopencv_imgcodecs.4.5.dylib +0 -0
- mindspore/lib/libopencv_imgproc.4.5.dylib +0 -0
- mindspore/lib/libps_cache.dylib +0 -0
- mindspore/lib/libswresample.4.dylib +0 -0
- mindspore/lib/libswscale.6.dylib +0 -0
- mindspore/lib/libtinyxml2.8.dylib +0 -0
- mindspore/log.py +633 -0
- mindspore/mindrecord/__init__.py +43 -0
- mindspore/mindrecord/common/__init__.py +17 -0
- mindspore/mindrecord/common/constant.py +20 -0
- mindspore/mindrecord/common/enums.py +44 -0
- mindspore/mindrecord/common/exceptions.py +311 -0
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +174 -0
- mindspore/mindrecord/filewriter.py +722 -0
- mindspore/mindrecord/mindpage.py +210 -0
- mindspore/mindrecord/shardheader.py +141 -0
- mindspore/mindrecord/shardindexgenerator.py +74 -0
- mindspore/mindrecord/shardreader.py +117 -0
- mindspore/mindrecord/shardsegment.py +128 -0
- mindspore/mindrecord/shardutils.py +185 -0
- mindspore/mindrecord/shardwriter.py +237 -0
- mindspore/mindrecord/tools/__init__.py +17 -0
- mindspore/mindrecord/tools/cifar10.py +140 -0
- mindspore/mindrecord/tools/cifar100.py +153 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
- mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
- mindspore/mindrecord/tools/csv_to_mr.py +200 -0
- mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
- mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
- mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
- mindspore/mint/__init__.py +1586 -0
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/mint/linalg/__init__.py +22 -0
- mindspore/mint/nn/__init__.py +757 -0
- mindspore/mint/nn/functional.py +679 -0
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +206 -0
- mindspore/mint/special/__init__.py +63 -0
- mindspore/multiprocessing/__init__.py +73 -0
- mindspore/nn/__init__.py +47 -0
- mindspore/nn/cell.py +2787 -0
- mindspore/nn/dynamic_lr.py +482 -0
- mindspore/nn/grad/__init__.py +21 -0
- mindspore/nn/grad/cell_grad.py +196 -0
- mindspore/nn/layer/__init__.py +63 -0
- mindspore/nn/layer/activation.py +1822 -0
- mindspore/nn/layer/basic.py +1629 -0
- mindspore/nn/layer/channel_shuffle.py +90 -0
- mindspore/nn/layer/combined.py +248 -0
- mindspore/nn/layer/container.py +734 -0
- mindspore/nn/layer/conv.py +1505 -0
- mindspore/nn/layer/dense.py +204 -0
- mindspore/nn/layer/embedding.py +869 -0
- mindspore/nn/layer/image.py +661 -0
- mindspore/nn/layer/math.py +1069 -0
- mindspore/nn/layer/normalization.py +1273 -0
- mindspore/nn/layer/padding.py +880 -0
- mindspore/nn/layer/pooling.py +2302 -0
- mindspore/nn/layer/rnn_cells.py +388 -0
- mindspore/nn/layer/rnns.py +849 -0
- mindspore/nn/layer/thor_layer.py +963 -0
- mindspore/nn/layer/timedistributed.py +155 -0
- mindspore/nn/layer/transformer.py +823 -0
- mindspore/nn/learning_rate_schedule.py +512 -0
- mindspore/nn/loss/__init__.py +36 -0
- mindspore/nn/loss/loss.py +2924 -0
- mindspore/nn/metrics.py +53 -0
- mindspore/nn/optim/__init__.py +45 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
- mindspore/nn/optim/ada_grad.py +217 -0
- mindspore/nn/optim/adadelta.py +206 -0
- mindspore/nn/optim/adafactor.py +448 -0
- mindspore/nn/optim/adam.py +1297 -0
- mindspore/nn/optim/adamax.py +220 -0
- mindspore/nn/optim/adasum.py +548 -0
- mindspore/nn/optim/asgd.py +216 -0
- mindspore/nn/optim/ftrl.py +401 -0
- mindspore/nn/optim/lamb.py +296 -0
- mindspore/nn/optim/lars.py +202 -0
- mindspore/nn/optim/lazyadam.py +533 -0
- mindspore/nn/optim/momentum.py +239 -0
- mindspore/nn/optim/optimizer.py +1034 -0
- mindspore/nn/optim/proximal_ada_grad.py +242 -0
- mindspore/nn/optim/rmsprop.py +264 -0
- mindspore/nn/optim/rprop.py +251 -0
- mindspore/nn/optim/sgd.py +237 -0
- mindspore/nn/optim/tft_wrapper.py +127 -0
- mindspore/nn/optim/thor.py +1310 -0
- mindspore/nn/probability/__init__.py +22 -0
- mindspore/nn/probability/bijector/__init__.py +35 -0
- mindspore/nn/probability/bijector/bijector.py +337 -0
- mindspore/nn/probability/bijector/exp.py +65 -0
- mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
- mindspore/nn/probability/bijector/invert.py +126 -0
- mindspore/nn/probability/bijector/power_transform.py +196 -0
- mindspore/nn/probability/bijector/scalar_affine.py +167 -0
- mindspore/nn/probability/bijector/softplus.py +189 -0
- mindspore/nn/probability/bnn_layers/__init__.py +29 -0
- mindspore/nn/probability/bnn_layers/_util.py +46 -0
- mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
- mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
- mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
- mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
- mindspore/nn/probability/distribution/__init__.py +56 -0
- mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
- mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
- mindspore/nn/probability/distribution/_utils/utils.py +362 -0
- mindspore/nn/probability/distribution/bernoulli.py +334 -0
- mindspore/nn/probability/distribution/beta.py +391 -0
- mindspore/nn/probability/distribution/categorical.py +435 -0
- mindspore/nn/probability/distribution/cauchy.py +383 -0
- mindspore/nn/probability/distribution/distribution.py +827 -0
- mindspore/nn/probability/distribution/exponential.py +350 -0
- mindspore/nn/probability/distribution/gamma.py +391 -0
- mindspore/nn/probability/distribution/geometric.py +335 -0
- mindspore/nn/probability/distribution/gumbel.py +257 -0
- mindspore/nn/probability/distribution/half_normal.py +133 -0
- mindspore/nn/probability/distribution/laplace.py +128 -0
- mindspore/nn/probability/distribution/log_normal.py +272 -0
- mindspore/nn/probability/distribution/logistic.py +379 -0
- mindspore/nn/probability/distribution/normal.py +336 -0
- mindspore/nn/probability/distribution/poisson.py +288 -0
- mindspore/nn/probability/distribution/student_t.py +149 -0
- mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
- mindspore/nn/probability/distribution/uniform.py +375 -0
- mindspore/nn/reinforcement/__init__.py +24 -0
- mindspore/nn/reinforcement/_batch_read_write.py +142 -0
- mindspore/nn/reinforcement/_tensors_queue.py +152 -0
- mindspore/nn/reinforcement/tensor_array.py +145 -0
- mindspore/nn/sparse/__init__.py +23 -0
- mindspore/nn/sparse/sparse.py +147 -0
- mindspore/nn/wrap/__init__.py +49 -0
- mindspore/nn/wrap/cell_wrapper.py +968 -0
- mindspore/nn/wrap/grad_reducer.py +608 -0
- mindspore/nn/wrap/loss_scale.py +694 -0
- mindspore/numpy/__init__.py +121 -0
- mindspore/numpy/array_creations.py +2731 -0
- mindspore/numpy/array_ops.py +2629 -0
- mindspore/numpy/dtypes.py +185 -0
- mindspore/numpy/fft.py +966 -0
- mindspore/numpy/logic_ops.py +936 -0
- mindspore/numpy/math_ops.py +5911 -0
- mindspore/numpy/utils.py +214 -0
- mindspore/numpy/utils_const.py +565 -0
- mindspore/ops/__init__.py +56 -0
- mindspore/ops/_constants.py +30 -0
- mindspore/ops/_grad_experimental/__init__.py +31 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
- mindspore/ops/_grad_experimental/grad_base.py +143 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +714 -0
- mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
- mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +802 -0
- mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
- mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
- mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
- mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
- mindspore/ops/_op_impl/__init__.py +23 -0
- mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
- mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
- mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
- mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/acos.py +32 -0
- mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
- mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
- mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
- mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
- mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
- mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
- mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
- mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/angle.py +31 -0
- mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
- mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
- mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/asin.py +32 -0
- mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
- mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
- mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
- mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
- mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
- mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
- mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
- mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
- mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
- mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
- mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
- mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
- mindspore/ops/_op_impl/aicpu/cast.py +225 -0
- mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
- mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
- mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
- mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
- mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
- mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
- mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
- mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
- mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
- mindspore/ops/_op_impl/aicpu/complex.py +32 -0
- mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
- mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
- mindspore/ops/_op_impl/aicpu/concat.py +57 -0
- mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
- mindspore/ops/_op_impl/aicpu/conj.py +42 -0
- mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
- mindspore/ops/_op_impl/aicpu/cos.py +34 -0
- mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
- mindspore/ops/_op_impl/aicpu/cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
- mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
- mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
- mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
- mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
- mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
- mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
- mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
- mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
- mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
- mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
- mindspore/ops/_op_impl/aicpu/diag.py +36 -0
- mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
- mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
- mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
- mindspore/ops/_op_impl/aicpu/div.py +41 -0
- mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
- mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
- mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
- mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
- mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
- mindspore/ops/_op_impl/aicpu/eig.py +35 -0
- mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
- mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
- mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
- mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
- mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
- mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/exp.py +37 -0
- mindspore/ops/_op_impl/aicpu/expand.py +45 -0
- mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
- mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
- mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
- mindspore/ops/_op_impl/aicpu/eye.py +44 -0
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
- mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
- mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
- mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
- mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
- mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
- mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
- mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
- mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
- mindspore/ops/_op_impl/aicpu/gather.py +46 -0
- mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
- mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
- mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
- mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
- mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
- mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
- mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
- mindspore/ops/_op_impl/aicpu/glu.py +33 -0
- mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
- mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
- mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
- mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
- mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
- mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
- mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
- mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
- mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
- mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
- mindspore/ops/_op_impl/aicpu/identity.py +42 -0
- mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
- mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
- mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
- mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
- mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
- mindspore/ops/_op_impl/aicpu/imag.py +31 -0
- mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
- mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
- mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
- mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
- mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
- mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
- mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
- mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
- mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
- mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
- mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
- mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
- mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
- mindspore/ops/_op_impl/aicpu/log.py +37 -0
- mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
- mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
- mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
- mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
- mindspore/ops/_op_impl/aicpu/logit.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
- mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
- mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
- mindspore/ops/_op_impl/aicpu/lu.py +39 -0
- mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
- mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
- mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
- mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
- mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
- mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
- mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
- mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
- mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
- mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
- mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
- mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
- mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
- mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
- mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
- mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
- mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
- mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
- mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
- mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
- mindspore/ops/_op_impl/aicpu/median.py +39 -0
- mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
- mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
- mindspore/ops/_op_impl/aicpu/mul.py +43 -0
- mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
- mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
- mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
- mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
- mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
- mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
- mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
- mindspore/ops/_op_impl/aicpu/neg.py +36 -0
- mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
- mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
- mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
- mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
- mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
- mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
- mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
- mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
- mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
- mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
- mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
- mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
- mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
- mindspore/ops/_op_impl/aicpu/padding.py +41 -0
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
- mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/pow.py +39 -0
- mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
- mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
- mindspore/ops/_op_impl/aicpu/qr.py +36 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
- mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
- mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
- mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
- mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/range.py +36 -0
- mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
- mindspore/ops/_op_impl/aicpu/real.py +31 -0
- mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
- mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
- mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
- mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
- mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
- mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
- mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
- mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
- mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
- mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
- mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
- mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
- mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
- mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
- mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/round.py +34 -0
- mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
- mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
- mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
- mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
- mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
- mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
- mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
- mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
- mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
- mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
- mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
- mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
- mindspore/ops/_op_impl/aicpu/select.py +45 -0
- mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
- mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
- mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
- mindspore/ops/_op_impl/aicpu/sign.py +36 -0
- mindspore/ops/_op_impl/aicpu/sin.py +34 -0
- mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
- mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
- mindspore/ops/_op_impl/aicpu/slice.py +59 -0
- mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sort.py +39 -0
- mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
- mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
- mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
- mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
- mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
- mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
- mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
- mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
- mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
- mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
- mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
- mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
- mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
- mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
- mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
- mindspore/ops/_op_impl/aicpu/split.py +45 -0
- mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
- mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/square.py +35 -0
- mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
- mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
- mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
- mindspore/ops/_op_impl/aicpu/stack.py +45 -0
- mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
- mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
- mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
- mindspore/ops/_op_impl/aicpu/stft.py +70 -0
- mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
- mindspore/ops/_op_impl/aicpu/sub.py +41 -0
- mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
- mindspore/ops/_op_impl/aicpu/tan.py +34 -0
- mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
- mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/tile.py +56 -0
- mindspore/ops/_op_impl/aicpu/topk.py +34 -0
- mindspore/ops/_op_impl/aicpu/trace.py +40 -0
- mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
- mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
- mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
- mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/tril.py +42 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
- mindspore/ops/_op_impl/aicpu/triu.py +43 -0
- mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
- mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
- mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
- mindspore/ops/_op_impl/aicpu/unique.py +31 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
- mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
- mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
- mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
- mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
- mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
- mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
- mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
- mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
- mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
- mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
- mindspore/ops/_op_impl/akg/__init__.py +19 -0
- mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
- mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
- mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
- mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
- mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
- mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
- mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
- mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
- mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
- mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
- mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
- mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
- mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
- mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
- mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
- mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
- mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
- mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
- mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
- mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
- mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
- mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
- mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
- mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
- mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
- mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
- mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
- mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
- mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
- mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
- mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
- mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
- mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
- mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
- mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
- mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
- mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
- mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
- mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
- mindspore/ops/_op_impl/cpu/__init__.py +78 -0
- mindspore/ops/_op_impl/cpu/adam.py +49 -0
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
- mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
- mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
- mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
- mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
- mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
- mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
- mindspore/ops/_op_impl/cpu/cast.py +171 -0
- mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
- mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
- mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
- mindspore/ops/_op_impl/cpu/div.py +32 -0
- mindspore/ops/_op_impl/cpu/dropout.py +31 -0
- mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
- mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
- mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
- mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
- mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
- mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
- mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
- mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
- mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
- mindspore/ops/_op_impl/cpu/hswish.py +32 -0
- mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
- mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
- mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
- mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
- mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
- mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
- mindspore/ops/_op_impl/cpu/maximum.py +35 -0
- mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
- mindspore/ops/_op_impl/cpu/minimum.py +40 -0
- mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
- mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
- mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
- mindspore/ops/_op_impl/cpu/mul.py +32 -0
- mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
- mindspore/ops/_op_impl/cpu/pad.py +32 -0
- mindspore/ops/_op_impl/cpu/pow.py +32 -0
- mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
- mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
- mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
- mindspore/ops/_op_impl/cpu/range.py +34 -0
- mindspore/ops/_op_impl/cpu/real_div.py +33 -0
- mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
- mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
- mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
- mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
- mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
- mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
- mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
- mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
- mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
- mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
- mindspore/ops/_op_impl/cpu/split.py +34 -0
- mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
- mindspore/ops/_op_impl/cpu/stack.py +38 -0
- mindspore/ops/_op_impl/cpu/sub.py +32 -0
- mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
- mindspore/ops/_op_impl/cpu/tile.py +37 -0
- mindspore/ops/_op_impl/cpu/top_k.py +31 -0
- mindspore/ops/_op_impl/cpu/transpose.py +39 -0
- mindspore/ops/_primitive_cache.py +90 -0
- mindspore/ops/_register_for_op.py +73 -0
- mindspore/ops/_utils/__init__.py +20 -0
- mindspore/ops/_utils/utils.py +147 -0
- mindspore/ops/_vmap/__init__.py +25 -0
- mindspore/ops/_vmap/vmap_array_ops.py +2149 -0
- mindspore/ops/_vmap/vmap_base.py +533 -0
- mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
- mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
- mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
- mindspore/ops/_vmap/vmap_image_ops.py +194 -0
- mindspore/ops/_vmap/vmap_math_ops.py +993 -0
- mindspore/ops/_vmap/vmap_nn_ops.py +2250 -0
- mindspore/ops/_vmap/vmap_other_ops.py +105 -0
- mindspore/ops/_vmap/vmap_random_ops.py +122 -0
- mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
- mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
- mindspore/ops/composite/__init__.py +71 -0
- mindspore/ops/composite/base.py +1318 -0
- mindspore/ops/composite/env_ops.py +41 -0
- mindspore/ops/composite/math_ops.py +125 -0
- mindspore/ops/composite/multitype_ops/__init__.py +77 -0
- mindspore/ops/composite/multitype_ops/_compile_utils.py +1459 -0
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
- mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
- mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
- mindspore/ops/deprecated.py +315 -0
- mindspore/ops/function/__init__.py +782 -0
- mindspore/ops/function/array_func.py +7226 -0
- mindspore/ops/function/clip_func.py +384 -0
- mindspore/ops/function/debug_func.py +181 -0
- mindspore/ops/function/fft_func.py +44 -0
- mindspore/ops/function/grad/__init__.py +34 -0
- mindspore/ops/function/grad/grad_func.py +1425 -0
- mindspore/ops/function/image_func.py +292 -0
- mindspore/ops/function/linalg_func.py +416 -0
- mindspore/ops/function/math_func.py +12228 -0
- mindspore/ops/function/nn_func.py +8609 -0
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +134 -0
- mindspore/ops/function/random_func.py +1715 -0
- mindspore/ops/function/reshard_func.py +104 -0
- mindspore/ops/function/sparse_func.py +884 -0
- mindspore/ops/function/sparse_unary_func.py +2422 -0
- mindspore/ops/function/spectral_func.py +150 -0
- mindspore/ops/function/vmap_func.py +117 -0
- mindspore/ops/functional.py +464 -0
- mindspore/ops/op_info_register.py +1572 -0
- mindspore/ops/operations/__init__.py +722 -0
- mindspore/ops/operations/_csr_ops.py +403 -0
- mindspore/ops/operations/_custom_grad.py +181 -0
- mindspore/ops/operations/_embedding_cache_ops.py +307 -0
- mindspore/ops/operations/_grad_ops.py +2978 -0
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +2544 -0
- mindspore/ops/operations/_map_tensor_ops.py +112 -0
- mindspore/ops/operations/_ms_kernel.py +601 -0
- mindspore/ops/operations/_ocr_ops.py +379 -0
- mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
- mindspore/ops/operations/_pyfunc_registry.py +58 -0
- mindspore/ops/operations/_quant_ops.py +1844 -0
- mindspore/ops/operations/_rl_inner_ops.py +1231 -0
- mindspore/ops/operations/_scalar_ops.py +106 -0
- mindspore/ops/operations/_sequence_ops.py +1155 -0
- mindspore/ops/operations/_sparse_grad_ops.py +56 -0
- mindspore/ops/operations/_tensor_array.py +359 -0
- mindspore/ops/operations/_thor_ops.py +807 -0
- mindspore/ops/operations/array_ops.py +6124 -0
- mindspore/ops/operations/comm_ops.py +1985 -0
- mindspore/ops/operations/control_ops.py +127 -0
- mindspore/ops/operations/custom_ops.py +1129 -0
- mindspore/ops/operations/debug_ops.py +678 -0
- mindspore/ops/operations/image_ops.py +1041 -0
- mindspore/ops/operations/inner_ops.py +697 -0
- mindspore/ops/operations/linalg_ops.py +95 -0
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +73 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
- mindspore/ops/operations/math_ops.py +5095 -0
- mindspore/ops/operations/nn_ops.py +9575 -0
- mindspore/ops/operations/other_ops.py +874 -0
- mindspore/ops/operations/random_ops.py +1288 -0
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/rl_ops.py +288 -0
- mindspore/ops/operations/sparse_ops.py +2753 -0
- mindspore/ops/operations/spectral_ops.py +111 -0
- mindspore/ops/primitive.py +1046 -0
- mindspore/ops/signature.py +54 -0
- mindspore/ops/vm_impl_registry.py +91 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +252 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +1099 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +1052 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +145 -0
- mindspore/ops_generate/pyboost_utils.py +367 -0
- mindspore/ops_generate/template.py +261 -0
- mindspore/parallel/__init__.py +30 -0
- mindspore/parallel/_auto_parallel_context.py +1486 -0
- mindspore/parallel/_cell_wrapper.py +174 -0
- mindspore/parallel/_cost_model_context.py +700 -0
- mindspore/parallel/_dp_allreduce_fusion.py +159 -0
- mindspore/parallel/_offload_context.py +275 -0
- mindspore/parallel/_parallel_serialization.py +561 -0
- mindspore/parallel/_ps_context.py +242 -0
- mindspore/parallel/_recovery_context.py +110 -0
- mindspore/parallel/_tensor.py +730 -0
- mindspore/parallel/_transformer/__init__.py +35 -0
- mindspore/parallel/_transformer/layers.py +765 -0
- mindspore/parallel/_transformer/loss.py +251 -0
- mindspore/parallel/_transformer/moe.py +693 -0
- mindspore/parallel/_transformer/op_parallel_config.py +222 -0
- mindspore/parallel/_transformer/transformer.py +3119 -0
- mindspore/parallel/_utils.py +612 -0
- mindspore/parallel/algo_parameter_config.py +400 -0
- mindspore/parallel/checkpoint_transform.py +650 -0
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +352 -0
- mindspore/parallel/cluster/process_entity/_utils.py +101 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +14 -0
- mindspore/parallel/mpi/_mpi_config.py +116 -0
- mindspore/parallel/parameter_broadcast.py +151 -0
- mindspore/parallel/shard.py +481 -0
- mindspore/parallel/transform_safetensors.py +993 -0
- mindspore/profiler/__init__.py +28 -0
- mindspore/profiler/common/__init__.py +14 -0
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/exceptions/__init__.py +14 -0
- mindspore/profiler/common/exceptions/error_code.py +83 -0
- mindspore/profiler/common/exceptions/exceptions.py +286 -0
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/struct_type.py +118 -0
- mindspore/profiler/common/util.py +472 -0
- mindspore/profiler/common/validator/__init__.py +14 -0
- mindspore/profiler/common/validator/validate_path.py +84 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +254 -0
- mindspore/profiler/parser/__init__.py +14 -0
- mindspore/profiler/parser/aicpu_data_parser.py +272 -0
- mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
- mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +116 -0
- mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
- mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +282 -0
- mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
- mindspore/profiler/parser/ascend_op_generator.py +334 -0
- mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
- mindspore/profiler/parser/ascend_timeline_generator.py +545 -0
- mindspore/profiler/parser/base_timeline_generator.py +483 -0
- mindspore/profiler/parser/container.py +229 -0
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +697 -0
- mindspore/profiler/parser/flops_parser.py +531 -0
- mindspore/profiler/parser/framework_enum.py +111 -0
- mindspore/profiler/parser/framework_parser.py +464 -0
- mindspore/profiler/parser/framework_struct.py +61 -0
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/hccl_parser.py +573 -0
- mindspore/profiler/parser/hwts_log_parser.py +122 -0
- mindspore/profiler/parser/integrator.py +526 -0
- mindspore/profiler/parser/memory_usage_parser.py +277 -0
- mindspore/profiler/parser/minddata_analyzer.py +800 -0
- mindspore/profiler/parser/minddata_parser.py +186 -0
- mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
- mindspore/profiler/parser/op_intermediate_parser.py +149 -0
- mindspore/profiler/parser/optime_parser.py +250 -0
- mindspore/profiler/parser/profiler_info.py +213 -0
- mindspore/profiler/parser/step_trace_parser.py +666 -0
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +1922 -0
- mindspore/rewrite/__init__.py +28 -0
- mindspore/rewrite/api/__init__.py +17 -0
- mindspore/rewrite/api/node.py +519 -0
- mindspore/rewrite/api/node_type.py +53 -0
- mindspore/rewrite/api/pattern_engine.py +490 -0
- mindspore/rewrite/api/scoped_value.py +181 -0
- mindspore/rewrite/api/symbol_tree.py +497 -0
- mindspore/rewrite/ast_helpers/__init__.py +25 -0
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
- mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
- mindspore/rewrite/common/__init__.py +19 -0
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/error_log.py +39 -0
- mindspore/rewrite/common/event.py +28 -0
- mindspore/rewrite/common/namer.py +271 -0
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/common/observable.py +44 -0
- mindspore/rewrite/common/observer.py +54 -0
- mindspore/rewrite/node/__init__.py +22 -0
- mindspore/rewrite/node/call_function.py +95 -0
- mindspore/rewrite/node/cell_container.py +139 -0
- mindspore/rewrite/node/control_flow.py +113 -0
- mindspore/rewrite/node/node.py +1428 -0
- mindspore/rewrite/node/node_manager.py +283 -0
- mindspore/rewrite/node/node_topological_manager.py +223 -0
- mindspore/rewrite/parsers/__init__.py +29 -0
- mindspore/rewrite/parsers/arguments_parser.py +63 -0
- mindspore/rewrite/parsers/assign_parser.py +852 -0
- mindspore/rewrite/parsers/attribute_parser.py +57 -0
- mindspore/rewrite/parsers/class_def_parser.py +289 -0
- mindspore/rewrite/parsers/constant_parser.py +104 -0
- mindspore/rewrite/parsers/container_parser.py +88 -0
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +61 -0
- mindspore/rewrite/parsers/function_def_parser.py +84 -0
- mindspore/rewrite/parsers/if_parser.py +85 -0
- mindspore/rewrite/parsers/module_parser.py +117 -0
- mindspore/rewrite/parsers/parser.py +43 -0
- mindspore/rewrite/parsers/parser_register.py +86 -0
- mindspore/rewrite/parsers/return_parser.py +37 -0
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
- mindspore/rewrite/sparsify/sparsify.py +112 -0
- mindspore/rewrite/sparsify/utils.py +179 -0
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
- mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
- mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
- mindspore/run_check/__init__.py +20 -0
- mindspore/run_check/_check_version.py +507 -0
- mindspore/run_check/run_check.py +66 -0
- mindspore/safeguard/__init__.py +18 -0
- mindspore/safeguard/rewrite_obfuscation.py +875 -0
- mindspore/scipy/__init__.py +18 -0
- mindspore/scipy/fft.py +264 -0
- mindspore/scipy/linalg.py +919 -0
- mindspore/scipy/ops.py +165 -0
- mindspore/scipy/ops_grad.py +115 -0
- mindspore/scipy/ops_wrapper.py +74 -0
- mindspore/scipy/optimize/__init__.py +20 -0
- mindspore/scipy/optimize/_bfgs.py +230 -0
- mindspore/scipy/optimize/_lagrange.py +201 -0
- mindspore/scipy/optimize/_lbfgs.py +146 -0
- mindspore/scipy/optimize/gradient_optimization_algorithm.py +168 -0
- mindspore/scipy/optimize/line_search.py +370 -0
- mindspore/scipy/optimize/linear_sum_assignment.py +78 -0
- mindspore/scipy/optimize/minimize.py +200 -0
- mindspore/scipy/utils.py +156 -0
- mindspore/scipy/utils_const.py +246 -0
- mindspore/train/__init__.py +48 -0
- mindspore/train/_utils.py +465 -0
- mindspore/train/amp.py +935 -0
- mindspore/train/anf_ir_pb2.py +1517 -0
- mindspore/train/callback/__init__.py +44 -0
- mindspore/train/callback/_backup_and_restore.py +117 -0
- mindspore/train/callback/_callback.py +613 -0
- mindspore/train/callback/_checkpoint.py +814 -0
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_dataset_graph.py +150 -0
- mindspore/train/callback/_early_stop.py +239 -0
- mindspore/train/callback/_flops_collector.py +239 -0
- mindspore/train/callback/_history.py +92 -0
- mindspore/train/callback/_lambda_callback.py +80 -0
- mindspore/train/callback/_landscape.py +1049 -0
- mindspore/train/callback/_loss_monitor.py +107 -0
- mindspore/train/callback/_lr_scheduler_callback.py +76 -0
- mindspore/train/callback/_on_request_exit.py +298 -0
- mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
- mindspore/train/callback/_summary_collector.py +1184 -0
- mindspore/train/callback/_tft_register.py +352 -0
- mindspore/train/callback/_time_monitor.py +141 -0
- mindspore/train/checkpoint_pb2.py +233 -0
- mindspore/train/data_sink.py +219 -0
- mindspore/train/dataset_helper.py +692 -0
- mindspore/train/lineage_pb2.py +1260 -0
- mindspore/train/loss_scale_manager.py +213 -0
- mindspore/train/memory_profiling_pb2.py +298 -0
- mindspore/train/metrics/__init__.py +175 -0
- mindspore/train/metrics/accuracy.py +133 -0
- mindspore/train/metrics/auc.py +129 -0
- mindspore/train/metrics/bleu_score.py +170 -0
- mindspore/train/metrics/confusion_matrix.py +700 -0
- mindspore/train/metrics/cosine_similarity.py +109 -0
- mindspore/train/metrics/dice.py +116 -0
- mindspore/train/metrics/error.py +175 -0
- mindspore/train/metrics/fbeta.py +167 -0
- mindspore/train/metrics/hausdorff_distance.py +333 -0
- mindspore/train/metrics/loss.py +97 -0
- mindspore/train/metrics/mean_surface_distance.py +189 -0
- mindspore/train/metrics/metric.py +373 -0
- mindspore/train/metrics/occlusion_sensitivity.py +225 -0
- mindspore/train/metrics/perplexity.py +133 -0
- mindspore/train/metrics/precision.py +160 -0
- mindspore/train/metrics/recall.py +159 -0
- mindspore/train/metrics/roc.py +223 -0
- mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
- mindspore/train/metrics/topk.py +167 -0
- mindspore/train/mind_ir_pb2.py +1908 -0
- mindspore/train/model.py +2252 -0
- mindspore/train/node_strategy_pb2.py +653 -0
- mindspore/train/print_pb2.py +184 -0
- mindspore/train/profiling_parallel_pb2.py +151 -0
- mindspore/train/serialization.py +3325 -0
- mindspore/train/summary/__init__.py +23 -0
- mindspore/train/summary/_lineage_adapter.py +41 -0
- mindspore/train/summary/_summary_adapter.py +496 -0
- mindspore/train/summary/_writer_pool.py +207 -0
- mindspore/train/summary/enums.py +56 -0
- mindspore/train/summary/summary_record.py +581 -0
- mindspore/train/summary/writer.py +167 -0
- mindspore/train/summary_pb2.py +1165 -0
- mindspore/train/train_thor/__init__.py +20 -0
- mindspore/train/train_thor/convert_utils.py +268 -0
- mindspore/train/train_thor/dataset_helper.py +192 -0
- mindspore/train/train_thor/model_thor.py +257 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/version.py +1 -0
- mindspore-2.4.0.dist-info/METADATA +352 -0
- mindspore-2.4.0.dist-info/RECORD +1387 -0
- mindspore-2.4.0.dist-info/WHEEL +5 -0
- mindspore-2.4.0.dist-info/entry_points.txt +3 -0
- mindspore-2.4.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1081 @@
|
|
|
1
|
+
# Copyright 2020-2024 Huawei Technologies Co., Ltd
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ============================================================================
|
|
15
|
+
|
|
16
|
+
"""Parameter for cell."""
|
|
17
|
+
from __future__ import absolute_import
|
|
18
|
+
|
|
19
|
+
from copy import copy
|
|
20
|
+
import time
|
|
21
|
+
import os
|
|
22
|
+
import sys
|
|
23
|
+
import math
|
|
24
|
+
import numbers
|
|
25
|
+
import numpy as np
|
|
26
|
+
from mindspore import log as logger
|
|
27
|
+
from mindspore.log import _LogActionOnce
|
|
28
|
+
from mindspore._c_expression import ParamInfo
|
|
29
|
+
from mindspore.common import dtype as mstype
|
|
30
|
+
from mindspore import context
|
|
31
|
+
from mindspore.parallel._utils import _get_parallel_mode, _get_global_rank
|
|
32
|
+
from mindspore.common._utils import get_slice_num, get_slice_shape
|
|
33
|
+
from mindspore.common.initializer import initializer
|
|
34
|
+
from mindspore.common.tensor import Tensor
|
|
35
|
+
from mindspore import _checkparam as Validator
|
|
36
|
+
from mindspore._check_jit_forbidden_api import jit_forbidden_register
|
|
37
|
+
from mindspore._c_expression import Tensor as Tensor_
|
|
38
|
+
from mindspore.parallel._tensor import _get_slice_index
|
|
39
|
+
from mindspore.parallel._auto_parallel_context import auto_parallel_context
|
|
40
|
+
from mindspore.parallel._ps_context import _is_role_worker, _is_role_pserver, _is_role_sched, _clone_hash_table, \
|
|
41
|
+
_is_ps_mode
|
|
42
|
+
from mindspore.parallel._ps_context import _reinsert_hash_table_size, _insert_accumu_init_info, _cache_enable
|
|
43
|
+
from mindspore.common._decorator import deprecated
|
|
44
|
+
from mindspore.communication._comm_helper import _is_initialized
|
|
45
|
+
from mindspore.communication import get_group_size
|
|
46
|
+
import mindspore.common._monad as monad
|
|
47
|
+
|
|
48
|
+
__all__ = ['Parameter', 'ParameterTuple']
|
|
49
|
+
|
|
50
|
+
PARAMETER_NAME_DEFAULT = "Parameter"
|
|
51
|
+
PARAMETER_NAME_PREFIX_MAX_LEN = 1024
|
|
52
|
+
|
|
53
|
+
# Global variable for parameter unique key.
|
|
54
|
+
_GLOBAL_PARAMETER_KEY = -1
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _is_in_auto_parallel_mode():
    """Return True when the global parallel mode is semi-auto or auto parallel."""
    mode = auto_parallel_context().get_parallel_mode()
    return mode in ("semi_auto_parallel", "auto_parallel")
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _is_parallel_mode():
    """Whether the process is running distributed prediction in parallel mode."""
    # Parallel mode requires an initialized communication layer and graph mode.
    if not _is_initialized() or context.get_context('mode') == context.PYNATIVE_MODE:
        return False
    # Only the predict run mode is treated as parallel here.
    if os.getenv("RUN_MODE") != "predict":
        return False
    return get_group_size() > 1 and _get_parallel_mode() == "stand_alone"
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def init_to_value(init):
    """
    Get value of initializer.

    Returns:
        Value of the initializer.

    Raises:
        ValueError: The value of the argument 'init' is not correct.
    """
    if isinstance(init, str):
        known = {'zeros': 0.0, 'ones': 1.0}
        if init in known:
            return known[init]
        raise ValueError("The argument 'init' should be one of values in ['zeros', 'ones'].")
    if isinstance(init, numbers.Number):
        return float(init)
    raise ValueError("The argument 'init' should be number or string, but got {}.".format(type(init)))
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _get_unique_parameter_key():
    """
    Get parameter unique key.
    Used to identify the same Parameter for Worker and Server in the embedding cache scenario.

    Returns:
        Integer. The unique parameter key.
    """
    # Monotonically increasing module-level counter; starts from 0 on first call.
    global _GLOBAL_PARAMETER_KEY
    _GLOBAL_PARAMETER_KEY = _GLOBAL_PARAMETER_KEY + 1
    return _GLOBAL_PARAMETER_KEY
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def _gen_offload_file_path(offload_dir):
    """Generate a unique file path under `offload_dir` for offloaded parameter data.

    The directory is created (mode 0o700) when missing.  The file name combines
    the global rank id, a process-unique parameter key and the current timestamp
    so that concurrent writers never collide.

    Args:
        offload_dir (str): Directory that stores the offloaded data files.

    Returns:
        str: Relative path of the generated ".data" file.
    """
    offload_dir = os.path.relpath(offload_dir)
    if not os.path.exists(offload_dir):
        os.makedirs(offload_dir, mode=0o700, exist_ok=True)
    # os.path.join instead of manual "/" concatenation keeps the path portable.
    file_name = str(_get_global_rank()) + "_" + str(_get_unique_parameter_key()) + "_" + str(time.time()) + ".data"
    return os.path.join(offload_dir, file_name)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def _offload_if_config(data):
    """
    Offload parameter(data size > 512) to file when enable memory offload and offload parameter to disk.

    Args:
        data: The parameter data to offload.
    """
    # Guard clauses: feature off, nothing to offload, or disk offload not selected.
    if not context.get_context("memory_offload") or data is None:
        return

    offload_cfg = context.get_offload_context()
    if offload_cfg.get("offload_param", None) != "disk":
        return

    # Small tensors are cheaper to keep in memory than to round-trip to disk.
    data_size_threshold = 512
    if data.nbytes < data_size_threshold:
        return

    target_path = data.offload_file_path()
    if target_path in (None, ""):
        target_path = _gen_offload_file_path(offload_cfg.get("offload_path", "./offload"))
    data.offload(target_path)
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
class Parameter(Tensor_):
|
|
141
|
+
"""
|
|
142
|
+
`Parameter` is a `Tensor` subclass, when they are assigned as Cell attributes they are automatically added to
|
|
143
|
+
the list of its parameters, and will appear, e.g. in `cell.get_parameters()` iterator.
|
|
144
|
+
|
|
145
|
+
Note:
|
|
146
|
+
- In auto_parallel mode of `SEMI_AUTO_PARALLEL` and `AUTO_PARALLEL`, if init `Parameter` by
|
|
147
|
+
a `Tensor`, the type of Parameter will be `Tensor`. `Tensor` will save the shape and type info of a tensor
|
|
148
|
+
with no memory usage.
|
|
149
|
+
|
|
150
|
+
- The shape can be changed while
|
|
151
|
+
compiling for auto-parallel. Call `init_data` will return a Tensor Parameter with initialized data.
|
|
152
|
+
|
|
153
|
+
- If there is an operator in the network that requires part of the inputs to be Parameter,
|
|
154
|
+
then the Parameters as this part of the inputs are not allowed to be cast.
|
|
155
|
+
|
|
156
|
+
- Give each `Parameter` a unique name to facilitate subsequent operations and updates.
|
|
157
|
+
If there are two or more `Parameter` objects with the same name in a network,
|
|
158
|
+
will be prompted to set a unique name when defining.
|
|
159
|
+
|
|
160
|
+
- When directly printing a `Parameter`, you cannot view the actual values contained inside it.
|
|
161
|
+
You need to use the `Parameter.asnumpy()` method to access the actual values.
|
|
162
|
+
|
|
163
|
+
Args:
|
|
164
|
+
default_input (Union[Tensor, int, float, numpy.ndarray, list]): Parameter data,
|
|
165
|
+
to initialize the parameter data.
|
|
166
|
+
name (str): Name of the parameter. Default: ``None`` . If two or more `Parameter`
|
|
167
|
+
objects with the same name exist in a network,
|
|
168
|
+
you will be prompted to set a unique name when defining them.
|
|
169
|
+
|
|
170
|
+
1) If the parameter is not given a name, the default name is its variable name. For example, the name of
|
|
171
|
+
param_a below is name_a, and the name of param_b is the variable name param_b.
|
|
172
|
+
|
|
173
|
+
.. code-block::
|
|
174
|
+
|
|
175
|
+
self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
|
|
176
|
+
self.param_b = Parameter(Tensor([2], ms.float32))
|
|
177
|
+
|
|
178
|
+
2) If parameter in list or tuple is not given a name, will give it a unique name. For example, the names of
|
|
179
|
+
parameters below are **Parameter$1** and **Parameter$2**.
|
|
180
|
+
|
|
181
|
+
.. code-block::
|
|
182
|
+
|
|
183
|
+
self.param_list = [Parameter(Tensor([3], ms.float32)),
|
|
184
|
+
Parameter(Tensor([4], ms.float32))]
|
|
185
|
+
|
|
186
|
+
3) If the parameter is given a name, and the same name exists between different parameters, an exception
|
|
187
|
+
will be thrown. For example, "its name 'name_a' already exists." will be thrown.
|
|
188
|
+
|
|
189
|
+
.. code-block::
|
|
190
|
+
|
|
191
|
+
self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
|
|
192
|
+
self.param_tuple = (Parameter(Tensor([5], ms.float32), name="name_a"),
|
|
193
|
+
Parameter(Tensor([6], ms.float32)))
|
|
194
|
+
|
|
195
|
+
4) If a parameter appear multiple times in list or tuple, check the name of the object only once. For
|
|
196
|
+
example, the following example will not throw an exception.
|
|
197
|
+
|
|
198
|
+
.. code-block::
|
|
199
|
+
|
|
200
|
+
self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
|
|
201
|
+
self.param_tuple = (self.param_a, self.param_a)
|
|
202
|
+
|
|
203
|
+
requires_grad (bool): True if the parameter requires gradient. Default: ``True`` .
|
|
204
|
+
layerwise_parallel (bool): When `layerwise_parallel` is true in data/hybrid parallel mode,
|
|
205
|
+
broadcast and gradients communication would not be applied to the `Parameter`. Default: ``False`` .
|
|
206
|
+
parallel_optimizer (bool): It is used to filter the weight shard operation in `SEMI_AUTO_PARALLEL` or
|
|
207
|
+
`AUTO_PARALLEL` mode. It works only when enable parallel optimizer in
|
|
208
|
+
`mindspore.set_auto_parallel_context()`. Default: ``True`` .
|
|
209
|
+
storage_format (str): Only Ascend device target is supported. It is used to specify the format of the weight
|
|
210
|
+
loaded to the device. By default, the format is not changed. The optional values are ``"FRACTAL_NZ"`` ,
|
|
211
|
+
``"NC1HWC0"`` , ``"FRACTAL_Z"`` , etc. Default: ``""`` .
|
|
212
|
+
device(str): Only Ascend device target is supported. It is used to specify the device which the parameter is
|
|
213
|
+
stored. By default, the parameter will be stored on NPU while computing. When the device is specified as
|
|
214
|
+
``"CPU"``, the parameter will be loaded into the device when it needs to be used, and unloaded to the CPU
|
|
215
|
+
after use. It takes effect only when `memory_offload` is ``"ON"``, `jit_level` is not ``"O2"`` and
|
|
216
|
+
`memory_optimize_level` is ``O0`` in `mindspore.set_context()`. Less device memory is needed when device is
|
|
217
|
+
specified as ``"CPU"``.
|
|
218
|
+
|
|
219
|
+
Examples:
|
|
220
|
+
>>> import numpy as np
|
|
221
|
+
>>> import mindspore
|
|
222
|
+
>>> from mindspore import Parameter, Tensor, ops, nn
|
|
223
|
+
>>>
|
|
224
|
+
>>> class Net(nn.Cell):
|
|
225
|
+
... def __init__(self):
|
|
226
|
+
... super(Net, self).__init__()
|
|
227
|
+
... self.matmul = ops.MatMul()
|
|
228
|
+
... self.weight = Parameter(Tensor(np.ones((1, 2)), mindspore.float32), name="w", requires_grad=True)
|
|
229
|
+
...
|
|
230
|
+
... def construct(self, x):
|
|
231
|
+
... out = self.matmul(self.weight, x)
|
|
232
|
+
... return out
|
|
233
|
+
>>> net = Net()
|
|
234
|
+
>>> x = Tensor(np.ones((2, 1)), mindspore.float32)
|
|
235
|
+
>>> print(net(x))
|
|
236
|
+
[[2.]]
|
|
237
|
+
>>> net.weight.set_data(Tensor(np.zeros((1, 2)), mindspore.float32))
|
|
238
|
+
>>> print(net(x))
|
|
239
|
+
[[0.]]
|
|
240
|
+
"""
|
|
241
|
+
_base_type = {}
|
|
242
|
+
|
|
243
|
+
    def __new__(cls, default_input, *args, **kwargs):
        """Create the Parameter instance on top of a dynamically built tensor subclass."""
        # Remember whether the input is a Tensor carrying a deferred initializer.
        init_data_flag = bool(isinstance(default_input, Tensor) and default_input.has_init)
        # Refcount of `default_input` at this exact point; _get_parameter_new_args uses
        # it (rc == 4 heuristic) to decide whether a zero-copy wrap is safe.  Do not
        # add references to `default_input` before this line.
        rc = sys.getrefcount(default_input)
        input_class, *class_init_args = Parameter._get_parameter_new_args(default_input, rc)
        # Build (and cache) the subclass combining Parameter with the concrete class.
        new_type = Parameter._get_base_class(input_class)
        obj = input_class.__new__(new_type)
        input_class.__init__(obj, *class_init_args)
        # it's better to make the Initializer a kind of tensor.
        obj.init_mode = None
        obj.is_default_input_init = init_data_flag
        obj.from_ckpt = False
        if obj.has_init:
            # Keep the un-materialized source so init_data() can use it later.
            obj.init_mode = default_input
        else:
            # Data already materialized: optionally offload it to disk now.
            _offload_if_config(obj)
        return obj
|
|
259
|
+
|
|
260
|
+
def __reduce_ex__(self, _):
|
|
261
|
+
data = self
|
|
262
|
+
if self.init_mode is not None:
|
|
263
|
+
data = self.init_mode
|
|
264
|
+
else:
|
|
265
|
+
# cast to break deep infinite loop while deepcopy
|
|
266
|
+
data = Tensor(self)
|
|
267
|
+
return (
|
|
268
|
+
Parameter, (data, self.name, self.requires_grad, self.layerwise_parallel))
|
|
269
|
+
|
|
270
|
+
def __init__(self, default_input, name=None, requires_grad=True, layerwise_parallel=False, parallel_optimizer=True,
|
|
271
|
+
storage_format="", device=None):
|
|
272
|
+
self.param_info = ParamInfo()
|
|
273
|
+
self.init_in_server = False
|
|
274
|
+
self.name = name
|
|
275
|
+
self.requires_grad = requires_grad
|
|
276
|
+
self.layerwise_parallel = layerwise_parallel
|
|
277
|
+
self.parallel_optimizer = parallel_optimizer
|
|
278
|
+
# this flag for tensor copy data.
|
|
279
|
+
self.init_flag = False
|
|
280
|
+
# this flag is for ge variable copy data.
|
|
281
|
+
self.is_init = False
|
|
282
|
+
self._inited_param = None
|
|
283
|
+
self._sliced = False
|
|
284
|
+
self.is_param_ps = False
|
|
285
|
+
self.push_weight_to_server = False
|
|
286
|
+
self.pull_weight_from_server = False
|
|
287
|
+
self.requires_aggr = True
|
|
288
|
+
self._cast_type = None
|
|
289
|
+
self._unique = False
|
|
290
|
+
self.is_in_parallel = _is_in_auto_parallel_mode()
|
|
291
|
+
self.is_in_shard = False
|
|
292
|
+
self._pipeline_stage_list = []
|
|
293
|
+
self.slice_num = 1
|
|
294
|
+
self.from_ckpt = False
|
|
295
|
+
if -1 in self.shape:
|
|
296
|
+
raise ValueError(f"All shape elements of the Parameter must be positive. But got None.")
|
|
297
|
+
if isinstance(default_input, (Tensor_, Tensor)):
|
|
298
|
+
# At embedding cache scenes, we need limit the size of memory for parameter.
|
|
299
|
+
# And save out range data to persistent storage to support TB-Level size parameter.
|
|
300
|
+
slice_num_of_persistent_data = get_slice_num(default_input.dtype, default_input.shape)
|
|
301
|
+
if slice_num_of_persistent_data > 1:
|
|
302
|
+
data_shape = list(default_input.shape)
|
|
303
|
+
slice_first_dim = math.ceil(data_shape[0] / slice_num_of_persistent_data)
|
|
304
|
+
data_shape[0] = slice_first_dim
|
|
305
|
+
self.param_info.use_persistent_storage = True
|
|
306
|
+
self.param_info.origin_shape = default_input.shape
|
|
307
|
+
self.slice_num = slice_num_of_persistent_data
|
|
308
|
+
Tensor_.__init__(self, default_input.dtype, tuple(data_shape))
|
|
309
|
+
else:
|
|
310
|
+
Tensor_.__init__(self, default_input.dtype, default_input.shape)
|
|
311
|
+
|
|
312
|
+
elif isinstance(default_input, int):
|
|
313
|
+
Tensor_.__init__(self, mstype.int64, ())
|
|
314
|
+
elif isinstance(default_input, float):
|
|
315
|
+
Tensor_.__init__(self, mstype.float32, ())
|
|
316
|
+
elif isinstance(default_input, (np.ndarray, list)):
|
|
317
|
+
Tensor_.__init__(self, default_input)
|
|
318
|
+
else:
|
|
319
|
+
raise TypeError(f"The type of the argument 'default_input' must be in ['Tensor', 'int', 'float',"
|
|
320
|
+
f" 'numpy.ndarray', 'list']. But got type {type(default_input)}.")
|
|
321
|
+
self.param_info.parameter_shape = self.shape
|
|
322
|
+
self.param_info.storage_format = storage_format
|
|
323
|
+
if device is not None:
|
|
324
|
+
if device != "CPU":
|
|
325
|
+
raise ValueError(f"Only 'CPU' is supported for device, but got ${device}.")
|
|
326
|
+
self._set_user_data("parameter_device", device)
|
|
327
|
+
|
|
328
|
+
import mindspore.ops.operations.other_ops as other_ops
|
|
329
|
+
self.load = other_ops.Load()
|
|
330
|
+
|
|
331
|
+
def __deepcopy__(self, memodict):
|
|
332
|
+
new_obj = Parameter(self)
|
|
333
|
+
new_obj.name = self.name
|
|
334
|
+
new_obj._inited_param = self._inited_param
|
|
335
|
+
return new_obj
|
|
336
|
+
|
|
337
|
+
def __str__(self):
|
|
338
|
+
return f'Parameter (name={self.name}, shape={self.shape}, dtype={self.dtype}, ' \
|
|
339
|
+
f'requires_grad={self.requires_grad})'
|
|
340
|
+
|
|
341
|
+
def __repr__(self):
|
|
342
|
+
return self.__str__()
|
|
343
|
+
|
|
344
|
+
    def __parameter__(self):
        """For parse check."""
        # Marker method: its presence tags this object as a Parameter for the
        # front-end parser; intentionally has no body.
|
|
346
|
+
|
|
347
|
+
@staticmethod
|
|
348
|
+
def _get_base_class(input_class):
|
|
349
|
+
input_class_name = Parameter.__name__
|
|
350
|
+
if input_class_name in Parameter._base_type:
|
|
351
|
+
new_type = Parameter._base_type.get(input_class_name)
|
|
352
|
+
else:
|
|
353
|
+
new_type = type(input_class_name, (Parameter, input_class), {})
|
|
354
|
+
Parameter._base_type[input_class_name] = new_type
|
|
355
|
+
return new_type
|
|
356
|
+
|
|
357
|
+
    @staticmethod
    def _get_parameter_new_args(data, rc):
        """Choose the concrete tensor class and constructor args for Parameter.__new__.

        Args:
            data: The `default_input` given to the Parameter constructor.
            rc (int): Reference count of `data` measured by the caller via
                sys.getrefcount; decides whether a zero-copy wrap is safe.

        Returns:
            tuple: (input_class, *constructor_args) consumed by Parameter.__new__.

        Raises:
            ValueError: If `data` is a bool.
        """
        if isinstance(data, bool):
            raise ValueError('Parameter data can not be `bool`')
        if isinstance(data, Tensor):
            if not data.has_init:
                if rc == 4:
                    # when ref count is 4, means the input data is not referenced
                    # in other place, so we can make a Tensor without copy data.
                    return (Tensor, data)
                # make a copy of Tensor to init the parameter.
                if data.dtype == mstype.qint4x2:
                    # qint4x2 needs its dtype passed explicitly when rebuilt from numpy.
                    return (Tensor, data.asnumpy(), mstype.qint4x2)
                return (Tensor, data.asnumpy())

            # Deferred init: scheduler/pserver-with-cache roles and (semi-)auto or
            # predict parallel mode keep the initializer instead of materializing data.
            not_init_data = _is_role_sched() or (_is_role_pserver() and _cache_enable()
                                                 ) or _is_in_auto_parallel_mode() or _is_parallel_mode()
            if not_init_data:
                # do not init data while in auto parallel.
                return (Tensor, None, data.dtype, get_slice_shape(data.dtype, data.shape), data.init)
            return (Tensor, data.init_data())
        if isinstance(data, int):
            return (Tensor, data, mstype.int32)
        if isinstance(data, float):
            return (Tensor, data, mstype.float32)
        return (Tensor, data)
|
|
384
|
+
|
|
385
|
+
def set_param_ps(self, init_in_server=False):
|
|
386
|
+
"""
|
|
387
|
+
Set whether the trainable parameter is updated by parameter server and whether the
|
|
388
|
+
trainable parameter is initialized on server.
|
|
389
|
+
|
|
390
|
+
Note:
|
|
391
|
+
It only works when a running task is in the parameter server mode.
|
|
392
|
+
It is supported only in graph mode.
|
|
393
|
+
|
|
394
|
+
Args:
|
|
395
|
+
init_in_server (bool): Whether trainable parameter updated by parameter server is
|
|
396
|
+
initialized on server. Default: ``False``.
|
|
397
|
+
|
|
398
|
+
Tutorial Examples:
|
|
399
|
+
- `Parameter Server Mode
|
|
400
|
+
<https://www.mindspore.cn/docs/en/master/model_train/parallel/parameter_server_training.html>`_
|
|
401
|
+
"""
|
|
402
|
+
if not _is_ps_mode() or not (_is_role_worker() or _is_role_pserver() or _is_role_sched()):
|
|
403
|
+
raise RuntimeError("Must complete following two steps before calling set_param_ps: \n"
|
|
404
|
+
"1. context.set_ps_context(enable_ps=True) \n"
|
|
405
|
+
"2. export MS_ROLE environment variable \n"
|
|
406
|
+
"Please refer to the official website for detailed usage.")
|
|
407
|
+
|
|
408
|
+
if context.get_context("mode") == context.PYNATIVE_MODE:
|
|
409
|
+
raise RuntimeError("Parameter server training is not supported in pynative mode currently."
|
|
410
|
+
"Please switch to graph mode and retry.")
|
|
411
|
+
self.is_param_ps = True
|
|
412
|
+
self.init_in_server = init_in_server
|
|
413
|
+
self.param_info.init_in_server = init_in_server
|
|
414
|
+
|
|
415
|
+
def copy(self):
|
|
416
|
+
"""
|
|
417
|
+
Copy the parameter.
|
|
418
|
+
|
|
419
|
+
Returns:
|
|
420
|
+
Parameter, a new parameter.
|
|
421
|
+
|
|
422
|
+
Examples:
|
|
423
|
+
>>> from mindspore import Tensor, Parameter
|
|
424
|
+
>>> import numpy as np
|
|
425
|
+
>>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
|
|
426
|
+
>>> y = x.copy()
|
|
427
|
+
"""
|
|
428
|
+
return self.clone(init='same')
|
|
429
|
+
|
|
430
|
+
@deprecated("1.8", "set_param_fl")
|
|
431
|
+
def set_param_fl(self, push_to_server=False, pull_from_server=False, requires_aggr=True):
|
|
432
|
+
if push_to_server:
|
|
433
|
+
self.push_weight_to_server = True
|
|
434
|
+
if pull_from_server:
|
|
435
|
+
self.pull_weight_from_server = True
|
|
436
|
+
if not requires_aggr:
|
|
437
|
+
self.requires_aggr = False
|
|
438
|
+
self.param_info.requires_aggr = False
|
|
439
|
+
|
|
440
|
+
    @property
    def inited_param(self):
        """
        Get the new parameter after call the init_data.

        Default is ``None``. If `self` is a Parameter without data, the initialized
        Parameter with data will be recorded here after calling `init_data`.

        Examples:
            >>> from mindspore import Tensor, Parameter
            >>> import numpy as np
            >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
            >>> x.inited_param
        """
        return self._inited_param
|
|
455
|
+
|
|
456
|
+
    @property
    def param_info(self):
        # Python-side handle of the C++ ParamInfo; kept in sync by the setter below.
        return self._param_info

    @param_info.setter
    def param_info(self, param_info_):
        # Back-reference so the ParamInfo object can reach its owning Parameter.
        param_info_.obj = self
        self._param_info = param_info_
        # Propagate to the underlying Tensor_ (C++) object as well.
        Tensor_.param_info.fset(self, param_info_)
|
|
465
|
+
|
|
466
|
+
@property
|
|
467
|
+
def name(self):
|
|
468
|
+
"""
|
|
469
|
+
Get the name of the parameter.
|
|
470
|
+
|
|
471
|
+
Examples:
|
|
472
|
+
>>> from mindspore import Tensor, Parameter
|
|
473
|
+
>>> import numpy as np
|
|
474
|
+
>>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
|
|
475
|
+
>>> x.name = "param1"
|
|
476
|
+
>>> x.name
|
|
477
|
+
'param1'
|
|
478
|
+
"""
|
|
479
|
+
return self.param_info.name
|
|
480
|
+
|
|
481
|
+
@name.setter
|
|
482
|
+
def name(self, name_):
|
|
483
|
+
"""
|
|
484
|
+
Define a name for the parameter.
|
|
485
|
+
|
|
486
|
+
Args:
|
|
487
|
+
name_ (`str` or `None`): The name of the parameter. When the parameter is None or an empty string,
|
|
488
|
+
the default value `PARAMETER_NAME_DEFAULT` is used.
|
|
489
|
+
"""
|
|
490
|
+
if name_ is None:
|
|
491
|
+
name_ = PARAMETER_NAME_DEFAULT
|
|
492
|
+
elif isinstance(name_, str):
|
|
493
|
+
name_ = name_.strip()
|
|
494
|
+
if name_ == '':
|
|
495
|
+
name_ = PARAMETER_NAME_DEFAULT
|
|
496
|
+
if len(name_) > PARAMETER_NAME_PREFIX_MAX_LEN:
|
|
497
|
+
raise ValueError("The length of the '{}' name should be less than {}.".
|
|
498
|
+
format(name_, PARAMETER_NAME_PREFIX_MAX_LEN))
|
|
499
|
+
else:
|
|
500
|
+
raise ValueError("The type of the Parameter's name should be 'string' or 'None', "
|
|
501
|
+
"but got {}.".format(type(name_)))
|
|
502
|
+
|
|
503
|
+
if _is_role_worker() and self.cache_enable:
|
|
504
|
+
_reinsert_hash_table_size(name_, self.param_info.name)
|
|
505
|
+
self.param_info.name = name_
|
|
506
|
+
|
|
507
|
+
@property
def sliced(self):
    """
    bool: slice status of the parameter.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.sliced = True
        >>> x.sliced
        True
    """
    return self._sliced

@sliced.setter
def sliced(self, sliced_):
    """Record the slice status flag."""
    self._sliced = sliced_
|
|
525
|
+
|
|
526
|
+
@property
def comm_fusion(self):
    """
    int: fusion id for the communication operators tied to this parameter.

    In `AUTO_PARALLEL` and `SEMI_AUTO_PARALLEL` mode, communication operators
    inserted for parameter/gradient aggregation carry this id; the value is
    never negative and ``0`` leaves the operators unfused.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.comm_fusion = 3
        >>> x.comm_fusion
        3
    """
    info = self.param_info
    return info.comm_fusion
|
|
545
|
+
|
|
546
|
+
@comm_fusion.setter
def comm_fusion(self, comm_fusion_):
    """Set the fusion id; must be a non-negative int.

    Raises:
        RuntimeError: in PyNative mode combined with (semi-)auto-parallel,
            where the feature is rejected.
    """
    # NOTE(review): presumably only supported under graph compilation — the
    # PyNative + (semi-)auto-parallel combination is explicitly rejected.
    if context.get_context("mode") == context.PYNATIVE_MODE and "auto_parallel" in _get_parallel_mode():
        raise RuntimeError(
            "`comm_fusion` does not support PYNATIVE_MODE in AUTO_PARALLEL and SEMI_AUTO_PARALLEL mode.")
    Validator.check_non_negative_int(comm_fusion_)
    self.param_info.comm_fusion = comm_fusion_
|
|
553
|
+
|
|
554
|
+
@property
def parallel_optimizer_comm_recompute(self):
    """
    bool: recompute attribute for the :class:`mindspore.ops.AllGather`
    operators that the parallel optimizer inserts for this parameter.

    Note:
        - Only `Graph` mode is supported.
        - Prefer ``cell.recompute(parallel_optimizer_comm_recompute=...)``
          over setting this attribute directly.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.parallel_optimizer_comm_recompute = True
        >>> x.parallel_optimizer_comm_recompute
        True
    """
    info = self.param_info
    return info.parallel_optimizer_comm_recompute
|
|
578
|
+
|
|
579
|
+
@parallel_optimizer_comm_recompute.setter
def parallel_optimizer_comm_recompute(self, parallel_optimizer_comm_recompute_):
    """Validate and store the recompute flag for optimizer-parallel AllGather ops."""
    flag = parallel_optimizer_comm_recompute_
    Validator.check_bool(flag)
    self.param_info.parallel_optimizer_comm_recompute = flag
|
|
583
|
+
|
|
584
|
+
@property
def unique(self):
    """
    bool: whether the parameter is already unique.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.unique = True
        >>> x.unique
        True
    """
    return self._unique

@unique.setter
def unique(self, unique_):
    """Record the uniqueness flag."""
    self._unique = unique_
|
|
602
|
+
|
|
603
|
+
def clone(self, init='same'):
    """
    Clone the parameter.

    Args:
        init (Union[Tensor, str, numbers.Number]): Initialize the shape and dtype of the parameter.
            If `init` is a `Tensor` or `numbers.Number`, clone a new parameter with the same shape
            and dtype, and the data of the new parameter will be set according to `init`. If `init`
            is a `str`, the `init` should be the alias of the class inheriting from `Initializer`.
            For example, if `init` is ``'same'``, clone a new parameter with the same data, shape, and
            dtype. Default: ``'same'``.

    Returns:
        Parameter, a new parameter.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> y = x.clone()
    """
    x = copy(self)
    param_info_clone = self.param_info.clone()
    info = self.param_info
    # NOTE(review): the source's param_info accumulates a list of its clones,
    # presumably so later updates can reach them — confirm.
    if hasattr(info, "cloned_obj"):
        info.cloned_obj.append(x)
    else:
        info.cloned_obj = [x]
    # Reassign through the property so the Tensor_ layer sees the change.
    self.param_info = info
    param_info_clone.obj = x
    x.param_info = param_info_clone
    # The clone starts out uninitialized but inherits the lazy initializer.
    x.is_init = False
    x.init = self.init
    x.is_param_ps = self.is_param_ps
    x.init_in_server = self.init_in_server
    x.cache_enable = self.cache_enable
    if x.cache_enable:
        # Each cache-enabled parameter needs its own globally unique key.
        x.key = _get_unique_parameter_key()
    x.requires_aggr = self.requires_aggr
    if self.cache_shape:
        x.cache_shape = self.cache_shape
    if init != 'same':
        # When the source is sliced across devices (slice_num > 1),
        # re-initialize from the full original shape, not the local slice.
        shape = self.shape if self.slice_num == 1 else self.param_info.origin_shape
        dtype = self.dtype
        x.set_data(initializer(init, shape=shape, dtype=dtype))
    # Preserve an explicit device placement, if one was set on the source.
    device = self._get_user_data("parameter_device")
    if device is not None:
        x._set_user_data("parameter_device", device)
    return x
|
|
652
|
+
|
|
653
|
+
@property
def layerwise_parallel(self):
    """
    bool: layerwise-parallel status of the parameter.

    When ``True`` in `DATA_PARALLEL` and `HYBRID_PARALLEL` parallel mode,
    broadcast and gradients communication are not applied to this parameter.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.layerwise_parallel = True
        >>> x.layerwise_parallel
        True
    """
    info = self.param_info
    return info.layerwise_parallel

@layerwise_parallel.setter
def layerwise_parallel(self, value=True):
    """Set the layerwise-parallel flag; the value must be a bool."""
    if isinstance(value, bool):
        self.param_info.layerwise_parallel = value
    else:
        raise TypeError("The argument `layerwise_parallel` must be bool type.")
|
|
676
|
+
|
|
677
|
+
@property
def parallel_optimizer(self):
    """
    bool: optimizer-parallel status of the parameter.

    Filters the weight shard operation in `AUTO_PARALLEL` and
    `SEMI_AUTO_PARALLEL` mode; effective only when the parallel optimizer is
    enabled via `mindspore.set_auto_parallel_context()`.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.parallel_optimizer = True
        >>> x.parallel_optimizer
        True
    """
    info = self.param_info
    return info.parallel_optimizer

@parallel_optimizer.setter
def parallel_optimizer(self, value=True):
    """Set the optimizer-parallel flag; the value must be a bool."""
    if isinstance(value, bool):
        self.param_info.parallel_optimizer = value
    else:
        raise TypeError("The argument `parallel_optimizer` must be bool type.")
|
|
700
|
+
|
|
701
|
+
@property
def cache_enable(self):
    """
    bool: whether the parameter is cache enable.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.cache_enable=True
        >>> x.cache_enable
        True
    """
    info = self.param_info
    return info.cache_enable

@cache_enable.setter
def cache_enable(self, value=True):
    """Set the cache-enable flag; the value must be a bool."""
    if isinstance(value, bool):
        self.param_info.cache_enable = value
    else:
        raise TypeError("The argument `cache_enable` must be bool type.")
|
|
721
|
+
|
|
722
|
+
@property
def cache_shape(self):
    """
    Return the cache shape corresponding to the parameter if use cache.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.cache_enable=True
        >>> x.cache_shape=[1, 2]
        >>> x.cache_shape
        [1, 2]
    """
    info = self.param_info
    return info.cache_shape

@cache_shape.setter
def cache_shape(self, value):
    """Set the cache shape; the value must be a tuple or list."""
    if isinstance(value, (tuple, list)):
        self.param_info.cache_shape = value
    else:
        raise TypeError("The argument `cache_shape` must be tuple or list type.")
|
|
743
|
+
|
|
744
|
+
@property
def key(self):
    """
    int: the unique key of the parameter.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.key = 2
        >>> x.key
        2
    """
    info = self.param_info
    return info.key

@key.setter
def key(self, value=-1):
    """Set the parameter unique key."""
    if isinstance(value, int):
        self.param_info.key = value
    else:
        raise TypeError("The argument `key` must be int type.")
|
|
765
|
+
|
|
766
|
+
@property
def requires_grad(self):
    """
    bool: whether the parameter requires gradient.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x.requires_grad = True
        >>> x.requires_grad
        True
    """
    info = self.param_info
    return info.requires_grad

@requires_grad.setter
def requires_grad(self, value=True):
    """Set the requires-grad flag; the value must be a bool."""
    if not isinstance(value, bool):
        raise TypeError("The argument `requires_grad` must be bool type")
    # NOTE(review): wait_pipeline presumably flushes pending asynchronous
    # work on the tensor before the flag changes — confirm.
    Tensor_.wait_pipeline(self)
    self.param_info.requires_grad = value
|
|
787
|
+
|
|
788
|
+
@property
def data(self):
    """
    Parameter: this parameter object itself (the accessor is an identity).

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([[1, 2], [3, 4]], dtype=np.float32)), name="param")
        >>> x.data
        Parameter (name=param, shape=(2, 2), dtype=Float32, requires_grad=True)
    """
    return self
|
|
801
|
+
|
|
802
|
+
def value(self):
    """
    Return the value of the parameter object.

    The data is read through ``self.load`` with the universal monad
    ``monad.U``; presumably this orders the read with respect to side
    effects in graph mode (NOTE(review): confirm).

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([1, 2], dtype=np.float32)), name="param")
        >>> x_value = x.value()
        >>> print(x_value)
        [1. 2.]
    """
    loaded = self.load(self, monad.U)
    return loaded
|
|
815
|
+
|
|
816
|
+
def _update_tensor_data(self, data):
    """Replace this parameter's data with `data`.

    Tensor instances are updated in place via `assign_value`; otherwise a
    fresh `Parameter` carrying over the same `param_info` is returned.
    """
    if not isinstance(self, Tensor):
        replacement = Parameter(data, self.name, self.requires_grad)
        replacement.param_info = self.param_info
        return replacement
    # In-place path: clear any lazy-init state before assigning real data.
    self.init_flag = False
    self.init = None
    return self.assign_value(data)
|
|
825
|
+
|
|
826
|
+
@_LogActionOnce(logger=logger, key='add_pipeline_stage')
def add_pipeline_stage(self, stage):
    """
    Add a pipeline stage to the parameter.

    Args:
        stage(int): The pipeline stage to be added.

    Raise:
        TypeError: If `stage` is not a positive number or not int type.
    """
    logger.warning(f"This interface may be deleted in the future.")
    # NOTE(review): the guard accepts 0 although the message says "positive".
    if not isinstance(stage, int) or stage < 0:
        raise TypeError("`stage` must be a positive number of int type")
    self._pipeline_stage_list.append(stage)
|
|
841
|
+
|
|
842
|
+
def _raise_type_error(self, incoming):
|
|
843
|
+
raise TypeError(f"Incoming Parameter dtype can not be converted to current dtype implicitly. "
|
|
844
|
+
f"Current dtype is {self.dtype}, and incoming is {incoming}. "
|
|
845
|
+
f"Use .set_dtype(xxx) to change the dtype.")
|
|
846
|
+
|
|
847
|
+
@staticmethod
|
|
848
|
+
def _set_data_check_input_valid(current_shape, data_shape, current_tensor_is_init, incoming_tensor_is_init,
|
|
849
|
+
from_ckpt, slice_shape=False, slice_num=1):
|
|
850
|
+
if not from_ckpt and incoming_tensor_is_init and not current_tensor_is_init:
|
|
851
|
+
raise TypeError("The original tensor data is initialized, but the argument 'data' is not initialized."
|
|
852
|
+
"Please initialize 'data' before call this method.")
|
|
853
|
+
if tuple(current_shape) != tuple(data_shape):
|
|
854
|
+
# If Slice create Parameter shape can be change.
|
|
855
|
+
if not slice_shape and slice_num == 1:
|
|
856
|
+
raise ValueError(f"Can not change the shape of Parameter which has been initialized."
|
|
857
|
+
f" Current shape is {current_shape}, and incoming is {data_shape}.")
|
|
858
|
+
|
|
859
|
+
@staticmethod
def _from_tensor(tensor, *args, **kwargs):
    """Create a `Parameter` that data is shared from a `Tensor`."""
    if not isinstance(tensor, Tensor_):
        raise TypeError(f"The type of input must be Tensor, but got {type(tensor)}.")
    # Allocate the Parameter around the existing C++ tensor so the data
    # buffer is shared rather than copied.
    param = Tensor_.__new__(Parameter)
    Tensor_.__init__(param, tensor)
    # Mark the instance as fully materialized (no lazy initializer) before
    # running the regular Parameter initialization.
    param.init = None
    param.init_mode = None
    param.has_init = False
    param.is_default_input_init = False
    Parameter.__init__(param, tensor, *args, **kwargs)
    return param
|
|
872
|
+
|
|
873
|
+
@jit_forbidden_register
def set_data(self, data, slice_shape=False):
    """
    Set Parameter's data.

    Args:
        data (Union[Tensor, int, float]): New data.
        slice_shape (bool): If slice the parameter is set to ``True``, the shape consistency will not be checked.
            Default: ``False``. When `slice_shape` is ``True``, and the shapes are not consistent, a
            ValueError will be thrown.

    Returns:
        Parameter, the parameter after set data.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([[1, 2], [3, 4]], dtype=np.float32)), name="param")
        >>> x.set_data(Tensor(np.array([[6, 6], [6, 6]], dtype=np.float32)))
        Parameter (name=param, shape=(2, 2), dtype=Float32, requires_grad=True)
    """
    if not isinstance(data, (Tensor, int, float)):
        raise TypeError(f"Parameter data must be [`Tensor`, `int`, `float`] or a kind of `Tensor` "
                        f"(like `Tensor`). But with type {type(data)}.")
    if isinstance(data, (int, float)):
        # Refuse implicit float -> int narrowing; otherwise wrap the scalar
        # into a Tensor of this parameter's dtype.
        if self.dtype in mstype.int_type and isinstance(data, float):
            self._raise_type_error(mstype.float_)
        data = Tensor(data, self.dtype)
    # both not init.
    incoming_tensor_is_init = isinstance(data, Tensor) and not data.has_init
    current_tensor_is_init = isinstance(self, Tensor) and not self.has_init
    Parameter._set_data_check_input_valid(self.shape, data.shape, current_tensor_is_init, incoming_tensor_is_init,
                                          self.from_ckpt, slice_shape, self.slice_num)
    if self.dtype != data.dtype:
        # Implicit conversion is one-directional: reject if the incoming
        # dtype ranks above the current one in the conversion sequence.
        if mstype.implicit_conversion_seq.get(self.dtype) < mstype.implicit_conversion_seq.get(data.dtype):
            self._raise_type_error(data.dtype)
        else:
            from mindspore.ops import functional as F
            if isinstance(data, Tensor) and data.init is not None:
                # Materialize lazily-initialized data before casting.
                data.init_data()
            data = F.cast(data, self.dtype)
    if isinstance(data, Tensor) and data.has_init:
        # The parameter has been initialized, directly update by the data
        if current_tensor_is_init:
            self._update_tensor_data(data.init_data())
        else:
            # also update the related inited parameter data
            if self.inited_param is not None:
                self.inited_param.set_data(data)
            self.init_mode = data
    elif incoming_tensor_is_init or current_tensor_is_init:
        self._update_tensor_data(data)
    self.sliced = slice_shape
    return self
|
|
927
|
+
|
|
928
|
+
@staticmethod
|
|
929
|
+
def _get_init_data_args(layout=None):
|
|
930
|
+
"""Get the data layout args."""
|
|
931
|
+
init_data_args = ()
|
|
932
|
+
if layout:
|
|
933
|
+
if not isinstance(layout, tuple):
|
|
934
|
+
raise TypeError("The argument 'layout' should be tuple, but got {}.".format(type(layout)))
|
|
935
|
+
if len(layout) < 6:
|
|
936
|
+
raise ValueError("The length of 'layout' must be larger than 5, but got {}.".format(len(layout)))
|
|
937
|
+
slice_index = int(_get_slice_index(layout[0], layout[1], layout[5]))
|
|
938
|
+
init_data_args += (slice_index, layout[2], layout[5])
|
|
939
|
+
return init_data_args
|
|
940
|
+
|
|
941
|
+
|
|
942
|
+
def init_data(self, layout=None, set_sliced=False):
    """
    Initialize the parameter's data.

    Args:
        layout (Union[None, tuple]): The parameter's layout info.
            layout [dev_mat, tensor_map, slice_shape, filed_size, uniform_split, opt_shard_group].
            Default: ``None``.
            It's not None only in 'SEMI_AUTO_PARALLEL' or 'AUTO_PARALLEL' mode.

            - dev_mat (list(int)): The parameter's device matrix.
            - tensor_map (list(int)): The parameter's tensor map.
            - slice_shape (list(int)): The parameter's slice shape.
            - filed_size (int): The parameter's filed size.
            - uniform_split (bool): Whether the parameter is split evenly.
            - opt_shard_group (str): The group of the parameter while running optimizer parallel.

        set_sliced (bool): True if the parameter is set sliced after initializing the data.
            Default: ``False``.

    Returns:
        Parameter, the `Parameter` after initializing data. If current `Parameter` was already initialized before,
        returns the same initialized `Parameter`.

    Raises:
        RuntimeError: If it is from Initializer, and parallel mode has changed after the Initializer created.
        ValueError: If the length of the layout is less than 6.
        TypeError: If `layout` is not tuple.

    Examples:
        >>> from mindspore import Tensor, Parameter
        >>> import numpy as np
        >>> x = Parameter(Tensor(np.array([[1, 2], [3, 4]], dtype=np.float32)), name="param")
        >>> x.init_data()
    """
    if self.is_default_input_init and self.is_in_parallel != _is_in_auto_parallel_mode():
        raise RuntimeError("Must set or change parallel mode before any initializer Tensor created.")
    if self.init_mode is None:
        # No lazy initializer pending; the data is already concrete.
        return self
    if self.inited_param is not None:
        # Already materialized once before; return the cached result.
        return self.inited_param

    init_data_args = self._get_init_data_args(layout)

    if _is_role_sched():
        # The scheduler role never materializes parameter data.
        return self
    if self.init_in_server and self.is_param_ps and isinstance(self.init_mode, Tensor) and \
            self.init_mode.init is not None and _is_role_worker():
        if self.cache_enable:
            data = self.init_mode.init_data(*init_data_args)
        else:
            # NOTE(review): when the server owns initialization, the worker
            # seems to materialize only a minimal placeholder — confirm.
            data = self.init_mode.init_data(0, [1])
    else:
        data = self.init_mode.init_data(*init_data_args)

    obj = self._update_tensor_data(data)
    if id(obj) != id(self):
        # _update_tensor_data produced a fresh Parameter; remember it so
        # later init_data calls can return the same object.
        self._inited_param = obj
    obj.init_mode = None
    obj.sliced = set_sliced
    _offload_if_config(obj)
    return obj
|
|
1004
|
+
|
|
1005
|
+
|
|
1006
|
+
class ParameterTuple(tuple):
|
|
1007
|
+
"""
|
|
1008
|
+
Inherited from tuple, ParameterTuple is used to save multiple parameter.
|
|
1009
|
+
|
|
1010
|
+
Note:
|
|
1011
|
+
It is used to store the parameters of the network into the parameter tuple collection.
|
|
1012
|
+
|
|
1013
|
+
Examples:
|
|
1014
|
+
>>> from mindspore import Tensor, Parameter, ParameterTuple
|
|
1015
|
+
>>> import numpy as np
|
|
1016
|
+
>>> x = Parameter(Tensor(np.array([[1, 2], [3, 4]], dtype=np.float32)), name="param")
|
|
1017
|
+
>>> y = Parameter(Tensor(np.array([[5, 6], [7, 8]], dtype=np.float32)), name="param1")
|
|
1018
|
+
>>> pt = ParameterTuple([x, y])
|
|
1019
|
+
>>> pt1 = pt.clone(prefix="new")
|
|
1020
|
+
"""
|
|
1021
|
+
|
|
1022
|
+
def __new__(cls, iterable):
    """Create instance object of ParameterTuple."""
    data = tuple(iterable)
    # Reject non-Parameter elements and distinct parameters sharing a name.
    ids = set()
    names = set()
    for x in data:
        if not isinstance(x, Parameter):
            # NOTE(review): the message reports the type of the whole input
            # container, not of the offending element — confirm intended.
            raise TypeError(f"For ParameterTuple initialization, "
                            f"ParameterTuple input should be 'Parameter' collection, "
                            f"but got a {type(iterable)}. ")
        if id(x) not in ids:
            # The same Parameter object may legitimately appear twice; only
            # *distinct* objects with the same name are rejected.
            if x.name in names:
                raise ValueError("The value {} , its name '{}' already exists. "
                                 "Please set a unique name for the parameter.".format(x, x.name))
            names.add(x.name)
            ids.add(id(x))
    return tuple.__new__(ParameterTuple, tuple(data))
|
|
1039
|
+
|
|
1040
|
+
def clone(self, prefix, init='same'):
    """
    Clone the parameters in ParameterTuple element-wisely to generate a new ParameterTuple.

    Args:
        prefix (str): Namespace of parameter, the prefix string will be added to the names of parameters
            in parametertuple.

        init (Union[Tensor, str, numbers.Number]): Clone the shape and dtype of Parameters in ParameterTuple and
            set data according to `init`. Default: ``'same'``.

            - If `init` is a `Tensor` , set the new Parameter data to the input Tensor.
            - If `init` is `numbers.Number` , set the new Parameter data to the input number.
            - If `init` is a `str`, data will be set according to the initialization method of the same name in
              the `Initializer`. When it is ``'same'``, the new Parameter will have the same value
              with the original Parameter.

    Returns:
        Tuple, the new Parameter tuple.

    Tutorial Examples:
        - `Tensor and Parameter - Parameter Tuple
          <https://mindspore.cn/docs/en/master/model_train/model_building/tensor_and_parameter.html
          #parameter-tuple>`_
    """
    Validator.check_str_by_regular(prefix)
    new = []
    for x in self:
        x1 = x.clone(init)
        # Namespacing: every clone's name is prefixed, e.g. "new.param".
        x1.name = prefix + "." + x1.name
        new.append(x1)

        if not x1.cache_enable:
            continue

        if _is_role_worker():
            # NOTE(review): cache-enabled parameters appear to keep
            # server-side state keyed by name/key; register the clone's
            # mapping and its accumulated init info on the worker — confirm.
            _clone_hash_table(x.name, x.key, x1.name, x1.key)
            _insert_accumu_init_info(x1.name, init_to_value(init))
    return ParameterTuple(new)
|
|
1079
|
+
|
|
1080
|
+
def __parameter_tuple__(self):
|
|
1081
|
+
"""For parse check."""
|