mindspore-2.4.0-cp310-cp310-macosx_10_15_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
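For reference, a listing like the one below can be cross-checked locally, since a wheel is an ordinary zip archive. This is a minimal sketch, not part of the diff itself; it assumes the wheel named above has already been downloaded into the current directory.

```python
# Minimal sketch: enumerate the files inside a downloaded wheel to compare
# against the manifest below. Assumes the wheel is present locally.
import zipfile

WHEEL = "mindspore-2.4.0-cp310-cp310-macosx_10_15_x86_64.whl"  # assumed local path

with zipfile.ZipFile(WHEEL) as wheel:
    for info in wheel.infolist():
        # Each zip entry corresponds to one file shipped in the package.
        print(f"{info.filename}  ({info.file_size} bytes)")
```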
- mindspore/.commit_id +1 -0
- mindspore/__init__.py +53 -0
- mindspore/_c_dataengine.cpython-310-darwin.so +0 -0
- mindspore/_c_expression.cpython-310-darwin.so +0 -0
- mindspore/_c_mindrecord.cpython-310-darwin.so +0 -0
- mindspore/_check_jit_forbidden_api.py +106 -0
- mindspore/_checkparam.py +1419 -0
- mindspore/_extends/__init__.py +23 -0
- mindspore/_extends/builtin_operations.py +224 -0
- mindspore/_extends/graph_kernel/__init__.py +17 -0
- mindspore/_extends/graph_kernel/model/__init__.py +19 -0
- mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
- mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
- mindspore/_extends/graph_kernel/model/model.py +553 -0
- mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
- mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
- mindspore/_extends/graph_kernel/splitter.py +140 -0
- mindspore/_extends/graph_kernel/utils.py +28 -0
- mindspore/_extends/parallel_compile/__init__.py +19 -0
- mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
- mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
- mindspore/_extends/parse/__init__.py +49 -0
- mindspore/_extends/parse/compile_config.py +299 -0
- mindspore/_extends/parse/namespace.py +136 -0
- mindspore/_extends/parse/parser.py +1448 -0
- mindspore/_extends/parse/resources.py +213 -0
- mindspore/_extends/parse/standard_method.py +4475 -0
- mindspore/_extends/parse/trope.py +97 -0
- mindspore/_extends/pijit/__init__.py +23 -0
- mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
- mindspore/_extends/remote/__init__.py +19 -0
- mindspore/_extends/remote/kernel_build_server.py +199 -0
- mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
- mindspore/_extends/utils.py +68 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +433 -0
- mindspore/boost/__init__.py +42 -0
- mindspore/boost/adasum.py +319 -0
- mindspore/boost/base.py +535 -0
- mindspore/boost/boost.py +400 -0
- mindspore/boost/boost_cell_wrapper.py +790 -0
- mindspore/boost/dim_reduce.py +323 -0
- mindspore/boost/grad_accumulation.py +79 -0
- mindspore/boost/grad_freeze.py +382 -0
- mindspore/boost/group_loss_scale_manager.py +166 -0
- mindspore/boost/less_batch_normalization.py +174 -0
- mindspore/common/__init__.py +86 -0
- mindspore/common/_auto_dynamic.py +68 -0
- mindspore/common/_decorator.py +50 -0
- mindspore/common/_jit_fallback_utils.py +110 -0
- mindspore/common/_monad.py +25 -0
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_adapter.py +74 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +46 -0
- mindspore/common/_stub_tensor.py +210 -0
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/_utils.py +122 -0
- mindspore/common/api.py +2064 -0
- mindspore/common/auto_dynamic_shape.py +507 -0
- mindspore/common/dtype.py +422 -0
- mindspore/common/dump.py +130 -0
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +254 -0
- mindspore/common/hook_handle.py +143 -0
- mindspore/common/initializer.py +880 -0
- mindspore/common/jit_config.py +98 -0
- mindspore/common/lazy_inline.py +240 -0
- mindspore/common/mindir_util.py +111 -0
- mindspore/common/mutable.py +234 -0
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +1081 -0
- mindspore/common/recompute.py +292 -0
- mindspore/common/seed.py +260 -0
- mindspore/common/sparse_tensor.py +1175 -0
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +5039 -0
- mindspore/communication/__init__.py +37 -0
- mindspore/communication/_comm_helper.py +501 -0
- mindspore/communication/_hccl_management.py +297 -0
- mindspore/communication/comm_func.py +1395 -0
- mindspore/communication/management.py +673 -0
- mindspore/config/op_info.config +533 -0
- mindspore/context.py +2077 -0
- mindspore/dataset/__init__.py +90 -0
- mindspore/dataset/audio/__init__.py +61 -0
- mindspore/dataset/audio/transforms.py +3690 -0
- mindspore/dataset/audio/utils.py +386 -0
- mindspore/dataset/audio/validators.py +1172 -0
- mindspore/dataset/callback/__init__.py +20 -0
- mindspore/dataset/callback/ds_callback.py +368 -0
- mindspore/dataset/callback/validators.py +32 -0
- mindspore/dataset/core/__init__.py +13 -0
- mindspore/dataset/core/config.py +1095 -0
- mindspore/dataset/core/datatypes.py +101 -0
- mindspore/dataset/core/py_util_helpers.py +65 -0
- mindspore/dataset/core/validator_helpers.py +781 -0
- mindspore/dataset/debug/__init__.py +21 -0
- mindspore/dataset/debug/debug_hook.py +97 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +124 -0
- mindspore/dataset/engine/cache_admin.py +47 -0
- mindspore/dataset/engine/cache_client.py +129 -0
- mindspore/dataset/engine/datasets.py +4582 -0
- mindspore/dataset/engine/datasets_audio.py +911 -0
- mindspore/dataset/engine/datasets_standard_format.py +543 -0
- mindspore/dataset/engine/datasets_text.py +2161 -0
- mindspore/dataset/engine/datasets_user_defined.py +1184 -0
- mindspore/dataset/engine/datasets_vision.py +4816 -0
- mindspore/dataset/engine/iterators.py +371 -0
- mindspore/dataset/engine/obs/__init__.py +23 -0
- mindspore/dataset/engine/obs/config_loader.py +68 -0
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
- mindspore/dataset/engine/obs/util.py +482 -0
- mindspore/dataset/engine/offload.py +596 -0
- mindspore/dataset/engine/queue.py +304 -0
- mindspore/dataset/engine/samplers.py +895 -0
- mindspore/dataset/engine/serializer_deserializer.py +159 -0
- mindspore/dataset/engine/validators.py +2895 -0
- mindspore/dataset/text/__init__.py +51 -0
- mindspore/dataset/text/transforms.py +1703 -0
- mindspore/dataset/text/utils.py +715 -0
- mindspore/dataset/text/validators.py +642 -0
- mindspore/dataset/transforms/__init__.py +45 -0
- mindspore/dataset/transforms/c_transforms.py +638 -0
- mindspore/dataset/transforms/py_transforms.py +393 -0
- mindspore/dataset/transforms/py_transforms_util.py +255 -0
- mindspore/dataset/transforms/transforms.py +1260 -0
- mindspore/dataset/transforms/validators.py +410 -0
- mindspore/dataset/utils/__init__.py +19 -0
- mindspore/dataset/utils/browse_dataset.py +190 -0
- mindspore/dataset/utils/line_reader.py +126 -0
- mindspore/dataset/vision/__init__.py +65 -0
- mindspore/dataset/vision/c_transforms.py +2641 -0
- mindspore/dataset/vision/py_transforms.py +2120 -0
- mindspore/dataset/vision/py_transforms_util.py +1660 -0
- mindspore/dataset/vision/transforms.py +7295 -0
- mindspore/dataset/vision/utils.py +863 -0
- mindspore/dataset/vision/validators.py +1483 -0
- mindspore/default_config.py +2 -0
- mindspore/experimental/__init__.py +20 -0
- mindspore/experimental/es/__init__.py +22 -0
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/experimental/es/embedding_service_layer.py +581 -0
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/experimental/llm_boost/atb/__init__.py +23 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/map_parameter.py +309 -0
- mindspore/experimental/optim/__init__.py +40 -0
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +193 -0
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +290 -0
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +1371 -0
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +262 -0
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +156 -0
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +357 -0
- mindspore/include/OWNERS +7 -0
- mindspore/include/api/allocator.h +97 -0
- mindspore/include/api/callback/callback.h +93 -0
- mindspore/include/api/callback/ckpt_saver.h +41 -0
- mindspore/include/api/callback/loss_monitor.h +33 -0
- mindspore/include/api/callback/lr_scheduler.h +51 -0
- mindspore/include/api/callback/time_monitor.h +34 -0
- mindspore/include/api/callback/train_accuracy.h +37 -0
- mindspore/include/api/cell.h +90 -0
- mindspore/include/api/cfg.h +82 -0
- mindspore/include/api/context.h +602 -0
- mindspore/include/api/data_type.h +47 -0
- mindspore/include/api/delegate.h +178 -0
- mindspore/include/api/delegate_api.h +75 -0
- mindspore/include/api/dual_abi_helper.h +208 -0
- mindspore/include/api/format.h +28 -0
- mindspore/include/api/graph.h +46 -0
- mindspore/include/api/kernel.h +58 -0
- mindspore/include/api/kernel_api.h +168 -0
- mindspore/include/api/metrics/accuracy.h +36 -0
- mindspore/include/api/metrics/metrics.h +41 -0
- mindspore/include/api/model.h +438 -0
- mindspore/include/api/model_group.h +91 -0
- mindspore/include/api/model_parallel_runner.h +168 -0
- mindspore/include/api/serialization.h +185 -0
- mindspore/include/api/status.h +192 -0
- mindspore/include/api/types.h +431 -0
- mindspore/include/api/visible.h +41 -0
- mindspore/include/c_api/context_c.h +179 -0
- mindspore/include/c_api/data_type_c.h +52 -0
- mindspore/include/c_api/format_c.h +46 -0
- mindspore/include/c_api/model_c.h +347 -0
- mindspore/include/c_api/status_c.h +79 -0
- mindspore/include/c_api/tensor_c.h +146 -0
- mindspore/include/c_api/types_c.h +67 -0
- mindspore/include/dataset/config.h +163 -0
- mindspore/include/dataset/constants.h +363 -0
- mindspore/include/dataset/execute.h +196 -0
- mindspore/include/dataset/text.h +1092 -0
- mindspore/include/dataset/transforms.h +638 -0
- mindspore/include/dataset/vision.h +2129 -0
- mindspore/include/dataset/vision_ascend.h +206 -0
- mindspore/include/dataset/vision_lite.h +625 -0
- mindspore/lib/libavcodec.59.dylib +0 -0
- mindspore/lib/libavdevice.59.dylib +0 -0
- mindspore/lib/libavfilter.8.dylib +0 -0
- mindspore/lib/libavformat.59.dylib +0 -0
- mindspore/lib/libavutil.57.dylib +0 -0
- mindspore/lib/libdnnl.2.dylib +0 -0
- mindspore/lib/libicudata.69.dylib +0 -0
- mindspore/lib/libicui18n.69.dylib +0 -0
- mindspore/lib/libicuuc.69.dylib +0 -0
- mindspore/lib/libmindspore_address_sorting.15.dylib +0 -0
- mindspore/lib/libmindspore_backend.dylib +0 -0
- mindspore/lib/libmindspore_common.dylib +0 -0
- mindspore/lib/libmindspore_core.dylib +0 -0
- mindspore/lib/libmindspore_glog.0.dylib +0 -0
- mindspore/lib/libmindspore_gpr.15.dylib +0 -0
- mindspore/lib/libmindspore_grpc++.1.dylib +0 -0
- mindspore/lib/libmindspore_grpc.15.dylib +0 -0
- mindspore/lib/libmindspore_np_dtype.dylib +0 -0
- mindspore/lib/libmindspore_ops.dylib +0 -0
- mindspore/lib/libmindspore_upb.15.dylib +0 -0
- mindspore/lib/libnnacl.dylib +0 -0
- mindspore/lib/libopencv_core.4.5.dylib +0 -0
- mindspore/lib/libopencv_imgcodecs.4.5.dylib +0 -0
- mindspore/lib/libopencv_imgproc.4.5.dylib +0 -0
- mindspore/lib/libps_cache.dylib +0 -0
- mindspore/lib/libswresample.4.dylib +0 -0
- mindspore/lib/libswscale.6.dylib +0 -0
- mindspore/lib/libtinyxml2.8.dylib +0 -0
- mindspore/log.py +633 -0
- mindspore/mindrecord/__init__.py +43 -0
- mindspore/mindrecord/common/__init__.py +17 -0
- mindspore/mindrecord/common/constant.py +20 -0
- mindspore/mindrecord/common/enums.py +44 -0
- mindspore/mindrecord/common/exceptions.py +311 -0
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +174 -0
- mindspore/mindrecord/filewriter.py +722 -0
- mindspore/mindrecord/mindpage.py +210 -0
- mindspore/mindrecord/shardheader.py +141 -0
- mindspore/mindrecord/shardindexgenerator.py +74 -0
- mindspore/mindrecord/shardreader.py +117 -0
- mindspore/mindrecord/shardsegment.py +128 -0
- mindspore/mindrecord/shardutils.py +185 -0
- mindspore/mindrecord/shardwriter.py +237 -0
- mindspore/mindrecord/tools/__init__.py +17 -0
- mindspore/mindrecord/tools/cifar10.py +140 -0
- mindspore/mindrecord/tools/cifar100.py +153 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
- mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
- mindspore/mindrecord/tools/csv_to_mr.py +200 -0
- mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
- mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
- mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
- mindspore/mint/__init__.py +1586 -0
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/mint/linalg/__init__.py +22 -0
- mindspore/mint/nn/__init__.py +757 -0
- mindspore/mint/nn/functional.py +679 -0
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +206 -0
- mindspore/mint/special/__init__.py +63 -0
- mindspore/multiprocessing/__init__.py +73 -0
- mindspore/nn/__init__.py +47 -0
- mindspore/nn/cell.py +2787 -0
- mindspore/nn/dynamic_lr.py +482 -0
- mindspore/nn/grad/__init__.py +21 -0
- mindspore/nn/grad/cell_grad.py +196 -0
- mindspore/nn/layer/__init__.py +63 -0
- mindspore/nn/layer/activation.py +1822 -0
- mindspore/nn/layer/basic.py +1629 -0
- mindspore/nn/layer/channel_shuffle.py +90 -0
- mindspore/nn/layer/combined.py +248 -0
- mindspore/nn/layer/container.py +734 -0
- mindspore/nn/layer/conv.py +1505 -0
- mindspore/nn/layer/dense.py +204 -0
- mindspore/nn/layer/embedding.py +869 -0
- mindspore/nn/layer/image.py +661 -0
- mindspore/nn/layer/math.py +1069 -0
- mindspore/nn/layer/normalization.py +1273 -0
- mindspore/nn/layer/padding.py +880 -0
- mindspore/nn/layer/pooling.py +2302 -0
- mindspore/nn/layer/rnn_cells.py +388 -0
- mindspore/nn/layer/rnns.py +849 -0
- mindspore/nn/layer/thor_layer.py +963 -0
- mindspore/nn/layer/timedistributed.py +155 -0
- mindspore/nn/layer/transformer.py +823 -0
- mindspore/nn/learning_rate_schedule.py +512 -0
- mindspore/nn/loss/__init__.py +36 -0
- mindspore/nn/loss/loss.py +2924 -0
- mindspore/nn/metrics.py +53 -0
- mindspore/nn/optim/__init__.py +45 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
- mindspore/nn/optim/ada_grad.py +217 -0
- mindspore/nn/optim/adadelta.py +206 -0
- mindspore/nn/optim/adafactor.py +448 -0
- mindspore/nn/optim/adam.py +1297 -0
- mindspore/nn/optim/adamax.py +220 -0
- mindspore/nn/optim/adasum.py +548 -0
- mindspore/nn/optim/asgd.py +216 -0
- mindspore/nn/optim/ftrl.py +401 -0
- mindspore/nn/optim/lamb.py +296 -0
- mindspore/nn/optim/lars.py +202 -0
- mindspore/nn/optim/lazyadam.py +533 -0
- mindspore/nn/optim/momentum.py +239 -0
- mindspore/nn/optim/optimizer.py +1034 -0
- mindspore/nn/optim/proximal_ada_grad.py +242 -0
- mindspore/nn/optim/rmsprop.py +264 -0
- mindspore/nn/optim/rprop.py +251 -0
- mindspore/nn/optim/sgd.py +237 -0
- mindspore/nn/optim/tft_wrapper.py +127 -0
- mindspore/nn/optim/thor.py +1310 -0
- mindspore/nn/probability/__init__.py +22 -0
- mindspore/nn/probability/bijector/__init__.py +35 -0
- mindspore/nn/probability/bijector/bijector.py +337 -0
- mindspore/nn/probability/bijector/exp.py +65 -0
- mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
- mindspore/nn/probability/bijector/invert.py +126 -0
- mindspore/nn/probability/bijector/power_transform.py +196 -0
- mindspore/nn/probability/bijector/scalar_affine.py +167 -0
- mindspore/nn/probability/bijector/softplus.py +189 -0
- mindspore/nn/probability/bnn_layers/__init__.py +29 -0
- mindspore/nn/probability/bnn_layers/_util.py +46 -0
- mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
- mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
- mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
- mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
- mindspore/nn/probability/distribution/__init__.py +56 -0
- mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
- mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
- mindspore/nn/probability/distribution/_utils/utils.py +362 -0
- mindspore/nn/probability/distribution/bernoulli.py +334 -0
- mindspore/nn/probability/distribution/beta.py +391 -0
- mindspore/nn/probability/distribution/categorical.py +435 -0
- mindspore/nn/probability/distribution/cauchy.py +383 -0
- mindspore/nn/probability/distribution/distribution.py +827 -0
- mindspore/nn/probability/distribution/exponential.py +350 -0
- mindspore/nn/probability/distribution/gamma.py +391 -0
- mindspore/nn/probability/distribution/geometric.py +335 -0
- mindspore/nn/probability/distribution/gumbel.py +257 -0
- mindspore/nn/probability/distribution/half_normal.py +133 -0
- mindspore/nn/probability/distribution/laplace.py +128 -0
- mindspore/nn/probability/distribution/log_normal.py +272 -0
- mindspore/nn/probability/distribution/logistic.py +379 -0
- mindspore/nn/probability/distribution/normal.py +336 -0
- mindspore/nn/probability/distribution/poisson.py +288 -0
- mindspore/nn/probability/distribution/student_t.py +149 -0
- mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
- mindspore/nn/probability/distribution/uniform.py +375 -0
- mindspore/nn/reinforcement/__init__.py +24 -0
- mindspore/nn/reinforcement/_batch_read_write.py +142 -0
- mindspore/nn/reinforcement/_tensors_queue.py +152 -0
- mindspore/nn/reinforcement/tensor_array.py +145 -0
- mindspore/nn/sparse/__init__.py +23 -0
- mindspore/nn/sparse/sparse.py +147 -0
- mindspore/nn/wrap/__init__.py +49 -0
- mindspore/nn/wrap/cell_wrapper.py +968 -0
- mindspore/nn/wrap/grad_reducer.py +608 -0
- mindspore/nn/wrap/loss_scale.py +694 -0
- mindspore/numpy/__init__.py +121 -0
- mindspore/numpy/array_creations.py +2731 -0
- mindspore/numpy/array_ops.py +2629 -0
- mindspore/numpy/dtypes.py +185 -0
- mindspore/numpy/fft.py +966 -0
- mindspore/numpy/logic_ops.py +936 -0
- mindspore/numpy/math_ops.py +5911 -0
- mindspore/numpy/utils.py +214 -0
- mindspore/numpy/utils_const.py +565 -0
- mindspore/ops/__init__.py +56 -0
- mindspore/ops/_constants.py +30 -0
- mindspore/ops/_grad_experimental/__init__.py +31 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
- mindspore/ops/_grad_experimental/grad_base.py +143 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +714 -0
- mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
- mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +802 -0
- mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
- mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
- mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
- mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
- mindspore/ops/_op_impl/__init__.py +23 -0
- mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
- mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
- mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
- mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/acos.py +32 -0
- mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
- mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
- mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
- mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
- mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
- mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
- mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
- mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/angle.py +31 -0
- mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
- mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
- mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/asin.py +32 -0
- mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
- mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
- mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
- mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
- mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
- mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
- mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
- mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
- mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
- mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
- mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
- mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
- mindspore/ops/_op_impl/aicpu/cast.py +225 -0
- mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
- mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
- mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
- mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
- mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
- mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
- mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
- mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
- mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
- mindspore/ops/_op_impl/aicpu/complex.py +32 -0
- mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
- mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
- mindspore/ops/_op_impl/aicpu/concat.py +57 -0
- mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
- mindspore/ops/_op_impl/aicpu/conj.py +42 -0
- mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
- mindspore/ops/_op_impl/aicpu/cos.py +34 -0
- mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
- mindspore/ops/_op_impl/aicpu/cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
- mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
- mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
- mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
- mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
- mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
- mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
- mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
- mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
- mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
- mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
- mindspore/ops/_op_impl/aicpu/diag.py +36 -0
- mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
- mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
- mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
- mindspore/ops/_op_impl/aicpu/div.py +41 -0
- mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
- mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
- mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
- mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
- mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
- mindspore/ops/_op_impl/aicpu/eig.py +35 -0
- mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
- mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
- mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
- mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
- mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
- mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/exp.py +37 -0
- mindspore/ops/_op_impl/aicpu/expand.py +45 -0
- mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
- mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
- mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
- mindspore/ops/_op_impl/aicpu/eye.py +44 -0
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
- mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
- mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
- mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
- mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
- mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
- mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
- mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
- mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
- mindspore/ops/_op_impl/aicpu/gather.py +46 -0
- mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
- mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
- mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
- mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
- mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
- mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
- mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
- mindspore/ops/_op_impl/aicpu/glu.py +33 -0
- mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
- mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
- mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
- mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
- mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
- mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
- mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
- mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
- mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
- mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
- mindspore/ops/_op_impl/aicpu/identity.py +42 -0
- mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
- mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
- mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
- mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
- mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
- mindspore/ops/_op_impl/aicpu/imag.py +31 -0
- mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
- mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
- mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
- mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
- mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
- mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
- mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
- mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
- mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
- mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
- mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
- mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
- mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
- mindspore/ops/_op_impl/aicpu/log.py +37 -0
- mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
- mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
- mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
- mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
- mindspore/ops/_op_impl/aicpu/logit.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
- mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
- mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
- mindspore/ops/_op_impl/aicpu/lu.py +39 -0
- mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
- mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
- mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
- mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
- mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
- mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
- mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
- mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
- mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
- mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
- mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
- mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
- mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
- mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
- mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
- mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
- mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
- mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
- mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
- mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
- mindspore/ops/_op_impl/aicpu/median.py +39 -0
- mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
- mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
- mindspore/ops/_op_impl/aicpu/mul.py +43 -0
- mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
- mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
- mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
- mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
- mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
- mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
- mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
- mindspore/ops/_op_impl/aicpu/neg.py +36 -0
- mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
- mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
- mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
- mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
- mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
- mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
- mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
- mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
- mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
- mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
- mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
- mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
- mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
- mindspore/ops/_op_impl/aicpu/padding.py +41 -0
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
- mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
- mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/pow.py +39 -0
- mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
- mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
- mindspore/ops/_op_impl/aicpu/qr.py +36 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
- mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
- mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
- mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
- mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/range.py +36 -0
- mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
- mindspore/ops/_op_impl/aicpu/real.py +31 -0
- mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
- mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
- mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
- mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
- mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
- mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
- mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
- mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
- mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
- mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
- mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
- mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
- mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
- mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
- mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/round.py +34 -0
- mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
- mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
- mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
- mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
- mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
- mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
- mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
- mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
- mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
- mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
- mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
- mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
- mindspore/ops/_op_impl/aicpu/select.py +45 -0
- mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
- mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
- mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
- mindspore/ops/_op_impl/aicpu/sign.py +36 -0
- mindspore/ops/_op_impl/aicpu/sin.py +34 -0
- mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
- mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
- mindspore/ops/_op_impl/aicpu/slice.py +59 -0
- mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sort.py +39 -0
- mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
- mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
- mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
- mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
- mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
- mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
- mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
- mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
- mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
- mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
- mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
- mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
- mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
- mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
- mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
- mindspore/ops/_op_impl/aicpu/split.py +45 -0
- mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
- mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/square.py +35 -0
- mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
- mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
- mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
- mindspore/ops/_op_impl/aicpu/stack.py +45 -0
- mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
- mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
- mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
- mindspore/ops/_op_impl/aicpu/stft.py +70 -0
- mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
- mindspore/ops/_op_impl/aicpu/sub.py +41 -0
- mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
- mindspore/ops/_op_impl/aicpu/tan.py +34 -0
- mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
- mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/aicpu/tile.py +56 -0
- mindspore/ops/_op_impl/aicpu/topk.py +34 -0
- mindspore/ops/_op_impl/aicpu/trace.py +40 -0
- mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
- mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
- mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
- mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
- mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/tril.py +42 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
- mindspore/ops/_op_impl/aicpu/triu.py +43 -0
- mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
- mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
- mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
- mindspore/ops/_op_impl/aicpu/unique.py +31 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
- mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
- mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
- mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
- mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
- mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
- mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
- mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
- mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
- mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
- mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
- mindspore/ops/_op_impl/akg/__init__.py +19 -0
- mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
- mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
- mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
- mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
- mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
- mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
- mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
- mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
- mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
- mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
- mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
- mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
- mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
- mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
- mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
- mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
- mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
- mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
- mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
- mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
- mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
- mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
- mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
- mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
- mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
- mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
- mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
- mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
- mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
- mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
- mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
- mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
- mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
- mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
- mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
- mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
- mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
- mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
- mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
- mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
- mindspore/ops/_op_impl/cpu/__init__.py +78 -0
- mindspore/ops/_op_impl/cpu/adam.py +49 -0
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
- mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
- mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
- mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
- mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
- mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
- mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
- mindspore/ops/_op_impl/cpu/cast.py +171 -0
- mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
- mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
- mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
- mindspore/ops/_op_impl/cpu/div.py +32 -0
- mindspore/ops/_op_impl/cpu/dropout.py +31 -0
- mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
- mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
- mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
- mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
- mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
- mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
- mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
- mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
- mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
- mindspore/ops/_op_impl/cpu/hswish.py +32 -0
- mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
- mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
- mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
- mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
- mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
- mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
- mindspore/ops/_op_impl/cpu/maximum.py +35 -0
- mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
- mindspore/ops/_op_impl/cpu/minimum.py +40 -0
- mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
- mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
- mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
- mindspore/ops/_op_impl/cpu/mul.py +32 -0
- mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
- mindspore/ops/_op_impl/cpu/pad.py +32 -0
- mindspore/ops/_op_impl/cpu/pow.py +32 -0
- mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
- mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
- mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
- mindspore/ops/_op_impl/cpu/range.py +34 -0
- mindspore/ops/_op_impl/cpu/real_div.py +33 -0
- mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
- mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
- mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
- mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
- mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
- mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
- mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
- mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
- mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
- mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
- mindspore/ops/_op_impl/cpu/split.py +34 -0
- mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
- mindspore/ops/_op_impl/cpu/stack.py +38 -0
- mindspore/ops/_op_impl/cpu/sub.py +32 -0
- mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
- mindspore/ops/_op_impl/cpu/tile.py +37 -0
- mindspore/ops/_op_impl/cpu/top_k.py +31 -0
- mindspore/ops/_op_impl/cpu/transpose.py +39 -0
- mindspore/ops/_primitive_cache.py +90 -0
- mindspore/ops/_register_for_op.py +73 -0
- mindspore/ops/_utils/__init__.py +20 -0
- mindspore/ops/_utils/utils.py +147 -0
- mindspore/ops/_vmap/__init__.py +25 -0
- mindspore/ops/_vmap/vmap_array_ops.py +2149 -0
- mindspore/ops/_vmap/vmap_base.py +533 -0
- mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
- mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
- mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
- mindspore/ops/_vmap/vmap_image_ops.py +194 -0
- mindspore/ops/_vmap/vmap_math_ops.py +993 -0
- mindspore/ops/_vmap/vmap_nn_ops.py +2250 -0
- mindspore/ops/_vmap/vmap_other_ops.py +105 -0
- mindspore/ops/_vmap/vmap_random_ops.py +122 -0
- mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
- mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
- mindspore/ops/composite/__init__.py +71 -0
- mindspore/ops/composite/base.py +1318 -0
- mindspore/ops/composite/env_ops.py +41 -0
- mindspore/ops/composite/math_ops.py +125 -0
- mindspore/ops/composite/multitype_ops/__init__.py +77 -0
- mindspore/ops/composite/multitype_ops/_compile_utils.py +1459 -0
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
- mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
- mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
- mindspore/ops/deprecated.py +315 -0
- mindspore/ops/function/__init__.py +782 -0
- mindspore/ops/function/array_func.py +7226 -0
- mindspore/ops/function/clip_func.py +384 -0
- mindspore/ops/function/debug_func.py +181 -0
- mindspore/ops/function/fft_func.py +44 -0
- mindspore/ops/function/grad/__init__.py +34 -0
- mindspore/ops/function/grad/grad_func.py +1425 -0
- mindspore/ops/function/image_func.py +292 -0
- mindspore/ops/function/linalg_func.py +416 -0
- mindspore/ops/function/math_func.py +12228 -0
- mindspore/ops/function/nn_func.py +8609 -0
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +134 -0
- mindspore/ops/function/random_func.py +1715 -0
- mindspore/ops/function/reshard_func.py +104 -0
- mindspore/ops/function/sparse_func.py +884 -0
- mindspore/ops/function/sparse_unary_func.py +2422 -0
- mindspore/ops/function/spectral_func.py +150 -0
- mindspore/ops/function/vmap_func.py +117 -0
- mindspore/ops/functional.py +464 -0
- mindspore/ops/op_info_register.py +1572 -0
- mindspore/ops/operations/__init__.py +722 -0
- mindspore/ops/operations/_csr_ops.py +403 -0
- mindspore/ops/operations/_custom_grad.py +181 -0
- mindspore/ops/operations/_embedding_cache_ops.py +307 -0
- mindspore/ops/operations/_grad_ops.py +2978 -0
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +2544 -0
- mindspore/ops/operations/_map_tensor_ops.py +112 -0
- mindspore/ops/operations/_ms_kernel.py +601 -0
- mindspore/ops/operations/_ocr_ops.py +379 -0
- mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
- mindspore/ops/operations/_pyfunc_registry.py +58 -0
- mindspore/ops/operations/_quant_ops.py +1844 -0
- mindspore/ops/operations/_rl_inner_ops.py +1231 -0
- mindspore/ops/operations/_scalar_ops.py +106 -0
- mindspore/ops/operations/_sequence_ops.py +1155 -0
- mindspore/ops/operations/_sparse_grad_ops.py +56 -0
- mindspore/ops/operations/_tensor_array.py +359 -0
- mindspore/ops/operations/_thor_ops.py +807 -0
- mindspore/ops/operations/array_ops.py +6124 -0
- mindspore/ops/operations/comm_ops.py +1985 -0
- mindspore/ops/operations/control_ops.py +127 -0
- mindspore/ops/operations/custom_ops.py +1129 -0
- mindspore/ops/operations/debug_ops.py +678 -0
- mindspore/ops/operations/image_ops.py +1041 -0
- mindspore/ops/operations/inner_ops.py +697 -0
- mindspore/ops/operations/linalg_ops.py +95 -0
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +73 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
- mindspore/ops/operations/math_ops.py +5095 -0
- mindspore/ops/operations/nn_ops.py +9575 -0
- mindspore/ops/operations/other_ops.py +874 -0
- mindspore/ops/operations/random_ops.py +1288 -0
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/rl_ops.py +288 -0
- mindspore/ops/operations/sparse_ops.py +2753 -0
- mindspore/ops/operations/spectral_ops.py +111 -0
- mindspore/ops/primitive.py +1046 -0
- mindspore/ops/signature.py +54 -0
- mindspore/ops/vm_impl_registry.py +91 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +252 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +1099 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +1052 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +145 -0
- mindspore/ops_generate/pyboost_utils.py +367 -0
- mindspore/ops_generate/template.py +261 -0
- mindspore/parallel/__init__.py +30 -0
- mindspore/parallel/_auto_parallel_context.py +1486 -0
- mindspore/parallel/_cell_wrapper.py +174 -0
- mindspore/parallel/_cost_model_context.py +700 -0
- mindspore/parallel/_dp_allreduce_fusion.py +159 -0
- mindspore/parallel/_offload_context.py +275 -0
- mindspore/parallel/_parallel_serialization.py +561 -0
- mindspore/parallel/_ps_context.py +242 -0
- mindspore/parallel/_recovery_context.py +110 -0
- mindspore/parallel/_tensor.py +730 -0
- mindspore/parallel/_transformer/__init__.py +35 -0
- mindspore/parallel/_transformer/layers.py +765 -0
- mindspore/parallel/_transformer/loss.py +251 -0
- mindspore/parallel/_transformer/moe.py +693 -0
- mindspore/parallel/_transformer/op_parallel_config.py +222 -0
- mindspore/parallel/_transformer/transformer.py +3119 -0
- mindspore/parallel/_utils.py +612 -0
- mindspore/parallel/algo_parameter_config.py +400 -0
- mindspore/parallel/checkpoint_transform.py +650 -0
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +352 -0
- mindspore/parallel/cluster/process_entity/_utils.py +101 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +14 -0
- mindspore/parallel/mpi/_mpi_config.py +116 -0
- mindspore/parallel/parameter_broadcast.py +151 -0
- mindspore/parallel/shard.py +481 -0
- mindspore/parallel/transform_safetensors.py +993 -0
- mindspore/profiler/__init__.py +28 -0
- mindspore/profiler/common/__init__.py +14 -0
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/exceptions/__init__.py +14 -0
- mindspore/profiler/common/exceptions/error_code.py +83 -0
- mindspore/profiler/common/exceptions/exceptions.py +286 -0
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/struct_type.py +118 -0
- mindspore/profiler/common/util.py +472 -0
- mindspore/profiler/common/validator/__init__.py +14 -0
- mindspore/profiler/common/validator/validate_path.py +84 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +254 -0
- mindspore/profiler/parser/__init__.py +14 -0
- mindspore/profiler/parser/aicpu_data_parser.py +272 -0
- mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
- mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +116 -0
- mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
- mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +282 -0
- mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
- mindspore/profiler/parser/ascend_op_generator.py +334 -0
- mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
- mindspore/profiler/parser/ascend_timeline_generator.py +545 -0
- mindspore/profiler/parser/base_timeline_generator.py +483 -0
- mindspore/profiler/parser/container.py +229 -0
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +697 -0
- mindspore/profiler/parser/flops_parser.py +531 -0
- mindspore/profiler/parser/framework_enum.py +111 -0
- mindspore/profiler/parser/framework_parser.py +464 -0
- mindspore/profiler/parser/framework_struct.py +61 -0
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/hccl_parser.py +573 -0
- mindspore/profiler/parser/hwts_log_parser.py +122 -0
- mindspore/profiler/parser/integrator.py +526 -0
- mindspore/profiler/parser/memory_usage_parser.py +277 -0
- mindspore/profiler/parser/minddata_analyzer.py +800 -0
- mindspore/profiler/parser/minddata_parser.py +186 -0
- mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
- mindspore/profiler/parser/op_intermediate_parser.py +149 -0
- mindspore/profiler/parser/optime_parser.py +250 -0
- mindspore/profiler/parser/profiler_info.py +213 -0
- mindspore/profiler/parser/step_trace_parser.py +666 -0
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +1922 -0
- mindspore/rewrite/__init__.py +28 -0
- mindspore/rewrite/api/__init__.py +17 -0
- mindspore/rewrite/api/node.py +519 -0
- mindspore/rewrite/api/node_type.py +53 -0
- mindspore/rewrite/api/pattern_engine.py +490 -0
- mindspore/rewrite/api/scoped_value.py +181 -0
- mindspore/rewrite/api/symbol_tree.py +497 -0
- mindspore/rewrite/ast_helpers/__init__.py +25 -0
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
- mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
- mindspore/rewrite/common/__init__.py +19 -0
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/error_log.py +39 -0
- mindspore/rewrite/common/event.py +28 -0
- mindspore/rewrite/common/namer.py +271 -0
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/common/observable.py +44 -0
- mindspore/rewrite/common/observer.py +54 -0
- mindspore/rewrite/node/__init__.py +22 -0
- mindspore/rewrite/node/call_function.py +95 -0
- mindspore/rewrite/node/cell_container.py +139 -0
- mindspore/rewrite/node/control_flow.py +113 -0
- mindspore/rewrite/node/node.py +1428 -0
- mindspore/rewrite/node/node_manager.py +283 -0
- mindspore/rewrite/node/node_topological_manager.py +223 -0
- mindspore/rewrite/parsers/__init__.py +29 -0
- mindspore/rewrite/parsers/arguments_parser.py +63 -0
- mindspore/rewrite/parsers/assign_parser.py +852 -0
- mindspore/rewrite/parsers/attribute_parser.py +57 -0
- mindspore/rewrite/parsers/class_def_parser.py +289 -0
- mindspore/rewrite/parsers/constant_parser.py +104 -0
- mindspore/rewrite/parsers/container_parser.py +88 -0
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +61 -0
- mindspore/rewrite/parsers/function_def_parser.py +84 -0
- mindspore/rewrite/parsers/if_parser.py +85 -0
- mindspore/rewrite/parsers/module_parser.py +117 -0
- mindspore/rewrite/parsers/parser.py +43 -0
- mindspore/rewrite/parsers/parser_register.py +86 -0
- mindspore/rewrite/parsers/return_parser.py +37 -0
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
- mindspore/rewrite/sparsify/sparsify.py +112 -0
- mindspore/rewrite/sparsify/utils.py +179 -0
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
- mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
- mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
- mindspore/run_check/__init__.py +20 -0
- mindspore/run_check/_check_version.py +507 -0
- mindspore/run_check/run_check.py +66 -0
- mindspore/safeguard/__init__.py +18 -0
- mindspore/safeguard/rewrite_obfuscation.py +875 -0
- mindspore/scipy/__init__.py +18 -0
- mindspore/scipy/fft.py +264 -0
- mindspore/scipy/linalg.py +919 -0
- mindspore/scipy/ops.py +165 -0
- mindspore/scipy/ops_grad.py +115 -0
- mindspore/scipy/ops_wrapper.py +74 -0
- mindspore/scipy/optimize/__init__.py +20 -0
- mindspore/scipy/optimize/_bfgs.py +230 -0
- mindspore/scipy/optimize/_lagrange.py +201 -0
- mindspore/scipy/optimize/_lbfgs.py +146 -0
- mindspore/scipy/optimize/gradient_optimization_algorithm.py +168 -0
- mindspore/scipy/optimize/line_search.py +370 -0
- mindspore/scipy/optimize/linear_sum_assignment.py +78 -0
- mindspore/scipy/optimize/minimize.py +200 -0
- mindspore/scipy/utils.py +156 -0
- mindspore/scipy/utils_const.py +246 -0
- mindspore/train/__init__.py +48 -0
- mindspore/train/_utils.py +465 -0
- mindspore/train/amp.py +935 -0
- mindspore/train/anf_ir_pb2.py +1517 -0
- mindspore/train/callback/__init__.py +44 -0
- mindspore/train/callback/_backup_and_restore.py +117 -0
- mindspore/train/callback/_callback.py +613 -0
- mindspore/train/callback/_checkpoint.py +814 -0
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_dataset_graph.py +150 -0
- mindspore/train/callback/_early_stop.py +239 -0
- mindspore/train/callback/_flops_collector.py +239 -0
- mindspore/train/callback/_history.py +92 -0
- mindspore/train/callback/_lambda_callback.py +80 -0
- mindspore/train/callback/_landscape.py +1049 -0
- mindspore/train/callback/_loss_monitor.py +107 -0
- mindspore/train/callback/_lr_scheduler_callback.py +76 -0
- mindspore/train/callback/_on_request_exit.py +298 -0
- mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
- mindspore/train/callback/_summary_collector.py +1184 -0
- mindspore/train/callback/_tft_register.py +352 -0
- mindspore/train/callback/_time_monitor.py +141 -0
- mindspore/train/checkpoint_pb2.py +233 -0
- mindspore/train/data_sink.py +219 -0
- mindspore/train/dataset_helper.py +692 -0
- mindspore/train/lineage_pb2.py +1260 -0
- mindspore/train/loss_scale_manager.py +213 -0
- mindspore/train/memory_profiling_pb2.py +298 -0
- mindspore/train/metrics/__init__.py +175 -0
- mindspore/train/metrics/accuracy.py +133 -0
- mindspore/train/metrics/auc.py +129 -0
- mindspore/train/metrics/bleu_score.py +170 -0
- mindspore/train/metrics/confusion_matrix.py +700 -0
- mindspore/train/metrics/cosine_similarity.py +109 -0
- mindspore/train/metrics/dice.py +116 -0
- mindspore/train/metrics/error.py +175 -0
- mindspore/train/metrics/fbeta.py +167 -0
- mindspore/train/metrics/hausdorff_distance.py +333 -0
- mindspore/train/metrics/loss.py +97 -0
- mindspore/train/metrics/mean_surface_distance.py +189 -0
- mindspore/train/metrics/metric.py +373 -0
- mindspore/train/metrics/occlusion_sensitivity.py +225 -0
- mindspore/train/metrics/perplexity.py +133 -0
- mindspore/train/metrics/precision.py +160 -0
- mindspore/train/metrics/recall.py +159 -0
- mindspore/train/metrics/roc.py +223 -0
- mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
- mindspore/train/metrics/topk.py +167 -0
- mindspore/train/mind_ir_pb2.py +1908 -0
- mindspore/train/model.py +2252 -0
- mindspore/train/node_strategy_pb2.py +653 -0
- mindspore/train/print_pb2.py +184 -0
- mindspore/train/profiling_parallel_pb2.py +151 -0
- mindspore/train/serialization.py +3325 -0
- mindspore/train/summary/__init__.py +23 -0
- mindspore/train/summary/_lineage_adapter.py +41 -0
- mindspore/train/summary/_summary_adapter.py +496 -0
- mindspore/train/summary/_writer_pool.py +207 -0
- mindspore/train/summary/enums.py +56 -0
- mindspore/train/summary/summary_record.py +581 -0
- mindspore/train/summary/writer.py +167 -0
- mindspore/train/summary_pb2.py +1165 -0
- mindspore/train/train_thor/__init__.py +20 -0
- mindspore/train/train_thor/convert_utils.py +268 -0
- mindspore/train/train_thor/dataset_helper.py +192 -0
- mindspore/train/train_thor/model_thor.py +257 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/version.py +1 -0
- mindspore-2.4.0.dist-info/METADATA +352 -0
- mindspore-2.4.0.dist-info/RECORD +1387 -0
- mindspore-2.4.0.dist-info/WHEEL +5 -0
- mindspore-2.4.0.dist-info/entry_points.txt +3 -0
- mindspore-2.4.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1822 @@
|
|
|
1
|
+
# Copyright 2020-2022 Huawei Technologies Co., Ltd
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ============================================================================
|
|
15
|
+
"""activation"""
|
|
16
|
+
from __future__ import absolute_import
|
|
17
|
+
|
|
18
|
+
import numpy as np
|
|
19
|
+
|
|
20
|
+
from mindspore import _checkparam as validator
|
|
21
|
+
from mindspore._extends import cell_attr_register
|
|
22
|
+
from mindspore.common import dtype as mstype
|
|
23
|
+
from mindspore.common.parameter import Parameter
|
|
24
|
+
from mindspore.common.tensor import Tensor
|
|
25
|
+
from mindspore.ops import functional as F
|
|
26
|
+
from mindspore.ops import operations as P
|
|
27
|
+
from mindspore.ops.operations import nn_ops as NN_OPS
|
|
28
|
+
from mindspore.nn.cell import Cell
|
|
29
|
+
from mindspore import ops
|
|
30
|
+
from mindspore.ops.primitive import _primexpr
|
|
31
|
+
|
|
32
|
+
__all__ = ['Softmin',
|
|
33
|
+
'Softmax',
|
|
34
|
+
'Softmax2d',
|
|
35
|
+
'LogSoftmax',
|
|
36
|
+
'LogSoftmaxExt',
|
|
37
|
+
'ReLU',
|
|
38
|
+
'ReLU6',
|
|
39
|
+
'RReLU',
|
|
40
|
+
'SeLU',
|
|
41
|
+
'SiLU',
|
|
42
|
+
'Tanh',
|
|
43
|
+
'Tanhshrink',
|
|
44
|
+
'Hardtanh',
|
|
45
|
+
'GELU',
|
|
46
|
+
'FastGelu',
|
|
47
|
+
'Sigmoid',
|
|
48
|
+
'Softsign',
|
|
49
|
+
'PReLU',
|
|
50
|
+
'PReLUExt',
|
|
51
|
+
'get_activation',
|
|
52
|
+
'LeakyReLU',
|
|
53
|
+
'HSigmoid',
|
|
54
|
+
'HSwish',
|
|
55
|
+
'ELU',
|
|
56
|
+
'LogSigmoid',
|
|
57
|
+
'LRN',
|
|
58
|
+
'SoftShrink',
|
|
59
|
+
'HShrink',
|
|
60
|
+
'CELU',
|
|
61
|
+
'Threshold',
|
|
62
|
+
'Mish',
|
|
63
|
+
'GLU'
|
|
64
|
+
]
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class CELU(Cell):
|
|
68
|
+
r"""
|
|
69
|
+
CELU Activation Operator.
|
|
70
|
+
|
|
71
|
+
Applies the continuously differentiable exponential linear units function element-wise.
|
|
72
|
+
|
|
73
|
+
.. math::
|
|
74
|
+
|
|
75
|
+
\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
|
|
76
|
+
|
|
77
|
+
For more details, refer to `CELU <https://arxiv.org/abs/1704.07483>`_ .
|
|
78
|
+
|
|
79
|
+
CELU Activation Function Graph:
|
|
80
|
+
|
|
81
|
+
.. image:: ../images/CELU.png
|
|
82
|
+
:align: center
|
|
83
|
+
|
|
84
|
+
Args:
|
|
85
|
+
alpha (float): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .
|
|
86
|
+
|
|
87
|
+
Inputs:
|
|
88
|
+
- **x** (Tensor) - The input of CELU. The required dtype is float16 or float32.
|
|
89
|
+
The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
|
|
90
|
+
|
|
91
|
+
Outputs:
|
|
92
|
+
Tensor, with the same type and shape as the `x`.
|
|
93
|
+
|
|
94
|
+
Raises:
|
|
95
|
+
TypeError: If `alpha` is not a float.
|
|
96
|
+
ValueError: If `alpha` has the value of 0.
|
|
97
|
+
TypeError: If `x` is not a Tensor.
|
|
98
|
+
TypeError: If the dtype of `x` is neither float16 nor float32.
|
|
99
|
+
|
|
100
|
+
Supported Platforms:
|
|
101
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
102
|
+
|
|
103
|
+
Examples:
|
|
104
|
+
>>> import mindspore
|
|
105
|
+
>>> from mindspore import Tensor, nn
|
|
106
|
+
>>> import numpy as np
|
|
107
|
+
>>> x = Tensor(np.array([-2.0, -1.0, 1.0, 2.0]), mindspore.float32)
|
|
108
|
+
>>> celu = nn.CELU()
|
|
109
|
+
>>> output = celu(x)
|
|
110
|
+
>>> print(output)
|
|
111
|
+
[-0.86466473 -0.63212055 1. 2. ]
|
|
112
|
+
"""
|
|
113
|
+
|
|
114
|
+
def __init__(self, alpha=1.0):
|
|
115
|
+
"""Initialize CELU."""
|
|
116
|
+
super(CELU, self).__init__()
|
|
117
|
+
self.celu = P.CeLU(alpha=alpha)
|
|
118
|
+
|
|
119
|
+
def construct(self, x):
|
|
120
|
+
return self.celu(x)
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class Softmin(Cell):
|
|
124
|
+
r"""
|
|
125
|
+
Softmin activation function, which is a two-category function :class:`mindspore.nn.Sigmoid` in the promotion of
|
|
126
|
+
multi-classification, and the purpose is to show the results of multi-classification in the form of probability.
|
|
127
|
+
|
|
128
|
+
Calculate the value of the exponential function for the elements of the input Tensor on the `axis`, and then
|
|
129
|
+
normalized to lie in range [0, 1] and sum up to 1.
|
|
130
|
+
|
|
131
|
+
Softmin is defined as:
|
|
132
|
+
|
|
133
|
+
.. math::
|
|
134
|
+
\text{softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_{j=0}^{n-1}\exp(-x_j)},
|
|
135
|
+
|
|
136
|
+
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
|
|
137
|
+
|
|
138
|
+
Args:
|
|
139
|
+
axis (Union[int, tuple[int]]): The axis to apply Softmin operation, if the dimension of input `x` is x.ndim,
|
|
140
|
+
the range of axis is `[-x.ndim, x.ndim)`. -1 means the last dimension. Default: ``-1`` .
|
|
141
|
+
|
|
142
|
+
Inputs:
|
|
143
|
+
- **x** (Tensor) - Tensor for computing Softmin functions with data type of float16 or float32.
|
|
144
|
+
|
|
145
|
+
Outputs:
|
|
146
|
+
Tensor, which has the same type and shape as `x` with values in the range [0,1].
|
|
147
|
+
|
|
148
|
+
Raises:
|
|
149
|
+
TypeError: If `axis` is neither an int nor a tuple.
|
|
150
|
+
TypeError: If dtype of `x` is neither float16 nor float32.
|
|
151
|
+
ValueError: If `axis` is a tuple whose length is less than 1.
|
|
152
|
+
ValueError: If `axis` is a tuple whose elements are not all in the range [-x.ndim, x.ndim).
|
|
153
|
+
|
|
154
|
+
Supported Platforms:
|
|
155
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
156
|
+
|
|
157
|
+
Examples:
|
|
158
|
+
>>> import mindspore
|
|
159
|
+
>>> from mindspore import Tensor, nn
|
|
160
|
+
>>> import numpy as np
|
|
161
|
+
>>> # axis = -1(default), and the sum of return value is 1.0.
|
|
162
|
+
>>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
|
|
163
|
+
>>> softmin = nn.Softmin()
|
|
164
|
+
>>> output = softmin(x)
|
|
165
|
+
>>> print(output)
|
|
166
|
+
[0.2341 0.636 0.0862 0.01165 0.03168 ]
|
|
167
|
+
"""
|
|
168
|
+
|
|
169
|
+
def __init__(self, axis=-1):
|
|
170
|
+
"""Initialize Softmin."""
|
|
171
|
+
super(Softmin, self).__init__()
|
|
172
|
+
self.axis = axis
|
|
173
|
+
|
|
174
|
+
def construct(self, x):
|
|
175
|
+
return ops.function.softmin(x, self.axis)
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
class Softmax2d(Cell):
|
|
179
|
+
r"""
|
|
180
|
+
Softmax function applied to 2D features data.
|
|
181
|
+
|
|
182
|
+
Applies `Softmax` to each location :math:`(c, h, w)` with an input Tensor of shape :math:`(C, H, W)` .
|
|
183
|
+
|
|
184
|
+
Inputs:
|
|
185
|
+
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
|
|
186
|
+
The input of Softmax with data type of float16 or float32.
|
|
187
|
+
|
|
188
|
+
Outputs:
|
|
189
|
+
Tensor, which has the same type and shape as `x` with values in the range[0,1].
|
|
190
|
+
|
|
191
|
+
Raises:
|
|
192
|
+
TypeError: If dtype of `x` is neither float16 nor float32.
|
|
193
|
+
ValueError: If `data_format` is neither 'NCHW' nor 'CHW'.
|
|
194
|
+
|
|
195
|
+
Supported Platforms:
|
|
196
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
197
|
+
|
|
198
|
+
Examples:
|
|
199
|
+
>>> import mindspore
|
|
200
|
+
>>> from mindspore import Tensor, nn
|
|
201
|
+
>>> import numpy as np
|
|
202
|
+
>>> x = Tensor(np.array([[[[0.1, 0.2]], [[0.3, 0.4]], [[0.6, 0.5]]]]), mindspore.float32)
|
|
203
|
+
>>> softmax2d = nn.Softmax2d()
|
|
204
|
+
>>> output = softmax2d(x)
|
|
205
|
+
>>> print(output)
|
|
206
|
+
[[[[0.25838965 0.28001308]]
|
|
207
|
+
[[0.31559783 0.34200877]]
|
|
208
|
+
[[0.42601252 0.37797815]]]]
|
|
209
|
+
"""
|
|
210
|
+
|
|
211
|
+
def __init__(self):
|
|
212
|
+
"""Initialize Softmax2d."""
|
|
213
|
+
super(Softmax2d, self).__init__()
|
|
214
|
+
self.softmax = P.Softmax(axis=-3)
|
|
215
|
+
self.shape = P.Shape()
|
|
216
|
+
|
|
217
|
+
@staticmethod
|
|
218
|
+
@_primexpr
|
|
219
|
+
def _check_input_dim(shape, cls_name):
|
|
220
|
+
dim = len(shape)
|
|
221
|
+
if dim not in (3, 4):
|
|
222
|
+
raise ValueError(f"For '{cls_name}', the in_shape must have 3 or 4 dims, but got {dim}.")
|
|
223
|
+
|
|
224
|
+
def construct(self, x):
|
|
225
|
+
x_shape = self.shape(x)
|
|
226
|
+
self._check_input_dim(x_shape, self.cls_name)
|
|
227
|
+
return self.softmax(x)
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
class Softmax(Cell):
|
|
231
|
+
r"""
|
|
232
|
+
Softmax activation function, which is a two-category function :class:`mindspore.nn.Sigmoid` in the promotion of
|
|
233
|
+
multi-classification, the purpose is to show the results of multi-classification in the form of probability.
|
|
234
|
+
|
|
235
|
+
Calculate the value of the exponential function for the elements of the input Tensor on the `axis`, and then
|
|
236
|
+
normalized to lie in range [0, 1] and sum up to 1.
|
|
237
|
+
|
|
238
|
+
Softmax is defined as:
|
|
239
|
+
|
|
240
|
+
.. math::
|
|
241
|
+
\text{softmax}(input_{i}) = \frac{\exp(input_i)}{\sum_{j=0}^{n-1}\exp(input_j)},
|
|
242
|
+
|
|
243
|
+
where :math:`input_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
|
|
244
|
+
|
|
245
|
+
Args:
|
|
246
|
+
axis (int, optional): The axis to apply Softmax operation, if the dimension of `input` is input.ndim,
|
|
247
|
+
the range of axis is `[-input.ndim, input.ndim)`, -1 means the last dimension. Default: ``-1`` .
|
|
248
|
+
|
|
249
|
+
Inputs:
|
|
250
|
+
- **input** (Tensor) - The input of Softmax.
|
|
251
|
+
|
|
252
|
+
Outputs:
|
|
253
|
+
Tensor, which has the same type and shape as `input` with values in the range[0, 1].
|
|
254
|
+
|
|
255
|
+
Raises:
|
|
256
|
+
TypeError: If `axis` is neither an int nor a tuple.
|
|
257
|
+
ValueError: If `axis` is a tuple whose length is less than 1.
|
|
258
|
+
ValueError: If `axis` is a tuple whose elements are not all in range `[-input.ndim, input.ndim)`.
|
|
259
|
+
|
|
260
|
+
Supported Platforms:
|
|
261
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
262
|
+
|
|
263
|
+
Examples:
|
|
264
|
+
>>> import mindspore
|
|
265
|
+
>>> from mindspore import Tensor, nn
|
|
266
|
+
>>> import numpy as np
|
|
267
|
+
>>> # axis = -1(default), and the sum of return value is 1.0.
|
|
268
|
+
>>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
|
|
269
|
+
>>> softmax = nn.Softmax()
|
|
270
|
+
>>> output = softmax(input)
|
|
271
|
+
>>> print(output)
|
|
272
|
+
[0.03168 0.01166 0.0861 0.636 0.2341 ]
|
|
273
|
+
"""
|
|
274
|
+
|
|
275
|
+
def __init__(self, axis=-1):
|
|
276
|
+
"""Initialize Softmax."""
|
|
277
|
+
super(Softmax, self).__init__()
|
|
278
|
+
self.softmax = P.Softmax(axis)
|
|
279
|
+
|
|
280
|
+
def construct(self, input):
|
|
281
|
+
return self.softmax(input)
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
class SoftmaxExt(Cell):
|
|
285
|
+
r"""
|
|
286
|
+
Applies the Softmax function to an n-dimensional input Tensor.
|
|
287
|
+
|
|
288
|
+
For details, please refer to :func:`mindspore.mint.nn.functional.softmax`.
|
|
289
|
+
|
|
290
|
+
Supported Platforms:
|
|
291
|
+
``Ascend``
|
|
292
|
+
|
|
293
|
+
Examples:
|
|
294
|
+
>>> import mindspore
|
|
295
|
+
>>> from mindspore import Tensor, nn
|
|
296
|
+
>>> import numpy as np
|
|
297
|
+
>>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
|
|
298
|
+
>>> softmax = nn.SoftmaxExt()
|
|
299
|
+
>>> output = softmax(input)
|
|
300
|
+
>>> print(output)
|
|
301
|
+
[0.03168 0.01166 0.0861 0.636 0.2341 ]
|
|
302
|
+
"""
|
|
303
|
+
|
|
304
|
+
def __init__(self, dim=None):
|
|
305
|
+
"""Initialize Softmax."""
|
|
306
|
+
super(SoftmaxExt, self).__init__()
|
|
307
|
+
self.dim = dim
|
|
308
|
+
|
|
309
|
+
def construct(self, input):
|
|
310
|
+
return ops.function.nn_func.softmax_ext(input, self.dim)
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
class LogSoftmax(Cell):
|
|
314
|
+
r"""
|
|
315
|
+
Applies the LogSoftmax function to n-dimensional input tensor element-wise.
|
|
316
|
+
|
|
317
|
+
The input is transformed by the Softmax function and then by the log function to lie in range[-inf,0).
|
|
318
|
+
|
|
319
|
+
Logsoftmax is defined as:
|
|
320
|
+
|
|
321
|
+
.. math::
|
|
322
|
+
|
|
323
|
+
\text{logsoftmax}(x_i) = \log \left(\frac{\exp(x_i)}{\sum_{j=0}^{n-1} \exp(x_j)}\right)
|
|
324
|
+
|
|
325
|
+
Args:
|
|
326
|
+
axis (int): The axis to apply LogSoftmax operation, -1 means the last dimension. Default: ``-1`` .
|
|
327
|
+
|
|
328
|
+
Inputs:
|
|
329
|
+
- **x** (Tensor) - The input of LogSoftmax, with float16 or float32 data type.
|
|
330
|
+
|
|
331
|
+
Outputs:
|
|
332
|
+
Tensor, which has the same type and shape as `x` with output values in the range[-inf,0).
|
|
333
|
+
|
|
334
|
+
Raises:
|
|
335
|
+
TypeError: If `axis` is not an int.
|
|
336
|
+
TypeError: If dtype of `x` is neither float16 nor float32.
|
|
337
|
+
ValueError: If `axis` is not in range [-len(x), len(x)).
|
|
338
|
+
|
|
339
|
+
Supported Platforms:
|
|
340
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
341
|
+
|
|
342
|
+
Examples:
|
|
343
|
+
>>> import mindspore
|
|
344
|
+
>>> from mindspore import Tensor, nn
|
|
345
|
+
>>> import numpy as np
|
|
346
|
+
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
|
|
347
|
+
>>> log_softmax = nn.LogSoftmax()
|
|
348
|
+
>>> output = log_softmax(x)
|
|
349
|
+
>>> print(output)
|
|
350
|
+
[[-5.00672150e+00 -6.72150636e-03 -1.20067215e+01]
|
|
351
|
+
[-7.00091219e+00 -1.40009127e+01 -9.12250078e-04]]
|
|
352
|
+
"""
|
|
353
|
+
|
|
354
|
+
def __init__(self, axis=-1):
|
|
355
|
+
"""Initialize LogSoftmax."""
|
|
356
|
+
super(LogSoftmax, self).__init__()
|
|
357
|
+
self.log_softmax = P.LogSoftmax(axis)
|
|
358
|
+
|
|
359
|
+
def construct(self, x):
|
|
360
|
+
return self.log_softmax(x)
|
|
361
|
+
|
|
362
|
+
|
|
363
|
+
class LogSoftmaxExt(Cell):
|
|
364
|
+
r"""
|
|
365
|
+
Applies the Log Softmax function to the input tensor on the specified axis.
|
|
366
|
+
Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
|
|
367
|
+
the Log Softmax function is shown as follows:
|
|
368
|
+
|
|
369
|
+
.. math::
|
|
370
|
+
\text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
|
|
371
|
+
|
|
372
|
+
where :math:`N` is the length of the Tensor.
|
|
373
|
+
|
|
374
|
+
Args:
|
|
375
|
+
dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
|
|
376
|
+
|
|
377
|
+
Returns:
|
|
378
|
+
Tensor, with the same shape as the input.
|
|
379
|
+
|
|
380
|
+
Raises:
|
|
381
|
+
ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
|
|
382
|
+
|
|
383
|
+
Supported Platforms:
|
|
384
|
+
``Ascend``
|
|
385
|
+
|
|
386
|
+
Examples:
|
|
387
|
+
>>> import mindspore
|
|
388
|
+
>>> from mindspore import Tensor, nn
|
|
389
|
+
>>> import numpy as np
|
|
390
|
+
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
|
|
391
|
+
>>> log_softmax = nn.LogSoftmaxExt(dim=-1)
|
|
392
|
+
>>> output = log_softmax(x)
|
|
393
|
+
>>> print(output)
|
|
394
|
+
[[-5.00672150e+00 -6.72150636e-03 -1.20067215e+01]
|
|
395
|
+
[-7.00091219e+00 -1.40009127e+01 -9.12250078e-04]]
|
|
396
|
+
"""
|
|
397
|
+
|
|
398
|
+
def __init__(self, dim=None):
|
|
399
|
+
"""Initialize LogSoftmaxExt."""
|
|
400
|
+
super(LogSoftmaxExt, self).__init__()
|
|
401
|
+
self.log_softmax = P.LogSoftmaxExt()
|
|
402
|
+
self.dim = dim
|
|
403
|
+
|
|
404
|
+
def construct(self, x):
|
|
405
|
+
return self.log_softmax(x, dim=self.dim)
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
class ELU(Cell):
|
|
409
|
+
r"""
|
|
410
|
+
Applies the exponential linear unit function element-wise.
|
|
411
|
+
|
|
412
|
+
The activation function is defined as:
|
|
413
|
+
|
|
414
|
+
.. math::
|
|
415
|
+
E_{i} =
|
|
416
|
+
\begin{cases}
|
|
417
|
+
x_i, &\text{if } x_i \geq 0; \cr
|
|
418
|
+
\alpha * (\exp(x_i) - 1), &\text{otherwise.}
|
|
419
|
+
\end{cases}
|
|
420
|
+
|
|
421
|
+
where :math:`x_i` represents the element of the input and :math:`\alpha` represents the `alpha` parameter.
|
|
422
|
+
|
|
423
|
+
ELU Activation Function Graph:
|
|
424
|
+
|
|
425
|
+
.. image:: ../images/ELU.png
|
|
426
|
+
:align: center
|
|
427
|
+
|
|
428
|
+
Args:
|
|
429
|
+
alpha (float): The alpha value of ELU, the data type is float. Default: ``1.0`` .
|
|
430
|
+
Only alpha equal to ``1.0`` is supported currently.
|
|
431
|
+
|
|
432
|
+
Inputs:
|
|
433
|
+
- **input_x** (Tensor) - The input of ELU is a Tensor of any dimension with data type of float16 or float32.
|
|
434
|
+
|
|
435
|
+
Outputs:
|
|
436
|
+
Tensor, with the same type and shape as the `input_x`.
|
|
437
|
+
|
|
438
|
+
Raises:
|
|
439
|
+
TypeError: If `alpha` is not a float.
|
|
440
|
+
TypeError: If dtype of `input_x` is neither float16 nor float32.
|
|
441
|
+
ValueError: If `alpha` is not equal to 1.0.
|
|
442
|
+
|
|
443
|
+
Supported Platforms:
|
|
444
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
445
|
+
|
|
446
|
+
Examples:
|
|
447
|
+
>>> import mindspore
|
|
448
|
+
>>> from mindspore import Tensor, nn
|
|
449
|
+
>>> import numpy as np
|
|
450
|
+
>>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
|
|
451
|
+
>>> elu = nn.ELU()
|
|
452
|
+
>>> result = elu(x)
|
|
453
|
+
>>> print(result)
|
|
454
|
+
[-0.63212055 -0.86466473 0. 2. 1.]
|
|
455
|
+
"""
|
|
456
|
+
|
|
457
|
+
def __init__(self, alpha=1.0):
|
|
458
|
+
"""Initialize ELU."""
|
|
459
|
+
super(ELU, self).__init__()
|
|
460
|
+
self.elu = P.Elu(alpha)
|
|
461
|
+
|
|
462
|
+
def construct(self, x):
|
|
463
|
+
return self.elu(x)
|
|
464
|
+
|
|
465
|
+
|
|
466
|
+
class ReLU(Cell):
|
|
467
|
+
r"""
|
|
468
|
+
Applies ReLU (Rectified Linear Unit activation function) element-wise.
|
|
469
|
+
|
|
470
|
+
.. math::
|
|
471
|
+
|
|
472
|
+
\text{ReLU}(input) = (input)^+ = \max(0, input),
|
|
473
|
+
|
|
474
|
+
It returns element-wise :math:`\max(0, input)`.
|
|
475
|
+
|
|
476
|
+
.. note::
|
|
477
|
+
The neurons with the negative output
|
|
478
|
+
will be suppressed and the active neurons will stay the same.
|
|
479
|
+
|
|
480
|
+
ReLU Activation Function Graph:
|
|
481
|
+
|
|
482
|
+
.. image:: ../images/ReLU.png
|
|
483
|
+
:align: center
|
|
484
|
+
|
|
485
|
+
Inputs:
|
|
486
|
+
- **input** (Tensor) - The input of ReLU is a Tensor of any dimension.
|
|
487
|
+
|
|
488
|
+
Outputs:
|
|
489
|
+
Tensor, with the same type and shape as the `input`.
|
|
490
|
+
|
|
491
|
+
Raises:
|
|
492
|
+
TypeError: If dtype of `input` is not supported.
|
|
493
|
+
|
|
494
|
+
Supported Platforms:
|
|
495
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
496
|
+
|
|
497
|
+
Examples:
|
|
498
|
+
>>> import numpy as np
|
|
499
|
+
>>> import mindspore
|
|
500
|
+
>>> from mindspore import Tensor, nn
|
|
501
|
+
>>> input = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
|
|
502
|
+
>>> relu = nn.ReLU()
|
|
503
|
+
>>> output = relu(input)
|
|
504
|
+
>>> print(output)
|
|
505
|
+
[0. 2. 0. 2. 0.]
|
|
506
|
+
"""
|
|
507
|
+
|
|
508
|
+
def __init__(self):
|
|
509
|
+
"""Initialize ReLU."""
|
|
510
|
+
super(ReLU, self).__init__()
|
|
511
|
+
self.relu = P.ReLU()
|
|
512
|
+
|
|
513
|
+
def construct(self, input):
|
|
514
|
+
return self.relu(input)
|
|
515
|
+
|
|
516
|
+
|
|
517
|
+
class ReLU6(Cell):
|
|
518
|
+
r"""
|
|
519
|
+
Compute ReLU6 activation function element-wise.
|
|
520
|
+
|
|
521
|
+
ReLU6 is similar to ReLU with a upper limit of 6, which if the inputs are greater than 6, the outputs
|
|
522
|
+
will be suppressed to 6.
|
|
523
|
+
It computes element-wise as
|
|
524
|
+
|
|
525
|
+
.. math::
|
|
526
|
+
|
|
527
|
+
Y = \min(\max(0, x), 6)
|
|
528
|
+
|
|
529
|
+
ReLU6 Activation Function Graph:
|
|
530
|
+
|
|
531
|
+
.. image:: ../images/ReLU6.png
|
|
532
|
+
:align: center
|
|
533
|
+
|
|
534
|
+
Inputs:
|
|
535
|
+
- **x** (Tensor) - The input of ReLU6 with data type of float16 or float32 and that
|
|
536
|
+
is a Tensor of any valid shape.
|
|
537
|
+
|
|
538
|
+
Outputs:
|
|
539
|
+
Tensor, which has the same type as `x`.
|
|
540
|
+
|
|
541
|
+
Raises:
|
|
542
|
+
TypeError: If dtype of `x` is neither float16 nor float32.
|
|
543
|
+
|
|
544
|
+
Supported Platforms:
|
|
545
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
546
|
+
|
|
547
|
+
Examples:
|
|
548
|
+
>>> import mindspore
|
|
549
|
+
>>> from mindspore import Tensor, nn
|
|
550
|
+
>>> import numpy as np
|
|
551
|
+
>>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
|
|
552
|
+
>>> relu6 = nn.ReLU6()
|
|
553
|
+
>>> output = relu6(x)
|
|
554
|
+
>>> print(output)
|
|
555
|
+
[0. 0. 0. 2. 1.]
|
|
556
|
+
"""
|
|
557
|
+
|
|
558
|
+
def __init__(self):
|
|
559
|
+
"""Initialize ReLU6."""
|
|
560
|
+
super(ReLU6, self).__init__()
|
|
561
|
+
self.relu6 = P.ReLU6()
|
|
562
|
+
|
|
563
|
+
def construct(self, x):
|
|
564
|
+
return self.relu6(x)
|
|
565
|
+
|
|
566
|
+
|
|
567
|
+
class LeakyReLU(Cell):
|
|
568
|
+
r"""
|
|
569
|
+
Leaky ReLU activation function.
|
|
570
|
+
|
|
571
|
+
The activation function is defined as:
|
|
572
|
+
|
|
573
|
+
.. math::
|
|
574
|
+
\text{leaky_relu}(x) = \begin{cases}x, &\text{if } x \geq 0; \cr
|
|
575
|
+
{\alpha} * x, &\text{otherwise.}\end{cases}
|
|
576
|
+
|
|
577
|
+
where :math:`\alpha` represents the `alpha` parameter.
|
|
578
|
+
|
|
579
|
+
For more details, see `Rectifier Nonlinearities Improve Neural Network Acoustic Models
|
|
580
|
+
<https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`_.
|
|
581
|
+
|
|
582
|
+
LeakyReLU Activation Function Graph:
|
|
583
|
+
|
|
584
|
+
.. image:: ../images/LeakyReLU.png
|
|
585
|
+
:align: center
|
|
586
|
+
|
|
587
|
+
Args:
|
|
588
|
+
alpha (Union[int, float]): Slope of the activation function at x < 0. Default: ``0.2`` .
|
|
589
|
+
|
|
590
|
+
Inputs:
|
|
591
|
+
- **x** (Tensor) - The input of LeakyReLU is a Tensor of any dimension.
|
|
592
|
+
|
|
593
|
+
Outputs:
|
|
594
|
+
Tensor, has the same type and shape as the `x`.
|
|
595
|
+
|
|
596
|
+
Raises:
|
|
597
|
+
TypeError: If `alpha` is not a float or an int.
|
|
598
|
+
|
|
599
|
+
Supported Platforms:
|
|
600
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
601
|
+
|
|
602
|
+
Examples:
|
|
603
|
+
>>> import mindspore
|
|
604
|
+
>>> from mindspore import Tensor, nn
|
|
605
|
+
>>> import numpy as np
|
|
606
|
+
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
|
|
607
|
+
>>> leaky_relu = nn.LeakyReLU()
|
|
608
|
+
>>> output = leaky_relu(x)
|
|
609
|
+
>>> print(output)
|
|
610
|
+
[[-0.2 4. -1.6]
|
|
611
|
+
[ 2. -1. 9. ]]
|
|
612
|
+
"""
|
|
613
|
+
|
|
614
|
+
def __init__(self, alpha=0.2):
|
|
615
|
+
"""Initialize LeakyReLU."""
|
|
616
|
+
super(LeakyReLU, self).__init__()
|
|
617
|
+
self.alpha = alpha
|
|
618
|
+
|
|
619
|
+
def construct(self, x):
|
|
620
|
+
out = ops.leaky_relu(x, self.alpha)
|
|
621
|
+
return out
|
|
622
|
+
|
|
623
|
+
|
|
624
|
+
class RReLU(Cell):
|
|
625
|
+
r"""
|
|
626
|
+
Applies RReLU (Randomized Leaky ReLU activation function) element-wise.
|
|
627
|
+
|
|
628
|
+
The activation function is defined as:
|
|
629
|
+
|
|
630
|
+
.. math::
|
|
631
|
+
\text{RReLU}(x_{ji}) = \begin{cases}x_{ji}, &\text{if } x_{ji} \geq 0; \cr
|
|
632
|
+
{\alpha_{ji}} * x_{ji}, &\text{otherwise.}\end{cases}
|
|
633
|
+
|
|
634
|
+
where :math:`\alpha_{ji}` ~ :math:`U(l, u)`, :math:`l \le u`.
|
|
635
|
+
|
|
636
|
+
Applies the RReLU function elementally, as described in the paper:
|
|
637
|
+
`Empirical Evaluation of Rectified Activations in Convolution Network <https://arxiv.org/pdf/1505.00853.pdf>`_ .
|
|
638
|
+
|
|
639
|
+
Args:
|
|
640
|
+
lower (Union[int, float]): Slope of the activation function at x < 0. Default: ``1 / 8`` .
|
|
641
|
+
upper (Union[int, float]): Slope of the activation function at x < 0. Default: ``1 / 3`` .
|
|
642
|
+
|
|
643
|
+
Inputs:
|
|
644
|
+
- **x** (Tensor) - The input of RReLU is a Tensor of any dimension.
|
|
645
|
+
|
|
646
|
+
Outputs:
|
|
647
|
+
Tensor, after RReLU, has the same type and shape as the `x`.
|
|
648
|
+
|
|
649
|
+
Raises:
|
|
650
|
+
TypeError: If `lower` is not a float or an int.
|
|
651
|
+
TypeError: If `upper` is not a float or an int.
|
|
652
|
+
TypeError: If `x` is not a Tensor.
|
|
653
|
+
TypeError: If `x` is not a Tensor of mindspore.float16 or mindspore.float32.
|
|
654
|
+
ValueError: If `lower` is greater than upper.
|
|
655
|
+
|
|
656
|
+
Supported Platforms:
|
|
657
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
658
|
+
|
|
659
|
+
Examples:
|
|
660
|
+
>>> import mindspore
|
|
661
|
+
>>> from mindspore import Tensor, nn
|
|
662
|
+
>>> import numpy as np
|
|
663
|
+
>>> x = Tensor(np.array([[-1.0, 4.0], [2.0, 0]]), mindspore.float32)
|
|
664
|
+
>>> r_relu = nn.RReLU()
|
|
665
|
+
>>> output = r_relu(x)
|
|
666
|
+
>>> print(output)
|
|
667
|
+
[[-0.31465699 4. ]
|
|
668
|
+
[ 2. 0. ]]
|
|
669
|
+
"""
|
|
670
|
+
|
|
671
|
+
def __init__(self, lower=1 / 8, upper=1 / 3):
|
|
672
|
+
super(RReLU, self).__init__()
|
|
673
|
+
validator.check_value_type('upper', upper, [float, int], self.cls_name)
|
|
674
|
+
validator.check_value_type('lower', lower, [float, int], self.cls_name)
|
|
675
|
+
if lower > upper:
|
|
676
|
+
raise ValueError(f"For {self.cls_name}, the value of 'upper' must be greater than or equal to 'lower', "
|
|
677
|
+
f"but got upper: {upper}, lower: {lower}. ")
|
|
678
|
+
self.lower = Tensor(lower, dtype=mstype.float32)
|
|
679
|
+
self.upper = Tensor(upper, dtype=mstype.float32)
|
|
680
|
+
self.sign = P.Sign()
|
|
681
|
+
|
|
682
|
+
def construct(self, x):
|
|
683
|
+
if not isinstance(x, Tensor):
|
|
684
|
+
raise TypeError(f"For 'rrelu', the input must be a Tensor, but got {type(x)}.")
|
|
685
|
+
_size = x.shape
|
|
686
|
+
_dtype = x.dtype
|
|
687
|
+
sign_matrix = self.sign(x)
|
|
688
|
+
negative_filter = sign_matrix.clip(None, 0)
|
|
689
|
+
positive_filter = sign_matrix.clip(0, None)
|
|
690
|
+
mask = ops.uniform(_size, self.lower, self.upper).astype(_dtype)
|
|
691
|
+
negative_mask = negative_filter * mask * -1
|
|
692
|
+
total_mask = negative_mask + positive_filter
|
|
693
|
+
out = total_mask * x
|
|
694
|
+
return out
|
|
695
|
+
|
|
696
|
+
|
|
697
|
+
class SeLU(Cell):
|
|
698
|
+
r"""
|
|
699
|
+
Applies activation function SeLU (Scaled exponential Linear Unit) element-wise.
|
|
700
|
+
|
|
701
|
+
SeLU Activation Function Graph:
|
|
702
|
+
|
|
703
|
+
.. image:: ../images/SeLU.png
|
|
704
|
+
:align: center
|
|
705
|
+
|
|
706
|
+
Refer to :func:`mindspore.ops.selu` for more details.
|
|
707
|
+
|
|
708
|
+
Supported Platforms:
|
|
709
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
710
|
+
|
|
711
|
+
Examples:
|
|
712
|
+
>>> import mindspore
|
|
713
|
+
>>> from mindspore import Tensor, nn
|
|
714
|
+
>>> import numpy as np
|
|
715
|
+
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
|
|
716
|
+
>>> selu = nn.SeLU()
|
|
717
|
+
>>> output = selu(input_x)
|
|
718
|
+
>>> print(output)
|
|
719
|
+
[[-1.1113307 4.202804 -1.7575096]
|
|
720
|
+
[ 2.101402 -1.7462534 9.456309 ]]
|
|
721
|
+
"""
|
|
722
|
+
|
|
723
|
+
def __init__(self):
|
|
724
|
+
"""Initialize SeLU"""
|
|
725
|
+
super(SeLU, self).__init__()
|
|
726
|
+
self.selu = P.SeLU()
|
|
727
|
+
|
|
728
|
+
def construct(self, input_x):
|
|
729
|
+
return self.selu(input_x)
|
|
730
|
+
|
|
731
|
+
|
|
732
|
+
class SiLU(Cell):
|
|
733
|
+
r"""
|
|
734
|
+
Applies the silu linear unit function element-wise.
|
|
735
|
+
|
|
736
|
+
.. math::
|
|
737
|
+
|
|
738
|
+
\text{SiLU}(x) = x * \sigma(x),
|
|
739
|
+
|
|
740
|
+
where :math:`x_i` is an element of the input, :math:`\sigma(x)` is Sigmoid function.
|
|
741
|
+
|
|
742
|
+
.. math::
|
|
743
|
+
|
|
744
|
+
\text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)},
|
|
745
|
+
|
|
746
|
+
SiLU Activation Function Graph:
|
|
747
|
+
|
|
748
|
+
.. image:: ../images/SiLU.png
|
|
749
|
+
:align: center
|
|
750
|
+
|
|
751
|
+
Inputs:
|
|
752
|
+
- **input** (Tensor) - `input` is :math:`x` in the preceding formula.
|
|
753
|
+
Input with the data type float16 or float32. Tensor of any dimension.
|
|
754
|
+
|
|
755
|
+
Outputs:
|
|
756
|
+
Tensor, with the same type and shape as the `input`.
|
|
757
|
+
|
|
758
|
+
Raises:
|
|
759
|
+
TypeError: If dtype of `input` is neither float16 nor float32.
|
|
760
|
+
|
|
761
|
+
Supported Platforms:
|
|
762
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
763
|
+
|
|
764
|
+
Examples:
|
|
765
|
+
>>> import mindspore
|
|
766
|
+
>>> from mindspore import Tensor, nn
|
|
767
|
+
>>> import numpy as np
|
|
768
|
+
>>> input = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
|
|
769
|
+
>>> silu = nn.SiLU()
|
|
770
|
+
>>> output = silu(input)
|
|
771
|
+
>>> print(output)
|
|
772
|
+
[-0.269 1.762 -0.1423 1.762 -0.269]
|
|
773
|
+
"""
|
|
774
|
+
|
|
775
|
+
def __init__(self):
|
|
776
|
+
"""Initialize SiLU."""
|
|
777
|
+
super(SiLU, self).__init__()
|
|
778
|
+
|
|
779
|
+
def construct(self, x):
|
|
780
|
+
return ops.function.silu(x)
|
|
781
|
+
|
|
782
|
+
|
|
783
|
+
class Tanh(Cell):
|
|
784
|
+
r"""
|
|
785
|
+
Applies the Tanh function element-wise, returns a new tensor with the hyperbolic tangent of the elements of input,
|
|
786
|
+
The input is a Tensor with any valid shape.
|
|
787
|
+
|
|
788
|
+
Tanh function is defined as:
|
|
789
|
+
|
|
790
|
+
.. math::
|
|
791
|
+
tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
|
|
792
|
+
|
|
793
|
+
where :math:`x_i` is an element of the input Tensor.
|
|
794
|
+
|
|
795
|
+
Tanh Activation Function Graph:
|
|
796
|
+
|
|
797
|
+
.. image:: ../images/Tanh.png
|
|
798
|
+
:align: center
|
|
799
|
+
|
|
800
|
+
Inputs:
|
|
801
|
+
- **x** (Tensor) - Tensor of any dimension, input with data type of float16 or float32.
|
|
802
|
+
|
|
803
|
+
Outputs:
|
|
804
|
+
Tensor, with the same type and shape as the `x`.
|
|
805
|
+
|
|
806
|
+
Raises:
|
|
807
|
+
TypeError: If dtype of `x` is neither float16 nor float32.
|
|
808
|
+
|
|
809
|
+
Supported Platforms:
|
|
810
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
811
|
+
|
|
812
|
+
Examples:
|
|
813
|
+
>>> import mindspore
|
|
814
|
+
>>> from mindspore import Tensor, nn
|
|
815
|
+
>>> import numpy as np
|
|
816
|
+
>>> x = Tensor(np.array([1, 2, 3, 2, 1]), mindspore.float16)
|
|
817
|
+
>>> tanh = nn.Tanh()
|
|
818
|
+
>>> output = tanh(x)
|
|
819
|
+
>>> print(output)
|
|
820
|
+
[0.7617 0.964 0.995 0.964 0.7617]
|
|
821
|
+
"""
|
|
822
|
+
|
|
823
|
+
def __init__(self):
|
|
824
|
+
"""Initialize Tanh."""
|
|
825
|
+
super(Tanh, self).__init__()
|
|
826
|
+
self.tanh = P.Tanh()
|
|
827
|
+
|
|
828
|
+
def construct(self, x):
|
|
829
|
+
return self.tanh(x)
|
|
830
|
+
|
|
831
|
+
|
|
832
|
+
class Tanhshrink(Cell):
|
|
833
|
+
r"""
|
|
834
|
+
Applies Tanhshrink activation function element-wise and returns a new tensor.
|
|
835
|
+
|
|
836
|
+
Tanh function is defined as:
|
|
837
|
+
|
|
838
|
+
.. math::
|
|
839
|
+
tanhshrink(x_i) =x_i- \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)}
|
|
840
|
+
= x_i-\frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
|
|
841
|
+
|
|
842
|
+
where :math:`x_i` is an element of the input Tensor.
|
|
843
|
+
|
|
844
|
+
Inputs:
|
|
845
|
+
- **x** (Tensor) - Tensor of any dimension.
|
|
846
|
+
|
|
847
|
+
Outputs:
|
|
848
|
+
Tensor, with the same shape as the `x`.
|
|
849
|
+
|
|
850
|
+
Raises:
|
|
851
|
+
TypeError: If `x` is not a Tensor.
|
|
852
|
+
|
|
853
|
+
Supported Platforms:
|
|
854
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
855
|
+
|
|
856
|
+
Examples:
|
|
857
|
+
>>> import mindspore as ms
|
|
858
|
+
>>> from mindspore import Tensor, nn
|
|
859
|
+
>>> import numpy as np
|
|
860
|
+
>>> x = Tensor(np.array([1, 2, 3, 2, 1]), ms.float16)
|
|
861
|
+
>>> tanhshrink = nn.Tanhshrink()
|
|
862
|
+
>>> output = tanhshrink(x)
|
|
863
|
+
>>> print(output)
|
|
864
|
+
[0.2383 1.036 2.004 1.036 0.2383]
|
|
865
|
+
"""
|
|
866
|
+
|
|
867
|
+
def __init__(self):
|
|
868
|
+
"""Initialize Tanhshrink."""
|
|
869
|
+
super(Tanhshrink, self).__init__()
|
|
870
|
+
|
|
871
|
+
def construct(self, x):
|
|
872
|
+
return F.tanhshrink(x)
|
|
873
|
+
|
|
874
|
+
|
|
875
|
+
class Hardtanh(Cell):
|
|
876
|
+
r"""
|
|
877
|
+
Applies the Hardtanh function element-wise. The activation function is defined as:
|
|
878
|
+
|
|
879
|
+
.. math::
|
|
880
|
+
\text{Hardtanh}(x) = \begin{cases}
|
|
881
|
+
1, & \text{ if } x > 1; \\
|
|
882
|
+
-1, & \text{ if } x < -1; \\
|
|
883
|
+
x, & \text{ otherwise. }
|
|
884
|
+
\end{cases}
|
|
885
|
+
|
|
886
|
+
Linear region range :math:`[-1, 1]` can be adjusted using `min_val` and `max_val`.
|
|
887
|
+
|
|
888
|
+
Hardtanh Activation Function Graph:
|
|
889
|
+
|
|
890
|
+
.. image:: ../images/Hardtanh.png
|
|
891
|
+
:align: center
|
|
892
|
+
|
|
893
|
+
Note:
|
|
894
|
+
On Ascend, data type of float16 might lead to accidental accuracy problem.
|
|
895
|
+
|
|
896
|
+
Args:
|
|
897
|
+
min_val (Union[int, float]): Minimum value of the linear region range. Default: ``-1.0`` .
|
|
898
|
+
max_val (Union[int, float]): Maximum value of the linear region range. Default: ``1.0`` .
|
|
899
|
+
|
|
900
|
+
Inputs:
|
|
901
|
+
- **x** (Tensor) - Input Tensor with data type of float16 or float32.
|
|
902
|
+
On CPU and Ascend support dimension 0-7D. On GPU support dimension 0-4D.
|
|
903
|
+
|
|
904
|
+
Outputs:
|
|
905
|
+
Tensor, with the same dtype and shape as `x`.
|
|
906
|
+
|
|
907
|
+
Raises:
|
|
908
|
+
TypeError: If `x` is not a Tensor.
|
|
909
|
+
TypeError: If dtype of `x` is neither float16 nor float32.
|
|
910
|
+
TypeError: If dtype of `min_val` is neither float nor int.
|
|
911
|
+
TypeError: If dtype of `max_val` is neither float nor int.
|
|
912
|
+
ValueError: If `min_val` is not less than `max_val`.
|
|
913
|
+
|
|
914
|
+
Supported Platforms:
|
|
915
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
916
|
+
|
|
917
|
+
Examples:
|
|
918
|
+
>>> import mindspore
|
|
919
|
+
>>> from mindspore import Tensor, nn
|
|
920
|
+
>>> import numpy as np
|
|
921
|
+
>>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
|
|
922
|
+
>>> hardtanh = nn.Hardtanh(min_val=-1.0, max_val=1.0)
|
|
923
|
+
>>> output = hardtanh(x)
|
|
924
|
+
>>> print(output)
|
|
925
|
+
[-1. -1. 0. 1. 1.]
|
|
926
|
+
"""
|
|
927
|
+
|
|
928
|
+
def __init__(self, min_val=-1.0, max_val=1.0):
|
|
929
|
+
"""Initialize Hardtanh."""
|
|
930
|
+
super(Hardtanh, self).__init__()
|
|
931
|
+
self.min_val = min_val
|
|
932
|
+
self.max_val = max_val
|
|
933
|
+
if self.min_val >= self.max_val:
|
|
934
|
+
raise ValueError(f"For Hardtanh, min_val should be less than max_val,"
|
|
935
|
+
f"but got {self.min_val} and {self.max_val}")
|
|
936
|
+
|
|
937
|
+
def construct(self, x):
|
|
938
|
+
return F.hardtanh(x, self.min_val, self.max_val)
|
|
939
|
+
|
|
940
|
+
|
|
941
|
+
class GELU(Cell):
|
|
942
|
+
r"""
|
|
943
|
+
Applies GELU function to each element of the input. The input is a Tensor with any valid shape.
|
|
944
|
+
|
|
945
|
+
GELU is defined as:
|
|
946
|
+
|
|
947
|
+
.. math::
|
|
948
|
+
|
|
949
|
+
GELU(x_i) = x_i*P(X < x_i),
|
|
950
|
+
|
|
951
|
+
where :math:`P` is the cumulative distribution function
|
|
952
|
+
of standard Gaussian distribution and :math:`x_i` is the element of the input.
|
|
953
|
+
|
|
954
|
+
GELU Activation Function Graph:
|
|
955
|
+
|
|
956
|
+
.. image:: ../images/GELU.png
|
|
957
|
+
:align: center
|
|
958
|
+
|
|
959
|
+
Args:
|
|
960
|
+
approximate (bool): Whether to enable approximation. Default: ``True`` .
|
|
961
|
+
|
|
962
|
+
If `approximate` is ``True``, The gaussian error linear activation is:
|
|
963
|
+
|
|
964
|
+
:math:`0.5 * x * (1 + tanh(\sqrt(2 / \pi) * (x + 0.044715 * x^3)))`
|
|
965
|
+
|
|
966
|
+
else, it is:
|
|
967
|
+
|
|
968
|
+
:math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt(2)))`, where P(X) ~ N(0, 1).
|
|
969
|
+
|
|
970
|
+
Inputs:
|
|
971
|
+
- **x** (Tensor) - The input of GELU with data type of float16, float32, or float64.
|
|
972
|
+
The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
|
|
973
|
+
|
|
974
|
+
Outputs:
|
|
975
|
+
Tensor, with the same type and shape as the `x`.
|
|
976
|
+
|
|
977
|
+
Note:
|
|
978
|
+
when calculating the input gradient of GELU with an input value of infinity, there are differences
|
|
979
|
+
in the output of the backward between ``Ascend`` and ``GPU``.
|
|
980
|
+
when x is -inf, the computation result of ``Ascend`` is 0, and the computation result of ``GPU`` is Nan.
|
|
981
|
+
when x is inf, the computation result of ``Ascend`` is dy, and the computation result of ``GPU`` is Nan.
|
|
982
|
+
In mathematical terms, the result of Ascend has higher precision.
|
|
983
|
+
|
|
984
|
+
Raises:
|
|
985
|
+
TypeError: If dtype of `x` is not one of float16, float32, or float64.
|
|
986
|
+
|
|
987
|
+
Supported Platforms:
|
|
988
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
989
|
+
|
|
990
|
+
Examples:
|
|
991
|
+
>>> import mindspore
|
|
992
|
+
>>> from mindspore import Tensor, nn
|
|
993
|
+
>>> import numpy as np
|
|
994
|
+
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
|
|
995
|
+
>>> gelu = nn.GELU()
|
|
996
|
+
>>> output = gelu(x)
|
|
997
|
+
>>> print(output)
|
|
998
|
+
[[-1.5880802e-01 3.9999299e+00 -3.1077917e-21]
|
|
999
|
+
[ 1.9545976e+00 -2.2918017e-07 9.0000000e+00]]
|
|
1000
|
+
>>> gelu = nn.GELU(approximate=False)
|
|
1001
|
+
>>> # CPU not support "approximate=False", using "approximate=True" instead
|
|
1002
|
+
>>> output = gelu(x)
|
|
1003
|
+
>>> print(output)
|
|
1004
|
+
[[-1.5865526e-01 3.9998732e+00 -0.0000000e+00]
|
|
1005
|
+
[ 1.9544997e+00 -1.4901161e-06 9.0000000e+00]]
|
|
1006
|
+
"""
|
|
1007
|
+
|
|
1008
|
+
def __init__(self, approximate=True):
|
|
1009
|
+
"""Initialize GELU."""
|
|
1010
|
+
super(GELU, self).__init__()
|
|
1011
|
+
validator.check_bool(approximate, 'approximate', self.cls_name)
|
|
1012
|
+
self.approximate = 'tanh'
|
|
1013
|
+
if not approximate:
|
|
1014
|
+
self.approximate = 'none'
|
|
1015
|
+
|
|
1016
|
+
def construct(self, x):
|
|
1017
|
+
return ops.gelu(x, approximate=self.approximate)
|
|
1018
|
+
|
|
1019
|
+
|
|
1020
|
+
class FastGelu(Cell):
|
|
1021
|
+
r"""
|
|
1022
|
+
Applies FastGelu function to each element of the input. The input is a Tensor with any valid shape.
|
|
1023
|
+
|
|
1024
|
+
FastGelu is defined as:
|
|
1025
|
+
|
|
1026
|
+
.. math::
|
|
1027
|
+
FastGelu(x_i) = \frac {x_i} {1 + \exp(-1.702 * \left| x_i \right|)} *
|
|
1028
|
+
\exp(0.851 * (x_i - \left| x_i \right|))
|
|
1029
|
+
|
|
1030
|
+
where :math:`x_i` is the element of the input.
|
|
1031
|
+
|
|
1032
|
+
FastGelu Activation Function Graph:
|
|
1033
|
+
|
|
1034
|
+
.. image:: ../images/FastGelu.png
|
|
1035
|
+
:align: center
|
|
1036
|
+
|
|
1037
|
+
Inputs:
|
|
1038
|
+
- **x** (Tensor) - The input of FastGelu with data type of float16 or float32.
|
|
1039
|
+
The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
|
|
1040
|
+
|
|
1041
|
+
Outputs:
|
|
1042
|
+
Tensor, with the same type and shape as the `x`.
|
|
1043
|
+
|
|
1044
|
+
Raises:
|
|
1045
|
+
TypeError: If dtype of `x` is neither float16 nor float32.
|
|
1046
|
+
|
|
1047
|
+
Supported Platforms:
|
|
1048
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1049
|
+
|
|
1050
|
+
Examples:
|
|
1051
|
+
>>> import mindspore
|
|
1052
|
+
>>> from mindspore import Tensor, nn
|
|
1053
|
+
>>> import numpy as np
|
|
1054
|
+
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
|
|
1055
|
+
>>> fast_gelu = nn.FastGelu()
|
|
1056
|
+
>>> output = fast_gelu(x)
|
|
1057
|
+
>>> print(output)
|
|
1058
|
+
[[-1.5418735e-01 3.9921875e+00 -9.7473649e-06]
|
|
1059
|
+
[ 1.9375000e+00 -1.0052517e-03 8.9824219e+00]]
|
|
1060
|
+
"""
|
|
1061
|
+
|
|
1062
|
+
def __init__(self):
|
|
1063
|
+
"""Initialize FastGelu."""
|
|
1064
|
+
super(FastGelu, self).__init__()
|
|
1065
|
+
self.fast_gelu = P.FastGeLU()
|
|
1066
|
+
|
|
1067
|
+
def construct(self, x):
|
|
1068
|
+
return self.fast_gelu(x)
|
|
1069
|
+
|
|
1070
|
+
|
|
1071
|
+
class Sigmoid(Cell):
    r"""
    Applies sigmoid activation function element-wise.

    Sigmoid function is defined as:

    .. math::

        \text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)},

    where :math:`x_i` is an element of `x`.

    Sigmoid Activation Function Graph:

    .. image:: ../images/Sigmoid.png
        :align: center

    Inputs:
        - **input** (Tensor) - `input` is :math:`x` in the preceding formula. Tensor of any dimension;
          the data type is float16, float32, float64, complex64 or complex128.

    Outputs:
        Tensor, with the same type and shape as the `input`.

    Raises:
        TypeError: If dtype of `input` is not float16, float32, float64, complex64 or complex128.
        TypeError: If `input` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> sigmoid = nn.Sigmoid()
        >>> output = sigmoid(input)
        >>> print(output)
        [0.2688 0.11914 0.5 0.881 0.7305 ]
    """

    def __init__(self):
        """Initialize Sigmoid."""
        super(Sigmoid, self).__init__()
        self.sigmoid = P.Sigmoid()

    def construct(self, x):
        return self.sigmoid(x)


class Softsign(Cell):
    r"""
    Applies softsign activation function element-wise.

    Softsign Activation Function Graph:

    .. image:: ../images/Softsign.png
        :align: center

    Refer to :func:`mindspore.ops.softsign` for more details.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
        >>> softsign = nn.Softsign()
        >>> output = softsign(x)
        >>> print(output)
        [ 0. -0.5 0.6666667 0.9677419 -0.9677419]
    """

    def __init__(self):
        """Initialize Softsign."""
        super(Softsign, self).__init__()
        self.softsign = P.Softsign()

    def construct(self, x):
        return self.softsign(x)


class PReLU(Cell):
    r"""
    Applies PReLU activation function element-wise.

    PReLU is defined as:

    .. math::

        PReLU(x_i)= \max(0, x_i) + w * \min(0, x_i),

    where :math:`x_i` is an element of a channel of the input.

    Here :math:`w` is a learnable parameter with a default initial value 0.25.
    Parameter :math:`w` has dimensionality equal to the argument `channel`. If called without the
    `channel` argument, a single parameter :math:`w` will be shared across all channels.

    PReLU Activation Function Graph:

    .. image:: ../images/PReLU.png
        :align: center

    Args:
        channel (int): The number of elements in parameter :math:`w`.
            It must be 1 or the number of channels of the input tensor `x`. Default: ``1`` .
        w (Union[float, list, Tensor]): The initial value of the parameter. It could be a float, a list of
            floats or a tensor with the same dtype as the input tensor `x`. Default: ``0.25`` .

    Inputs:
        - **x** (Tensor) - The input of PReLU with data type of float16 or float32.
          The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.

    Outputs:
        Tensor, with the same dtype and shape as the `x`.

    Raises:
        TypeError: If `channel` is not an int.
        TypeError: If `w` is not a float, a list of floats or a float Tensor.
        TypeError: If dtype of `x` is neither float16 nor float32.
        ValueError: If `x` is a 0-D or 1-D Tensor on Ascend.
        ValueError: If `channel` is less than 1.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
        >>> prelu = nn.PReLU()
        >>> output = prelu(x)
        >>> print(output)
        [[[[0.1 0.6]
           [0.9 0.9]]]]

    """

    @cell_attr_register(attrs="")
    def __init__(self, channel=1, w=0.25):
        """Initialize PReLU."""
        super(PReLU, self).__init__()
        validator.check_positive_int(channel, 'channel', self.cls_name)
        if isinstance(w, (float, np.float32)):
            # A scalar slope is broadcast to one value per channel.
            tmp = np.empty((channel,), dtype=np.float32)
            tmp.fill(w)
            w = Tensor(tmp, dtype=mstype.float32)
        elif isinstance(w, list):
            if len(w) != channel:
                raise ValueError(f"For '{self.cls_name}', the length of 'w' must be equal to the 'channel' when "
                                 f"the 'w' is a list, but got the length of 'w': {len(w)}, the 'channel': {channel}.")

            for i in w:
                if not isinstance(i, (float, np.float32)):
                    raise ValueError(f"For '{self.cls_name}', all elements in 'w' must be "
                                     f"float when the 'w' is a list, but got {i}.")
            w = Tensor(w, dtype=mstype.float32)
        elif isinstance(w, Tensor):
            if w.dtype not in (mstype.float16, mstype.float32):
                raise ValueError(f"For '{self.cls_name}', the dtype of 'w' must be float16 or "
                                 f"float32 when the 'w' is a tensor, but got {w.dtype}.")
            if len(w.shape) != 1 or w.shape[0] != channel:
                raise ValueError(f"For '{self.cls_name}', the dimension of 'w' must be 1, and the elements number "
                                 f"should be equal to the 'channel' when the 'w' is a tensor, "
                                 f"but got 'w' shape {w.shape}, the 'channel' {channel}.")
        else:
            raise TypeError(f"For '{self.cls_name}', the 'w' only supported float, list and tensor, "
                            f"but got {type(w).__name__}.")
        self.w = Parameter(w, name='a')
        self.prelu = P.PReLU()

    def construct(self, x):
        return self.prelu(x, F.cast(self.w, x.dtype))


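# A minimal per-channel sketch (the tensor values here are illustrative
# assumptions): `channel` must match the channel dimension of `x`, and a list
# `w` supplies one learnable slope per channel; the output keeps the input shape.
#   >>> prelu = nn.PReLU(channel=3, w=[0.1, 0.2, 0.3])
#   >>> y = prelu(Tensor(np.ones((1, 3, 4, 4)), mindspore.float32))
#   >>> y.shape
#   (1, 3, 4, 4)

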
class PReLUExt(Cell):
    r"""
    Applies PReLU activation function element-wise.

    PReLU is defined as:

    .. math::

        PReLU(x_i)= \max(0, x_i) + w * \min(0, x_i),

    where :math:`x_i` is an element of a channel of the input.

    Here :math:`w` is a learnable parameter with a default initial value 0.25.
    Parameter :math:`w` has dimensionality equal to the argument `num_parameters`. If called without
    the `num_parameters` argument, a single parameter :math:`w` will be shared across all channels.

    PReLU Activation Function Graph:

    .. image:: ../images/PReLU2.png
        :align: center

    .. note::
        The channel dim is the 2nd dim of the input. When the input has fewer than 2 dims, there is
        no channel dim and the number of channels is 1.

    Args:
        num_parameters (int): The number of `w` to learn. Although it takes an int as input,
            there are only two legitimate values: 1, or the number of channels of the Tensor `input`.
            Default: ``1`` .
        init (float): The initial value of `w`. Default: ``0.25`` .
        dtype (mindspore.dtype, optional): The type of `w`. Default: ``None`` . Supported data type
            is {float16, float32, bfloat16}.

    Inputs:
        - **input** (Tensor) - The input of PReLU.

    Outputs:
        Tensor, with the same dtype and shape as the `input`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
        >>> prelu = nn.PReLUExt()
        >>> output = prelu(x)
        >>> print(output)
        [[[[0.1 0.6]
           [0.9 0.9]]]]

    """

    def __init__(self, num_parameters=1, init=0.25, dtype=None):
        """Initialize PReLUExt."""
        super(PReLUExt, self).__init__()
        tmp = np.empty((num_parameters,), dtype=np.float32)
        tmp.fill(init)
        w = Tensor(tmp, dtype=dtype)
        self.weight = Parameter(w, name='weight')

    def construct(self, input):
        return ops.prelu(input, self.weight)


class HSwish(Cell):
    r"""
    Applies Hard Swish activation function element-wise.

    Hard swish is defined as:

    .. math::
        \text{Hardswish}(input) =
        \begin{cases}
        0, & \text{ if } input \leq -3, \\
        input, & \text{ if } input \geq +3, \\
        input*(input + 3)/6, & \text{ otherwise }
        \end{cases}

    HSwish Activation Function Graph:

    .. image:: ../images/HSwish.png
        :align: center

    Inputs:
        - **input** (Tensor) - The input of HSwish.

    Outputs:
        Tensor, with the same type and shape as the `input`.

    Raises:
        TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is neither int nor float.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> hswish = nn.HSwish()
        >>> result = hswish(input)
        >>> print(result)
        [-0.3333 -0.3333 0. 1.667 0.6665]
    """

    def __init__(self):
        """Initialize HSwish."""
        super(HSwish, self).__init__()
        self.hswish = P.HSwish()

    def construct(self, input):
        return self.hswish(input)


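# Relation sketch, following directly from the two piecewise definitions in
# this file: Hard Swish is the input multiplied by Hard Sigmoid, so
# HSwish()(x) equals x * HSigmoid()(x) element-wise (up to floating-point
# rounding). A quick way to see both side by side:
#   >>> x = Tensor(np.array([-4.0, -1.0, 0.0, 1.0, 4.0]), mindspore.float32)
#   >>> print(nn.HSwish()(x), x * nn.HSigmoid()(x))

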
class HSigmoid(Cell):
    r"""
    Applies Hard Sigmoid activation function element-wise.

    Hard Sigmoid is defined as:

    .. math::
        \text{Hardsigmoid}(input) =
        \begin{cases}
        0, & \text{ if } input \leq -3, \\
        1, & \text{ if } input \geq +3, \\
        input/6 + 1/2, & \text{ otherwise }
        \end{cases}

    HSigmoid Activation Function Graph:

    .. image:: ../images/HSigmoid.png
        :align: center

    Inputs:
        - **input** (Tensor) - The input of HSigmoid.

    Outputs:
        Tensor, with the same type and shape as the `input`.

    Raises:
        TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is neither int nor float.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> hsigmoid = nn.HSigmoid()
        >>> result = hsigmoid(input)
        >>> print(result)
        [0.3333 0.1666 0.5 0.8335 0.6665]
    """

    def __init__(self):
        """Initialize HSigmoid."""
        super(HSigmoid, self).__init__()
        self.hsigmoid = P.HSigmoid()

    def construct(self, input):
        return self.hsigmoid(input)


class LogSigmoid(Cell):
    r"""
    Applies logsigmoid activation element-wise. The input is a Tensor with any valid shape.

    Logsigmoid is defined as:

    .. math::
        \text{logsigmoid}(x_{i}) = \log(\frac{1}{1 + \exp(-x_i)}),

    where :math:`x_{i}` is an element of the input.

    LogSigmoid Activation Function Graph:

    .. image:: ../images/LogSigmoid.png
        :align: center

    Inputs:
        - **x** (Tensor) - The input of LogSigmoid with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> net = nn.LogSigmoid()
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> output = net(x)
        >>> print(output)
        [-0.31326166 -0.12692806 -0.04858734]
    """

    def __init__(self):
        """Initialize LogSigmoid."""
        super(LogSigmoid, self).__init__()
        self.mul = P.Mul()
        self.exp = P.Exp()
        self.add = P.Add()
        self.rec = P.Reciprocal()
        self.log = P.Log()

    def construct(self, input_x):
        # Computes log(1 / (1 + exp(-x))), i.e. log(sigmoid(x)), from primitive ops.
        neg_input = self.mul(input_x, -1)
        exp_neg_input = self.exp(neg_input)
        exp_neg_input_1 = self.add(exp_neg_input, 1)
        rec_exp_neg_input_1 = self.rec(exp_neg_input_1)
        ret = self.log(rec_exp_neg_input_1)
        return ret


class LRN(Cell):
    r"""
    Local Response Normalization.

    .. warning::
        LRN is deprecated on Ascend due to a potential accuracy problem. It's recommended to use other
        normalization methods, e.g. :class:`mindspore.nn.BatchNorm1d` ,
        :class:`mindspore.nn.BatchNorm2d` , :class:`mindspore.nn.BatchNorm3d`.

    Refer to :func:`mindspore.ops.lrn` for more details.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> input_x = Tensor(np.array([[[[0.1], [0.2]],
        ...                             [[0.3], [0.4]]]]), mindspore.float32)
        >>> output = nn.LRN()(input_x)
        >>> print(output)
        [[[[0.09534626]
           [0.1825742 ]]
          [[0.2860388 ]
           [0.3651484 ]]]]
    """

    def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
        """Initialize LRN."""
        super(LRN, self).__init__()
        self.lrn_op = NN_OPS.LRN(depth_radius, bias, alpha, beta, norm_region)

    def construct(self, input_x):
        return self.lrn_op(input_x)


class SoftShrink(Cell):
    r"""
    Applies the SoftShrink function element-wise.

    .. math::
        \text{SoftShrink}(x) =
        \begin{cases}
        x - \lambda, & \text{ if } x > \lambda \\
        x + \lambda, & \text{ if } x < -\lambda \\
        0, & \text{ otherwise }
        \end{cases}

    SoftShrink Activation Function Graph:

    .. image:: ../images/Softshrink.png
        :align: center

    Args:
        lambd (number, optional): The threshold :math:`\lambda` defined by the Soft Shrink formula.
            It should be greater than or equal to 0. Default: ``0.5`` .

    Inputs:
        - **input** (Tensor) - The input of Soft Shrink. Supported dtypes:

          - Ascend: float16, float32, bfloat16.
          - CPU/GPU: float16, float32.

    Outputs:
        Tensor, the same shape and data type as the input.

    Raises:
        TypeError: If `lambd` is not a float, int or bool.
        TypeError: If `input` is not a tensor.
        TypeError: If dtype of `input` is not float16, float32 or bfloat16.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> input = Tensor(np.array([[ 0.5297, 0.7871, 1.1754], [ 0.7836, 0.6218, -1.1542]]), mindspore.float16)
        >>> softshrink = nn.SoftShrink()
        >>> output = softshrink(input)
        >>> print(output)
        [[ 0.02979 0.287 0.676 ]
         [ 0.2837 0.1216 -0.6543 ]]
    """

    def __init__(self, lambd=0.5):
        super(SoftShrink, self).__init__()
        self.softshrink = P.SoftShrink(lambd)

    def construct(self, input):
        output = self.softshrink(input)
        return output


class HShrink(Cell):
    r"""
    Applies Hard Shrink activation function element-wise.

    The formula is defined as follows:

    .. math::
        \text{HardShrink}(x) =
        \begin{cases}
        x, & \text{ if } x > \lambda \\
        x, & \text{ if } x < -\lambda \\
        0, & \text{ otherwise }
        \end{cases}

    HShrink Activation Function Graph:

    .. image:: ../images/HShrink.png
        :align: center

    Args:
        lambd (number, optional): The threshold :math:`\lambda` defined by the Hard Shrink formula. Default: ``0.5`` .

    Inputs:
        - **input** (Tensor) - The input of Hard Shrink. Supported dtypes:

          - Ascend: float16, float32, bfloat16.
          - CPU/GPU: float16, float32.

    Outputs:
        Tensor, the same shape and data type as the input.

    Raises:
        TypeError: If `lambd` is not a float, int or bool.
        TypeError: If `input` is not a tensor.
        TypeError: If dtype of `input` is not float16, float32 or bfloat16.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> input = Tensor(np.array([[0.5, 1, 2.0], [0.0533, 0.0776, -2.1233]]), mindspore.float32)
        >>> hshrink = nn.HShrink()
        >>> output = hshrink(input)
        >>> print(output)
        [[ 0. 1. 2. ]
         [ 0. 0. -2.1233]]
    """

    def __init__(self, lambd=0.5):
        super(HShrink, self).__init__()
        self.hshrink = P.HShrink(lambd)

    def construct(self, input):
        return self.hshrink(input)


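# A brief comparison sketch (input values assumed): with the same `lambd`,
# HShrink keeps the surviving elements unchanged, while SoftShrink also moves
# them toward zero by `lambd`.
#   >>> x = Tensor(np.array([1.0, 0.2, -1.0]), mindspore.float32)
#   >>> print(nn.HShrink(0.5)(x))    # elements: 1.0, 0.0, -1.0
#   >>> print(nn.SoftShrink(0.5)(x)) # elements: 0.5, 0.0, -0.5

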
class Threshold(Cell):
    r"""
    Thresholds each element of the input Tensor.

    The formula is defined as follows:

    .. math::
        y =
        \begin{cases}
        x, &\text{ if } x > \text{threshold} \\
        \text{value}, &\text{ otherwise }
        \end{cases}

    Args:
        threshold (Union[int, float]): The value to threshold at.
        value (Union[int, float]): The value to replace with when the element is not greater than `threshold`.

    Inputs:
        - **input_x** (Tensor) - The input of Threshold with data type of float16 or float32.

    Outputs:
        Tensor, the same shape and data type as the `input_x`.

    Raises:
        TypeError: If `threshold` is not a float or an int.
        TypeError: If `value` is not a float or an int.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> m = nn.Threshold(0.1, 20)
        >>> inputs = Tensor([0.1, 0.2, 0.3], mindspore.float32)
        >>> outputs = m(inputs)
        >>> print(outputs)
        [ 20.0 0.2 0.3]
    """

    def __init__(self, threshold, value):
        """Initialize Threshold."""
        super(Threshold, self).__init__()
        self.threshold = threshold
        self.value = value

    def construct(self, input_x):
        return F.threshold(input_x, self.threshold, self.value)


class Mish(Cell):
    r"""
    Computes MISH (A Self Regularized Non-Monotonic Neural Activation Function)
    of input tensors element-wise.

    Refer to :func:`mindspore.ops.mish` for more details.

    Mish Activation Function Graph:

    .. image:: ../images/Mish.png
        :align: center

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> mish = nn.Mish()
        >>> output = mish(x)
        >>> print(output)
        [[-3.03401530e-01 3.99741292e+00 -2.68321624e-03]
         [ 1.94395900e+00 -3.35762873e-02 9.00000000e+00]]
    """

    def __init__(self):
        """Initialize Mish."""
        super().__init__("Mish")
        self.mish = NN_OPS.Mish()

    def construct(self, input_x):
        return self.mish(input_x)


class GLU(Cell):
    r"""
    The gated linear unit function.

    .. math::
        {GLU}(a, b)= a \otimes \sigma(b)

    where :math:`a` is the first half of the input matrices and :math:`b` is the second half.

    Here :math:`\sigma` is the sigmoid function, and :math:`\otimes` is the Hadamard product.

    Args:
        axis (int): The axis to split the input. Default: ``-1`` , the last axis in `x`.

    Inputs:
        - **x** (Tensor) - :math:`(\ast_1, N, \ast_2)` where `*` means any number of additional dimensions.

    Outputs:
        Tensor, the same dtype as the `x`, with the shape :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore as ms
        >>> m = ms.nn.GLU()
        >>> input = ms.Tensor([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]])
        >>> output = m(input)
        >>> print(output)
        [[0.05744425 0.11973753]
         [0.33409387 0.41398472]]
    """

    def __init__(self, axis=-1):
        """Initialize GLU."""
        super().__init__("GLU")
        self.dim = axis
        self.split = P.Split(axis=axis, output_num=2)
        self.sigmoid = P.Sigmoid()

    def construct(self, x):
        # Split along `axis` into two halves, gate the first half with sigmoid of the second.
        x1, x2 = self.split(x)
        x2 = self.sigmoid(x2)
        return x1 * x2


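# A small worked sketch of the split (shapes only, values assumed): with the
# default axis=-1, a (2, 4) input is halved along the last axis and the output
# equals x[..., :2] * sigmoid(x[..., 2:]), giving shape (2, 2).
#   >>> glu = ms.nn.GLU()
#   >>> out = glu(ms.Tensor([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]))
#   >>> out.shape
#   (2, 2)

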
_activation = {
    'softmin': Softmin,
    'softmax': Softmax,
    'softmax2d': Softmax2d,
    'logsoftmax': LogSoftmax,
    'logsoftmaxExt': LogSoftmaxExt,
    'relu': ReLU,
    'relu6': ReLU6,
    'rrelu': RReLU,
    'silu': SiLU,
    'tanh': Tanh,
    'tanhshrink': Tanhshrink,
    'hardtanh': Hardtanh,
    'gelu': GELU,
    'fast_gelu': FastGelu,
    'elu': ELU,
    'sigmoid': Sigmoid,
    'softsign': Softsign,
    'prelu': PReLU,
    'preluExt': PReLUExt,
    'leakyrelu': LeakyReLU,
    'hswish': HSwish,
    'hsigmoid': HSigmoid,
    'logsigmoid': LogSigmoid,
    'softshrink': SoftShrink,
    'hshrink': HShrink,
    'threshold': Threshold,
    'mish': Mish,
}


def get_activation(name, prim_name=None):
    """
    Gets the activation function.

    Args:
        name (str): The name of the activation function. If ``None``, ``None`` is returned.
        prim_name (Union[str, None]): The name of the primitive. Default: ``None`` .

    Returns:
        Function, the activation function.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.nn as nn
        >>> sigmoid = nn.get_activation('sigmoid')
        >>> print(sigmoid)
        Sigmoid<>
    """
    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
    if name is None:
        return None

    if name not in _activation:
        raise KeyError(f"{msg_prefix} 'name' must be in {list(_activation.keys())}, but got {name}.")
    return _activation[name]()
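

# A minimal sketch of how the registry above is typically consumed inside a
# layer (the wrapper `MyDense` and its structure are illustrative assumptions,
# not part of this module): `get_activation` returns an instantiated Cell, or
# None when `name` is None.
#   >>> class MyDense(nn.Cell):
#   ...     def __init__(self, activation='relu'):
#   ...         super().__init__()
#   ...         self.act = nn.get_activation(activation)
#   ...     def construct(self, x):
#   ...         return self.act(x) if self.act is not None else x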
|