mindspore-2.4.10-cp311-cp311-win_amd64.whl → mindspore-2.6.0-cp311-cp311-win_amd64.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +13 -6
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -38
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +83 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +47 -198
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +229 -99
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +11 -5
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +138 -43
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +6 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -2
- mindspore/common/_stub_tensor.py +30 -14
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +4760 -0
- mindspore/common/api.py +480 -372
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +39 -36
- mindspore/common/dump.py +9 -6
- mindspore/common/file_system.py +9 -1
- mindspore/common/generator.py +5 -0
- mindspore/common/hook_handle.py +6 -2
- mindspore/common/initializer.py +13 -10
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +9 -3
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +135 -52
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +975 -1981
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +52 -2
- mindspore/communication/comm_func.py +240 -181
- mindspore/communication/management.py +95 -26
- mindspore/context.py +324 -573
- mindspore/dataset/__init__.py +65 -37
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +87 -6
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +6 -5
- mindspore/dataset/engine/datasets.py +292 -267
- mindspore/dataset/engine/datasets_audio.py +22 -8
- mindspore/dataset/engine/datasets_standard_format.py +46 -27
- mindspore/dataset/engine/datasets_text.py +78 -48
- mindspore/dataset/engine/datasets_user_defined.py +183 -117
- mindspore/dataset/engine/datasets_vision.py +120 -44
- mindspore/dataset/engine/iterators.py +283 -63
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +289 -43
- mindspore/dataset/engine/serializer_deserializer.py +3 -2
- mindspore/dataset/engine/validators.py +53 -11
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +31 -14
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +153 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +123 -0
- mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +170 -0
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +209 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +6 -6
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +7 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +73 -46
- mindspore/experimental/optim/radam.py +34 -31
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -53
- mindspore/hal/event.py +52 -52
- mindspore/hal/memory.py +179 -120
- mindspore/hal/stream.py +150 -109
- mindspore/include/api/context.h +0 -1
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +50 -0
- mindspore/mindrecord/__init__.py +21 -8
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +798 -761
- mindspore/mint/distributed/__init__.py +70 -4
- mindspore/mint/distributed/distributed.py +2679 -44
- mindspore/mint/linalg/__init__.py +8 -0
- mindspore/mint/nn/__init__.py +743 -22
- mindspore/mint/nn/functional.py +716 -23
- mindspore/mint/nn/layer/__init__.py +21 -4
- mindspore/mint/nn/layer/_functions.py +334 -0
- mindspore/mint/nn/layer/activation.py +276 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +933 -0
- mindspore/mint/nn/layer/normalization.py +223 -28
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +235 -0
- mindspore/mint/optim/__init__.py +3 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/mint/special/__init__.py +2 -1
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1373 -192
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +29 -27
- mindspore/nn/layer/basic.py +51 -35
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +12 -11
- mindspore/nn/layer/normalization.py +56 -49
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +120 -42
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +59 -36
- mindspore/nn/learning_rate_schedule.py +8 -4
- mindspore/nn/loss/loss.py +58 -55
- mindspore/nn/optim/ada_grad.py +7 -5
- mindspore/nn/optim/adadelta.py +11 -9
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +19 -15
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +3 -3
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +11 -9
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +181 -122
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +6 -7
- mindspore/numpy/array_creations.py +63 -65
- mindspore/numpy/array_ops.py +149 -144
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +17 -18
- mindspore/numpy/utils_const.py +5 -6
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +5 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
- mindspore/ops/_vmap/vmap_array_ops.py +52 -25
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
- mindspore/ops/_vmap/vmap_math_ops.py +15 -16
- mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +258 -46
- mindspore/ops/auto_generate/gen_extend_func.py +757 -185
- mindspore/ops/auto_generate/gen_ops_def.py +4197 -2243
- mindspore/ops/auto_generate/gen_ops_prim.py +16976 -6055
- mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +20 -25
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +40 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +2089 -2403
- mindspore/ops/function/clip_func.py +80 -23
- mindspore/ops/function/debug_func.py +57 -57
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +2 -2
- mindspore/ops/function/linalg_func.py +47 -78
- mindspore/ops/function/math_func.py +4351 -3813
- mindspore/ops/function/nn_func.py +1712 -637
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +18 -84
- mindspore/ops/function/random_func.py +452 -387
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +6 -6
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +23 -7
- mindspore/ops/functional_overload.py +1548 -0
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +23 -15
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -43
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +43 -84
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +81 -324
- mindspore/ops/operations/comm_ops.py +154 -108
- mindspore/ops/operations/custom_ops.py +298 -87
- mindspore/ops/operations/debug_ops.py +157 -59
- mindspore/ops/operations/inner_ops.py +7 -5
- mindspore/ops/operations/linalg_ops.py +1 -57
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +928 -180
- mindspore/ops/operations/math_ops.py +32 -234
- mindspore/ops/operations/nn_ops.py +212 -531
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +66 -53
- mindspore/ops/tensor_method.py +1895 -0
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
- mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
- mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
- mindspore/ops_generate/api/functions_cc_generator.py +237 -0
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/api/op_api_proto.py +235 -0
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/base_generator.py +11 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/common/gen_utils.py +348 -0
- mindspore/ops_generate/common/op_proto.py +473 -0
- mindspore/ops_generate/common/template.py +523 -0
- mindspore/ops_generate/gen_ops.py +22 -1069
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +296 -0
- mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +7 -3
- mindspore/parallel/_auto_parallel_context.py +159 -40
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +107 -5
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +199 -23
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +700 -35
- mindspore/parallel/cluster/process_entity/_api.py +276 -50
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +21 -4
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +25 -14
- mindspore/parallel/shard.py +137 -59
- mindspore/parallel/transform_safetensors.py +364 -305
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +22 -5
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +109 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +186 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +221 -0
- mindspore/profiler/common/path_manager.py +395 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +500 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +251 -0
- mindspore/profiler/common/profiler_path_manager.py +179 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +341 -75
- mindspore/profiler/envprofiler.py +163 -0
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +242 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +335 -0
- mindspore/profiler/profiler.py +1073 -90
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +118 -0
- mindspore/profiler/schedule.py +243 -0
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +2 -3
- mindspore/run_check/_check_version.py +27 -20
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +177 -0
- mindspore/runtime/memory.py +416 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +96 -27
- mindspore/train/amp.py +9 -5
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +53 -55
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +103 -68
- mindspore/train/callback/_history.py +8 -5
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +52 -19
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +228 -108
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +15 -16
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +11 -10
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +174 -46
- mindspore/train/model.py +269 -136
- mindspore/train/serialization.py +622 -978
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +2 -3
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dryrun.py +140 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +552 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/METADATA +3 -3
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/RECORD +587 -418
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +1 -1
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_aclnn_implement.py +0 -263
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/gen_pyboost_func.py +0 -1052
- mindspore/ops_generate/gen_utils.py +0 -209
- mindspore/ops_generate/op_proto.py +0 -145
- mindspore/ops_generate/template.py +0 -261
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/numpy/math_ops.py
CHANGED
@@ -23,9 +23,6 @@ import sys
 from numpy import dtype as nptype
 
 import mindspore.ops as ops
-from mindspore.ops import operations as P
-from mindspore.ops import functional as F
-from mindspore.ops import composite as C
 from mindspore.ops.primitive import constexpr, _primexpr
 from mindspore.common import dtype as mstype
 from mindspore.common import Tensor
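The import hunk above is the heart of this file's change: the `P`/`F`/`C` aliases for the `operations`, `functional`, and `composite` submodules are dropped in favor of the single public `mindspore.ops` namespace. A minimal sketch of the same migration in user code, assuming only that `mindspore.ops` re-exports both the functional and Primitive APIs (which every `+` line below relies on):

    # Old style (2.4.10): three aliases for three submodules
    # from mindspore.ops import operations as P
    # from mindspore.ops import functional as F
    # res = F.tensor_add(P.ReduceSum()(x, 0), y)

    # New style (2.6.0): one namespace
    import mindspore.ops as ops
    # res = ops.tensor_add(ops.ReduceSum()(x, 0), y)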
@@ -54,20 +51,20 @@ from mindspore.ops.composite.multitype_ops._compile_utils import reduce_
 ZERO_TENSOR = asarray_const(0)
 
 
-_mean_keepdims = P.ReduceMean(True)
-_matmul = P.MatMul(False, False)
-_matmul_t = P.MatMul(False, True)
-_reduce_sum_default = P.ReduceSum()
-_reduce_sum_keepdims = P.ReduceSum(True)
-_reduce_min_default = P.ReduceMin()
-_reduce_min_keepdims = P.ReduceMin(True)
-_reduce_max_default = P.ReduceMax()
-_reduce_max_keepdims = P.ReduceMax(True)
-_cumsum_default = P.CumSum()
-_concat = P.Concat(-1)
-_cumprod_default = P.CumProd()
-_round = P.Round()
-_rint = P.Rint()
+_mean_keepdims = ops.ReduceMean(True)
+_matmul = ops.MatMul(False, False)
+_matmul_t = ops.MatMul(False, True)
+_reduce_sum_default = ops.ReduceSum()
+_reduce_sum_keepdims = ops.ReduceSum(True)
+_reduce_min_default = ops.ReduceMin()
+_reduce_min_keepdims = ops.ReduceMin(True)
+_reduce_max_default = ops.ReduceMax()
+_reduce_max_keepdims = ops.ReduceMax(True)
+_cumsum_default = ops.CumSum()
+_concat = ops.Concat(-1)
+_cumprod_default = ops.CumProd()
+_round = ops.Round()
+_rint = ops.Rint()
 
 
 
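These module-level names are pre-instantiated Primitive operators, so every call site in the file reuses one operator instance instead of constructing a fresh one per call. A small sketch of the pattern with hypothetical values, assuming the standard Primitive call convention:

    import numpy as np
    import mindspore as ms
    import mindspore.ops as ops

    _reduce_sum_keepdims = ops.ReduceSum(True)   # keep_dims=True, built once

    x = ms.Tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
    print(_reduce_sum_keepdims(x, 1).shape)      # (2, 1): axis 1 reduced, kept as size 1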
@@ -110,8 +107,8 @@ def absolute(x, dtype=None):
     allowed_types = (mstype.int32, mstype.float16, mstype.float32, mstype.float64)
     if original_dtype not in allowed_types and dtype is None:
         x = x.astype(mstype.float32)
-        return _apply_tensor_op(F.absolute, x, dtype=dtype).astype(original_dtype)
-    return _apply_tensor_op(F.absolute, x, dtype=dtype)
+        return _apply_tensor_op(ops.absolute, x, dtype=dtype).astype(original_dtype)
+    return _apply_tensor_op(ops.absolute, x, dtype=dtype)
 
 
 def count_nonzero(x, axis=None, keepdims=False):
@@ -295,8 +292,8 @@ def add(x1, x2, dtype=None):
     # broadcast is not fully supported in tensor_add on CPU,
     # so we use tensor_sub as a substitute solution
     if _get_device() == 'CPU':
-        return subtract(x1, F.neg(_to_tensor(x2)), dtype=dtype)
-    return _apply_tensor_op(F.tensor_add, x1, x2, dtype=dtype)
+        return subtract(x1, ops.neg(_to_tensor(x2)), dtype=dtype)
+    return _apply_tensor_op(ops.tensor_add, x1, x2, dtype=dtype)
 
 
 def subtract(x1, x2, dtype=None):
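The CPU branch of `add` leans on the identity a + b = a - (-b): `tensor_sub` had fuller broadcast support on CPU than `tensor_add`, so addition is rewritten as a subtraction of the negated operand. The same identity in plain NumPy, just to make the rewrite concrete:

    import numpy as np

    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    b = np.array([10.0, 20.0])               # broadcast against a
    assert np.array_equal(a + b, a - (-b))   # the substitution is exact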
@@ -330,7 +327,7 @@ def subtract(x1, x2, dtype=None):
     [-2 -2]
     [-2 -2]]
     """
-    return _apply_tensor_op(F.tensor_sub, x1, x2, dtype=dtype)
+    return _apply_tensor_op(ops.tensor_sub, x1, x2, dtype=dtype)
 
 
 def multiply(x1, x2, dtype=None):
@@ -368,10 +365,10 @@ def multiply(x1, x2, dtype=None):
     _check_input_tensor(x1, x2)
     # broadcast is not fully supported on CPU backend,
     # and explicit broadcasting is performed
-    shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
+    shape_out = _infer_out_shape(ops.shape(x1), ops.shape(x2))
     x1 = _broadcast_to_shape(x1, shape_out)
     x2 = _broadcast_to_shape(x2, shape_out)
-    return _apply_tensor_op(F.tensor_mul, x1, x2, dtype=dtype)
+    return _apply_tensor_op(ops.tensor_mul, x1, x2, dtype=dtype)
 
 
 def divide(x1, x2, dtype=None):
@@ -408,10 +405,10 @@ def divide(x1, x2, dtype=None):
     [0.33333334 0.5       ]]
     """
     x1, x2 = _to_tensor(x1, x2)
-    if not _check_is_float(F.dtype(x1)) and not _check_is_float(F.dtype(x2)):
-        x1 = F.cast(x1, mstype.float32)
-        x2 = F.cast(x2, mstype.float32)
-    return _apply_tensor_op(F.tensor_div, x1, x2, dtype=dtype)
+    if not _check_is_float(ops.dtype(x1)) and not _check_is_float(ops.dtype(x2)):
+        x1 = ops.cast(x1, mstype.float32)
+        x2 = ops.cast(x2, mstype.float32)
+    return _apply_tensor_op(ops.tensor_div, x1, x2, dtype=dtype)
 
 
 def true_divide(x1, x2, dtype=None):
@@ -484,7 +481,7 @@ def power(x1, x2, dtype=None):
     [ 1. 16.]
     [ 1. 16.]]
     """
-    return _apply_tensor_op(F.tensor_pow, x1, x2, dtype=dtype)
+    return _apply_tensor_op(ops.tensor_pow, x1, x2, dtype=dtype)
 
 
 def float_power(x1, x2, dtype=None):
@@ -524,12 +521,12 @@ def float_power(x1, x2, dtype=None):
     >>> print(output)
     [  0.   1.   8.  27.  64. 125.]
     """
-    if not _check_same_type(F.dtype(x1), mstype.float32):
-        x1 = F.cast(x1, mstype.float32)
-    if not _check_same_type(F.dtype(x2), mstype.float32):
-        x2 = F.cast(x2, mstype.float32)
+    if not _check_same_type(ops.dtype(x1), mstype.float32):
+        x1 = ops.cast(x1, mstype.float32)
+    if not _check_same_type(ops.dtype(x2), mstype.float32):
+        x2 = ops.cast(x2, mstype.float32)
 
-    return _apply_tensor_op(F.tensor_pow, x1, x2, dtype=dtype)
+    return _apply_tensor_op(ops.tensor_pow, x1, x2, dtype=dtype)
 
 
 def minimum(x1, x2, dtype=None):
@@ -581,12 +578,12 @@ def minimum(x1, x2, dtype=None):
     # comparisons with 2 scalars
     if x1.ndim == 0 and x2.ndim == 0:
         x1 = expand_dims(x1, 0)
-        return _apply_tensor_op(functools.partial(_prop_nan, F.minimum), x1, x2, dtype=dtype).squeeze()
+        return _apply_tensor_op(functools.partial(_prop_nan, ops.minimum), x1, x2, dtype=dtype).squeeze()
     if x1.ndim == 0:
         dtype = x2.dtype
     elif x2.ndim == 0:
         dtype = x1.dtype
-    return _apply_tensor_op(functools.partial(_prop_nan, F.minimum), x1, x2, dtype=dtype)
+    return _apply_tensor_op(functools.partial(_prop_nan, ops.minimum), x1, x2, dtype=dtype)
 
 
 def mean(a, axis=None, keepdims=False, dtype=None):
@@ -632,7 +629,7 @@ def mean(a, axis=None, keepdims=False, dtype=None):
     >>> print(output)
     2.5
     """
-    return _reduce(a, P.ReduceMean(keepdims), axis=axis, keepdims=keepdims, dtype=dtype)
+    return _reduce(a, ops.ReduceMean(keepdims), axis=axis, keepdims=keepdims, dtype=dtype)
 
 
 def inner(a, b):
@@ -681,17 +678,17 @@ def inner(a, b):
     [[3. 3. 3. 3. 3. 3. 3.]
     [3. 3. 3. 3. 3. 3. 3.]]]
     """
-    if F.rank(a) == 0 or F.rank(b) == 0:
-        return F.tensor_mul(a, b)
+    if ops.rank(a) == 0 or ops.rank(b) == 0:
+        return ops.tensor_mul(a, b)
 
-    _check_shape_aligned(F.shape(a), F.shape(b))
-    aligned_shape_a = (F.shape_mul(F.shape(a)[:-1]), F.shape(a)[-1])
-    aligned_shape_b = (F.shape_mul(F.shape(b)[:-1]), F.shape(a)[-1])
-    a_aligned = F.reshape(a, aligned_shape_a)
-    b_aligned = F.reshape(b, aligned_shape_b)
+    _check_shape_aligned(ops.shape(a), ops.shape(b))
+    aligned_shape_a = (ops.shape_mul(ops.shape(a)[:-1]), ops.shape(a)[-1])
+    aligned_shape_b = (ops.shape_mul(ops.shape(b)[:-1]), ops.shape(a)[-1])
+    a_aligned = ops.reshape(a, aligned_shape_a)
+    b_aligned = ops.reshape(b, aligned_shape_b)
 
     res = _matmul_t(a_aligned, b_aligned)
-    res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
+    res = ops.reshape(res, ops.shape(a)[:-1] + ops.shape(b)[:-1])
     return res
 
 
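`inner` (and `dot` below) reduce the n-d case to a single 2-D matmul: both inputs are flattened to (prod of leading dims, last dim), multiplied with the second operand transposed, and the result is reshaped back to a.shape[:-1] + b.shape[:-1]. A NumPy sketch of the same shape algebra, with hypothetical shapes:

    import numpy as np

    a, b = np.ones((2, 3, 4)), np.ones((5, 4))
    a2 = a.reshape(-1, a.shape[-1])          # (6, 4)
    b2 = b.reshape(-1, b.shape[-1])          # (5, 4)
    res = (a2 @ b2.T).reshape(a.shape[:-1] + b.shape[:-1])
    assert res.shape == (2, 3, 5)
    assert np.array_equal(res, np.inner(a, b))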
@@ -745,20 +742,20 @@ def dot(a, b):
     if dim_a != dim_b:
         raise ValueError('shapes are not aligned')
 
-    ndim_a, ndim_b = F.rank(a), F.rank(b)
+    ndim_a, ndim_b = ops.rank(a), ops.rank(b)
     if ndim_a == 0 or ndim_b == 0:
-        return F.tensor_mul(a, b)
+        return ops.tensor_mul(a, b)
     if ndim_a > 0 and ndim_b >= 2:
-        perm = F.make_range(ndim_b)
+        perm = ops.make_range(ndim_b)
         perm = perm[:-2] + (perm[-1],) + (perm[-2],)
-        b = F.transpose(b, perm)
+        b = ops.transpose(b, perm)
 
-    _check(F.shape(a)[-1], F.shape(b)[-1])
-    a_aligned = F.reshape(a, (-1, F.shape(a)[-1]))
-    b_aligned = F.reshape(b, (-1, F.shape(b)[-1]))
+    _check(ops.shape(a)[-1], ops.shape(b)[-1])
+    a_aligned = ops.reshape(a, (-1, ops.shape(a)[-1]))
+    b_aligned = ops.reshape(b, (-1, ops.shape(b)[-1]))
 
     res = _matmul_t(a_aligned, b_aligned)
-    res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
+    res = ops.reshape(res, ops.shape(a)[:-1] + ops.shape(b)[:-1])
     return res
 
 
@@ -813,11 +810,11 @@ def outer(a, b):
     [6. 6. 6. 6.]]
     """
     _check_input_tensor(a, b)
-    if F.rank(a) != 1:
+    if ops.rank(a) != 1:
         a = ravel(a)
-    if F.rank(b) != 1:
+    if ops.rank(b) != 1:
         b = ravel(b)
-    a = F.reshape(a, (F.shape(a)[0], 1))
+    a = ops.reshape(a, (ops.shape(a)[0], 1))
     b = _expand(b, 2)
     return _matmul(a, b)
 
@@ -878,8 +875,8 @@ def tensordot(a, b, axes=2):
     >>> print(output.shape)
     (5, 2)
     """
-    if F.rank(a)*F.rank(b) == 0 and axes == 0:
-        return F.tensor_mul(a, b)
+    if ops.rank(a)*ops.rank(b) == 0 and axes == 0:
+        return ops.tensor_mul(a, b)
     return ops.tensor_dot(a, b, axes)
 
 
@@ -1041,7 +1038,7 @@ def average(x, axis=None, weights=None, returned=False):
     _check_axis_type(axis, True, True, False)
     axis = _canonicalize_axis(axis, x.ndim)
 
-    x_avg = full((), nan, F.dtype(x))
+    x_avg = full((), nan, ops.dtype(x))
     sum_of_weights = None
 
     if weights is None:
@@ -1051,7 +1048,7 @@ def average(x, axis=None, weights=None, returned=False):
     _check_input_tensor(weights)
     if x.shape == weights.shape:
         x_avg, sum_of_weights = comput_avg(x, axis, weights)
-    elif F.rank(weights) == 1:
+    elif ops.rank(weights) == 1:
         if not isinstance(axis, int):
             _raise_type_error("Axis must be specified when shapes of x and weights differ.")
         perm = _expanded_shape(x.ndim, weights.shape[0], axis)
@@ -1070,10 +1067,10 @@ def average(x, axis=None, weights=None, returned=False):
 def compute_weights_for_mean(x, x_avg, axis):
     """Computes weights for np.average."""
     if axis is None:
-        sum_of_weights = full((), x.size, F.dtype(x))
+        sum_of_weights = full((), x.size, ops.dtype(x))
     else:
         fill_value = 1
-        if isinstance(axis, int) or (isinstance(axis, tuple) and F.tuple_len(axis) == 1):
+        if isinstance(axis, int) or (isinstance(axis, tuple) and ops.tuple_len(axis) == 1):
             fill_value = x.shape[axis] if isinstance(axis, int) else x.shape[axis[0]]
         elif axis is None:
             for sh in x.shape:
@@ -1081,17 +1078,17 @@ def compute_weights_for_mean(x, x_avg, axis):
         else:
             for ax in axis:
                 fill_value *= x.shape[ax]
-        sum_of_weights = full_like(x_avg, fill_value, F.dtype(x))
+        sum_of_weights = full_like(x_avg, fill_value, ops.dtype(x))
     return sum_of_weights
 
 
 def comput_avg(x, axis, weights):
     """Computes average value of input x with given parameters."""
     axis = () if axis is None else axis
-    x_mul = F.tensor_mul(x, weights)
+    x_mul = ops.tensor_mul(x, weights)
     x_sum = _reduce_sum_default(x_mul, axis)
     sum_of_weights = _reduce_sum_default(weights, axis)
-    x_avg = F.tensor_div(x_sum, sum_of_weights)
+    x_avg = ops.tensor_div(x_sum, sum_of_weights)
     return x_avg, sum_of_weights
 
 
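`comput_avg` implements the textbook weighted mean, avg = sum(x * w) / sum(w) over the reduced axes, and also returns the weight sum so `average(..., returned=True)` can expose it. Numerically, with hypothetical values:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0, 4.0])
    w = np.array([4.0, 3.0, 2.0, 1.0])
    x_avg = (x * w).sum() / w.sum()          # (4 + 6 + 6 + 4) / 10 = 2.0
    assert np.isclose(x_avg, np.average(x, weights=w))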
@@ -1135,7 +1132,10 @@ def matmul(x1, x2, dtype=None):
     [ 550.  620.  690.  760.  830.]
     [ 670.  756.  842.  928. 1014.]]]
     """
-    return C.matmul(x1, x2, dtype=dtype)
+    res = ops.matmul(x1, x2)
+    if dtype is not None:
+        res = res.astype(dtype)
+    return res
 
 
 def square(x, dtype=None):
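This is one of the few hunks that restructures the body rather than only renaming the namespace: the old one-liner delegated `dtype` handling to the composite matmul, while the new body computes with `ops.matmul` and applies `dtype` afterwards via `astype`. From the caller's side the signature is unchanged; a sketch with hypothetical values:

    import numpy as np
    import mindspore as ms
    import mindspore.numpy as mnp

    x1 = ms.Tensor(np.ones((2, 3), dtype=np.float32))
    x2 = ms.Tensor(np.ones((3, 4), dtype=np.float32))
    out = mnp.matmul(x1, x2, dtype=ms.float16)   # computed in float32, cast on return
    print(out.dtype)                             # Float16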
@@ -1166,7 +1166,7 @@ def square(x, dtype=None):
     [[ 0.  1.  4.]
     [ 9. 16. 25.]]
     """
-    return _apply_tensor_op(F.square, x, dtype=dtype)
+    return _apply_tensor_op(ops.square, x, dtype=dtype)
 
 
 def sqrt(x, dtype=None):
@@ -1200,7 +1200,7 @@ def sqrt(x, dtype=None):
     [[ 0. 1. 2.]
     [ 3. 4. 5.]]
     """
-    return _apply_tensor_op(F.sqrt, x, dtype=dtype)
+    return _apply_tensor_op(ops.sqrt, x, dtype=dtype)
 
 
 def reciprocal(x, dtype=None):
@@ -1237,7 +1237,7 @@ def reciprocal(x, dtype=None):
     [[1.   0.5  0.33333334]
     [0.25 0.2  0.16666667]]
     """
-    return _apply_tensor_op(lambda x: F.tensor_div(1, x), x, dtype=dtype)
+    return _apply_tensor_op(lambda x: ops.tensor_div(1, x), x, dtype=dtype)
 
 
 def log(x, dtype=None):
@@ -1272,15 +1272,15 @@ def log(x, dtype=None):
     >>> print(output)
     [0.69314575 1.09861    1.3862929 ]
     """
-    return _apply_tensor_op(F.log, x, dtype=dtype)
+    return _apply_tensor_op(ops.log, x, dtype=dtype)
 
 
 def _prop_nan(fn, x1, x2):
     """Selects NaN if either element is NaN"""
-    has_nan = F.logical_or(_isnan(x1), _isnan(x2))
-    nan_tensor = F.fill(_promote(F.dtype(x1), F.dtype(x2)), F.shape(has_nan), nan)
+    has_nan = ops.logical_or(_isnan(x1), _isnan(x2))
+    nan_tensor = ops.fill(_promote(ops.dtype(x1), ops.dtype(x2)), ops.shape(has_nan), nan)
     res = fn(x1, x2)
-    return F.select(has_nan, nan_tensor, res)
+    return ops.select(has_nan, nan_tensor, res)
 
 
 def maximum(x1, x2, dtype=None):
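`_prop_nan` gives `minimum`/`maximum` NumPy's NaN semantics: wherever either element is NaN, the result is NaN, instead of whatever the raw comparison op would pick. The behavior it reproduces, shown with NumPy:

    import numpy as np

    a = np.array([1.0, np.nan, 3.0])
    b = np.array([2.0, 2.0, np.nan])
    print(np.minimum(a, b))   # [ 1. nan nan] - NaN wins wherever it appears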
@@ -1325,15 +1325,15 @@ def maximum(x1, x2, dtype=None):
     elif not isinstance(x2, Tensor):
         _raise_type_error("Input x2 is expected to be array_like")
 
-    # F.maximum does not support when both operands are scalar
+    # ops.maximum does not support when both operands are scalar
     if x1.ndim == 0 and x2.ndim == 0:
         x1 = expand_dims(x1, 0)
-        return _apply_tensor_op(functools.partial(_prop_nan, F.maximum), x1, x2, dtype=dtype).squeeze()
+        return _apply_tensor_op(functools.partial(_prop_nan, ops.maximum), x1, x2, dtype=dtype).squeeze()
     if x1.ndim == 0:
         dtype = x2.dtype
     elif x2.ndim == 0:
         dtype = x1.dtype
-    return _apply_tensor_op(functools.partial(_prop_nan, F.maximum), x1, x2, dtype=dtype)
+    return _apply_tensor_op(functools.partial(_prop_nan, ops.maximum), x1, x2, dtype=dtype)
 
 
 def heaviside(x1, x2, dtype=None):
@@ -1372,21 +1372,21 @@ def heaviside(x1, x2, dtype=None):
     def _heaviside(x1, x2):
         """Computes heaviside without passing keyword arguments"""
         # performs type promotion
-        dtype1 = F.dtype(x1)
-        dtype2 = F.dtype(x2)
+        dtype1 = ops.dtype(x1)
+        dtype2 = ops.dtype(x2)
         dtype_out = _promote(dtype1, dtype2)
         if not _check_same_type(dtype1, dtype_out):
-            x1 = F.cast(x1, dtype_out)
+            x1 = ops.cast(x1, dtype_out)
         if not _check_same_type(dtype2, dtype_out):
-            x2 = F.cast(x2, dtype_out)
+            x2 = ops.cast(x2, dtype_out)
 
         # performs broadcast
-        shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
+        shape_out = _infer_out_shape(ops.shape(x1), ops.shape(x2))
         x1 = _broadcast_to_shape(x1, shape_out)
         x2 = _broadcast_to_shape(x2, shape_out)
 
-        x2 = F.select(x1 < 0, zeros(shape_out, dtype_out), x2)
-        x2 = F.select(x1 > 0, ones(shape_out, dtype_out), x2)
+        x2 = ops.select(x1 < 0, zeros(shape_out, dtype_out), x2)
+        x2 = ops.select(x1 > 0, ones(shape_out, dtype_out), x2)
         return x2
 
     return _apply_tensor_op(_heaviside, x1, x2, dtype=dtype)
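The nested `_heaviside` builds the step function with two masked selects after promotion and broadcast: positions where x1 < 0 become 0, positions where x1 > 0 become 1, and exactly at x1 == 0 the broadcast x2 survives both selects and supplies the value, matching the NumPy definition. The same select logic in NumPy:

    import numpy as np

    x1 = np.array([-1.5, 0.0, 2.0])
    x2 = np.array(0.5)                       # value used where x1 == 0
    out = np.broadcast_to(x2, x1.shape).copy()
    out[x1 < 0] = 0.0
    out[x1 > 0] = 1.0
    print(out)                               # [0.  0.5 1. ]
    assert np.array_equal(out, np.heaviside(x1, 0.5))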
@@ -1444,7 +1444,7 @@ def amax(a, axis=None, keepdims=False, initial=None, where=True):
     >>> print(output)
     [-1.  3.]
     """
-    return reduce_(a, P.ReduceMax(keepdims), cmp_fn=F.maximum, axis=axis, keepdims=keepdims,
+    return reduce_(a, ops.ReduceMax(keepdims), cmp_fn=ops.maximum, axis=axis, keepdims=keepdims,
                    initial=initial, where=where)
 
 
@@ -1500,7 +1500,7 @@ def amin(a, axis=None, keepdims=False, initial=None, where=True):
     >>> print(output)
     [10.  1.]
     """
-    return reduce_(a, P.ReduceMin(keepdims), cmp_fn=F.minimum, axis=axis, keepdims=keepdims,
+    return reduce_(a, ops.ReduceMin(keepdims), cmp_fn=ops.minimum, axis=axis, keepdims=keepdims,
                    initial=initial, where=where)
 
 
@@ -1552,8 +1552,8 @@ def hypot(x1, x2, dtype=None):
         if _get_device() == 'CPU':
             # broadcast is not fully supported in tensor_add on CPU,
             # so we use tensor_sub as a substitute solution
-            return F.sqrt(F.tensor_sub(F.square(x1), F.neg(F.square(x2))))
-        return F.sqrt(F.tensor_add(F.square(x1), F.square(x2)))
+            return ops.sqrt(ops.tensor_sub(ops.square(x1), ops.neg(ops.square(x2))))
+        return ops.sqrt(ops.tensor_add(ops.square(x1), ops.square(x2)))
 
     return _apply_tensor_op(_hypot, x1, x2, dtype=dtype)
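Both branches of `_hypot` compute sqrt(x1^2 + x2^2); the CPU branch just spells the inner sum as x1^2 - (-(x2^2)) because, as the comment notes, `tensor_sub` had better broadcast support there. The identity, checked in NumPy:

    import numpy as np

    x1, x2 = np.array([3.0, 5.0]), np.array([4.0, 12.0])
    assert np.array_equal(np.sqrt(x1**2 - (-(x2**2))), np.hypot(x1, x2))  # [ 5. 13.]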
@@ -1588,7 +1588,7 @@ def floor(x, dtype=None):
     >>> print(output)
     [-2. -2. -1.  0.  1.  1.  2.]
     """
-    return _apply_tensor_op(F.floor, x, dtype=dtype)
+    return _apply_tensor_op(ops.floor, x, dtype=dtype)
 
 
 def floor_divide(x1, x2, dtype=None):
@@ -1619,30 +1619,30 @@ def floor_divide(x1, x2, dtype=None):
     >>> print(output)
     [0. 0. 1. 1.]
     """
-    return _apply_tensor_op(F.tensor_floordiv, x1, x2, dtype=dtype)
+    return _apply_tensor_op(ops.tensor_floordiv, x1, x2, dtype=dtype)
 
 
 def _remainder(x1, x2, c_style=False):
     """Computes remainder without applying keyword arguments."""
-    dtype = _promote(F.dtype(x1), F.dtype(x2))
+    dtype = _promote(ops.dtype(x1), ops.dtype(x2))
     if not _check_is_float(dtype):
-        x1 = F.cast(x1, mstype.float32)
-        x2 = F.cast(x2, mstype.float32)
+        x1 = ops.cast(x1, mstype.float32)
+        x2 = ops.cast(x2, mstype.float32)
 
-    quotient = F.tensor_div(x1, x2)
+    quotient = ops.tensor_div(x1, x2)
     if c_style:
         quotient = fix(quotient)
     else:
-        quotient = F.floor(quotient)
-    prod = F.tensor_mul(x2, quotient)
-    res = F.tensor_sub(x1, prod)
+        quotient = ops.floor(quotient)
+    prod = ops.tensor_mul(x2, quotient)
+    res = ops.tensor_sub(x1, prod)
     if _check_is_int(dtype):
-        zeros_tensor = zeros(F.shape(quotient), F.dtype(quotient))
-        x2_zeros = F.equal(x2, zeros_tensor)
-        res = F.select(x2_zeros, zeros_tensor, res)
+        zeros_tensor = zeros(ops.shape(quotient), ops.dtype(quotient))
+        x2_zeros = ops.equal(x2, zeros_tensor)
+        res = ops.select(x2_zeros, zeros_tensor, res)
 
-    if not _check_same_type(F.dtype(res), dtype):
-        res = F.cast(res, dtype)
+    if not _check_same_type(ops.dtype(res), dtype):
+        res = ops.cast(res, dtype)
     return res
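`_remainder` derives the modulo from the quotient: q = x1/x2 is rounded toward negative infinity (`floor`) for Python/NumPy semantics, or toward zero (`fix`) for the C-style `fmod`, and the remainder is then x1 - x2*q. The two conventions differ exactly when the operand signs differ:

    import numpy as np

    x1, x2 = np.array([7.0, -7.0]), np.array([3.0, 3.0])
    floor_q = np.floor(x1 / x2)
    print(x1 - x2 * floor_q)        # [1. 2.]  - matches np.remainder
    trunc_q = np.trunc(x1 / x2)
    print(x1 - x2 * trunc_q)        # [ 1. -1.] - matches np.fmod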
@@ -1712,13 +1712,13 @@ def fix(x):
     [ 2.  2. -2. -2.]
     """
     _check_input_tensor(x)
-    if not _check_is_float(F.dtype(x)):
-        x = F.cast(x, mstype.float32)
-    floored = F.floor(x)
-    # change to F.ceil once supported on CPU.
-    ceiled = F.neg(F.floor(F.neg(x)))
-    is_neg = F.tensor_lt(x, zeros(F.shape(x), F.dtype(x)))
-    return F.select(is_neg, ceiled, floored)
+    if not _check_is_float(ops.dtype(x)):
+        x = ops.cast(x, mstype.float32)
+    floored = ops.floor(x)
+    # change to ops.ceil once supported on CPU.
+    ceiled = ops.neg(ops.floor(ops.neg(x)))
+    is_neg = ops.tensor_lt(x, zeros(ops.shape(x), ops.dtype(x)))
+    return ops.select(is_neg, ceiled, floored)
 
 
 def fmod(x1, x2, dtype=None):
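`fix` rounds toward zero by combining floor for positive values with a ceiling synthesized from floor (the comment marks ceil as unsupported on CPU at the time), using the identity ceil(x) == -floor(-x):

    import numpy as np

    x = np.array([2.7, -2.7, 0.3, -0.3])
    ceiled = -np.floor(-x)                       # stand-in for np.ceil
    fixed = np.where(x < 0, ceiled, np.floor(x))
    print(fixed)                                 # [ 2. -2.  0. -0.]
    assert np.array_equal(fixed, np.fix(x))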
@@ -1818,7 +1818,7 @@ def exp(x, dtype=None):
     >>> print(output)
     [ 1.         2.718282   7.3890557 20.085537  54.598145 ]
     """
-    return _apply_tensor_op(F.tensor_exp, x, dtype=dtype)
+    return _apply_tensor_op(ops.tensor_exp, x, dtype=dtype)
 
 
 def expm1(x, dtype=None):
@@ -1849,7 +1849,7 @@ def expm1(x, dtype=None):
     >>> print(output)
     [ 0.         1.7182819  6.389056  19.085537  53.59815  ]
     """
-    return _apply_tensor_op(F.tensor_expm1, x, dtype=dtype)
+    return _apply_tensor_op(ops.tensor_expm1, x, dtype=dtype)
 
 
 def divmod_(x1, x2, dtype=None):
@@ -1881,7 +1881,7 @@ def divmod_(x1, x2, dtype=None):
     Tensor(shape=[5], dtype=Float32,
     value= [ 1.00000000e+00, 5.00000000e-01, 0.00000000e+00, 1.00000000e+00, 5.00000000e-01]))
     """
-    q = F.tensor_floordiv(x1, x2)
+    q = ops.tensor_floordiv(x1, x2)
     r = remainder(x1, x2)
     if dtype is not None:
         q = q.astype(dtype)
@@ -1971,16 +1971,16 @@ def diff(a, n=1, axis=-1, prepend=None, append=None):
         _raise_value_error("n is bigger then the specified dimension, this will result in an empty tensor.")
 
     original_dtype = a.dtype
-    # will change once F.tensor_slice supports types other than float32
+    # will change once ops.tensor_slice supports types other than float32
     if not _check_is_float(original_dtype):
         a = a.astype(mstype.float32)
     a = moveaxis(a, new_axis, -1)
-    for _ in F.make_range(n):
-        slice_start = _list_comprehensions(F.rank(a) - 1, 0, True)
-        slice_size = F.shape(a)[:-1] + (F.shape(a)[-1] - 1,)
-        minuend = F.tensor_slice(a, slice_start + (1,), slice_size)
-        subtrahend = F.tensor_slice(a, slice_start + (0,), slice_size)
-        a = F.tensor_sub(minuend, subtrahend)
+    for _ in ops.make_range(n):
+        slice_start = _list_comprehensions(ops.rank(a) - 1, 0, True)
+        slice_size = ops.shape(a)[:-1] + (ops.shape(a)[-1] - 1,)
+        minuend = ops.tensor_slice(a, slice_start + (1,), slice_size)
+        subtrahend = ops.tensor_slice(a, slice_start + (0,), slice_size)
+        a = ops.tensor_sub(minuend, subtrahend)
     if not _check_is_float(original_dtype):
        a = a.astype(original_dtype)
     return moveaxis(a, -1, new_axis)
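Each pass of the loop in `diff` subtracts two views of the same tensor offset by one along the last axis (the slice starting at 1 minus the slice starting at 0), which is exactly the first-order difference; applying it n times gives the n-th difference:

    import numpy as np

    a = np.array([1.0, 4.0, 9.0, 16.0])
    d = a[1:] - a[:-1]                 # minuend (start 1) - subtrahend (start 0)
    print(d)                           # [3. 5. 7.]
    assert np.array_equal(d, np.diff(a))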
@@ -2033,7 +2033,7 @@ def ediff1d(ary, to_end=None, to_begin=None):
         to_end = to_end.astype(ary.dtype)
         combined += (to_end,)
 
-    return P.Concat(0)(combined)
+    return ops.Concat(0)(combined)
 
 
 def trapz(y, x=None, dx=1.0, axis=-1):
@@ -2070,12 +2070,12 @@ def trapz(y, x=None, dx=1.0, axis=-1):
     [ 4.5  7.5 10.5]
     """
     y = _to_tensor(y)
-    ndim = F.rank(y)
+    ndim = ops.rank(y)
     _check_axis_in_range(axis, ndim)
     axis = axis + ndim if axis < 0 else axis
     y_start_axis_left = _list_comprehensions(axis, 0, True)
     y_start_axis_right = _list_comprehensions(ndim - axis - 1, 0, True)
-    shape = F.shape(y)
+    shape = ops.shape(y)
     y_slice_size = _tuple_setitem(shape, axis, shape[axis] - 1)
     if x is not None:
         x = _to_tensor(x)
@@ -2084,42 +2084,42 @@ def trapz(y, x=None, dx=1.0, axis=-1):
         dx = _to_tensor(dx)
         dx = _expand(dx, ndim - axis, axis=-1)
         dx = _broadcast_to_shape(dx, y_slice_size)
-    if not _check_is_float(F.dtype(y)):
+    if not _check_is_float(ops.dtype(y)):
         # trapz returns float
-        y = F.cast(y, mstype.float32)
-        dx = F.cast(dx, F.dtype(y))
+        y = ops.cast(y, mstype.float32)
+        dx = ops.cast(dx, ops.dtype(y))
 
     # product of dx and y with the last column removed
-    y_slice_left = F.tensor_slice(y, y_start_axis_left + (0,) + y_start_axis_right, y_slice_size)
-    prod_left = F.tensor_mul(y_slice_left, dx)
+    y_slice_left = ops.tensor_slice(y, y_start_axis_left + (0,) + y_start_axis_right, y_slice_size)
+    prod_left = ops.tensor_mul(y_slice_left, dx)
     # product of dx and y with the first column removed
-    y_slice_right = F.tensor_slice(y, y_start_axis_left + (1,) + y_start_axis_right, y_slice_size)
-    prod_right = F.tensor_mul(y_slice_right, dx)
-    prod_sum = F.tensor_div(F.tensor_add(prod_left, prod_right), _to_tensor(2.0).astype(F.dtype(y)))
-    return F.reduce_sum(prod_sum, axis)
+    y_slice_right = ops.tensor_slice(y, y_start_axis_left + (1,) + y_start_axis_right, y_slice_size)
+    prod_right = ops.tensor_mul(y_slice_right, dx)
+    prod_sum = ops.tensor_div(ops.tensor_add(prod_left, prod_right), _to_tensor(2.0).astype(ops.dtype(y)))
+    return ops.reduce_sum(prod_sum, axis)
 
 
 def _gcd(x1, x2):
     """Calculates gcd without applying keyword arguments."""
-    dtype = _promote(F.dtype(x1), F.dtype(x2))
+    dtype = _promote(ops.dtype(x1), ops.dtype(x2))
     if not _check_is_float(dtype):
-        # F.reduce_sum only supports float
-        x1 = F.cast(x1, mstype.float32)
-        x2 = F.cast(x2, mstype.float32)
-    x1 = F.absolute(x1)
-    x2 = F.absolute(x2)
-    cond_ge = F.tensor_ge(x1, x2)
+        # ops.reduce_sum only supports float
+        x1 = ops.cast(x1, mstype.float32)
+        x2 = ops.cast(x2, mstype.float32)
+    x1 = ops.absolute(x1)
+    x2 = ops.absolute(x2)
+    cond_ge = ops.tensor_ge(x1, x2)
     a = where_(cond_ge, x1, x2)
     b = where_(cond_ge, x2, x1)
-    b = where_(F.equal(b, ZERO_TENSOR), a, b)
+    b = where_(ops.equal(b, ZERO_TENSOR), a, b)
     r = _remainder(a, b)
-    while F.tensor_gt(F.reduce_sum(r), ZERO_TENSOR):
+    while ops.tensor_gt(ops.reduce_sum(r), ZERO_TENSOR):
         r = _remainder(a, b)
-        has_terminated = F.equal(r, ZERO_TENSOR)
+        has_terminated = ops.equal(r, ZERO_TENSOR)
         a = where_(has_terminated, a, b)
         b = where_(has_terminated, b, r)
-    if not _check_same_type(F.dtype(b), dtype):
-        b = F.cast(b, dtype)
+    if not _check_same_type(ops.dtype(b), dtype):
+        b = ops.cast(b, dtype)
     return b
@@ -2183,15 +2183,15 @@ def lcm(x1, x2, dtype=None):
     def _lcm(x1, x2):
         """Calculates lcm without applying keyword arguments"""
         common_divisor = _gcd(x1, x2)
-        dtype = _promote(
+        dtype = _promote(ops.dtype(x1), ops.dtype(x2))
         x1 = x1.astype(mstype.float32)
         x2 = x2.astype(mstype.float32)
-        q1 =
-        q2 =
-        res =
-        has_zero =
+        q1 = ops.tensor_div(x1, common_divisor)
+        q2 = ops.tensor_div(x2, common_divisor)
+        res = ops.tensor_mul(ops.tensor_mul(q1, q2), common_divisor)
+        has_zero = ops.equal(multiply(x1, x2), ZERO_TENSOR)
         res = where_(has_zero, ZERO_TENSOR, res)
-        return
+        return ops.absolute(res).astype(dtype)
 
     return _apply_tensor_op(_lcm, x1, x2, dtype=dtype)
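_gcd above is a vectorized Euclidean algorithm: every lane keeps stepping (a, b) -> (b, a % b) until all remainders hit zero, and _lcm then uses lcm = |x1/g * x2/g * g| to delay the product past the division. A rough NumPy equivalent (gcd_sketch is a hypothetical name):

import numpy as np

def gcd_sketch(x1, x2):
    a, b = np.maximum(x1, x2), np.minimum(x1, x2)
    b = np.where(b == 0, a, b)          # gcd(a, 0) == a
    r = a % b
    while r.sum() > 0:                  # loop until every lane terminates
        r = a % b
        done = r == 0
        a = np.where(done, a, b)
        b = np.where(done, b, r)
    return b

x1, x2 = np.array([12, 7]), np.array([18, 5])
g = gcd_sketch(x1, x2)
print(g, (x1 // g) * (x2 // g) * g)     # [6 1] [36 35]  (gcd, then lcm)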
@@ -2237,8 +2237,8 @@ def convolve(a, v, mode='full'):
         a = asarray_const(a)
     if not isinstance(v, Tensor):
         v = asarray_const(v)
-    a_size =
-    v_size =
+    a_size = ops.shape_mul(a.shape)
+    v_size = ops.shape_mul(v.shape)
     if a_size == 0 or v_size == 0:
         _raise_value_error("Inputs cannot be empty.")
     a = _expand(a, 1)

@@ -2297,7 +2297,7 @@ def _handle_facts(w, m, ddof, aweights):
     elif aweights is None:
         fact = w_sum - ddof
     else:
-        fact = w_sum - ddof *
+        fact = w_sum - ddof * ops.reduce_sum(w * aweights) / w_sum
     return fact

@@ -2393,7 +2393,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):
     # Determine the Normalization
     fact = _handle_facts(w, m, ddof, aweights)
 
-    m = m -
+    m = m - ops.expand_dims(avg, -1)
     if w is None:
         m_t = m.T
     else:

@@ -2408,7 +2408,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):
 def _real_axes(ndim_orig, ndim_out, axes_orig):
     """Returns the real axes to be reduced after performing broadcast"""
     _diff = ndim_out - ndim_orig
-    axes =
+    axes = ops.make_range(_diff)
     axes_orig = map(functools.partial(operator.add, _diff), axes_orig)
     return axes + tuple(axes_orig)

@@ -2419,7 +2419,7 @@ def _shape_reduced_keepdims(shape, axes):
     Reduces dimensions corresponding to argument axes while
     keeping the number of dimensions unchanged.
     """
-    ndim_out =
+    ndim_out = ops.tuple_len(shape)
     shape_out = [1] * ndim_out
     for i in range(ndim_out):
         if i not in axes:

@@ -2430,8 +2430,8 @@ def _shape_reduced_keepdims(shape, axes):
 @_primexpr
 def _shape_reduced(shape, axes):
     """Removes dimensions corresponding to argument axes"""
-    ndim_orig =
-    ndim_out = ndim_orig -
+    ndim_orig = ops.tuple_len(shape)
+    ndim_out = ndim_orig - ops.tuple_len(axes)
     shape_out = [0] * ndim_out
     idx_out = 0
     for i in range(ndim_orig):

@@ -2448,13 +2448,13 @@ def _reduce(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None,
     """
     a = _to_tensor(a)
 
-    shape =
-    ndim =
+    shape = ops.shape(a)
+    ndim = ops.rank(a)
     if dtype is None:
-        dtype =
+        dtype = ops.dtype(a)
     axes = _check_axis_valid(axis, ndim)
     if initial is not None:
-        if ((isinstance(initial, Tensor) and
+        if ((isinstance(initial, Tensor) and ops.rank(initial) > 0) or
                 not isinstance(initial, (int, float, bool, Tensor))):
             _raise_type_error('initial should be scalar')

@@ -2481,9 +2481,9 @@ def _reduce(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None,
     if isinstance(where, Tensor):
         if initial is None:
             _raise_value_error('initial value must be provided for where masks')
-        ndim_orig =
+        ndim_orig = ops.rank(a)
         a = where_(where, a, initial)
-        axes = _real_axes(ndim_orig,
+        axes = _real_axes(ndim_orig, ops.rank(a), axes)
 
     return reduce_fn(a, axes).astype(dtype)

@@ -2531,7 +2531,7 @@ def nanmax(a, axis=None, dtype=None, keepdims=False):
     if not isinstance(keepdims, int):
         _raise_type_error("integer argument expected, got", keepdims)
     nan_mask = _isnan(a)
-    a =
+    a = ops.select(nan_mask, ops.FillV2()(ops.shape(a), Tensor(-sys.maxsize - 1, ops.dtype(a))), a)
     reduce_fn = _reduce_max_keepdims if keepdims else _reduce_max_default
     return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype)

@@ -2581,14 +2581,14 @@ def nanmin(a, axis=None, dtype=None, keepdims=False):
     if not isinstance(keepdims, int):
         _raise_type_error("integer argument expected, got", keepdims)
     nan_mask = _isnan(a)
-    a =
+    a = ops.select(nan_mask, ops.FillV2()(ops.shape(a), Tensor(sys.maxsize, ops.dtype(a))), a)
     reduce_fn = _reduce_min_keepdims if keepdims else _reduce_min_default
     return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype)
 
 
 def _reduce_nansum(x, axis, keepdims=False):
     """Computes reduce sum treating NaNs as zeros."""
-    x =
+    x = ops.select(_isnan(x), zeros(ops.shape(x), ops.dtype(x)), x)
     if keepdims:
         return _reduce_sum_keepdims(x, axis)
     return _reduce_sum_default(x, axis)

@@ -2634,14 +2634,14 @@ def nansum(a, axis=None, dtype=None, keepdims=False):
     """
     a = _to_tensor(a)
     nan_mask = _isnan(a)
-    a =
+    a = ops.select(nan_mask, zeros(ops.shape(a), ops.dtype(a)), a)
     return _reduce(a, functools.partial(_reduce_nansum, keepdims=keepdims), axis=axis,
                    keepdims=keepdims, dtype=dtype)
 
 
 def _count_nonnan(a, axis, keepdims=False):
     """Counts the number of elements excluding NaNs."""
-    nonnan_mask =
+    nonnan_mask = ops.select(_isnan(a), zeros(ops.shape(a), ops.dtype(a)), ones(ops.shape(a), ops.dtype(a)))
     if keepdims:
         return _reduce_sum_keepdims(nonnan_mask, axis)
     return _reduce_sum_default(nonnan_mask, axis)
@@ -2695,18 +2695,18 @@ def nanmean(a, axis=None, dtype=None, keepdims=False):
     if dtype is None:
         dtype = mstype.float32
     a = _to_tensor(a)
-    axis = _check_axis_valid(axis,
+    axis = _check_axis_valid(axis, ops.rank(a))
     sum_a = nansum(a, axis=axis, dtype=dtype, keepdims=keepdims)
-    return
+    return ops.tensor_div(sum_a, _count_nonnan(a, axis, keepdims))
 
 
 def _nanvar(a, axis, ddof=0, keepdims=False):
     """Computes nanvar without applying keyword arguments."""
     mean_a = nanmean(a, axis=axis, keepdims=True)
-    pow_a =
+    pow_a = ops.tensor_pow(ops.tensor_sub(a, mean_a), 2)
     sum_a = _reduce_nansum(pow_a, axis, keepdims)
     count = _count_nonnan(a, axis, keepdims)
-    return divide(sum_a,
+    return divide(sum_a, ops.tensor_sub(count, ddof))
 
 
 def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False):
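nanmean above is nansum divided by the per-position count of non-NaN entries, and _nanvar applies the same counting to (a - mean)^2 with a ddof correction. In plain NumPy terms, purely as an illustration:

import numpy as np

a = np.array([1.0, np.nan, 3.0])
total = np.where(np.isnan(a), 0.0, a).sum()   # NaNs treated as zeros
count = (~np.isnan(a)).sum()                  # NaNs excluded from the count
print(total / count, np.nanmean(a))           # 2.0 2.0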
@@ -2814,7 +2814,7 @@ def nanstd(a, axis=None, dtype=None, ddof=0, keepdims=False):
     """
     if dtype is None:
         dtype = mstype.float32
-    return _reduce(a, lambda a, axis:
+    return _reduce(a, lambda a, axis: ops.sqrt(_nanvar(a, axis, ddof=ddof, keepdims=keepdims)),
                    axis=axis, keepdims=keepdims, dtype=dtype)

@@ -2845,7 +2845,7 @@ def exp2(x, dtype=None):
     >>> print(output)
     [4. 8.]
     """
-    return _apply_tensor_op(lambda x:
+    return _apply_tensor_op(lambda x: ops.tensor_pow(2, x), x, dtype=dtype)
 
 
 def kron(a, b):
@@ -2884,23 +2884,23 @@ def kron(a, b):
     [0. 0. 1. 1.]]
     """
     a, b = _to_tensor(a, b)
-    ndim = _max(
+    ndim = _max(ops.rank(a), ops.rank(b))
     if ndim == 0:
-        return
+        return ops.tensor_mul(a, b)
     a = _expand(a, ndim)
     b = _expand(b, ndim)
-    shape_a =
-    shape_b =
+    shape_a = ops.shape(a)
+    shape_b = ops.shape(b)
 
     # scales a by the shape of b
     kron_shape = _seq_prod(shape_a, shape_b)
-    a =
-    a =
-    a = moveaxis(a,
-    a =
+    a = ops.reshape(a, _add_unit_axes(shape_a, 2 * ndim, True))
+    a = ops.tile(a, _add_unit_axes(shape_b, 2 * ndim, False))
+    a = moveaxis(a, ops.make_range(ndim, 2 * ndim), ops.make_range(1, 2 * ndim, 2))
+    a = ops.reshape(a, kron_shape)
     # scales b by the shape of a
-    b =
-    return
+    b = ops.tile(b, shape_a)
+    return ops.tensor_mul(a, b)
 
 
 def cross(a, b, axisa=- 1, axisb=- 1, axisc=- 1, axis=None):
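The kron hunk builds the Kronecker product by giving `a` interleaved unit axes, tiling it by `b`'s shape, and multiplying by `b` tiled by `a`'s shape. A 2-D NumPy sketch of that construction (the axis bookkeeping here is ours and may differ in detail from the `_add_unit_axes` helper):

import numpy as np

def kron_sketch(a, b):
    (m0, m1), (n0, n1) = a.shape, b.shape
    out = (m0 * n0, m1 * n1)
    a2 = np.tile(a.reshape(m0, 1, m1, 1), (1, n0, 1, n1)).reshape(out)
    b2 = np.tile(b, (m0, m1))
    return a2 * b2              # out[i*n0+p, j*n1+q] == a[i, j] * b[p, q]

a, b = np.eye(2, dtype=int), np.ones((2, 2), dtype=int)
print(np.array_equal(kron_sketch(a, b), np.kron(a, b)))  # True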
@@ -2957,13 +2957,13 @@ def cross(a, b, axisa=- 1, axisb=- 1, axisc=- 1, axis=None):
     if axis is not None:
         axisa, axisb, axisc = axis, axis, axis
 
-    _check_axis_in_range(axisa,
-    _check_axis_in_range(axisb,
+    _check_axis_in_range(axisa, ops.rank(a))
+    _check_axis_in_range(axisb, ops.rank(b))
     a = moveaxis(a, axisa, -1)
     b = moveaxis(b, axisb, -1)
-    shape_a =
-    shape_b =
-    if
+    shape_a = ops.shape(a)
+    shape_b = ops.shape(b)
+    if ops.shape(a)[-1] not in (2, 3) or ops.shape(b)[-1] not in (2, 3):
         _raise_value_error('incompatible dimensions for cross product (dimension must be 2 or 3)')
     a_has_z = shape_a[-1] == 3
     b_has_z = shape_b[-1] == 3
@@ -2972,36 +2972,36 @@ def cross(a, b, axisa=- 1, axisb=- 1, axisc=- 1, axis=None):
         shape_out += (3,)
     _check_axis_in_range(axisc, len(shape_out))
 
-    dtype = _promote(
+    dtype = _promote(ops.dtype(a), ops.dtype(b))
     if _get_device() == 'CPU':
-        #
-        if not _check_is_float(
-            a =
-        if not _check_is_float(
-            b =
+        # ops.tensor_slice only supports float on CPU
+        if not _check_is_float(ops.dtype(a)):
+            a = ops.cast(a, mstype.float32)
+        if not _check_is_float(ops.dtype(b)):
+            b = ops.cast(b, mstype.float32)
 
-    a_slice_start = _list_comprehensions(
+    a_slice_start = _list_comprehensions(ops.rank(a) - 1, 0, True)
     a_slice_size = shape_a[:-1] + (1,)
-    b_slice_start = _list_comprehensions(
+    b_slice_start = _list_comprehensions(ops.rank(b) - 1, 0, True)
     b_slice_size = shape_b[:-1] + (1,)
 
     def _get_slice_product(idx_a, idx_b):
-        return multiply(
-
+        return multiply(ops.tensor_slice(a, a_slice_start + (idx_a,), a_slice_size),
+                        ops.tensor_slice(b, b_slice_start + (idx_b,), b_slice_size))
 
-    cz =
+    cz = ops.tensor_sub(_get_slice_product(0, 1), _get_slice_product(1, 0))  # ax*by - ay*bx
     if not a_has_z and not b_has_z:
-        return
+        return ops.reshape(cz, shape_out).astype(dtype)
 
     if a_has_z and b_has_z:
-        cx =
-        cy =
+        cx = ops.tensor_sub(_get_slice_product(1, 2), _get_slice_product(2, 1))  # ay*bz - az*by
+        cy = ops.tensor_sub(_get_slice_product(2, 0), _get_slice_product(0, 2))  # az*bx - ax*bz
     elif a_has_z:
-        cx =
+        cx = ops.neg(_get_slice_product(2, 1))  # -az*by
         cy = _get_slice_product(2, 0)  # az*bx
     else:  # b_has_z
         cx = _get_slice_product(1, 2)  # ay*bz
-        cy =
+        cy = ops.neg(_get_slice_product(0, 2))  # -ax*bz
     res = _concat((cx, cy, cz)).reshape(shape_out)
     return moveaxis(res, -1, axisc).astype(dtype)
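cross above assembles the result from pairwise slice products: cz = ax*by - ay*bx always, with cx and cy added only when an input has a z component. The 3-D case, spelled out in NumPy:

import numpy as np

a, b = np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
cx = a[1] * b[2] - a[2] * b[1]   # ay*bz - az*by
cy = a[2] * b[0] - a[0] * b[2]   # az*bx - ax*bz
cz = a[0] * b[1] - a[1] * b[0]   # ax*by - ay*bx
print([cx, cy, cz], np.cross(a, b))  # [0.0, 0.0, 1.0] [0. 0. 1.]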
@@ -3035,7 +3035,7 @@ def ceil(x, dtype=None):
     >>> print(output)
     [-1. -1. -0. 1. 2. 2. 2.]
     """
-    return _apply_tensor_op(lambda x:
+    return _apply_tensor_op(lambda x: ops.neg(ops.floor(ops.neg(x.astype(mstype.float32)))),
                             x, dtype=dtype)

@@ -3080,8 +3080,8 @@ def positive(a, dtype=None):
     [1. -1.]
     """
     _check_input_tensor(a)
-    neg_tensor =
-    return _apply_tensor_op(
+    neg_tensor = ops.neg(a)
+    return _apply_tensor_op(ops.neg, neg_tensor, dtype=dtype)
 
 
 def negative(a, dtype=None):
@@ -3110,7 +3110,7 @@ def negative(a, dtype=None):
     >>> print(output)
     [-1. 1.]
     """
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.neg, a, dtype=dtype)
 
 
 def cumsum(a, axis=None, dtype=None):
@@ -3198,7 +3198,7 @@ def nancumsum(a, axis=None, dtype=None):
     [[1. 3.]
     [3. 3.]]
     """
-    a =
+    a = ops.select(_isnan(a), zeros(ops.shape(a), ops.dtype(a)), a)
     return a.cumsum(axis, dtype)
@@ -3231,10 +3231,10 @@ def cbrt(x, dtype=None):
     def _cbrt(x):
         compute_type = promote_types(x.dtype, "float32")
         x = x.astype(compute_type)
-        # use
-        abs_x =
+        # use ops.Sign() once gpu support is added
+        abs_x = ops.absolute(x)
         sign_x = abs_x / x
-        return sign_x *
+        return sign_x * ops.tensor_pow(abs_x, 1. / 3.)
     return _apply_tensor_op(_cbrt, x, dtype=dtype)
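The cbrt hunk keeps the sign out of the fractional power, since |x|**(1/3) is only well defined for non-negative bases: cbrt(x) = sign(x) * |x|**(1/3), with the sign recovered as |x|/x (which, as in the hunk, is undefined at x == 0). For illustration:

import numpy as np

x = np.array([-8.0, 27.0])
abs_x = np.abs(x)
print(abs_x / x * abs_x ** (1.0 / 3.0))  # [-2.  3.]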
@@ -3266,7 +3266,7 @@ def log1p(x, dtype=None):
     >>> print(output)
     [0.6934 1.099 1.387 ]
     """
-    return _apply_tensor_op(lambda x:
+    return _apply_tensor_op(lambda x: ops.log(x + 1), x, dtype=dtype)
 
 
 def logaddexp(x1, x2, dtype=None):
@@ -3299,7 +3299,7 @@ def logaddexp(x1, x2, dtype=None):
     [2.312 2.693 3.312]
     """
     def _logaddexp(x1, x2):
-        return
+        return ops.log(ops.tensor_add(ops.tensor_exp(x1), ops.tensor_exp(x2)))
     return _apply_tensor_op(_logaddexp, x1, x2, dtype=dtype)
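logaddexp above uses the direct formula log(exp(x1) + exp(x2)). A minimal check; note that the direct form can overflow for large inputs, which the usual max-shift rewrite avoids (shown only as an aside for comparison, not as what the package does):

import numpy as np

x1, x2 = np.float64(2.0), np.float64(3.0)
direct = np.log(np.exp(x1) + np.exp(x2))
m = max(x1, x2)
stable = m + np.log(np.exp(x1 - m) + np.exp(x2 - m))
print(direct, stable, np.logaddexp(x1, x2))  # all ~3.3132617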
@@ -3332,7 +3332,7 @@ def log2(x, dtype=None):
     tensor_2 = _make_tensor(2, x.dtype)
 
     def _log2(x):
-        return
+        return ops.log(x) / ops.log(tensor_2)
 
     return _apply_tensor_op(_log2, x, dtype=dtype)

@@ -3373,7 +3373,7 @@ def logaddexp2(x1, x2, dtype=None):
     [3. 4.32 8.02]
     """
     _check_input_tensor(x1, x2)
-    add_exp =
+    add_exp = ops.tensor_add(ops.tensor_pow(2, x1), ops.tensor_pow(2, x2))
     return log2(add_exp, dtype=dtype)

@@ -3406,7 +3406,7 @@ def log10(x, dtype=None):
     tensor_10 = _make_tensor(10, x.dtype)
 
     def _log10(x):
-        return
+        return ops.log(x) / ops.log(tensor_10)
 
     return _apply_tensor_op(_log10, x, dtype=dtype)

@@ -3415,7 +3415,7 @@ def _cast_type_for_trigonometric(x):
     _check_input_tensor(x)
     if x.dtype != mstype.float16 or x.dtype != mstype.float32 or x.dtype != mstype.float64:
         dtype = _promote_for_trigonometric(x.dtype)
-        x =
+        x = ops.cast(x, dtype)
     return x

@@ -3446,7 +3446,7 @@ def sin(x, dtype=None):
     [ 0.9589243 -0.84147096 0. 0.9092974 -0.7568025 -0.50636566]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.sin, x, dtype=dtype)
 
 
 def cos(x, dtype=None):
@@ -3475,7 +3475,7 @@ def cos(x, dtype=None):
     [ 1. 0.5403023 -0.41614684 -0.9899925 -0.6536436 ]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.cos, x, dtype=dtype)
 
 
 def tan(x, dtype=None):
@@ -3509,7 +3509,7 @@ def tan(x, dtype=None):
     [ 3.380515 -1.5574077 0. -2.1850398 1.1578213 -0.58721393]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.tan, x, dtype=dtype)
 
 
 def arcsin(x, dtype=None):
@@ -3542,7 +3542,7 @@ def arcsin(x, dtype=None):
     [ 1.5707964 -1.5707964]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.asin, x, dtype=dtype)
 
 
 def arccos(input, dtype=None):
@@ -3576,7 +3576,7 @@ def arccos(input, dtype=None):
     [0. 3.1415927]
     """
     input = _cast_type_for_trigonometric(input)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.acos, input, dtype=dtype)
 
 
 def arctan(x, dtype=None):
@@ -3607,7 +3607,7 @@ def arctan(x, dtype=None):
     [0. 0.7853982 1.1071488 1.2490457 1.3258177]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.atan, x, dtype=dtype)
 
 
 def sinh(x, dtype=None):
@@ -3636,7 +3636,7 @@ def sinh(x, dtype=None):
     [ 0. 1.1752012 3.6268604 10.017875 27.289917 ]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.sinh, x, dtype=dtype)
 
 
 def cosh(x, dtype=None):
@@ -3665,7 +3665,7 @@ def cosh(x, dtype=None):
     [ 1. 1.5430807 3.7621956 10.067662 27.308233 ]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.cosh, x, dtype=dtype)
 
 
 def tanh(x, dtype=None):
@@ -3694,7 +3694,7 @@ def tanh(x, dtype=None):
     [0. 0.7615942 0.9640276 0.9950548 0.9993293]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.tanh, x, dtype=dtype)
 
 
 def arcsinh(x, dtype=None):
@@ -3723,7 +3723,7 @@ def arcsinh(x, dtype=None):
     [0.8813736 1.4436355 1.8184465 2.0947125]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.asinh, x, dtype=dtype)
 
 
 def arccosh(x, dtype=None):
@@ -3752,7 +3752,7 @@ def arccosh(x, dtype=None):
     [0. 1.316958 1.7627472 2.063437 ]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.acosh, x, dtype=dtype)
 
 
 def arctanh(x, dtype=None):
@@ -3781,7 +3781,7 @@ def arctanh(x, dtype=None):
     [-2.646653 -0.97295505 -0.54930615 0. 0.54930615]
     """
     x = _cast_type_for_trigonometric(x)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.atanh, x, dtype=dtype)
 
 
 def arctan2(x1, x2, dtype=None):
@@ -3815,7 +3815,7 @@ def arctan2(x1, x2, dtype=None):
     """
     x1 = _cast_type_for_trigonometric(x1)
     x2 = _cast_type_for_trigonometric(x2)
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.atan2, x1, x2, dtype=dtype)
 
 
 def promote_types(type1, type2):
@@ -3896,11 +3896,11 @@ def corrcoef(x, y=None, rowvar=True, dtype=None):
     # This implementation was adapted from original Numpy.
     c = cov(x, y, rowvar)
     if not c.shape:
-        return
+        return ops.tensor_div(c, c)
     d = diag(c)
     stddev = sqrt(d)
-    c /=
-    c /=
+    c /= ops.expand_dims(stddev, -1)
+    c /= ops.expand_dims(stddev, 0)
     c = clip(c, -1, 1)
     if dtype is not None:
         return c.astype(dtype)

@@ -3925,7 +3925,7 @@ def _slice_along_axis(f, axis, slice_start, slice_end):
     index_end = f.shape
     index_start = _tuple_setitem(index_start, axis, slice_start)
     index_end = _tuple_setitem(index_end, axis, slice_size)
-    return
+    return ops.tensor_slice(f, index_start, index_end)
 
 
 def _gradient_along_axis(f, h, axis):

@@ -3950,7 +3950,7 @@ def check_gradient_arguments(f, axis, edge_order):
     if f.dtype != mstype.float64:
         f = f.astype(mstype.float32)
     if axis is None:
-        axis =
+        axis = ops.make_range(f.ndim)
     else:
         _check_axis_type(axis, True, True, True)
         axis = _canonicalize_axis(axis, f.ndim)

@@ -4023,7 +4023,7 @@ def gradient(f, *varargs, axis=None, edge_order=1):
 
     a_grad = []
 
-    for idx in
+    for idx in ops.make_range(len_axes):
         h = dx[idx]
         ax = axis[idx]
         if f.shape[ax] < 2:
@@ -4187,7 +4187,7 @@ def multi_dot(arrays):
     else:
         arrays = _to_tensor(arrays)
         num = len(arrays)
-        arrays =
+        arrays = ops.reshape(arrays, (-1,) + _tuple_slice(ops.shape(arrays), 2, None))
         arrays = split(arrays, num)
     if len(arrays) == 2:
         return dot(*arrays)

@@ -4197,22 +4197,22 @@ def multi_dot(arrays):
     for arr in arrays:
         arrs.append(arr)
 
-    if
-        arrs[0] =
+    if ops.rank(arrs[0]) == 1:
+        arrs[0] = ops.reshape(arrs[0], (1, arrs[0].size))
     else:
-        shape_out += (
-    if
-        arrs[-1] =
+        shape_out += (ops.shape(arrs[0])[0],)
+    if ops.rank(arrs[-1]) == 1:
+        arrs[-1] = ops.reshape(arrs[-1], (arrs[-1].size, 1))
     else:
-        shape_out += (
+        shape_out += (ops.shape(arrs[-1])[1],)
 
     shapes = []
     for arr in arrs:
-        shapes.append(
+        shapes.append(ops.shape(arr))
     dims = _get_dims(shapes)
     order = _min_cost_chain_matmul(dims)
     res = _multi_dot(arrs, 0, len(arrs) - 1, order)
-    return
+    return ops.reshape(res, shape_out)
 
 
 def argmax(a, axis=None):

@@ -4290,6 +4290,8 @@ def argmin(a, axis=None):
     [0 0]
     """
     a = _to_tensor(a)
+    if a.dtype == mstype.bool_:
+        a = a.astype(mstype.int32)
     return a.argmin(axis)
@@ -4338,26 +4340,26 @@ def searchsorted(a, v, side='left', sorter=None):
     if side not in ('left', 'right'):
         _raise_value_error('invalid value for keyword "side"')
     a = _to_tensor(a).astype(mstype.float32)
-    if
+    if ops.rank(a) != 1:
         _raise_value_error('`a` should be 1-D array')
     v = _to_tensor(v)
-    shape =
+    shape = ops.shape(v)
     if sorter is not None:
-        if
+        if ops.rank(sorter) != 1 or sorter.size != a.size:
             _raise_value_error('sorter must be 1-D array with the same size as `a`')
         sorter = _to_tensor(sorter)
-        sorter =
-        a =
-    less_op =
-    i =
-    j =
-    two =
+        sorter = ops.expand_dims(sorter, -1)
+        a = ops.gather_nd(a, sorter)
+    less_op = ops.tensor_le if side == 'left' else ops.tensor_lt
+    i = ops.fill(mstype.int32, shape, 0)
+    j = ops.fill(mstype.int32, shape, a.size)
+    two = ops.fill(mstype.int32, shape, 2)
 
     for _ in _get_sort_range(a.size):
         mid = floor_divide(add(i, j), two)
-        mask = less_op(v,
-        i =
-        j =
+        mask = less_op(v, ops.gather_nd(a, ops.expand_dims(mid, -1)))
+        i = ops.select(mask, i, mid)
+        j = ops.select(mask, mid, j)
     return j
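searchsorted above is a vectorized bisection: every query keeps an interval [i, j), halves it against a[mid], and returns j after a fixed number of iterations large enough for all queries to converge. A NumPy sketch of the same loop (the iteration count mirrors what `_get_sort_range` must provide, roughly log2(n) + 1 steps; searchsorted_sketch is our name):

import numpy as np

def searchsorted_sketch(a, v, side='left'):
    i = np.zeros(np.shape(v), dtype=np.int64)
    j = np.full(np.shape(v), a.size, dtype=np.int64)
    less = np.less_equal if side == 'left' else np.less
    for _ in range(int(a.size).bit_length() + 1):
        mid = (i + j) // 2
        mask = less(v, a[mid])          # answer lies in the left half
        i = np.where(mask, i, mid)
        j = np.where(mask, mid, j)
    return j

a, v = np.array([1, 3, 5, 7]), np.array([0, 4, 8])
print(searchsorted_sketch(a, v), np.searchsorted(a, v))  # [0 2 4] [0 2 4]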
@@ -4405,7 +4407,7 @@ def interp(x, xp, fp, left=None, right=None):
     """
     # implement period once sort is supported
     x, xp, fp = _to_tensor(x, xp, fp)
-    if
+    if ops.rank(xp) != 1 or ops.rank(fp) != 1:
         _raise_value_error('xp and fp must be 1-d sequences')
     size = xp.size
     if fp.size != size:

@@ -4416,25 +4418,25 @@ def interp(x, xp, fp, left=None, right=None):
 
     indices_1 = clip(searchsorted(xp, x), 0, size - 1)
     indices_0 = clip(indices_1 - _to_tensor(1), 0, size - 1)
-    indices_0 =
-    indices_1 =
-    x_0 =
-    x_1 =
-    y_0 =
-    y_1 =
+    indices_0 = ops.expand_dims(indices_0, -1)
+    indices_1 = ops.expand_dims(indices_1, -1)
+    x_0 = ops.gather_nd(xp, indices_0)
+    x_1 = ops.gather_nd(xp, indices_1)
+    y_0 = ops.gather_nd(fp, indices_0)
+    y_1 = ops.gather_nd(fp, indices_1)
     res = (y_0 * (x_1 - x) + y_1 * (x - x_0)) / (x_1 - x_0)
-    res =
+    res = ops.select(ops.equal(x_0, x_1), y_0, res)
 
     idx_0 = _to_tensor([0])
     idx_last = _to_tensor([size - 1])
     if left is None:
-        left =
-        left = full(
+        left = ops.gather_nd(fp, idx_0)
+        left = full(ops.shape(x), left, mstype.float32)
     if right is None:
-        right =
-        right = full(
-    res =
-    res =
+        right = ops.gather_nd(fp, idx_last)
+        right = full(ops.shape(x), right, mstype.float32)
+    res = ops.select(ops.tensor_lt(x, ops.gather_nd(xp, idx_0)), left, res)
+    res = ops.select(ops.tensor_gt(x, ops.gather_nd(xp, idx_last)), right, res)
     return res
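interp above brackets each query between xp[i0] and xp[i1] via searchsorted and blends fp linearly; the x_0 == x_1 select guards the division at exact knots, and the final two selects clamp to the left/right fill values. The core blend in NumPy:

import numpy as np

xp, fp = np.array([1.0, 2.0, 3.0]), np.array([3.0, 2.0, 0.0])
x = np.array([1.5, 2.5])
i1 = np.clip(np.searchsorted(xp, x), 0, xp.size - 1)
i0 = np.clip(i1 - 1, 0, xp.size - 1)
x0, x1, y0, y1 = xp[i0], xp[i1], fp[i0], fp[i1]
print((y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0))  # [2.5 1. ]
print(np.interp(x, xp, fp))                         # [2.5 1. ]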
@@ -4445,8 +4447,8 @@ def _apply_tensor_op(fn, *args, dtype=None):
         res = fn(args)
     else:
         res = fn(*args)
-    if dtype is not None and not _check_same_type(
-        res =
+    if dtype is not None and not _check_same_type(ops.dtype(res), dtype):
+        res = ops.cast(res, dtype)
     return res

@@ -4486,16 +4488,16 @@ def sign(x, dtype=None):
     if not isinstance(x, (int, float, list, tuple, Tensor)):
         _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x)
     x = _to_tensor(x)
-    if _check_same_type(
+    if _check_same_type(ops.dtype(x), mstype.bool_):
         _raise_type_error("sign does not accept dtype bool.")
 
     _non_zero_sign = x / absolute(x)
     _zero = _broadcast_to_shape(_make_tensor(0, x.dtype), x.shape)
-    is_zero =
-    res =
+    is_zero = ops.equal(x, 0)
+    res = ops.select(is_zero, _zero, _non_zero_sign)
 
-    if dtype is not None and not _check_same_type(
-        res =
+    if dtype is not None and not _check_same_type(ops.dtype(res), dtype):
+        res = ops.cast(res, dtype)
     return res
@@ -4538,24 +4540,24 @@ def copysign(x1, x2, dtype=None):
     if not isinstance(x2, (int, float, list, tuple, Tensor)):
         _raise_type_error('integer, float, list, tuple or Tensor are expected, but got', x2)
     x1, x2 = _to_tensor(x1, x2)
-    shape_out = _infer_out_shape(
+    shape_out = _infer_out_shape(ops.shape(x1), ops.shape(x2))
     x1 = _broadcast_to_shape(x1, shape_out)
     x2 = _broadcast_to_shape(x2, shape_out)
-    if _check_same_type(
+    if _check_same_type(ops.dtype(x1), mstype.bool_) or _check_same_type(ops.dtype(x2), mstype.bool_):
         _raise_type_error("sign does not accept dtype bool.")
 
     original_dtype = x1.dtype
     if not _check_is_float(original_dtype):
-        pos_tensor =
+        pos_tensor = ops.absolute(x1.astype('float32')).astype(original_dtype)
     else:
-        pos_tensor =
+        pos_tensor = ops.absolute(x1)
 
-    neg_tensor =
-    less_zero =
-    res =
+    neg_tensor = ops.neg(pos_tensor)
+    less_zero = ops.less(x2, 0)
+    res = ops.select(less_zero, neg_tensor, pos_tensor)
 
-    if dtype is not None and not _check_same_type(
-        res =
+    if dtype is not None and not _check_same_type(ops.dtype(res), dtype):
+        res = ops.cast(res, dtype)
     return res
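copysign above reduces to: take |x1| (cast through float32 first for non-float dtypes, since absolute needs a float input there), negate it, and select per element on x2 < 0. Equivalently, in NumPy:

import numpy as np

x1, x2 = np.array([1.0, -2.0, 3.0]), np.array([-1.0, 5.0, -0.5])
pos = np.abs(x1)
print(np.where(x2 < 0, -pos, pos))  # [-1.  2. -3.]
print(np.copysign(x1, x2))          # same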
@@ -4590,12 +4592,12 @@ def digitize(x, bins, right=False):
     [1 3 3 4 5]
     """
     x, bins = _to_tensor(x, bins)
-    if
+    if ops.rank(bins) != 1:
         _raise_value_error('bins should be 1-dimensional')
     if x.size == 0:
         return x
     if bins.size == 0:
-        return zeros(
+        return zeros(ops.shape(x), mstype.int32)
     side = 'left' if right else 'right'
     first_bin = bins[0]
     last_bin = bins[_type_convert(int, bins.size) - 1]
@@ -4652,24 +4654,24 @@ def bincount(x, weights=None, minlength=0, length=None):
     [0.3 0.7 1.1]
     """
     x = _to_tensor(x)
-    if
+    if ops.rank(x) != 1:
         _raise_value_error('`x` should be one-dimensional')
-    if not _check_is_int(
+    if not _check_is_int(ops.dtype(x)):
         _raise_type_error('`x` should be an array of ints')
     x = clip(x, 0, None)
     if length is None:
-        if
-            length = int(maximum(
+        if ops.isconstant(x):
+            length = int(maximum(ops.reduce_max(x.astype(mstype.float32)), minlength - 1).asnumpy()) + 1
         else:
             _raise_value_error('argument `length` must be provided in graph mode')
     idx = arange(length).reshape(length, 1)
-    idx_mapping =
+    idx_mapping = ops.equal(x, idx)
     if weights is not None:
         weights = _to_tensor(weights)
-        if
+        if ops.shape(x) != ops.shape(weights):
             _raise_value_error('`x` and `weights` must have the same length')
-        idx_mapping
+        idx_mapping = weights * idx_mapping
-    return
+    return ops.reduce_sum(idx_mapping.astype(mstype.float32), 1).ravel()
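bincount above avoids scatter ops entirely: comparing x (shape (n,)) against a column of bin ids (shape (length, 1)) broadcasts to a (length, n) one-hot matrix, and the weighted row sums are the counts. The same trick in NumPy:

import numpy as np

x = np.array([0, 1, 1, 2, 2, 2])
w = np.array([0.1, 0.2, 0.5, 0.3, 0.4, 0.4])
length = int(x.max()) + 1
idx = np.arange(length).reshape(length, 1)
mapping = x == idx                  # (length, n) one-hot rows
print((w * mapping).sum(axis=1))    # [0.1 0.7 1.1]
print(np.bincount(x, weights=w))    # same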
@@ -4727,7 +4729,7 @@ def histogram(a, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
     a = _to_tensor(a)
     if weights is not None:
         weights = _to_tensor(weights)
-        if
+        if ops.shape(a) != ops.shape(weights):
             _raise_value_error('weights should have the same shape as a')
         weights = weights.ravel()
     a = a.ravel()

@@ -4739,8 +4741,8 @@ def histogram(a, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
     if count.size == 0:
         return count, bin_edges
     if density:
-        count =
-        count = count / diff(bin_edges) /
+        count = ops.cast(count, mstype.float32)
+        count = count / diff(bin_edges) / ops.reduce_sum(count)
     return count, bin_edges
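The density branch above rescales counts so the histogram integrates to one: divide by the bin widths and by the total count. A quick NumPy check:

import numpy as np

count, edges = np.histogram([1.0, 2.0, 2.0, 3.0], bins=2)
density = count / np.diff(edges) / count.sum()
print((density * np.diff(edges)).sum())  # 1.0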
@@ -4757,7 +4759,7 @@ def _get_histogramdd_count(ndim, bin_edges, sample, weights):
     data_indices = []
     nbin = ()
     flattened_bin_size = 1
-    for i in
+    for i in ops.make_range(ndim):
         data_to_bins = searchsorted(bin_edges[i], sample[:, i], 'right')
         bin_size = _type_convert(int, bin_edges[i].size)
         data_to_bins = where_(sample[:, i] == bin_edges[i][-1], _to_tensor(bin_size - 1), data_to_bins)

@@ -4765,14 +4767,14 @@ def _get_histogramdd_count(ndim, bin_edges, sample, weights):
         nbin += (bin_size + 1,)
         flattened_bin_size *= (bin_size + 1)
 
-    factor =
+    factor = ops.reshape(_to_tensor(_factor_flattened_hist(nbin)), (ndim, 1))
     stacked_indices = stack(data_indices) * factor
     if _get_device() == 'Ascend':
-        stacked_indices =
-    flattened_hist =
+        stacked_indices = ops.cast(stacked_indices, mstype.float32)
+    flattened_hist = ops.reduce_sum(stacked_indices.astype(mstype.float32), 0)
     count = bincount(flattened_hist.astype(mstype.int32), weights, length=flattened_bin_size)
-    count =
-    slices = _list_comprehensions(ndim,
+    count = ops.reshape(count, nbin)
+    slices = _list_comprehensions(ndim, ops.make_slice(1, -1, 1), True)
     count = count[slices]
     return count

@@ -4852,9 +4854,9 @@ def histogramdd(sample, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
         sample = stack(sample, -1)
     elif not isinstance(sample, Tensor):
         _raise_type_error('sample should be (N, D) array, or (D, N) array_like')
-    if
+    if ops.rank(sample) != 2:
         _raise_value_error('when an array, sample should be 2-dimensional')
-    ndim =
+    ndim = ops.shape(sample)[1]
 
     if isinstance(bins, int):
         bins = _list_comprehensions(ndim, bins)

@@ -4872,7 +4874,7 @@ def histogramdd(sample, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
 
     bin_edges = []
     dedges = []
-    for i in
+    for i in ops.make_range(ndim):
         edges = histogram_bin_edges(sample[:, i], bins[i], range[i], weights)
         bin_edges.append(edges)
         dedges.append(diff(edges))

@@ -4880,8 +4882,8 @@ def histogramdd(sample, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin
     count = _get_histogramdd_count(ndim, bin_edges, sample, weights)
 
     if density:
-        s =
-        for i in
+        s = ops.reduce_sum(count.astype(mstype.float32))
+        for i in ops.make_range(ndim):
             shape = _expanded_shape(ndim, dedges[i].size, i)
             count /= _to_tensor(dedges[i]).reshape(shape)
         count /= s
@@ -5001,7 +5003,7 @@ def matrix_power(a, n):
         return a
     res = a
     while n > 1:
-        res =
+        res = ops.matmul(res, a)
         n = n - 1
     return res
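matrix_power above multiplies n - 1 times in a simple while loop (linear in n, rather than exponentiation by squaring). The same loop shape in NumPy:

import numpy as np

a, n = np.array([[0, 1], [1, 1]]), 3
res = a
while n > 1:
    res = res @ a
    n -= 1
print(res.tolist(), np.linalg.matrix_power(a, 3).tolist())  # [[1, 2], [2, 3]] twice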
@@ -5042,12 +5044,12 @@ def around(a, decimals=0):
         _raise_value_error("decimals < 0 is not supported now.")
     if decimals == 0:
         return _round(a)
-    return
+    return ops.tensor_div(_round(a * 10**decimals), 10**decimals)
 
 
 def _to_poly1d(x):
     x = atleast_1d(_to_tensor(x))
-    if
+    if ops.rank(x) > 1:
         _raise_value_error('input array must be scalar or 1-d sequence')
     return x

@@ -5114,7 +5116,7 @@ def polysub(a1, a2):
     >>> print(np.polysub([2, 10, -2], [3, 10, -4]))
     [-1 0 2]
     """
-    return polyadd(a1,
+    return polyadd(a1, ops.neg(_to_tensor(a2)))
 
 
 def polyval(p, x):
@@ -5150,10 +5152,10 @@ def polyval(p, x):
     """
     p = _to_poly1d(p)
     x = _to_tensor(x)
-    shape =
+    shape = ops.shape(x)
     exp_p = arange(_type_convert(int, p.size) - 1, -1, -1).astype(mstype.float32)
     var_p = (x.reshape(shape + (1,)))**exp_p
-    return
+    return ops.reduce_sum(p * var_p, -1)
 
 
 def polyder(p, m=1):
@@ -5188,7 +5190,7 @@ def polyder(p, m=1):
     if m >= p.size:
         return _to_tensor([])
     for _ in range(m):
-        coeff = _to_tensor(
+        coeff = _to_tensor(ops.make_range(_type_convert(int, p.size) - 1, 0, -1))
         p = p[:-1] * coeff
     return p

@@ -5259,13 +5261,13 @@ def polyint(p, m=1, k=None):
     if m == 0:
         return p
     if k is None:
-        k = zeros(m,
+        k = zeros(m, ops.dtype(p))
     k = atleast_1d(_to_tensor(k))
     if k.size == 1:
-        k =
-    k =
+        k = ops.tile(k, (m,))
+    k = ops.expand_dims(k, -1)
     for i in range(m):
-        coeff = _to_tensor(
+        coeff = _to_tensor(ops.make_range(_type_convert(int, p.size), 0, -1))
         p = concatenate((true_divide(p, coeff), k[i]))
     return p
@@ -5318,7 +5320,7 @@ def result_type(*arrays_and_dtypes):
     """
     def get_dtype(x):
         if isinstance(x, Tensor):
-            return
+            return ops.dtype(_to_tensor(x))
         return _get_dtype(x)
 
     dtype_out = get_dtype(arrays_and_dtypes[0])
@@ -5362,16 +5364,16 @@ def unwrap(p, discont=3.141592653589793, axis=-1):
     if not isinstance(discont, (int, float)):
         _raise_type_error('discont should be a float')
     p = _to_tensor(p)
-    ndim =
+    ndim = ops.rank(p)
     axis = _check_axis_in_range(axis, ndim)
     dd = diff(p, axis=axis)
     ddmod = remainder(add(dd, pi), 2*pi) - pi
-    ddmod =
+    ddmod = ops.masked_fill(ddmod, ops.logical_and(ddmod == -pi, dd > 0), ops.cast(pi, ddmod.dtype))
     ph_correct = ddmod - dd
-    ph_correct =
-    slice_all = _list_comprehensions(
-    slice0 = _tuple_setitem(slice_all, axis,
-    slice1 = _tuple_setitem(slice_all, axis,
+    ph_correct = ops.masked_fill(ph_correct, absolute(dd) < discont, ops.cast(0, ph_correct.dtype))
+    slice_all = _list_comprehensions(ops.rank(p), ops.make_slice(None, None, None), True)
+    slice0 = _tuple_setitem(slice_all, axis, ops.make_slice(0, 1, None))
+    slice1 = _tuple_setitem(slice_all, axis, ops.make_slice(1, None, None))
     head = p[slice0]
     tail = add(p[slice1], cumsum(ph_correct, axis))
     return concatenate((head, tail), axis=axis)
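unwrap above maps each first difference into (-pi, pi], patches the -pi/+pi boundary, zeroes corrections for steps below `discont`, and adds the cumulative correction to the tail. Line by line in NumPy, using the default discont of pi:

import numpy as np

p = np.array([0.0, 0.5, 6.5])                   # a 6.0 jump between the last two samples
dd = np.diff(p)
ddmod = np.mod(dd + np.pi, 2 * np.pi) - np.pi   # wrap each step into (-pi, pi]
ddmod[(ddmod == -np.pi) & (dd > 0)] = np.pi
ph_correct = ddmod - dd
ph_correct[np.abs(dd) < np.pi] = 0              # small steps need no correction
print(np.concatenate(([p[0]], p[1:] + np.cumsum(ph_correct))))
print(np.unwrap(p))                             # same: [0.  0.5  0.2168...]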
@@ -5408,7 +5410,7 @@ def cumprod(a, axis=None, dtype=None):
     [1 2 6]
     """
     a = _to_tensor_origin_dtype(a)
-    original_dtype =
+    original_dtype = ops.dtype(a)
 
     if axis is not None and not isinstance(axis, int):
         _raise_type_error("integer axis is expected, but got", axis)

@@ -5442,9 +5444,9 @@ def _process_index(index, dims, mode='raise'):
             idx = clip(idx, 0, d - 1)
         elif mode == "wrap":
             idx = remainder(idx, d)
-        idx =
+        idx = ops.expand_dims(idx, 0) if idx.ndim < 1 else idx
         tup += (idx,)
-    return
+    return ops.Concat(0)(tup).reshape(ori_shape)

@@ -5456,10 +5458,10 @@ def _get_strides(dims, order='C'):
     for d in dims:
         tensor = tup[-1] * d
         if tensor.ndim < 1:
-            tensor =
+            tensor = ops.expand_dims(tensor, 0)
         tup += (tensor,)
     tup = tup[::-1] if order == 'C' else tup
-    return
+    return ops.Concat(0)(tup)
@@ -5529,13 +5531,13 @@ def _vector_norm(x, _ord, axis, keepdims):
     if _ord is None:
         _ord = 2
     if _ord == inf:
-        res =
+        res = ops.ReduceMax(keepdims)(absolute(x), axis)
     elif _ord == -inf:
-        res =
+        res = ops.ReduceMin(keepdims)(absolute(x), axis)
     elif _ord == 0:
-        res =
+        res = ops.ReduceSum(keepdims)(ops.not_equal(x, 0).astype(mstype.float32), axis)
     else:
-        res = power(
+        res = power(ops.ReduceSum(keepdims)(power(absolute(x), _ord), axis), 1. / _ord)
     return res
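_vector_norm above dispatches on the order: max/min of |x| for +/-inf, a nonzero count for 0, and the general (sum |x|^p)^(1/p) otherwise. The four branches on a small vector, in NumPy:

import numpy as np

x = np.array([3.0, -4.0])
print(np.abs(x).max())                # ord=inf  -> 4.0
print(np.abs(x).min())                # ord=-inf -> 3.0
print((x != 0).sum())                 # ord=0    -> 2
print((np.abs(x) ** 2).sum() ** 0.5)  # ord=2    -> 5.0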
@@ -5548,7 +5550,7 @@ def _matrix_norm(x, _ord, axis, keepdims):
     if _in(_ord, (2, -2)):
         _raise_unimplemented_error('2-norm is not implemented for matrices')
     if _in(_ord, (None, 'fro')):
-        return
+        return ops.sqrt(ops.ReduceSum(keepdims)(ops.square(x), axis))
     axis0, axis1 = axis
     if not keepdims:
         if _check_is_inf(_abs(_ord)) and axis0 > axis1:

@@ -5556,13 +5558,13 @@ def _matrix_norm(x, _ord, axis, keepdims):
     elif _abs(_ord) == 1 and axis1 > axis0:
         axis1 -= 1
     if _check_is_inf(_ord):
-        return
+        return ops.ReduceMax(keepdims)(ops.ReduceSum(keepdims)(absolute(x), axis1), axis0)
     if _check_is_inf(_ord, True):
-        return
+        return ops.ReduceMin(keepdims)(ops.ReduceSum(keepdims)(absolute(x), axis1), axis0)
     if _ord == 1:
-        return
+        return ops.ReduceMax(keepdims)(ops.ReduceSum(keepdims)(absolute(x), axis0), axis1)
     if _ord == -1:
-        return
+        return ops.ReduceMin(keepdims)(ops.ReduceSum(keepdims)(absolute(x), axis0), axis1)
     return _raise_value_error('invalid norm order for matrices')

@@ -5608,14 +5610,14 @@ def norm(x, ord=None, axis=None, keepdims=False): # pylint: disable=redefined-builtin
     if not isinstance(ord, (int, float)) and not _in(ord, (None, 'fro', 'nuc', inf, -inf)):
         _raise_value_error('invalid value for `ord`')
     x = _to_tensor(x)
-    ndim =
+    ndim = ops.rank(x)
     if axis is None:
         if ord is None:
             x = x.ravel()
-        if
+        if ops.rank(x) not in (1, 2):
             _raise_value_error('for None axis, array must a vector or a 2-D matrix')
-        axis =
-        axis = _check_axis_valid(axis,
+        axis = ops.make_range(ops.rank(x))
+        axis = _check_axis_valid(axis, ops.rank(x))
 
     if len(axis) == 1:
         res = _vector_norm(x, ord, axis, keepdims)

@@ -5624,7 +5626,7 @@ def norm(x, ord=None, axis=None, keepdims=False): # pylint: disable=redefined-builtin
     else:
         return _raise_value_error('invalid number of dimensions to norm')
 
-    if keepdims and ndim >
+    if keepdims and ndim > ops.rank(res):
         res = _expand(res, ndim)
     return res
@@ -5658,7 +5660,7 @@ def bitwise_and(x1, x2, dtype=None):
     >>> print(np.bitwise_and(13, 17))
     1
     """
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.bitwise_and, x1, x2, dtype=dtype)
 
 
 def bitwise_or(x1, x2, dtype=None):
@@ -5690,7 +5692,7 @@ def bitwise_or(x1, x2, dtype=None):
     >>> print(np.bitwise_or(13, 16))
     29
     """
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.bitwise_or, x1, x2, dtype=dtype)
 
 
 def bitwise_xor(x1, x2, dtype=None):
@@ -5722,7 +5724,7 @@ def bitwise_xor(x1, x2, dtype=None):
     >>> print(np.bitwise_xor(13, 17))
     28
     """
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.bitwise_xor, x1, x2, dtype=dtype)
 
 
 def invert(x, dtype=None):
@@ -5757,7 +5759,7 @@ def invert(x, dtype=None):
     >>> print(np.invert(np.array(13, dtype=np.uint16)))
     65522
     """
-    return _apply_tensor_op(
+    return _apply_tensor_op(ops.invert, x, dtype=dtype)
 
 
 def rint(x, dtype=None):
@@ -5790,8 +5792,8 @@ def rint(x, dtype=None):
     """
     x = _to_tensor_origin_dtype(x)
     res = _rint(x)
-    if dtype is not None and not _check_same_type(
-        res =
+    if dtype is not None and not _check_same_type(ops.dtype(res), dtype):
+        res = ops.cast(res, dtype)
     return res
@@ -5865,20 +5867,20 @@ def correlate(a, v, mode='valid'):
 
 def _compute_1d_conv(a, v, mode):
     """Returns a 1-D sequence which is the cross-correlate of two 1-D sequences (`a` and `v`)."""
-    v_size =
+    v_size = ops.shape_mul(v.shape)
     if mode not in ('same', 'full', 'valid'):
         _raise_value_error("mode must be one of ['full', 'same', 'valid']")
     if v_size > 1:
         if mode == 'same':
             pad_left = _to_tensor(_list_comprehensions(v_size // 2, 0.0, True))
             pad_right = _to_tensor(_list_comprehensions(v_size - v_size // 2 - 1, 0.0, True))
-            a =
+            a = ops.Concat(0)((pad_left, a, pad_right))
         elif mode == 'full':
             pad = _to_tensor(_list_comprehensions(v_size - 1, 0.0, True))
-            a =
+            a = ops.Concat(0)((pad, a, pad))
     a = a.reshape(1, 1, 1, a.size)
     v = v.reshape(1, 1, 1, v.size)
-    _conv =
+    _conv = ops.Conv2D(1, (1, v.size))
     return _conv(a, v).reshape(-1)
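_compute_1d_conv above implements the correlate modes by zero-padding `a` ('same' pads v_size//2 and v_size - v_size//2 - 1, 'full' pads v_size - 1 on each side) and then running a 1xk Conv2D, which is a sliding dot product without kernel flipping. The padding-plus-sliding-dot view in NumPy:

import numpy as np

a, v = np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.0, 2.0])
pad = np.zeros(v.size - 1)
ap = np.concatenate((pad, a, pad))          # 'full' mode padding
out = np.array([ap[k:k + v.size] @ v for k in range(ap.size - v.size + 1)])
print(out)                                  # [ 2.  5.  8. 11.  4.]
print(np.correlate(a, v, 'full'))           # same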