mindspore 2.1.0__cp39-cp39-win_amd64.whl → 2.2.11__cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -1
- mindspore/_checkparam.py +23 -29
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/graph_split.py +84 -76
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +4 -11
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +122 -15
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +84 -67
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/akg_compiler/util.py +10 -7
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +2 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +6 -5
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +13 -15
- mindspore/_extends/parse/namespace.py +7 -33
- mindspore/_extends/parse/parser.py +67 -72
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +86 -106
- mindspore/_extends/parse/trope.py +1 -1
- mindspore/_extends/remote/kernel_build_server.py +25 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/amp.py +47 -11
- mindspore/boost/boost.py +1 -8
- mindspore/boost/boost_cell_wrapper.py +3 -2
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +8 -7
- mindspore/common/__init__.py +5 -3
- mindspore/common/_jit_fallback_utils.py +6 -0
- mindspore/common/_register_for_adapter.py +2 -0
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +13 -0
- mindspore/common/_utils.py +29 -0
- mindspore/common/api.py +174 -259
- mindspore/common/auto_dynamic_shape.py +494 -0
- mindspore/common/dtype.py +18 -11
- mindspore/common/dump.py +6 -4
- mindspore/common/initializer.py +14 -14
- mindspore/common/jit_config.py +33 -15
- mindspore/common/lazy_inline.py +126 -7
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/parameter.py +51 -41
- mindspore/common/seed.py +4 -4
- mindspore/common/sparse_tensor.py +13 -14
- mindspore/common/tensor.py +243 -165
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +83 -4
- mindspore/communication/management.py +152 -84
- mindspore/config/op_info.config +14 -3
- mindspore/context.py +152 -61
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +2 -2
- mindspore/dataset/audio/transforms.py +52 -52
- mindspore/dataset/callback/ds_callback.py +16 -2
- mindspore/dataset/core/config.py +68 -51
- mindspore/dataset/engine/cache_client.py +33 -7
- mindspore/dataset/engine/datasets.py +250 -112
- mindspore/dataset/engine/datasets_audio.py +43 -211
- mindspore/dataset/engine/datasets_standard_format.py +16 -35
- mindspore/dataset/engine/datasets_text.py +43 -67
- mindspore/dataset/engine/datasets_user_defined.py +86 -100
- mindspore/dataset/engine/datasets_vision.py +219 -1029
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +4 -0
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/samplers.py +1 -1
- mindspore/dataset/engine/validators.py +19 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +101 -127
- mindspore/dataset/text/utils.py +205 -138
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +95 -40
- mindspore/dataset/utils/browse_dataset.py +8 -2
- mindspore/dataset/utils/line_reader.py +17 -19
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +6 -3
- mindspore/dataset/vision/transforms.py +409 -287
- mindspore/dataset/vision/utils.py +13 -14
- mindspore/dataset/vision/validators.py +11 -1
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/map_parameter.py +14 -0
- mindspore/{nn/optim_ex → experimental/optim}/__init__.py +30 -29
- mindspore/{nn/optim_ex → experimental/optim}/adam.py +60 -67
- mindspore/{nn/optim_ex → experimental/optim}/adamw.py +181 -203
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/{nn/optim_ex → experimental/optim}/optimizer.py +252 -259
- mindspore/{nn/optim_ex → experimental/optim}/sgd.py +147 -152
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +17 -14
- mindspore/include/api/status.h +8 -3
- mindspore/include/api/types.h +37 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/dataset/constants.h +6 -5
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +13 -13
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +2 -2
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/__init__.py +0 -2
- mindspore/nn/cell.py +313 -74
- mindspore/nn/dynamic_lr.py +21 -21
- mindspore/nn/layer/activation.py +22 -30
- mindspore/nn/layer/basic.py +15 -13
- mindspore/nn/layer/channel_shuffle.py +1 -1
- mindspore/nn/layer/container.py +271 -9
- mindspore/nn/layer/conv.py +323 -204
- mindspore/nn/layer/dense.py +8 -5
- mindspore/nn/layer/embedding.py +33 -27
- mindspore/nn/layer/flash_attention.py +61 -95
- mindspore/nn/layer/image.py +8 -6
- mindspore/nn/layer/math.py +16 -25
- mindspore/nn/layer/normalization.py +107 -66
- mindspore/nn/layer/padding.py +1 -1
- mindspore/nn/layer/pooling.py +131 -109
- mindspore/nn/layer/rnn_cells.py +27 -22
- mindspore/nn/layer/rnns.py +13 -16
- mindspore/nn/layer/thor_layer.py +1 -1
- mindspore/nn/layer/transformer.py +221 -154
- mindspore/nn/learning_rate_schedule.py +9 -1
- mindspore/nn/loss/loss.py +235 -174
- mindspore/nn/optim/ada_grad.py +2 -1
- mindspore/nn/optim/adadelta.py +1 -0
- mindspore/nn/optim/adafactor.py +2 -1
- mindspore/nn/optim/adam.py +7 -4
- mindspore/nn/optim/adamax.py +3 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -3
- mindspore/nn/optim/ftrl.py +6 -5
- mindspore/nn/optim/lamb.py +7 -4
- mindspore/nn/optim/lars.py +1 -1
- mindspore/nn/optim/lazyadam.py +5 -3
- mindspore/nn/optim/momentum.py +2 -1
- mindspore/nn/optim/optimizer.py +53 -4
- mindspore/nn/optim/proximal_ada_grad.py +3 -4
- mindspore/nn/optim/rmsprop.py +4 -3
- mindspore/nn/optim/rprop.py +23 -12
- mindspore/nn/optim/sgd.py +26 -11
- mindspore/nn/optim/thor.py +9 -7
- mindspore/nn/probability/bijector/bijector.py +5 -5
- mindspore/nn/probability/bijector/power_transform.py +27 -27
- mindspore/nn/probability/bijector/softplus.py +3 -3
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -3
- mindspore/nn/probability/distribution/bernoulli.py +5 -5
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +7 -7
- mindspore/nn/probability/distribution/cauchy.py +0 -1
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +4 -4
- mindspore/nn/probability/distribution/gumbel.py +4 -4
- mindspore/nn/probability/distribution/log_normal.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +4 -4
- mindspore/nn/probability/distribution/transformed_distribution.py +3 -3
- mindspore/nn/probability/distribution/uniform.py +6 -6
- mindspore/nn/wrap/__init__.py +4 -2
- mindspore/nn/wrap/cell_wrapper.py +87 -34
- mindspore/nn/wrap/grad_reducer.py +8 -5
- mindspore/nn/wrap/loss_scale.py +105 -42
- mindspore/numpy/array_creations.py +1 -2
- mindspore/numpy/array_ops.py +3 -2
- mindspore/numpy/utils_const.py +5 -5
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/__init__.py +0 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +2 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +15 -2
- mindspore/ops/_grad_experimental/grad_debug_ops.py +0 -37
- mindspore/ops/_grad_experimental/grad_implementations.py +11 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +2 -216
- mindspore/ops/_grad_experimental/grad_math_ops.py +19 -199
- mindspore/ops/_grad_experimental/grad_sparse.py +15 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +14 -2
- mindspore/ops/_op_impl/aicpu/add.py +3 -3
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/{_custom_op/flash_attention/constants.py → aicpu/eps.py} +18 -27
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +21 -2
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +3 -3
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -5
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -5
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/tbe/__init__.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +45 -13
- mindspore/ops/_utils/utils.py +6 -1
- mindspore/ops/_vmap/vmap_array_ops.py +3 -3
- mindspore/ops/_vmap/vmap_base.py +3 -3
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +61 -7
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/base.py +37 -10
- mindspore/ops/composite/math_ops.py +5 -4
- mindspore/ops/composite/multitype_ops/_compile_utils.py +275 -73
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +16 -9
- mindspore/ops/composite/multitype_ops/add_impl.py +43 -4
- mindspore/ops/composite/multitype_ops/getitem_impl.py +42 -4
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/array_func.py +174 -193
- mindspore/ops/function/clip_func.py +81 -13
- mindspore/ops/function/debug_func.py +1 -1
- mindspore/ops/function/grad/grad_func.py +18 -9
- mindspore/ops/function/image_func.py +10 -4
- mindspore/ops/function/linalg_func.py +5 -5
- mindspore/ops/function/math_func.py +575 -386
- mindspore/ops/function/nn_func.py +568 -260
- mindspore/ops/function/random_func.py +88 -57
- mindspore/ops/function/sparse_func.py +1 -1
- mindspore/ops/function/sparse_unary_func.py +14 -12
- mindspore/ops/function/vmap_func.py +6 -5
- mindspore/ops/functional.py +15 -10
- mindspore/ops/op_info_register.py +244 -25
- mindspore/ops/operations/__init__.py +31 -19
- mindspore/ops/operations/_grad_ops.py +71 -7
- mindspore/ops/operations/_inner_ops.py +350 -17
- mindspore/ops/operations/_quant_ops.py +4 -8
- mindspore/ops/operations/_sequence_ops.py +42 -0
- mindspore/ops/operations/array_ops.py +68 -282
- mindspore/ops/operations/comm_ops.py +107 -59
- mindspore/ops/operations/custom_ops.py +94 -70
- mindspore/ops/operations/debug_ops.py +8 -4
- mindspore/ops/operations/image_ops.py +18 -12
- mindspore/ops/operations/inner_ops.py +26 -3
- mindspore/ops/operations/math_ops.py +192 -144
- mindspore/ops/operations/nn_ops.py +857 -489
- mindspore/ops/operations/other_ops.py +0 -22
- mindspore/ops/operations/random_ops.py +53 -111
- mindspore/ops/operations/sparse_ops.py +3 -1
- mindspore/ops/primitive.py +24 -18
- mindspore/parallel/_auto_parallel_context.py +68 -8
- mindspore/parallel/_cost_model_context.py +2 -2
- mindspore/parallel/_offload_context.py +17 -3
- mindspore/parallel/_parallel_serialization.py +12 -5
- mindspore/parallel/_ps_context.py +12 -0
- mindspore/parallel/_tensor.py +18 -13
- mindspore/parallel/_transformer/layers.py +5 -3
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +2 -2
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +23 -3
- mindspore/parallel/_utils.py +11 -7
- mindspore/parallel/algo_parameter_config.py +85 -5
- mindspore/parallel/checkpoint_transform.py +19 -12
- mindspore/parallel/shard.py +21 -14
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +4 -2
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +2 -2
- mindspore/profiler/parser/ascend_fpbp_generator.py +1 -1
- mindspore/profiler/parser/ascend_hccl_generator.py +249 -12
- mindspore/profiler/parser/ascend_msprof_exporter.py +150 -255
- mindspore/profiler/parser/ascend_msprof_generator.py +204 -17
- mindspore/profiler/parser/ascend_op_generator.py +6 -6
- mindspore/profiler/parser/ascend_steptrace_generator.py +6 -4
- mindspore/profiler/parser/ascend_timeline_generator.py +14 -187
- mindspore/profiler/parser/base_timeline_generator.py +10 -8
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +16 -12
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +38 -22
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +2 -2
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +21 -2
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +179 -89
- mindspore/rewrite/api/node.py +102 -19
- mindspore/rewrite/api/node_type.py +5 -1
- mindspore/rewrite/api/pattern_engine.py +1 -1
- mindspore/rewrite/api/scoped_value.py +9 -17
- mindspore/rewrite/api/symbol_tree.py +131 -47
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +93 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +33 -24
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{_extends/graph_kernel/expanders/complex → rewrite/node}/__init__.py +9 -9
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +273 -234
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/{topological_manager.py → node/node_topological_manager.py} +13 -46
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +216 -221
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +174 -113
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +42 -21
- mindspore/rewrite/parsers/function_def_parser.py +24 -16
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +196 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree.py +523 -578
- mindspore/rewrite/symbol_tree_builder.py +9 -193
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +6 -4
- mindspore/{ops/bprop_mindir → safeguard}/__init__.py +4 -3
- mindspore/safeguard/rewrite_obfuscation.py +541 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +7 -3
- mindspore/train/amp.py +323 -123
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/_backup_and_restore.py +2 -12
- mindspore/train/callback/_callback.py +29 -4
- mindspore/train/callback/_checkpoint.py +23 -8
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +4 -4
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +3 -4
- mindspore/train/callback/_summary_collector.py +15 -8
- mindspore/train/callback/_time_monitor.py +58 -5
- mindspore/train/data_sink.py +5 -11
- mindspore/train/dataset_helper.py +84 -57
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/__init__.py +3 -3
- mindspore/train/metrics/cosine_similarity.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +3 -2
- mindspore/train/metrics/mean_surface_distance.py +3 -2
- mindspore/train/metrics/metric.py +39 -19
- mindspore/train/metrics/roc.py +2 -2
- mindspore/train/metrics/root_mean_square_surface_distance.py +4 -3
- mindspore/train/mind_ir_pb2.py +85 -36
- mindspore/train/model.py +187 -47
- mindspore/train/serialization.py +487 -161
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/_writer_pool.py +3 -2
- mindspore/train/summary/summary_record.py +37 -17
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/train/train_thor/dataset_helper.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/METADATA +7 -4
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/RECORD +406 -463
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -54
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/dataset/datapreprocess/__init__.py +0 -20
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/include/api/net.h +0 -142
- mindspore/nn/lr_scheduler.py +0 -262
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -248
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -181
- mindspore/ops/_grad_experimental/grad_other_ops.py +0 -72
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/_grad_experimental/grad_sequence_ops.py +0 -351
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +0 -350
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +0 -409
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +0 -578
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +0 -199
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +0 -446
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +0 -45
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +0 -67
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +0 -62
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/WHEEL +0 -0
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/entry_points.txt +0 -0
- {mindspore-2.1.0.dist-info → mindspore-2.2.11.dist-info}/top_level.txt +0 -0
mindspore/amp.py
CHANGED
@@ -15,11 +15,13 @@
 """ms function for mixed precision."""
 from __future__ import absolute_import

+import os
 from abc import ABC, abstractmethod
 from mindspore.common import mutable
 from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore.ops.operations.math_ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
 from mindspore import _checkparam as validator
+from mindspore._c_expression import MSContext
 from .common import dtype as mstype
 from . import context
 from . import ops
@@ -37,8 +39,13 @@ _partial = ops.Partial()


 @constexpr
-def
-    return
+def _ascend_910A_target():
+    return MSContext.get_instance().get_ascend_soc_version() == "ascend910"
+
+
+@constexpr
+def _ascend_910B_target():
+    return MSContext.get_instance().get_ascend_soc_version() == "ascend910b"


 @constexpr
@@ -72,9 +79,10 @@ def _overflow(inputs):


 @jit
-def _all_finite(inputs):
+def _all_finite(inputs, check_overflow_mode):
     """all finite check"""
-    if
+    if (_ascend_910A_target()) or \
+            (_ascend_910B_target() and check_overflow_mode != "INFNAN_MODE"):
         status = Tensor([0] * 8, mstype.int32)
         status = ops.depend(status, inputs)
         get_status = _get_cache_prim(NPUGetFloatStatusV2)()(status)
@@ -83,6 +91,7 @@ def _all_finite(inputs):
         get_status = ops.depend(get_status, clear_status)
         status_finite = get_status.equal(Tensor(0, mstype.int32)).all()
         return status_finite
+
     outputs = _hypermap(_partial(_overflow), inputs)
     flag_sum = ops.addn(outputs).reshape(())
     status_finite = ops.less(flag_sum, 1)
@@ -117,10 +126,11 @@ def all_finite(inputs):

     Tutorial Examples:
         - `Automatic Mix Precision - Loss Scaling
-          <https://mindspore.cn/tutorials/en/r2.
+          <https://mindspore.cn/tutorials/en/r2.2/advanced/mixed_precision.html#loss-scaling>`_
     """
     inputs = mutable(inputs)
-
+    _check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')
+    return _all_finite(inputs, _check_overflow_mode)


 @jit_class
@@ -132,11 +142,36 @@ class LossScaler(ABC):
    to scale and unscale the loss value and gradients to avoid overflow, `adjust` is used to update the
    loss scale value.

-    For more information, refer to the `tutorials <https://mindspore.cn/tutorials/en/r2.
+    For more information, refer to the `tutorials <https://mindspore.cn/tutorials/en/r2.2/advanced/
    mixed_precision.html#loss-scaling>`_.

    .. warning::
        This is an experimental API that is subject to change or deletion.
+
+    Examples:
+        >>> from mindspore.amp import LossScaler, _grad_scale_map, _grad_unscale_map
+        >>> from mindspore import ops, Parameter, Tensor
+        >>> from mindspore.common import dtype as mstype
+        >>>
+        >>> class MyLossScaler(LossScaler):
+        ...     def __init__(self, scale_value):
+        ...         self.scale_value = Parameter(Tensor(scale_value, dtype=mstype.float32), name="scale_value")
+        ...
+        ...     def scale(self, inputs):
+        ...         inputs = mutable(inputs)
+        ...         return _grad_scale_map(self.scale_value, inputs)
+        ...
+        ...     def unscale(self, inputs):
+        ...         inputs = mutable(inputs)
+        ...         return _grad_unscale_map(self.scale_value, inputs)
+        ...
+        ...     def adjust(self, grads_finite):
+        ...         scale_mul_factor = self.scale_value * self.scale_factor
+        ...         scale_value = ops.select(grads_finite, scale_mul_factor, self.scale_value)
+        ...         ops.assign(self.scale_value, scale_value)
+        ...         return True
+        >>>
+        >>> loss_scaler = MyLossScaler(1024)
    """
    @abstractmethod
    def scale(self, inputs):
@@ -234,7 +269,8 @@ class StaticLossScaler(LossScaler):

    def adjust(self, grads_finite):
        """
-        `scale_value` is fixed
+        Adjust `scale_value` in `LossScaler`. `scale_value` is fixed in `StaticLossScaler`, so this method
+        return False directly.

        Args:
            grads_finite (Tensor): a scalar bool Tensor indicating whether the grads are finite.
@@ -298,7 +334,7 @@ class DynamicLossScaler(LossScaler):

        Tutorial Examples:
            - `Automatic Mix Precision - Loss Scaling
-              <https://mindspore.cn/tutorials/en/r2.
+              <https://mindspore.cn/tutorials/en/r2.2/advanced/mixed_precision.html#loss-scaling>`_
        """
        inputs = mutable(inputs)
        return _grad_scale_map(self.scale_value, inputs)
@@ -315,7 +351,7 @@ class DynamicLossScaler(LossScaler):

        Tutorial Examples:
            - `Automatic Mix Precision - Loss Scaling
-              <https://mindspore.cn/tutorials/en/r2.
+              <https://mindspore.cn/tutorials/en/r2.2/advanced/mixed_precision.html#loss-scaling>`_
        """
        inputs = mutable(inputs)
        return _grad_unscale_map(self.scale_value, inputs)
@@ -329,7 +365,7 @@ class DynamicLossScaler(LossScaler):

        Tutorial Examples:
            - `Automatic Mix Precision - Loss Scaling
-              <https://mindspore.cn/tutorials/en/r2.
+              <https://mindspore.cn/tutorials/en/r2.2/advanced/mixed_precision.html#loss-scaling>`_
        """
        one = ops.ones((), self.scale_value.dtype)
        scale_mul_factor = self.scale_value * self.scale_factor
mindspore/boost/boost.py
CHANGED
@@ -85,9 +85,6 @@ class AutoBoost:
             "freeze_p": 0.7,
             "total_steps": 65536
         }
-        "grad_accumulation": {
-            "grad_accumulation_step": 1
-        },
         "dim_reduce": {
             "rho": 0.55,
             "gamma": 0.9,
@@ -139,10 +136,6 @@ class AutoBoost:
          - freeze_p (float): Gradient freezing probability. Default: ``0.7`` .
          - total_steps (int): Total training steps. Default: ``65536`` .

-        - grad_accumulation:
-
-          - grad_accumulation_step (int): Steps to accumulate gradients. Default: ``1`` .
-
        - dim_reduce:

          The leading principles of dim_reduce:
@@ -204,7 +197,7 @@ class AutoBoost:
        >>> import json
        >>> boost_json = "/path/boost_config.json"
        >>> with open(boost_json, 'r') as fp:
-
+        ...     boost_config_dict = json.load(fp)
        >>> boost = AutoBoost("O1", boost_config_dict)
    """
    _instance_lock = threading.Lock()

mindspore/boost/boost_cell_wrapper.py
CHANGED

@@ -136,7 +136,7 @@ class BoostTrainOneStepCell(TrainOneStepCell):
        >>> from mindspore import boost
        >>> from mindspore import nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
        >>> optim = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
@@ -412,7 +412,8 @@ class BoostTrainOneStepWithLossScaleCell(BoostTrainOneStepCell):
        network (Cell): The training network. The network only supports single output.
        optimizer (Cell): Optimizer for updating the weights.
        scale_sense (Union[Tensor, Cell]): If this value is Cell type, the loss scaling update logic cell.If this value
-
+            is Tensor type, :func:`mindspore.nn.TrainOneStepWithLossScaleCell.set_sense_scale` can be called to update
+            loss scale factor, Tensor with shape :math:`()` or :math:`(1,)`.

    Inputs:
        - **\*inputs** (Tuple(Tensor)) - Tuple of input tensors with shape :math:`(N, \ldots)`.

mindspore/boost/grad_accumulation.py
CHANGED

@@ -71,7 +71,7 @@ class GradientAccumulation(Cell):

        if self._accumulation_step >= self._max_accumulation_step:
            loss = F.depend(loss, self.optimizer(self._grad_accumulation))
-            self._accumulation_step
+            F.assign(self._accumulation_step, 0)

        if self._accumulation_step == 0:
            loss = F.depend(loss, self.hyper_map(F.partial(gradient_clear_op), self._grad_accumulation))

mindspore/boost/group_loss_scale_manager.py
CHANGED

@@ -27,7 +27,7 @@ __all__ = ["GroupLossScaleManager"]


 class GroupLossScaleManager(Cell):
-    """
+    r"""
    Enhanced hybrid precision algorithm supports multi-layer application of different loss scales and
    dynamic updating of loss scales.

@@ -41,7 +41,8 @@ class GroupLossScaleManager(Cell):
        - **layer2** (Int) - Last network layer value.

    Outputs:
-        - **
+        - **out** (Tensor) - A tensor with a group of loss scale tags that marks
+          the loss scale group number of the current tensor.

    Supported Platforms:
        ``Ascend``
@@ -80,10 +81,10 @@ class GroupLossScaleManager(Cell):
        >>> param_group1 = []
        >>> param_group2 = []
        >>> for param in net.trainable_params():
-
-
-
-
+        ...     if 'conv' in param.name:
+        ...         param_group1.append(param)
+        ...     else:
+        ...         param_group2.append(param)
        >>> loss_scale_manager.loss_scale_groups = [param_group1, param_group2]
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
        >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
@@ -93,7 +94,7 @@ class GroupLossScaleManager(Cell):
        ...                loss_scale_manager=loss_scale_manager,
        ...                boost_level="O1", boost_config_dict=boost_config_dict)
        >>> # Create the dataset taking MNIST as an example. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/mnist.py
        >>> dataset = create_dataset()
        >>> model.train(2, dataset)
    """
|
mindspore/common/__init__.py
CHANGED
@@ -18,7 +18,7 @@ from mindspore.common import dtype
 from mindspore.common.api import ms_function, ms_memory_recycle, ms_class, jit, jit_class, _no_grad
 from mindspore.common.dtype import Type, int8, byte, int16, short, int32, intc, int64, intp, \
     uint8, ubyte, uint16, ushort, uint32, uintc, uint64, uintp, float16, half, \
-    float32, single, float64, double, bool_, float_, list_, tuple_, int_, \
+    float32, single, float64, bfloat16, double, bool_, float_, list_, tuple_, int_, \
     uint, number, tensor_type, string, type_none, TensorType, Int, \
     complex64, complex128, dtype_to_nptype, _null, _NullType, \
     dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype
@@ -30,6 +30,7 @@ from mindspore.common.sparse_tensor import RowTensor, RowTensorInner, SparseTensor
 from mindspore.common.mutable import mutable
 from mindspore.common.jit_config import JitConfig
 from mindspore.common.lazy_inline import lazy_inline
+from mindspore.common.mindir_util import load_mindir, save_mindir

 # symbols from dtype
 __all__ = [
@@ -55,7 +56,8 @@ __all__ = [
     "complex64", "complex128",
     # __method__ from dtype
     "dtype_to_nptype", "dtype_to_pytype",
-    "pytype_to_dtype", "get_py_obj_dtype"
+    "pytype_to_dtype", "get_py_obj_dtype",
+    "bfloat16",
 ]

 __all__.extend([
@@ -67,5 +69,5 @@ __all__.extend([
     "set_dump",
     "ms_memory_recycle",
     "mutable", "JitConfig",
-    "lazy_inline"
+    "lazy_inline", "load_mindir", "save_mindir"
 ])

mindspore/common/_jit_fallback_utils.py
CHANGED

@@ -103,3 +103,9 @@ def list_inplace_clear(list_obj):
     list_obj = []
     list_obj.clear()
     return list_obj
+
+
+def dict_inplace_setitem(dict_obj, key, target):
+    """Inplace dictionary setitem operation for dict_obj."""
+    dict_obj[key] = target
+    return dict_obj

mindspore/common/_register_for_adapter.py
CHANGED

@@ -20,6 +20,7 @@ class Registry:
     """Registry class for ms adapter."""

     def __init__(self):
+        self.is_registered = False
         self._tensor = None
         self._parameter = None
         self._convert_map = {}
@@ -48,6 +49,7 @@ class Registry:
         if self._tensor is not None:
             raise ValueError("Repeated registration of tensor in ms adapter config.")
         self._tensor = value
+        self.is_registered = True

     def register_parameter(self, value):
         """Register the parameter of ms adapter."""

mindspore/common/_register_for_tensor.py
CHANGED

@@ -17,7 +17,7 @@

 from __future__ import absolute_import
 from collections import UserDict
-from mindspore import
+from mindspore._c_expression import Tensor as Tensor_


 class Registry(UserDict):
@@ -31,7 +31,7 @@ class Registry(UserDict):
         """Get the value by str."""
         if not isinstance(obj_str, str):
             raise TypeError("key for tensor registry must be string.")
-        if
+        if Tensor_._is_test_stub() is True:  # pylint: disable=W0212
             def wrap(*args):
                 new_args = list(args)
                 new_args.append(obj_str)
mindspore/common/_stub_tensor.py
CHANGED
@@ -144,6 +144,8 @@ class StubTensor:
     slice_num_of_persistent_data = _stub_method(Tensor.slice_num_of_persistent_data)
     slice_shape_of_persistent_data = _stub_method(Tensor.slice_shape_of_persistent_data)
     flush_from_cache = _stub_method(Tensor.flush_from_cache)
+    contiguous = _stub_method(Tensor.contiguous)
+    is_contiguous = _stub_method(Tensor.is_contiguous)

     def stub_sync(self):
         """sync real tensor."""
@@ -156,6 +158,17 @@ class StubTensor:
             self.stub = None
         return self.tensor

+    def __getstate__(self):
+        state = {}
+        value = self.stub.get_value() if self.stub else self.tensor.__getstate__()
+        state["value"] = value
+        return state
+
+    def __setstate__(self, state):
+        value = state.pop("value")
+        self.stub = None
+        self.tensor = Tensor(value, internal=True)
+

 def _init_stub_tensor_api():
     """adapt to python tensor and cpp tensor api"""
mindspore/common/_utils.py
CHANGED
@@ -18,9 +18,11 @@

 import os
 import math
+import ctypes
 import functools

 import mindspore
+from mindspore import log as logger
 from mindspore.common import dtype as mstype
 from mindspore.parallel._ps_context import _is_ps_mode, _is_role_pserver, _is_role_sched

@@ -103,3 +105,30 @@ def ones_like(x):
 def zeros_like(x):
     """Implement `zeroslike`."""
     return mindspore.ops.composite.zeros_like(x)
+
+
+def load_lib(lib_path):
+    """load specified library."""
+    try:
+        ctypes.CDLL(lib_path)
+    # pylint: disable=broad-except
+    except Exception:
+        logger.warning(f'Loading {lib_path} lib error.')
+        return False
+    return True
+
+
+def _jit_fallback_next_func(xs):
+    """Generate ms_next for xs"""
+    if hasattr(xs, "__next__"):
+        # Convert an iterator to tuple first.
+        xs = tuple(xs)
+    return xs[0], xs[1:]
+
+
+def _jit_fallback_has_next_func(xs):
+    """Determine whether xs has next value"""
+    if hasattr(xs, "__next__"):
+        # Convert an iterator to tuple first.
+        xs = tuple(xs)
+    return len(xs) > 0