mindspore-2.4.10-cp39-cp39-win_amd64.whl → mindspore-2.5.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +8 -3
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +0 -5
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/compile_config.py +64 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +375 -0
- mindspore/_extends/parse/parser.py +23 -5
- mindspore/_extends/parse/standard_method.py +123 -27
- mindspore/_extends/pijit/pijit_func_white_list.py +1 -1
- mindspore/amp.py +7 -1
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/boost_cell_wrapper.py +136 -41
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +3 -1
- mindspore/common/_register_for_tensor.py +0 -1
- mindspore/common/_stub_tensor.py +25 -4
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +6132 -0
- mindspore/common/api.py +98 -21
- mindspore/common/dtype.py +34 -34
- mindspore/common/dump.py +2 -1
- mindspore/common/file_system.py +8 -3
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +3 -1
- mindspore/common/initializer.py +3 -4
- mindspore/common/lazy_inline.py +8 -2
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/parameter.py +31 -15
- mindspore/common/tensor.py +713 -1337
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +215 -173
- mindspore/communication/management.py +23 -20
- mindspore/context.py +285 -191
- mindspore/dataset/__init__.py +23 -19
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +84 -3
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +5 -4
- mindspore/dataset/engine/datasets.py +192 -149
- mindspore/dataset/engine/datasets_audio.py +14 -0
- mindspore/dataset/engine/datasets_standard_format.py +11 -11
- mindspore/dataset/engine/datasets_text.py +38 -1
- mindspore/dataset/engine/datasets_user_defined.py +100 -66
- mindspore/dataset/engine/datasets_vision.py +81 -8
- mindspore/dataset/engine/iterators.py +281 -63
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +26 -2
- mindspore/dataset/engine/serializer_deserializer.py +1 -1
- mindspore/dataset/engine/validators.py +43 -11
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +29 -12
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +94 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +127 -0
- mindspore/device_context/cpu/__init__.py +25 -0
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +134 -0
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/optim/adadelta.py +26 -22
- mindspore/experimental/optim/adam.py +3 -0
- mindspore/experimental/optim/lr_scheduler.py +33 -24
- mindspore/experimental/optim/radam.py +33 -30
- mindspore/hal/device.py +28 -0
- mindspore/hal/event.py +17 -0
- mindspore/hal/memory.py +94 -3
- mindspore/hal/stream.py +91 -6
- mindspore/include/api/context.h +0 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +12 -0
- mindspore/mindrecord/__init__.py +1 -1
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +824 -218
- mindspore/mint/distributed/__init__.py +66 -4
- mindspore/mint/distributed/distributed.py +2594 -44
- mindspore/mint/linalg/__init__.py +6 -0
- mindspore/mint/nn/__init__.py +473 -14
- mindspore/mint/nn/functional.py +486 -11
- mindspore/mint/nn/layer/__init__.py +17 -4
- mindspore/mint/nn/layer/_functions.py +330 -0
- mindspore/mint/nn/layer/activation.py +169 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +727 -0
- mindspore/mint/nn/layer/normalization.py +215 -19
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +170 -0
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/special/__init__.py +2 -1
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/cell.py +126 -19
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +6 -6
- mindspore/nn/layer/basic.py +35 -25
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/embedding.py +3 -3
- mindspore/nn/layer/normalization.py +8 -7
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +47 -13
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +48 -26
- mindspore/nn/learning_rate_schedule.py +5 -3
- mindspore/nn/loss/loss.py +31 -36
- mindspore/nn/optim/ada_grad.py +1 -0
- mindspore/nn/optim/adadelta.py +2 -2
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/cell_wrapper.py +4 -6
- mindspore/nn/wrap/loss_scale.py +3 -4
- mindspore/numpy/array_creations.py +60 -62
- mindspore/numpy/array_ops.py +148 -143
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +16 -16
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +2 -1
- mindspore/ops/_grad_experimental/grad_comm_ops.py +94 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +6 -1
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_vmap/vmap_array_ops.py +20 -19
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +19 -13
- mindspore/ops/_vmap/vmap_math_ops.py +11 -9
- mindspore/ops/_vmap/vmap_nn_ops.py +20 -34
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +149 -12
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -61
- mindspore/ops/auto_generate/gen_extend_func.py +554 -60
- mindspore/ops/auto_generate/gen_ops_def.py +1621 -115
- mindspore/ops/auto_generate/gen_ops_prim.py +8024 -3409
- mindspore/ops/auto_generate/pyboost_inner_prim.py +183 -79
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +229 -30
- mindspore/ops/composite/multitype_ops/pow_impl.py +0 -29
- mindspore/ops/function/__init__.py +12 -0
- mindspore/ops/function/array_func.py +561 -159
- mindspore/ops/function/clip_func.py +64 -0
- mindspore/ops/function/debug_func.py +28 -20
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +5 -4
- mindspore/ops/function/math_func.py +1659 -290
- mindspore/ops/function/nn_func.py +988 -317
- mindspore/ops/function/parameter_func.py +3 -56
- mindspore/ops/function/random_func.py +243 -33
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/functional.py +18 -5
- mindspore/ops/functional_overload.py +897 -0
- mindspore/ops/operations/__init__.py +3 -2
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -34
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +38 -8
- mindspore/ops/operations/array_ops.py +45 -303
- mindspore/ops/operations/comm_ops.py +19 -16
- mindspore/ops/operations/custom_ops.py +11 -55
- mindspore/ops/operations/debug_ops.py +42 -47
- mindspore/ops/operations/inner_ops.py +6 -4
- mindspore/ops/operations/linalg_ops.py +3 -2
- mindspore/ops/operations/manually_defined/ops_def.py +185 -104
- mindspore/ops/operations/math_ops.py +11 -216
- mindspore/ops/operations/nn_ops.py +146 -308
- mindspore/ops/primitive.py +23 -21
- mindspore/ops/tensor_method.py +1669 -0
- mindspore/ops_generate/aclnn_kernel_register_auto_cc_generator.py +110 -0
- mindspore/ops_generate/add_tensor_docs_generator.py +54 -0
- mindspore/ops_generate/arg_handler.py +0 -61
- mindspore/ops_generate/auto_grad_impl_cc_generator.py +135 -0
- mindspore/ops_generate/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/base_generator.py +11 -0
- mindspore/ops_generate/cpp_create_prim_instance_helper_generator.py +108 -0
- mindspore/ops_generate/functional_map_cpp_generator.py +491 -0
- mindspore/ops_generate/functional_overload_py_generator.py +110 -0
- mindspore/ops_generate/functions_cc_generator.py +233 -0
- mindspore/ops_generate/gen_aclnn_implement.py +110 -114
- mindspore/ops_generate/gen_constants.py +157 -3
- mindspore/ops_generate/gen_ops.py +245 -990
- mindspore/ops_generate/gen_pyboost_func.py +97 -998
- mindspore/ops_generate/gen_utils.py +119 -33
- mindspore/ops_generate/lite_ops_cpp_generator.py +155 -0
- mindspore/ops_generate/op_api_proto.py +206 -0
- mindspore/ops_generate/op_def_py_generator.py +131 -0
- mindspore/ops_generate/op_prim_py_generator.py +480 -0
- mindspore/ops_generate/op_proto.py +373 -108
- mindspore/ops_generate/op_template_parser.py +436 -0
- mindspore/ops_generate/ops_def_cc_generator.py +288 -0
- mindspore/ops_generate/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/ops_name_h_generator.py +68 -0
- mindspore/ops_generate/ops_primitive_h_generator.py +81 -0
- mindspore/ops_generate/pyboost_functions_cpp_generator.py +370 -0
- mindspore/ops_generate/pyboost_functions_h_generator.py +68 -0
- mindspore/ops_generate/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost_grad_function_cpp_generator.py +154 -0
- mindspore/ops_generate/pyboost_inner_prim_generator.py +131 -0
- mindspore/ops_generate/pyboost_native_grad_functions_generator.py +268 -0
- mindspore/ops_generate/pyboost_op_cpp_code_generator.py +851 -0
- mindspore/ops_generate/pyboost_overload_functions_cpp_generator.py +344 -0
- mindspore/ops_generate/pyboost_utils.py +92 -33
- mindspore/ops_generate/template.py +294 -44
- mindspore/ops_generate/tensor_func_reg_cpp_generator.py +422 -0
- mindspore/parallel/__init__.py +3 -3
- mindspore/parallel/_auto_parallel_context.py +24 -33
- mindspore/parallel/_parallel_serialization.py +13 -2
- mindspore/parallel/_utils.py +4 -1
- mindspore/parallel/algo_parameter_config.py +1 -1
- mindspore/parallel/checkpoint_transform.py +44 -0
- mindspore/parallel/cluster/process_entity/_api.py +131 -37
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +20 -3
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +3 -0
- mindspore/parallel/transform_safetensors.py +119 -253
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +17 -4
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +166 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +261 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +84 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +260 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +333 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +252 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +313 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +322 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +265 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +97 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +138 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +174 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +202 -0
- mindspore/profiler/common/path_manager.py +371 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +476 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +210 -0
- mindspore/profiler/common/profiler_path_manager.py +120 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +270 -37
- mindspore/profiler/envprofiler.py +138 -0
- mindspore/profiler/mstx.py +199 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +309 -0
- mindspore/profiler/profiler.py +580 -93
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +114 -0
- mindspore/profiler/schedule.py +208 -0
- mindspore/rewrite/api/symbol_tree.py +1 -2
- mindspore/run_check/_check_version.py +2 -6
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +148 -0
- mindspore/runtime/memory.py +392 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +2 -2
- mindspore/train/_utils.py +53 -18
- mindspore/train/amp.py +8 -4
- mindspore/train/callback/_checkpoint.py +32 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +105 -69
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_summary_collector.py +44 -6
- mindspore/train/callback/_tft_register.py +31 -10
- mindspore/train/dataset_helper.py +11 -11
- mindspore/train/metrics/precision.py +4 -5
- mindspore/train/mind_ir_pb2.py +167 -46
- mindspore/train/model.py +13 -15
- mindspore/train/serialization.py +462 -76
- mindspore/train/summary/summary_record.py +1 -2
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +4 -2
- mindspore/utils/dryrun.py +138 -0
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/METADATA +2 -3
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/RECORD +385 -261
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/entry_points.txt +1 -1
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.5.0.dist-info}/top_level.txt +0 -0
mindspore/device_manager.py ADDED

@@ -0,0 +1,134 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Device manager interfaces."""
+
+import os
+from mindspore import log as logger
+from mindspore._c_expression import DeviceManagerConf, DeviceContextManager, MSContext, CollectiveManager
+from mindspore._checkparam import args_type_check
+from mindspore.parallel._ps_context import _need_reset_device_target_for_ps
+
+__all__ = ['set_device', 'set_deterministic']
+
+
+@args_type_check(device_target=str, device_id=int)
+def set_device(device_target, device_id=None):
+    """
+    Set device target and device id for running environment.
+
+    Note:
+        - The `device_target` must be set in the ["CPU", "GPU", "Ascend"], there is no default value.
+        - Suggest setting `device_target` and `device_id` before calling :func:`mindspore.communication.init`.
+
+    Args:
+        device_target (str): The target device to run, only support "Ascend", "GPU", and "CPU".
+        device_id (int): ID of the target device, the value must be in [0, device_num_per_host-1].
+            The frame will set different default behaviours according to the scenario:
+            if it is a single-card scenario, the frame will be set to 0.
+            In a distributed scenario where msrun is started, the framework will
+            automatically negotiate the available device_id values.
+            In a distributed scenario with other startup methods, the frame is set to 0.
+            "device_num_per_host" refers to the total number of devices on the host.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.set_device("Ascend", 1)
+    """
+    valid_targets = ["CPU", "GPU", "Ascend"]
+    if device_target not in valid_targets:
+        raise ValueError(f"The argument 'device_target' must be one of {valid_targets}, but got {device_target}.")
+    # If in Parameter Server mode, Ascend card should not be used by server and scheduler.
+    if _need_reset_device_target_for_ps(device_target):
+        logger.info("Reset device target to CPU when set_device.")
+        device_target = "CPU"
+
+    is_default = False
+    if device_id is None:
+        device_id = 0
+        is_default = True
+    if device_id < 0:
+        raise ValueError("The device id must bigger than or equal to 0.")
+
+    MSContext.get_instance().set_device_target_inner(device_target)
+
+    if DeviceManagerConf.get_instance().is_device_enable():
+        old_device_target = DeviceManagerConf.get_instance().get_device_target()
+        old_device_id = DeviceManagerConf.get_instance().get_device_id()
+        if old_device_target != device_target or old_device_id != device_id:
+            raise RuntimeError("The 'mindspore.set_device' can not be modified.")
+        return
+
+    device_context = DeviceContextManager.get_instance().get_device_context(device_target)
+    if device_context is not None and device_context.initialized():
+        raise RuntimeError("The runtime has been initialized, please set it before the kernel is executed, "
+                           "or before calling 'mindspore.communication.init()'. "
+                           "Suggest setting it as early as possible.")
+    DeviceManagerConf.get_instance().set_device(device_target, device_id, is_default)
+
+
+@args_type_check(deterministic=bool)
+def set_deterministic(deterministic):
+    """
+    Enables or disables deterministic computing.
+
+    When deterministic computing is enabled, the same output is generated if an operator is executed
+    for multiple times with the same hardware and input. This often slows down operator execution.
+    In distributed scenario, we suggest user to set deterministic mode before
+    calling :func:`mindspore.communication.init` to enable deterministic operation for
+    communication operators in the global communication group.
+
+    The framework not enabled deterministic computation by default.
+
+    Args:
+        deterministic (bool): Whether to enable deterministic computing.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> ms.set_deterministic(True)
+    """
+    # Check the configuration environment whether valid.
+    if DeviceManagerConf.get_instance().is_deterministic_configured():
+        raise RuntimeError("The 'mindspore.set_deterministic' can not be set repeatedly.")
+
+    # Must wait for all async created groups to be initialized so that
+    # deterministic feature could be consistent between all processes.
+    CollectiveManager.get_instance().wait_all_comm_init()
+
+    # Check the hccl_deterministic and te_parallel_compiler.
+    hccl_deterministic = os.getenv("HCCL_DETERMINISTIC")
+    te_parallel_compiler = os.getenv("TE_PARALLEL_COMPILER")
+    if deterministic:
+        if hccl_deterministic and hccl_deterministic != "true":
+            logger.warning(f"Environment 'HCCL_DETERMINISTIC' should be 'true' when set deterministic='True', but "
+                           f"got '{hccl_deterministic}'. 'HCCL_DETERMINISTIC' will be set to 'true'.")
+        if te_parallel_compiler and te_parallel_compiler != "1":
+            logger.warning(f"Environment 'TE_PARALLEL_COMPILER' should be '1' when set deterministic='True', but "
+                           f"got '{te_parallel_compiler}'. 'TE_PARALLEL_COMPILER' will be set to '1'.")
+        os.environ["HCCL_DETERMINISTIC"] = "true"
+        os.environ["TE_PARALLEL_COMPILER"] = "1"
+    else:
+        if hccl_deterministic and hccl_deterministic != "false":
+            logger.warning(f"Environment 'HCCL_DETERMINISTIC' should not be set or be 'false' when set "
+                           f"deterministic='False', but got '{hccl_deterministic}'. 'HCCL_DETERMINISTIC' "
+                           f"will be unset.")
+            del os.environ["HCCL_DETERMINISTIC"]
+        if te_parallel_compiler and te_parallel_compiler != "0":
+            logger.warning(f"Environment 'TE_PARALLEL_COMPILER' should not be set or be '0' when set "
+                           f"deterministic='False', but got '{te_parallel_compiler}'. 'TE_PARALLEL_COMPILER' "
+                           f"will be unset.")
+            del os.environ["TE_PARALLEL_COMPILER"]

+    DeviceManagerConf.get_instance().set_deterministic(deterministic)
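The two functions above are the whole public surface of the new `mindspore.device_manager` module. A minimal sketch of how they compose, based only on the code shown (the "CPU" target keeps the sketch runnable without an NPU; the error paths follow from the guards in `set_device` and `set_deterministic`):

```python
import mindspore as ms

# Configure the backend once, as early as possible (before communication
# init or the first kernel launch).
ms.set_device("CPU")        # device_id defaults to 0 and is recorded as a default
ms.set_deterministic(True)  # also exports HCCL_DETERMINISTIC / TE_PARALLEL_COMPILER

# Both settings are write-once: a conflicting second call raises RuntimeError.
try:
    ms.set_device("CPU", 1)
except RuntimeError as err:
    print(err)  # The 'mindspore.set_device' can not be modified.
try:
    ms.set_deterministic(False)
except RuntimeError as err:
    print(err)  # The 'mindspore.set_deterministic' can not be set repeatedly.
```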
mindspore/dnnl.dll CHANGED (binary file)

mindspore/dpcmi.dll CHANGED (binary file)
mindspore/experimental/llm_boost/__init__.py CHANGED

@@ -16,6 +16,7 @@
 from __future__ import absolute_import
 
 from mindspore.experimental.llm_boost.atb import LlamaBoost, QwenBoost
+from mindspore.experimental.llm_boost.ascend_native import *
 from mindspore.experimental.llm_boost.register import LlmBoostRegister
 
 __all__ = ["LlmBoostRegister"]
mindspore/experimental/llm_boost/ascend_native/__init__.py ADDED

@@ -0,0 +1,22 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Provide llm boost for inference, such as LlamaBoost.
+"""
+from __future__ import absolute_import
+
+from mindspore.experimental.llm_boost.ascend_native.llama_boost_ascend_native import LlamaBoostAscendNative
+
+__all__ = ['LlamaBoostAscendNative']
mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py ADDED

@@ -0,0 +1,211 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""AscendNative Llama Boost APIs."""
+
+import os
+import numpy as np
+from mindspore.common import Tensor, dtype
+from mindspore.experimental.llm_boost.ascend_native.llm_boost import LLMBoost
+from mindspore.experimental.llm_boost.register import LlmBoostRegister, LlmBoostType
+
+
+def RoundUp(val: int, align: int) -> int:
+    if align == 0:
+        return 0
+    return -(val // -align) * align
+
+
+def ConvertTensor(nd_mat: np.ndarray, transpose: bool = True, nd2nz: bool = True) -> np.ndarray:
+    """ Transforms tensor format from Nd to Nz """
+    if transpose:
+        nd_mat = np.transpose(nd_mat)
+    if not nd2nz:
+        return nd_mat
+    block_size = (16, 16)
+    r = RoundUp(nd_mat.shape[0], block_size[0])
+    c = RoundUp(nd_mat.shape[1], block_size[1])
+    r_pad = r - nd_mat.shape[0]
+    c_pad = c - nd_mat.shape[1]
+    nd_mat = np.pad(nd_mat, ((0, r_pad), (0, c_pad)))
+    nz_mat = np.transpose(np.reshape(
+        nd_mat, (r, c // block_size[1], block_size[1])), (1, 0, 2))
+    nz_mat = nz_mat.reshape(r, c)
+    return nz_mat
+
+
+@LlmBoostRegister.register(LlmBoostType.ASCEND_NATIVE, "Llama")
+class LlamaBoostAscendNative(LLMBoost):
+    r"""
+    Implements an Llama model in a single kernel.
+    it forwards the python functions to the C++ binded object
+    """
+    def _get_from_dict(self, dictionary, name):
+        """ internal function to get a specific tensor from the dictionary """
+        all_relevant_layers = [value for key, value in dictionary.items() if name in key]
+        if all_relevant_layers:
+            return all_relevant_layers[0].asnumpy()
+        return None
+
+    def _get_quant_triplet_from_dict(self, dictionary, name):
+        """ internal function to get a weight triple tensor from the dictionary """
+        weights = self._get_from_dict(dictionary, name + "._handler.weight")
+        scale = self._get_from_dict(dictionary, name + "._weight_quantizer.scale")
+        offset = self._get_from_dict(dictionary, name + "._weight_quantizer.zp_neg")
+        return weights, scale, offset
+
+    def _prepare_single_layer(self, ckpt, config, id):
+        """ prepares the dictionary of weights of a single layer """
+        prefix = 'model.layers.' + str(id)
+        is_last = (id == config.num_layers-1)
+        layer = 'layers.' + str(id) + '.'
+        l_dict = {key: value for key, value in ckpt.items() if layer in key}
+        if config.n_kv_heads is None:
+            config.n_kv_heads = config.num_heads
+        start = 0
+        end = config.hidden_size
+        kv_start = 0
+        kv_end = int(config.hidden_size*config.n_kv_heads/config.num_heads)
+        ffn_hid = [value for key, value in l_dict.items() if "w3" in key][0].shape[0]
+        ffn_start = 0
+        ffn_end = ffn_hid
+        rank_size = int(os.getenv('RANK_SIZE', '1'))
+        #Emir if (config.parallel_mode != 2): # 2 - AUTO_PARALLEL
+        hid_size = end
+        kv_hid_size = kv_end
+        embed_size = config.vocab_size
+        rank_id = int(os.getenv('RANK_ID', '0'))
+        if (hid_size % rank_size == 0) and (ffn_hid % rank_size == 0) and (embed_size % rank_size == 0):
+            start = int(rank_id * hid_size / rank_size)
+            end = int((rank_id + 1) * hid_size / rank_size)
+            kv_start = int(rank_id * kv_hid_size / rank_size)
+            kv_end = int((rank_id + 1) * kv_hid_size / rank_size)
+            ffn_start = int(rank_id * ffn_hid / rank_size)
+            ffn_end = int((rank_id + 1) * ffn_hid / rank_size)
+        else:
+            raise RuntimeError("hidden size and ffn hidden size must be divided by rank size without remainder. \
+                hidden_size: ", hid_size, " ffn_hidden_size: ", ffn_hid, " rank_size: ", rank_size)
+        quant = (self._get_from_dict(l_dict, "_weight_quantizer") is not None)
+        unite_qkv = (config.num_heads == config.n_kv_heads)
+        self.dictionary[prefix + ".attention_norm.weight"] = \
+            Tensor(self._get_from_dict(l_dict, "attention_norm"), dtype=dtype.float16)
+        self.dictionary[prefix + ".ffn_norm.weight"] = \
+            Tensor(self._get_from_dict(l_dict, "ffn_norm"), dtype=dtype.float16)
+        if is_last:
+            self.dictionary['lm_head.weight'] = Tensor(ConvertTensor(ckpt['lm_head.weight'].asnumpy()[:, start:end]))
+
+        if not quant:
+            self._pack_attn_weights(l_dict, prefix, start, end, kv_start, kv_end, unite_qkv)
+            self._pack_ffn_weights(l_dict, prefix, ffn_start, ffn_end)
+        else:
+            self._pack_attn_quant_weights(l_dict, prefix, start, end, kv_start, kv_end, unite_qkv)
+            self._pack_ffn_quant_weights(l_dict, prefix, ffn_start, ffn_end)
+
+    def _pack_attn_weights(self, l_dict, prefix, start, end, kv_start, kv_end, unite_qkv):
+        """ prepares the dictionary of weights of an attention block """
+        wq = self._get_from_dict(l_dict, "wq")[start:end, :]
+        wk = self._get_from_dict(l_dict, "wk")[kv_start:kv_end, :]
+        wv = self._get_from_dict(l_dict, "wv")[kv_start:kv_end, :]
+        self.dictionary[prefix + ".attention.wo.weight"] = \
+            Tensor(ConvertTensor(self._get_from_dict(l_dict, "wo")[:, start:end]))
+        if unite_qkv:
+            self.dictionary[prefix + ".attention.wqkv.weight"] = Tensor(ConvertTensor(np.concatenate((wq, wk, wv))))
+        else:
+            self.dictionary[prefix + ".attention.wq.weight"] = Tensor(ConvertTensor(wq))
+            self.dictionary[prefix + ".attention.wkv.weight"] = Tensor(ConvertTensor(np.concatenate((wk, wv))))
+
+    def _pack_ffn_weights(self, l_dict, prefix, ffn_start, ffn_end):
+        """ prepares the dictionary of weights of an ffn block """
+        self.dictionary[prefix + ".feed_forward.w2.weight"] = \
+            Tensor(ConvertTensor(self._get_from_dict(l_dict, "w2")[:, ffn_start:ffn_end]))
+        w1 = self._get_from_dict(l_dict, "w1")[ffn_start:ffn_end, :]
+        w3 = self._get_from_dict(l_dict, "w3")[ffn_start:ffn_end, :]
+        self.dictionary[prefix + ".feed_forward.w13.weight"] = Tensor(ConvertTensor(np.concatenate((w1, w3))))
+
+    def _pack_attn_quant_weights(self, l_dict, prefix, start, end, kv_start, kv_end, unite_qkv):
+        """ prepares the dictionary of weights of a quantized attention block """
+        wq, wq_scale, wq_offset = self._get_quant_triplet_from_dict(l_dict, "wq")
+        wk, wk_scale, wk_offset = self._get_quant_triplet_from_dict(l_dict, "wk")
+        wv, wv_scale, wv_offset = self._get_quant_triplet_from_dict(l_dict, "wv")
+        wo, wo_scale, wo_offset = self._get_quant_triplet_from_dict(l_dict, "wo")
+        self.dictionary[prefix + ".attention.wo.weight"] = Tensor(ConvertTensor(wo[:, start:end], nd2nz=False))
+        self.dictionary[prefix + ".attention.wo.weight.scale"] = Tensor(wo_scale[start:end])
+        self.dictionary[prefix + ".attention.wo.weight.offset"] = Tensor(wo_offset[start:end])
+
+        if unite_qkv:
+            self.dictionary[prefix + ".attention.wqkv.weight"] = \
+                Tensor(ConvertTensor(np.concatenate((wq[start:end, :], wk[kv_start:kv_end, :], wv[kv_start:kv_end, :])),
+                                     nd2nz=False))
+            self.dictionary[prefix + ".attention.wqkv.weight.scale"] = \
+                Tensor(np.concatenate((wq_scale[start:end], wk_scale[kv_start:kv_end], wv_scale[kv_start:kv_end])))
+            self.dictionary[prefix + ".attention.wqkv.weight.offset"] = \
+                Tensor(np.concatenate((wq_offset[start:end], wk_offset[kv_start:kv_end], wv_offset[kv_start:kv_end])))
+        else:
+            self.dictionary[prefix + ".attention.wq.weight"] = Tensor(ConvertTensor(wq[start:end, :], nd2nz=False))
+            self.dictionary[prefix + ".attention.wq.weight.scale"] = Tensor(wq_scale[start:end])
+            self.dictionary[prefix + ".attention.wq.weight.offset"] = Tensor(wq_offset[start:end])
+            self.dictionary[prefix + ".attention.wkv.weight"] = \
+                Tensor(ConvertTensor(np.concatenate((wk[kv_start:kv_end, :], wv[kv_start:kv_end, :])), nd2nz=False))
+            self.dictionary[prefix + ".attention.wkv.weight.scale"] = \
+                Tensor(np.concatenate((wk_scale[kv_start:kv_end], wv_scale[kv_start:kv_end])))
+            self.dictionary[prefix + ".attention.wkv.weight.offset"] = \
+                Tensor(np.concatenate((wk_offset[kv_start:kv_end], wv_offset[kv_start:kv_end])))
+
+    def _pack_ffn_quant_weights(self, l_dict, prefix, ffn_start, ffn_end):
+        """ prepares the dictionary of weights of a quantized ffn block """
+        w1, w1_scale, w1_offset = self._get_quant_triplet_from_dict(l_dict, "w1")
+        w2, w2_scale, w2_offset = self._get_quant_triplet_from_dict(l_dict, "w2")
+        w3, w3_scale, w3_offset = self._get_quant_triplet_from_dict(l_dict, "w3")
+        self.dictionary[prefix + ".feed_forward.w2.weight"] = Tensor(ConvertTensor(w2[:, ffn_start:ffn_end],
+                                                                                   nd2nz=False))
+        self.dictionary[prefix + ".feed_forward.w2.weight.scale"] = Tensor(w2_scale[ffn_start:ffn_end])
+        self.dictionary[prefix + ".feed_forward.w2.weight.offset"] = Tensor(w2_offset[ffn_start:ffn_end])
+
+        self.dictionary[prefix + ".feed_forward.w13.weight"] = \
+            Tensor(ConvertTensor(np.concatenate((w1[ffn_start:ffn_end, :], w3[ffn_start:ffn_end, :])), nd2nz=False))
+        self.dictionary[prefix + ".feed_forward.w13.weight.scale"] = \
+            Tensor(np.concatenate((w1_scale[ffn_start:ffn_end], w3_scale[ffn_start:ffn_end])))
+        self.dictionary[prefix + ".feed_forward.w13.weight.offset"] = \
+            Tensor(np.concatenate((w1_offset[ffn_start:ffn_end], w3_offset[ffn_start:ffn_end])))
+
+    def _prepare_cos_sin_arrays(self, config, theta=10000):
+        """ prepares the cosine and sine arrays """
+        head_dim = config.hidden_size // config.num_heads
+        max_position_embedding = \
+            config.max_position_embedding if config.max_position_embedding is not None else config.seq_length
+        freqs_base = np.arange(0, head_dim, 2)[: (head_dim // 2)].astype(np.float32)
+        freqs = 1.0 / (theta ** (freqs_base / head_dim))
+        t = np.arange(0, max_position_embedding, 1).astype(np.float32)
+        freqs = np.outer(t, freqs)
+        emb = np.concatenate((freqs, freqs), axis=-1)
+        freqs_cos = Tensor(np.cos(emb), dtype=dtype.float16)
+        sin = np.sin(emb)
+
+        sin[:, :int(emb.shape[1]/2)] = -sin[:, :int(emb.shape[1]/2)]
+        self.dictionary['model.cos.weight'] = freqs_cos
+        freqs_sin = Tensor(sin, dtype=dtype.float16)
+        self.dictionary['model.sin.weight'] = freqs_sin
+
+    def set_weights(self, ckpt_dict):
+        """ load the checkpoint """
+        self.dictionary = {}
+        self.dictionary['model.tok_embeddings.embedding_weight'] = \
+            Tensor(ckpt_dict['model.tok_embeddings.embedding_weight'].asnumpy())
+        self.dictionary['model.norm_out.weight'] = \
+            Tensor(ckpt_dict['model.norm_out.weight'].asnumpy(), dtype=dtype.float16)
+        self._prepare_cos_sin_arrays(self.config)
+        for layer_id in range(self.config.num_layers):
+            self._prepare_single_layer(ckpt_dict, self.config, layer_id)
+
+        self.binder.set_weights_map(self.dictionary)
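All of the weight packing above funnels through `ConvertTensor`, which transposes, pads each axis up to the 16x16 block size, and interleaves the column blocks into the Ascend-friendly "Nz" layout. A standalone NumPy check of that shape arithmetic (the 20x30 input is an arbitrary illustration, not taken from the source):

```python
import numpy as np

def round_up(val: int, align: int) -> int:
    # Same trick as RoundUp above: -(val // -align) is ceil(val / align).
    if align == 0:
        return 0
    return -(val // -align) * align

nd = np.arange(20 * 30, dtype=np.float32).reshape(20, 30)
mat = nd.T                                                     # transpose=True path -> (30, 20)
r, c = round_up(mat.shape[0], 16), round_up(mat.shape[1], 16)  # 32, 32
mat = np.pad(mat, ((0, r - mat.shape[0]), (0, c - mat.shape[1])))
nz = np.transpose(mat.reshape(r, c // 16, 16), (1, 0, 2)).reshape(r, c)
assert nz.shape == (r, c)  # (32, 32): padded, with 16-column blocks interleaved
```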
mindspore/experimental/llm_boost/ascend_native/llm_boost.py ADDED

@@ -0,0 +1,52 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""LLMBoost APIs."""
+
+from mindspore.common import Tensor
+
+class LLMBoost():
+    r"""
+    Implements an LLM in a single kernel.
+    it forwards the python function to the C++ binded object
+    """
+    def __init__(self, config):
+        r"""
+        initialize the parameters of the llm binder.
+        config is simply the config object of the model
+        """
+        from mindspore._c_expression import LlmBoostBinder
+        self.config = config
+        self.binder = LlmBoostBinder("AscendNative", config.model_type)
+        self.binder.init_model(config.to_dict())
+
+    def init(self):
+        """
+        Initialize the object
+        returns True if object needs input manipulation by mindformers
+        """
+        return False
+
+    def set_kvcache(self, k_caches=None, v_caches=None):
+        return
+
+    def forward(self, input_ids, batch_valid_length, position_ids=None):
+        ret = self.binder.forward([input_ids, batch_valid_length], "nothing really")
+        return Tensor(ret[0])
+
+    def set_weights(self, ckpt_dict):
+        self.binder.set_weights_map(ckpt_dict)
+
+    def add_flags(self, is_first_iteration=False):
+        self.binder.add_flags(is_first_iteration=is_first_iteration)
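Putting the two new files together: `LLMBoost` wraps the C++ `LlmBoostBinder`, and the registered subclass repacks a Llama checkpoint before handing it to the binder. A hedged usage sketch follows. It needs Ascend hardware to actually run, the `LlamaCfg` dataclass is a hypothetical stand-in for a mindformers config (only `model_type`, `to_dict()`, and the fields read by the packing code above are taken from the source), and `llama.ckpt` is a placeholder path:

```python
from dataclasses import dataclass, asdict

import numpy as np
from mindspore import Tensor, load_checkpoint
from mindspore.experimental.llm_boost.ascend_native import LlamaBoostAscendNative


@dataclass
class LlamaCfg:  # hypothetical stand-in for a real model config
    model_type: str = "Llama"
    num_layers: int = 32
    num_heads: int = 32
    n_kv_heads: int = None
    hidden_size: int = 4096
    vocab_size: int = 32000
    seq_length: int = 4096
    max_position_embedding: int = None

    def to_dict(self):
        return asdict(self)


boost = LlamaBoostAscendNative(LlamaCfg())        # builds the C++ binder
boost.set_weights(load_checkpoint("llama.ckpt"))  # repacks weights into the Nz layout
boost.add_flags(is_first_iteration=True)          # prefill step

input_ids = Tensor(np.array([[1, 2, 3]], dtype=np.int32))
batch_valid_length = Tensor(np.array([3], dtype=np.int32))
logits = boost.forward(input_ids, batch_valid_length)
```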
mindspore/experimental/llm_boost/atb/boost_base.py CHANGED

@@ -112,8 +112,7 @@ class AtbBoostBase:
 
     def _convert_qkv_concat_weight(self, param_dict):
         """convert qkv concat weight"""
-
-        for i in range(assume_num_layers):
+        for i in range(self.num_layers):
             # qkv weight concat
             wq_weight_name = f"model.layers.{i}.attention.wq.weight"
             wk_weight_name = f"model.layers.{i}.attention.wk.weight"

@@ -151,7 +150,7 @@ class AtbBoostBase:
             logger.info(f"transform: {qkv_concat_weight_name}")
             logger.info(f"transform: {gate_hidden_concat_weight_name}")
 
-        for i in range(
+        for i in range(self.num_layers):
             # qkv bias concat
             wq_bias_name = f"model.layers.{i}.attention.wq.bias"
             wk_bias_name = f"model.layers.{i}.attention.wk.bias"
mindspore/experimental/llm_boost/atb/llama_boost.py CHANGED

@@ -43,7 +43,11 @@ class LlamaBoost(AtbBoostBase):
         )
 
     def init(self):
-        """
+        """
+        Initialize the object
+        returns True if object needs input manipulation by mindformers
+        """
+
         coder_param = {
             "normEps": self.config.rms_norm_eps,
             "normType": NormType.RMS_NORM,

@@ -93,6 +97,7 @@ class LlamaBoost(AtbBoostBase):
         }
         self.atb_encoder_operation.init(json.dumps({**encoder_param}))
         self.atb_decoder_operation.init(json.dumps({**decoder_param}))
+        return True
 
     def _prepare_inputs(
         self,
mindspore/experimental/optim/adadelta.py CHANGED

@@ -37,28 +37,32 @@ class Adadelta(Optimizer):
     Implements Adadelta algorithm.
 
     .. math::
-        (old formula block: 22 lines, truncated beyond recovery in this rendering)
+        \newcommand{\grad}[2]{\nabla_{#1} f_{#2}(#1_{#2 - 1})}
+        \newcommand{\updateVar}[3]{#1_{#2} \leftarrow #1_{#2 - 1} \rho + #3_{#2} (1 - \rho)}
+
+        \begin{align*}
+            &\rule{150mm}{0.4pt} \\
+            &\textbf{Input}:
+                \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)},
+                \: \rho \text{ (decay)}, \: \lambda \text{ (weight decay)} \\
+            &\textbf{Initialize}:
+                \begin{cases}
+                    v_0 \leftarrow 0 \text{ (square avg)} \\
+                    u_0 \leftarrow 0 \text{ (accumulate variables)}
+                \end{cases} \\
+            &\rule{110mm}{0.4pt} \\
+            &\textbf{For } t = 1 \text{ to } \ldots \text{ do}: \\
+            &\quad g_t \leftarrow \grad{\theta}{t} \\
+            &\quad \text{If } \lambda \neq 0: \\
+            &\quad\quad g_t \leftarrow g_t + \lambda \theta_{t - 1} \\
+            &\quad v_t \leftarrow \updateVar{v}{t}{g^2} \\
+            &\quad \Delta x_t \leftarrow \frac{\sqrt{u_{t - 1} + \epsilon}}{\sqrt{v_t + \epsilon}} g_t \\
+            &\quad u_t \leftarrow \updateVar{u}{t}{\Delta x^2} \\
+            &\quad \theta_t \leftarrow \theta_{t - 1} - \gamma \Delta x_t \\
+            &\rule{110mm}{0.4pt} \\
+            &\bf{Return}: \theta_t \\
+            &\rule{110mm}{0.4pt}
+        \end{align*}
 
     .. warning::
         This is an experimental optimizer API that is subject to change.
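Expanding the two macros in the new docstring gives the usual Adadelta recurrences: v_t = rho * v_{t-1} + (1 - rho) * g_t^2, Delta x_t = sqrt(u_{t-1} + eps) / sqrt(v_t + eps) * g_t, u_t = rho * u_{t-1} + (1 - rho) * Delta x_t^2, theta_t = theta_{t-1} - gamma * Delta x_t. A plain NumPy transcription of one step, illustrative only (this is not the MindSpore kernel):

```python
import numpy as np

def adadelta_step(theta, grad, v, u, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0.0):
    """One Adadelta update, transcribed from the docstring formula above."""
    if weight_decay != 0.0:
        grad = grad + weight_decay * theta              # g_t <- g_t + lambda * theta_{t-1}
    v = rho * v + (1.0 - rho) * grad ** 2               # square-avg accumulator v_t
    delta = np.sqrt(u + eps) / np.sqrt(v + eps) * grad  # Delta x_t
    u = rho * u + (1.0 - rho) * delta ** 2              # accumulate-variables term u_t
    theta = theta - lr * delta                          # parameter update
    return theta, v, u

theta = np.zeros(3)
v = np.zeros_like(theta)
u = np.zeros_like(theta)
theta, v, u = adadelta_step(theta, grad=np.ones(3), v=v, u=u)
```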
mindspore/experimental/optim/adam.py CHANGED

@@ -78,6 +78,9 @@ class Adam(Optimizer):
         \end{aligned}
 
     .. warning::
+        The implementation formula of this optimizer interface is not completely consistent with that in the paper.
+        If you want to use an interface that is completely consistent, it is recommended to use
+        :class:`mindspore.mint.optim.Adam`, which currently only supports Ascend.
         This is an experimental optimizer API that is subject to change.
         This module must be used with lr scheduler module in `LRScheduler Class
         <https://www.mindspore.cn/docs/en/master/api_python/mindspore.nn.html#learningrateschedule-class>`_ .
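The recommended replacement, `mindspore.mint.optim.Adam`, is one of the files added in this release (`mindspore/mint/optim/adam.py +223 -0` in the list above). A hedged sketch of the switch, assuming the mint optimizer keeps the torch-style constructor used by the existing `mint.optim.AdamW`; the exact signature is not shown in this diff:

```python
from mindspore import nn
from mindspore.mint import optim

net = nn.Dense(4, 2)
# Assumed torch-style arguments: params, lr, betas, eps (Ascend only, per the warning).
optimizer = optim.Adam(net.trainable_params(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8)
```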
|