mindspore-2.3.0-cp39-cp39-win_amd64.whl → mindspore-2.4.1-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +3 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +50 -9
- mindspore/_extends/parse/compile_config.py +41 -0
- mindspore/_extends/parse/parser.py +9 -7
- mindspore/_extends/parse/standard_method.py +52 -14
- mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
- mindspore/amp.py +24 -10
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/common/__init__.py +6 -4
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_tensor.py +2 -1
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/api.py +102 -87
- mindspore/common/dump.py +5 -6
- mindspore/common/generator.py +1 -7
- mindspore/common/hook_handle.py +14 -26
- mindspore/common/initializer.py +51 -15
- mindspore/common/mindir_util.py +2 -2
- mindspore/common/parameter.py +62 -15
- mindspore/common/recompute.py +39 -9
- mindspore/common/sparse_tensor.py +7 -3
- mindspore/common/tensor.py +183 -37
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +38 -3
- mindspore/communication/comm_func.py +315 -60
- mindspore/communication/management.py +14 -14
- mindspore/context.py +132 -22
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/__init__.py +1 -1
- mindspore/dataset/core/config.py +7 -0
- mindspore/dataset/core/validator_helpers.py +7 -0
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +72 -44
- mindspore/dataset/engine/datasets_audio.py +7 -7
- mindspore/dataset/engine/datasets_standard_format.py +53 -3
- mindspore/dataset/engine/datasets_text.py +20 -20
- mindspore/dataset/engine/datasets_user_defined.py +174 -104
- mindspore/dataset/engine/datasets_vision.py +33 -33
- mindspore/dataset/engine/iterators.py +29 -0
- mindspore/dataset/engine/obs/util.py +7 -0
- mindspore/dataset/engine/queue.py +114 -60
- mindspore/dataset/engine/serializer_deserializer.py +2 -2
- mindspore/dataset/engine/validators.py +34 -14
- mindspore/dataset/text/__init__.py +1 -4
- mindspore/dataset/transforms/__init__.py +0 -3
- mindspore/dataset/utils/line_reader.py +2 -0
- mindspore/dataset/vision/__init__.py +1 -4
- mindspore/dataset/vision/utils.py +1 -1
- mindspore/dataset/vision/validators.py +2 -1
- mindspore/dnnl.dll +0 -0
- mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/optim/adamw.py +85 -0
- mindspore/experimental/optim/optimizer.py +3 -0
- mindspore/hal/__init__.py +3 -3
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/stream.py +18 -0
- mindspore/include/api/model_group.h +13 -1
- mindspore/include/api/types.h +10 -10
- mindspore/include/dataset/config.h +2 -2
- mindspore/include/dataset/constants.h +2 -2
- mindspore/include/dataset/execute.h +2 -2
- mindspore/include/dataset/vision.h +4 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filewriter.py +68 -51
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +983 -46
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/mint/nn/__init__.py +268 -23
- mindspore/mint/nn/functional.py +125 -19
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/adamw.py +26 -13
- mindspore/mint/special/__init__.py +63 -0
- mindspore/multiprocessing/__init__.py +2 -1
- mindspore/nn/__init__.py +0 -1
- mindspore/nn/cell.py +276 -96
- mindspore/nn/layer/activation.py +211 -44
- mindspore/nn/layer/basic.py +137 -10
- mindspore/nn/layer/embedding.py +137 -2
- mindspore/nn/layer/normalization.py +101 -5
- mindspore/nn/layer/padding.py +34 -48
- mindspore/nn/layer/pooling.py +161 -7
- mindspore/nn/layer/transformer.py +3 -3
- mindspore/nn/loss/__init__.py +2 -2
- mindspore/nn/loss/loss.py +84 -6
- mindspore/nn/optim/__init__.py +2 -1
- mindspore/nn/optim/adadelta.py +1 -1
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +124 -0
- mindspore/nn/wrap/cell_wrapper.py +12 -23
- mindspore/nn/wrap/grad_reducer.py +5 -5
- mindspore/nn/wrap/loss_scale.py +17 -3
- mindspore/numpy/__init__.py +1 -1
- mindspore/numpy/array_creations.py +65 -68
- mindspore/numpy/array_ops.py +64 -60
- mindspore/numpy/fft.py +610 -75
- mindspore/numpy/logic_ops.py +11 -10
- mindspore/numpy/math_ops.py +85 -84
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -4
- mindspore/ops/_grad_experimental/grad_array_ops.py +0 -11
- mindspore/ops/_grad_experimental/grad_comm_ops.py +67 -4
- mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
- mindspore/ops/_vmap/vmap_array_ops.py +2 -4
- mindspore/ops/_vmap/vmap_math_ops.py +17 -1
- mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +91 -7
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
- mindspore/ops/auto_generate/gen_extend_func.py +767 -13
- mindspore/ops/auto_generate/gen_ops_def.py +2452 -364
- mindspore/ops/auto_generate/gen_ops_prim.py +5442 -1756
- mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
- mindspore/ops/composite/base.py +85 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
- mindspore/ops/function/__init__.py +22 -0
- mindspore/ops/function/array_func.py +492 -153
- mindspore/ops/function/debug_func.py +113 -1
- mindspore/ops/function/fft_func.py +15 -2
- mindspore/ops/function/grad/grad_func.py +3 -2
- mindspore/ops/function/math_func.py +564 -207
- mindspore/ops/function/nn_func.py +817 -383
- mindspore/ops/function/other_func.py +3 -2
- mindspore/ops/function/random_func.py +402 -12
- mindspore/ops/function/reshard_func.py +13 -11
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/function/vmap_func.py +3 -2
- mindspore/ops/functional.py +24 -14
- mindspore/ops/op_info_register.py +3 -3
- mindspore/ops/operations/__init__.py +7 -2
- mindspore/ops/operations/_grad_ops.py +2 -76
- mindspore/ops/operations/_infer_ops.py +1 -1
- mindspore/ops/operations/_inner_ops.py +71 -94
- mindspore/ops/operations/array_ops.py +14 -146
- mindspore/ops/operations/comm_ops.py +63 -53
- mindspore/ops/operations/custom_ops.py +83 -19
- mindspore/ops/operations/debug_ops.py +42 -10
- mindspore/ops/operations/manually_defined/_inner.py +12 -0
- mindspore/ops/operations/manually_defined/ops_def.py +273 -20
- mindspore/ops/operations/math_ops.py +12 -223
- mindspore/ops/operations/nn_ops.py +20 -114
- mindspore/ops/operations/other_ops.py +7 -4
- mindspore/ops/operations/random_ops.py +46 -1
- mindspore/ops/primitive.py +18 -6
- mindspore/ops_generate/arg_dtype_cast.py +2 -0
- mindspore/ops_generate/gen_aclnn_implement.py +11 -11
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +67 -52
- mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
- mindspore/ops_generate/gen_pyboost_func.py +131 -47
- mindspore/ops_generate/op_proto.py +10 -3
- mindspore/ops_generate/pyboost_utils.py +14 -1
- mindspore/ops_generate/template.py +43 -21
- mindspore/parallel/__init__.py +3 -1
- mindspore/parallel/_auto_parallel_context.py +31 -9
- mindspore/parallel/_cell_wrapper.py +85 -0
- mindspore/parallel/_parallel_serialization.py +47 -19
- mindspore/parallel/_tensor.py +127 -13
- mindspore/parallel/_utils.py +53 -22
- mindspore/parallel/algo_parameter_config.py +5 -5
- mindspore/parallel/checkpoint_transform.py +46 -39
- mindspore/parallel/cluster/process_entity/__init__.py +1 -1
- mindspore/parallel/cluster/process_entity/_api.py +31 -23
- mindspore/parallel/cluster/process_entity/_utils.py +2 -27
- mindspore/parallel/parameter_broadcast.py +3 -4
- mindspore/parallel/shard.py +162 -31
- mindspore/parallel/transform_safetensors.py +1146 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/util.py +28 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +17 -19
- mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
- mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
- mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
- mindspore/profiler/parser/base_timeline_generator.py +19 -25
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
- mindspore/profiler/parser/framework_parser.py +1 -391
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/memory_usage_parser.py +0 -154
- mindspore/profiler/parser/profiler_info.py +78 -6
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +285 -413
- mindspore/rewrite/__init__.py +1 -2
- mindspore/rewrite/common/namespace.py +4 -4
- mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
- mindspore/run_check/_check_version.py +39 -104
- mindspore/safeguard/rewrite_obfuscation.py +591 -247
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +4 -3
- mindspore/train/_utils.py +105 -19
- mindspore/train/amp.py +171 -53
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +97 -31
- mindspore/train/callback/_cluster_monitor.py +1 -1
- mindspore/train/callback/_flops_collector.py +1 -0
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +145 -31
- mindspore/train/callback/_summary_collector.py +5 -5
- mindspore/train/callback/_tft_register.py +375 -0
- mindspore/train/dataset_helper.py +15 -3
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/roc.py +4 -4
- mindspore/train/mind_ir_pb2.py +44 -39
- mindspore/train/model.py +154 -58
- mindspore/train/serialization.py +342 -128
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/METADATA +13 -7
- {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/RECORD +260 -254
- {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/WHEEL +1 -1
- mindspore/include/c_api/ms/abstract.h +0 -67
- mindspore/include/c_api/ms/attribute.h +0 -197
- mindspore/include/c_api/ms/base/handle_types.h +0 -43
- mindspore/include/c_api/ms/base/macros.h +0 -32
- mindspore/include/c_api/ms/base/status.h +0 -33
- mindspore/include/c_api/ms/base/types.h +0 -283
- mindspore/include/c_api/ms/context.h +0 -102
- mindspore/include/c_api/ms/graph.h +0 -160
- mindspore/include/c_api/ms/node.h +0 -606
- mindspore/include/c_api/ms/tensor.h +0 -161
- mindspore/include/c_api/ms/value.h +0 -84
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/extend/basic.py +0 -140
- mindspore/nn/extend/embedding.py +0 -143
- mindspore/nn/extend/layer/normalization.py +0 -109
- mindspore/nn/extend/pooling.py +0 -117
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
- mindspore/ops/extend/__init__.py +0 -53
- mindspore/ops/extend/array_func.py +0 -218
- mindspore/ops/extend/math_func.py +0 -76
- mindspore/ops/extend/nn_func.py +0 -308
- mindspore/ops/silent_check.py +0 -162
- mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
- mindspore/profiler/parser/msadvisor_parser.py +0 -240
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.4.1.dist-info}/top_level.txt +0 -0
mindspore/nn/wrap/cell_wrapper.py
CHANGED
@@ -23,7 +23,7 @@ from types import FunctionType, MethodType
 from mindspore import log as logger
 from mindspore.parallel._utils import _get_device_num, _get_gradients_mean,\
     _get_parallel_mode, _get_enable_parallel_optimizer, _is_pynative_parallel
-from mindspore.context import ParallelMode
+from mindspore.context import ParallelMode
 from mindspore import _checkparam as validator
 from mindspore import ops, nn
 from mindspore.common import dtype as mstype
@@ -36,6 +36,7 @@ from mindspore.ops import operations as P
 from mindspore.ops.operations.comm_ops import _VirtualDataset
 from mindspore.nn.cell import Cell
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
+from mindspore.utils import ExitByRequest
 
 _get_datatype = C.MultitypeFuncGraph("_get_datatype")
 
@@ -414,6 +415,11 @@ class TrainOneStepCell(Cell):
             group = server_group_name
         self.grad_reducer = DistributedGradReducer(self.weights, self.mean, self.degree, group=group)
         self._get_attr_from_cell(network)
+        self.use_graceful_exit = os.environ.get("MS_ENABLE_GRACEFUL_EXIT") == "1"
+        if self.use_graceful_exit:
+            self.graceful_exit = ExitByRequest()
+            self.exit_param = Parameter(Tensor(False, mstype.bool_), name="graceful_exit")  # update by reduce value
+            self.init_param = Parameter(Tensor([0], mstype.int32), name="graceful_init")  # update by config file
 
     def construct(self, *inputs):
         if not self.sense_flag:
@@ -422,6 +428,8 @@ class TrainOneStepCell(Cell):
         sens = F.fill(loss.dtype, loss.shape, self.sens)
         grads = self.grad(self.network, self.weights)(*inputs, sens)
         grads = self.grad_reducer(grads)
+        if self.use_graceful_exit:
+            grads = self.graceful_exit.exit_by_request(grads, self.init_param, self.exit_param)
         loss = F.depend(loss, self.optimizer(grads))
         if self.return_grad:
             grad_with_param_name = {}
@@ -435,6 +443,8 @@ class TrainOneStepCell(Cell):
         loss = self.network(*inputs)
         grads = self.grad_no_sens(self.network, self.weights)(*inputs)
         grads = self.grad_reducer(grads)
+        if self.use_graceful_exit:
+            grads = self.graceful_exit.exit_by_request(grads, self.init_param, self.exit_param)
         loss = F.depend(loss, self.optimizer(grads))
         if self.return_grad:
             grad_with_param_name = {}
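The hunks above wire an opt-in graceful-exit step into TrainOneStepCell, gated on an environment variable that is read at cell-construction time. A minimal sketch of opting in (a toy single-device setup for illustration; the exit itself is only meaningful in a distributed job, and `nn.Dense` stands in for a real model):

    import os
    os.environ["MS_ENABLE_GRACEFUL_EXIT"] = "1"  # must be set before the cell is built

    import mindspore.nn as nn

    net = nn.Dense(16, 1)                        # toy network
    loss_net = nn.WithLossCell(net, nn.MSELoss())
    opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
    train_net = nn.TrainOneStepCell(loss_net, opt)  # __init__ reads MS_ENABLE_GRACEFUL_EXIT here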
@@ -742,18 +752,7 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
         self.hyper_map = ops.HyperMap()
         self.opt_shard = _get_enable_parallel_optimizer()
         self._get_attr_from_cell(network)
-        self.enable_mindio = False
-        mode = get_context("mode")
-        device_type = get_context("device_target")
-        if device_type != "Ascend" or mode != GRAPH_MODE:
-            return
-        graceful_exit = os.getenv("MS_ENABLE_MINDIO_GRACEFUL_EXIT")
-        ttp_lib_path = os.getenv("MS_MINDIO_TTP_LIB_PATH")
-        ttp_path_check = ttp_lib_path is not None and os.path.isfile(ttp_lib_path)
-        if graceful_exit == "true" and ttp_path_check:
-            self.g_one = Tensor([0.1])
-            self.allreduce_sum = ops.AllReduce()
-            self.enable_mindio = True
+        self.enable_tft = False
 
     def construct(self, *inputs):
         if not self.sense_flag:
@@ -762,11 +761,6 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
         sens = ops.fill(ops.DType()(loss), ops.Shape()(loss), self.sens)
         grads = self.grad(self.network, self.weights)(*inputs, sens)
         accu_grads = ops.depend(self.accu_grads, grads)
-        if self.enable_mindio:
-            g_one = ops.depend(self.g_one, accu_grads)
-            g_one_res = self.allreduce_sum(g_one)
-            accu_grads = ops.depend(accu_grads, g_one_res)
-            grads = ops.depend(grads, g_one_res)
         if self.opt_shard:
             succ = self.optimizer(grads)
         else:
@@ -781,11 +775,6 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
         loss = self.network(*inputs)
         grads = self.grad_no_sens(self.network, self.weights)(*inputs)
         accu_grads = ops.depend(self.accu_grads, grads)
-        if self.enable_mindio:
-            g_one = ops.depend(self.g_one, accu_grads)
-            g_one_res = self.allreduce_sum(g_one)
-            accu_grads = ops.depend(accu_grads, g_one_res)
-            grads = ops.depend(grads, g_one_res)
         if self.opt_shard:
             succ = self.optimizer(grads)
         else:
mindspore/nn/wrap/grad_reducer.py
CHANGED
@@ -335,14 +335,14 @@ class DistributedGradReducer(Cell):
 
         For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
         Please see the `rank table Startup
-        <https://www.mindspore.cn/
+        <https://www.mindspore.cn/docs/en/master/model_train/parallel/rank_table.html>`_
         for more details.
 
         For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
-        <https://www.mindspore.cn/
+        <https://www.mindspore.cn/docs/en/master/model_train/parallel/mpirun.html>`_ .
 
         For the CPU device, users need to write a dynamic cluster startup script, please see the `Dynamic Cluster
-        Startup <https://www.mindspore.cn/
+        Startup <https://www.mindspore.cn/docs/en/master/model_train/parallel/dynamic_cluster.html>`_ .
 
         This example should be run with multiple devices.
 
@@ -509,11 +509,11 @@ class PipelineGradReducer(Cell):
 
         For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
         Please see the `rank table Startup
-        <https://www.mindspore.cn/
+        <https://www.mindspore.cn/docs/en/master/model_train/parallel/rank_table.html>`_
         for more details.
 
         For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
-        <https://www.mindspore.cn/
+        <https://www.mindspore.cn/docs/en/master/model_train/parallel/mpirun.html>`_ .
 
         This example should be run with multiple devices.
 
mindspore/nn/wrap/loss_scale.py
CHANGED
@@ -33,6 +33,8 @@ from mindspore.ops.operations.nn_ops import AllFinite
 from mindspore.common import dtype as mstype
 from mindspore.common.api import jit
 from mindspore._c_expression import MSContext
+from mindspore.run_check._check_version import AscendEnvChecker
+from mindspore import log as logger
 
 _grad_scale = C.MultitypeFuncGraph("grad_scale")
 reciprocal = P.Reciprocal()
@@ -49,6 +51,7 @@ def tensor_grad_scale_row_tensor(scale, grad):
                          grad.values * F.cast(reciprocal(scale), F.dtype(grad.values)),
                          grad.dense_shape)
 
+
 _grad_overflow = C.MultitypeFuncGraph("_grad_overflow")
 grad_overflow = P.FloatStatus()
 
@@ -355,6 +358,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         >>> train_network.set_sense_scale(scaling_sens)
         >>> output = train_network(inputs, label)
     """
+
     def __init__(self, network, optimizer, scale_sense):
         super(TrainOneStepWithLossScaleCell, self).__init__(network, optimizer, sens=None)
         self.hyper_map = C.HyperMap()
@@ -369,7 +373,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
         self.gpu_target = (context.get_context("device_target") == "GPU")
         self.ascend_910a_target = (MSContext.get_instance().get_ascend_soc_version() == 'ascend910')
-        self.
+        self.ascend_910b_target = (MSContext.get_instance().get_ascend_soc_version() in ['ascend910b', 'ascend910_93'])
         self.loss_scaling_manager = None
         self._ascend_check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')
 
@@ -377,12 +381,21 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         runtime_conf = os.environ.get('MS_DEV_RUNTIME_CONF')
         global_jit_config = context.get_jit_config()
         if runtime_conf is not None and ("all_finite:True" in runtime_conf or "all_finite:true" in runtime_conf):
+            logger.debug("Enable AllFinite through the environment variable MS_DEV_RUNTIME_CONF.")
             self.enable_allfinite = True
         elif runtime_conf is not None and ("all_finite:False" in runtime_conf or "all_finite:false" in runtime_conf):
+            logger.debug("Disable AllFinite through the environment variable MS_DEV_RUNTIME_CONF.")
             self.enable_allfinite = False
         elif global_jit_config:
+            logger.debug("Current global jit config is: {}".format(global_jit_config["jit_level"]))
             self.enable_allfinite = global_jit_config["jit_level"] == "O0" or global_jit_config["jit_level"] == "O1"
 
+        if self.ascend_910b_target:
+            checker = AscendEnvChecker(None)
+            if not checker.check_custom_version():
+                logger.debug("Disable AllFinite due to version check failure.")
+                self.enable_allfinite = False
+
         if isinstance(scale_sense, Cell):
             self.loss_scaling_manager = scale_sense
             self.scale_sense = Parameter(Tensor(scale_sense.get_loss_scale(), dtype=mstype.float32),
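The hunk above establishes a precedence order for enabling the AllFinite overflow check: an explicit `all_finite:` token in `MS_DEV_RUNTIME_CONF` wins, otherwise jit level O0/O1 decides, and on 910B-class SoCs a failed version check force-disables it. A sketch of exercising the two user-facing knobs (values per the code above):

    import os
    os.environ["MS_DEV_RUNTIME_CONF"] = "all_finite:True"  # explicit override wins

    # Without the env var, the global jit level decides:
    import mindspore as ms
    ms.set_context(jit_config={"jit_level": "O0"})         # O0 or O1 enables AllFinite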
@@ -460,7 +473,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         is cleaned up when the function returns.
         """
         status = Tensor([0] * 8, mstype.int32)
-        if self.ascend_910a_target or (self.
+        if self.ascend_910a_target or (self.ascend_910b_target and \
                 self._ascend_check_overflow_mode == "SATURATION_MODE"):
             status = F.depend(status, pre_cond)
             # clear overflow buffer
@@ -554,7 +567,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         """
         if self.gpu_target:
             overflow = self._get_gpu_overflow_status(compute_output)
-        elif self.
+        elif self.ascend_910b_target:
             if self._ascend_check_overflow_mode == "SATURATION_MODE":
                 overflow = self._get_ascend_overflow_status_on_saturation_mode(status, compute_output)
             else:
@@ -613,6 +626,7 @@ class _TrainGradAccuWithLossScaleCell(TrainOneStepCell):
         optimizer (Optimizer): Optimizer for updating the weights.
         scale_sense (Cell): Cell to do the loss scale.
     """
+
     def __init__(self, network, optimizer, scale_sense):
         super(_TrainGradAccuWithLossScaleCell, self).__init__(network, optimizer, sens=None)
         self.network = network
mindspore/numpy/__init__.py
CHANGED
@@ -64,7 +64,7 @@ from mindspore.numpy.logic_ops import (not_equal, less_equal, less, greater_equa
                                        logical_or, logical_xor, in1d, isin, isclose, signbit, sometrue,
                                        array_equal, array_equiv, setdiff1d)
 
-from . import fft
+from mindspore.numpy import fft
 
 mod = remainder
 fabs = absolute
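The switch from a relative to an absolute import is behavior-neutral for callers: the subpackage resolves the same way before and after. A quick sketch (assuming the `fft.fft` export shipped in these versions, per the `mindspore/numpy/fft.py` entry in the file list):

    import mindspore as ms
    import mindspore.numpy as mnp

    x = ms.Tensor([1.0, 0.0, 1.0, 0.0])
    y = mnp.fft.fft(x)   # reaches mindspore.numpy.fft regardless of import style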
mindspore/numpy/array_creations.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -278,7 +278,7 @@ def ones(shape, dtype=mstype.float32):
     Args:
         shape (Union[int, tuple, list]): the shape of the new tensor.
         dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
-            Default is
+            Default is ``mstype.float32``.
 
     Returns:
         Tensor, with the designated `shape` and `dtype`, filled with ones.
@@ -311,7 +311,7 @@ def zeros(shape, dtype=mstype.float32):
     Args:
         shape (Union[int, tuple, list]): the shape of the new tensor.
         dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
-            Default is
+            Default is mstype.float32.
 
     Returns:
         Tensor, with the designated `shape` and `dtype`, filled with zeros.
@@ -433,7 +433,7 @@ def randn(*shape, dtype=mstype.float32):
         *shape (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g.,
             :math:`(2, 3)` or :math:`2`.
         dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, it must
-            be float type. Default is
+            be float type. Default is ``mindspore.float32``.
 
     Returns:
         Tensor, with the designated shape and dtype, filled with a sample (or samples)
@@ -474,7 +474,7 @@ def rand(*shape, dtype=mstype.float32):
         *shape (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g.,
             :math:`(2, 3)` or :math:`2`.
         dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, it must
-            be float type. Default is
+            be float type. Default is ``mindspore.float32``.
 
     Returns:
         Tensor, with the designated shape and dtype, filled with random numbers from the
@@ -519,7 +519,7 @@ def randint(minval, maxval=None, shape=None, dtype=mstype.int32):
         maxval(Union[int], optional): End value of interval. The interval does not include this value.
         shape (Union[int, tuple(int)]): Shape of the new tensor, e.g., :math:`(2, 3)` or :math:`2`.
         dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, it must
-            be int type. Default is
+            be int type. Default is `mindspore.int32`.
 
     Returns:
         Tensor, with the designated shape and dtype, filled with random integers from minval (inclusive)
@@ -1068,9 +1068,9 @@ def ones_like(a, dtype=None, shape=None):
         a (Union[Tensor, list, tuple]): The shape and data-type of a define these same
             attributes of the returned array.
         dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
-            result.
-        shape (int
-            of the result.
+            result. Default: ``None``.
+        shape (int, sequence of ints, optional): Overrides the shape
+            of the result. Default: ``None``.
 
     Returns:
         Tensor, array of ones with the same shape and type as `a`.
@@ -1107,7 +1107,7 @@ def zeros_like(a, dtype=None, shape=None):
             attributes of the returned array.
         dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
             result.
-        shape (int
+        shape (int, sequence of ints, optional): Overrides the shape
             of the result.
 
     Returns:
@@ -1137,8 +1137,8 @@ def full_like(a, fill_value, dtype=None, shape=None):
     Returns a full array with the same shape and type as a given array.
 
     Note:
-        Input array must have the same size across a dimension.
-        If `a` is not a Tensor, dtype is float32 by default if not provided.
+        - Input array must have the same size across a dimension.
+        - If `a` is not a Tensor, dtype is float32 by default if not provided.
 
     Args:
         a (Union[Tensor, list, tuple]): The shape and data-type of `a` define these same
@@ -1146,7 +1146,7 @@ def full_like(a, fill_value, dtype=None, shape=None):
         fill_value (scalar): Fill value.
         dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
             result.
-        shape (int
+        shape (int, sequence of ints, optional): Overrides the shape
             of the result.
 
     Returns:
@@ -1178,7 +1178,7 @@ def tri(N, M=None, k=0, dtype=mstype.float32):
     Args:
        N(int): Number of rows in the array.
        M(int, optional): Number of columns in the array. By default, `M` is taken
-            equal to N.
+            equal to N. Default: ``None`` .
        k(int, optional): The sub-diagonal at and below which the array is filled.
            :math:`k = 0` is the main diagonal, while :math:`k < 0` is below it, and :math:`k > 0` is above.
            Default: ``0`` .
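Several of the `*_like` docstrings above now spell out the `shape` override. A quick sketch of what that parameter does (API per the docstrings):

    import mindspore.numpy as mnp

    a = mnp.ones((2, 3))
    b = mnp.ones_like(a, shape=(4,))   # `shape` overrides the shape taken from `a`
    print(b.shape)                     # (4,)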
@@ -1382,47 +1382,44 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
 
 def trace(a, offset=0, axis1=0, axis2=1, dtype=None):
     """
-
-
-    If `a` is 2-D, the sum along its diagonal with the given offset is returned,
-    i.e., the sum of elements ``a[i,i+offset]`` for all `i`.
-    If `a` has more than two dimensions, then the axes specified by `axis1` and
-    `axis2` are used to determine the 2-D sub-arrays whose traces are returned.
-    The shape of the resulting array is the same as that of a with `axis1` and
-    `axis2` removed.
+    Return the sum along diagonals of the tensor.
 
     Note:
-
-
+        - `trace` is currently only used in `mindscience` scientific computing scenarios and
+          dose not support other usage scenarios.
+        - `trace` is not supported on Windows platform yet.
 
     Args:
-        a (Tensor):
+        a (Tensor): A matrix to be calculated.
         offset (int, optional): Offset of the diagonal from the main diagonal.
-
+            Can be positive or negative. Default: ``0``.
         axis1 (int, optional): Axis to be used as the first axis of the 2-D
-
-            first axis (0).
+            sub-arrays from which the diagonals should be taken. Default: ``0``.
         axis2 (int, optional): Axis to be used as the second axis of the 2-D
-
-
-
-            output Tensor.
+            sub-arrays from which the diagonals should be taken. Default: ``1``.
+        dtype (:class:`mindspore.dtype`, optional): Overrides the dtype of the
+            output Tensor if not ``None``. Default: ``None``.
 
     Returns:
-        Tensor,
-
-
+        Tensor, the sum along diagonals. If `a` is 2-D, the sum along the diagonal is returned.
+        If `a` has more than two dimensions, then the axes specified by `axis1` and `axis2` are used
+        to determine the 2-D sub-arrays whose traces are returned. The shape of the resulting
+        array is the same as that of `a` with `axis1` and `axis2` removed.
 
     Raises:
-        ValueError: If
+        ValueError: If `a` has less than two dimensions.
+        ValueError: If `axis1` or `axis2` is not in [-dims, dims), which dims is dimension of `a`.
+        ValueError: If axes specified by `axis1` and `axis2` are same.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import
-        >>>
-        >>>
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore.numpy import trace
+        >>> x = Tensor(np.eye(3, dtype=np.float32))
+        >>> print(trace(x))
        3.0
    """
    return a.trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
@@ -1589,7 +1586,7 @@ class NdGrid:
 
 class MGridClass(NdGrid):
     """
-    mgrid is an
+    mgrid is an `NdGrid` instance with ``sparse=False``.
 
     The dimension and number of the output arrays are equal to the number
     of indexing dimensions. If the step length is not a complex number,
@@ -1638,7 +1635,7 @@ class MGridClass(NdGrid):
 
 class OGridClass(NdGrid):
     """
-    ogrid is an
+    ogrid is an `NdGrid` instance with ``sparse=True``.
 
     The dimension and number of the output arrays are equal to the number
     of indexing dimensions. If the step length is not a complex number,
@@ -1957,8 +1954,8 @@ def indices(dimensions, dtype=mstype.int32, sparse=False):
     varying only along the corresponding axis.
 
     Args:
-        dimensions (
-        dtype (:class:`mindspore.dtype`, optional): Data type of the result.
+        dimensions (Union[list(int), tuple]): The shape of the grid.
+        dtype (:class:`mindspore.dtype`, optional): Data type of the result. Default: ``mstype.int32``.
         sparse (boolean, optional): Default: ``False`` . Return a sparse
             representation of the grid instead of a dense representation.
 
@@ -2167,7 +2164,7 @@ def triu_indices(n, k=0, m=None):
         n (int): The size of the arrays for which the returned indices will be valid.
         k (int, optional): Diagonal offset, default: ``0`` .
         m (int, optional): The column dimension of the arrays for which the returned
-            arrays will be valid. By default `m` is taken equal to `n`.
+            arrays will be valid. By default `m` is taken equal to `n`. Default: ``None`` .
 
     Returns:
         The indices for the triangle. The returned tuple contains two tensors, each
@@ -2196,7 +2193,7 @@ def tril_indices(n, k=0, m=None):
         n (int): The size of the arrays for which the returned indices will be valid.
         k (int, optional): Diagonal offset, default: ``0`` .
         m (int, optional): The column dimension of the arrays for which the returned
-            arrays will be valid. By default `m` is taken equal to `n`.
+            arrays will be valid. By default `m` is taken equal to `n`. Default: ``None`` .
 
     Returns:
         The indices for the triangle. The returned tuple contains two tensors, each
@@ -2259,7 +2256,7 @@ def tril_indices_from(arr, k=0):
 
     Returns:
         triu_indices_from, tuple of 2 tensor, shape(N)
-        Indices for the
+        Indices for the lower-triangle of `arr`.
 
     Raises:
         TypeError: If `arr` cannot be converted to tensor, or `k` is not a number.
@@ -2291,15 +2288,15 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):  # pylint: disabl
     Args:
         a (Union[int, float, bool, list, tuple, Tensor]): Input data. The histogram
             is computed over the flattened array.
-        bins (
+        bins (Union[int, tuple, list, Tensor], optional): If `bins` is an int, it defines the number
             of equal-width bins in the given range (10, by default). If `bins` is a
             sequence, it defines the bin edges, including the rightmost edge,
             allowing for non-uniform bin widths.
-        range((float, float), optional): The lower and upper range of the bins. If
+        range ((float, float), optional): The lower and upper range of the bins. If
             not provided, `range` is simply ``(a.min(), a.max())``. Values outside
             the range are ignored. The first element of the range must be less than
             or equal to the second. Default: ``None`` .
-        weights(Union[int, float, bool, list, tuple, Tensor], optional): An array of weights,
+        weights (Union[int, float, bool, list, tuple, Tensor], optional): An array of weights,
            of the same shape as `a`. Each value in `a` only contributes its associated weight
            towards the bin count (instead of 1). This is currently not used by any of the bin
            estimators, but may be in the future. Default: ``None`` .
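A quick sketch of the clarified `bins`/`range` parameters (values follow the docstring above; the expected edges assume an evenly divided range):

    import mindspore.numpy as mnp

    edges = mnp.histogram_bin_edges([1, 2, 3, 4], bins=4, range=(0., 4.))
    print(edges)   # expected: [0. 1. 2. 3. 4.]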
@@ -2623,10 +2620,10 @@ def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0,
     Args:
         arr (Union[list, tuple, Tensor]): The array to pad.
         pad_width (Union[int, tuple, list]): Number of values padded to the edges of
-            each axis.
-            unique pad widths for each axis.
-            before and after pad for each axis.
-            for
+            each axis. ``((before_1, after_1), ... (before_N, after_N))`` creates
+            unique pad widths for each axis. ``((before, after),)`` yields same
+            before and after pad for each axis. ``(pad,)`` or int is a shortcut
+            for ``before = after = pad width`` for all axes.
         mode (string, optional):
             One of the following string values:
 
@@ -2645,26 +2642,26 @@ def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0,
             are used to pad the end and the end values are used to pad the beginning.
         - empty: Pads with undefined values.
         - <function>: The padding function, if used, should modify and return a new 1-d tensor.
-          It has the following signature:
+          It has the following signature: ``padding_func(tensor, iaxis_pad_width, iaxis, kwargs)``
         stat_length (Union[tuple, list, int], optional): Used in 'maximum', 'mean',
             'median', and 'minimum'. Number of values at edge of each axis used
-            to calculate the statistic value.
-            creates unique statistic lengths for each axis.
-            yields same before and after statistic lengths for each axis.
-            or int is a shortcut for
+            to calculate the statistic value. ``((before_1, after_1), ... (before_N, after_N))``
+            creates unique statistic lengths for each axis. ``((before, after),)``
+            yields same before and after statistic lengths for each axis. ``(stat_length,)``
+            or int is a shortcut for ``before = after = statistic length`` for all
             axes. Default: ``None``, to use the entire axis.
         constant_values (Union[tuple, list, int], optional):
-            Used in
-            axis.
-            constants for each axis.
-            after constants for each axis.
-            a shortcut for
+            Used in ``constant mode``. The values to set the padded values for each
+            axis. ``((before_1, after_1), ... (before_N, after_N))`` creates unique pad
+            constants for each axis. ``((before, after),)`` yields same before and
+            after constants for each axis. ``(constant,)`` or ``constant`` is
+            a shortcut for ``before = after = constant`` for all axes. Default: ``0`` .
         end_values (Union[tuple, list, int], optional): Used in 'linear_ramp'. The values
             used for the ending value of the linear_ramp and that will form the edge of
-            the padded `arr`.
-            unique end values for each axis.
-            and after end values for each axis.
-            is a shortcut for
+            the padded `arr`. ``((before_1, after_1), ... (before_N, after_N))``
+            unique end values for each axis. ``((before, after),)`` yields same before
+            and after end values for each axis. ``(constant,)`` or ``constant``
+            is a shortcut for ``before = after = constant`` for all axes. Default: ``0`` .
         reflect_type(string, optional) can choose between 'even' and 'odd'. Used in
             'reflect', and 'symmetric'. The 'even' style is the default with an
             unaltered reflection around the edge value. For the 'odd' style, the extended
@@ -2680,7 +2677,7 @@ def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0,
         TypeError: If `arr`, `pad_width`, `stat_length`, `constant_values` or `end_values`
             have types not specified above.
         ValueError: If `mode` cannot be recognized, or if `pad_width`, `stat_length`,
-            `constant_values`, `end_values` cannot broadcast to
+            `constant_values`, `end_values` cannot broadcast to ``(arr.ndim, 2)``,
            or if keyword arguments got unexpected inputs.
        NotImplementedError: If mode is function or 'median'.
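To make the restored `pad_width` shorthand concrete, a small sketch using the default constant mode (shapes follow directly from the docstring rules above):

    import mindspore.numpy as mnp

    arr = mnp.ones((2, 2))
    print(mnp.pad(arr, 1).shape)                  # int shorthand: before = after = 1 -> (4, 4)
    print(mnp.pad(arr, ((1, 0), (0, 2))).shape)   # per-axis (before, after) -> (3, 4)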