mindspore 2.7.0__cp310-cp310-win_amd64.whl → 2.7.0rc1__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +2 -2
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -0
- mindspore/_extends/parse/parser.py +22 -28
- mindspore/_extends/parse/standard_method.py +1 -15
- mindspore/_extends/pijit/pijit_func_white_list.py +5 -2
- mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
- mindspore/amp.py +18 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/common/__init__.py +12 -18
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +38 -102
- mindspore/common/_utils.py +1 -9
- mindspore/common/api.py +106 -155
- mindspore/common/{dynamic_shape/auto_dynamic_shape.py → auto_dynamic_shape.py} +23 -17
- mindspore/common/dtype.py +57 -98
- mindspore/common/dump.py +1 -1
- mindspore/common/file_system.py +9 -59
- mindspore/common/hook_handle.py +3 -22
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +20 -4
- mindspore/common/recompute.py +4 -2
- mindspore/common/tensor.py +52 -38
- mindspore/communication/_hccl_management.py +297 -0
- mindspore/context.py +21 -15
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +1 -35
- mindspore/dataset/engine/datasets.py +315 -330
- mindspore/dataset/engine/datasets_user_defined.py +22 -38
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +5 -17
- mindspore/dataset/vision/utils.py +21 -632
- mindspore/device_context/ascend/op_tuning.py +1 -35
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -3
- mindspore/include/api/cell.h +4 -28
- mindspore/include/api/cfg.h +7 -24
- mindspore/include/api/context.h +0 -1
- mindspore/include/api/delegate.h +2 -0
- mindspore/include/api/dual_abi_helper.h +19 -100
- mindspore/include/api/graph.h +1 -14
- mindspore/include/api/kernel.h +3 -16
- mindspore/include/api/kernel_api.h +1 -9
- mindspore/include/api/metrics/accuracy.h +0 -9
- mindspore/include/api/model.h +1 -5
- mindspore/include/api/model_group.h +0 -4
- mindspore/include/api/model_parallel_runner.h +0 -2
- mindspore/include/api/status.h +10 -48
- mindspore/include/api/types.h +1 -6
- mindspore/include/dataset/constants.h +0 -9
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +2 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -5
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/distributed/__init__.py +0 -4
- mindspore/mint/distributed/distributed.py +14 -217
- mindspore/mint/nn/layer/_functions.py +2 -1
- mindspore/mint/nn/layer/conv.py +6 -6
- mindspore/mint/nn/layer/normalization.py +3 -3
- mindspore/nn/cell.py +174 -216
- mindspore/nn/layer/activation.py +2 -4
- mindspore/nn/layer/basic.py +13 -7
- mindspore/nn/layer/image.py +1 -1
- mindspore/nn/optim/adam.py +3 -1
- mindspore/nn/optim/lamb.py +3 -1
- mindspore/nn/optim/tft_wrapper.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +5 -39
- mindspore/nn/wrap/grad_reducer.py +15 -0
- mindspore/numpy/array_creations.py +2 -2
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_op_impl/cpu/__init__.py +0 -1
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +2 -12
- mindspore/ops/auto_generate/gen_extend_func.py +4 -4
- mindspore/ops/auto_generate/gen_ops_def.py +16 -290
- mindspore/ops/auto_generate/gen_ops_prim.py +76 -563
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/function/__init__.py +0 -1
- mindspore/ops/function/array_func.py +6 -10
- mindspore/ops/function/debug_func.py +2 -4
- mindspore/ops/function/grad/grad_func.py +12 -4
- mindspore/ops/function/math_func.py +32 -44
- mindspore/ops/function/nn_func.py +20 -18
- mindspore/ops/functional.py +1 -2
- mindspore/ops/functional_overload.py +12 -23
- mindspore/ops/operations/_inner_ops.py +12 -11
- mindspore/ops/operations/array_ops.py +50 -4
- mindspore/ops/operations/comm_ops.py +15 -1
- mindspore/ops/operations/custom_ops.py +4 -10
- mindspore/ops/operations/debug_ops.py +6 -6
- mindspore/ops/operations/manually_defined/ops_def.py +12 -12
- mindspore/ops/operations/math_ops.py +5 -5
- mindspore/ops/operations/nn_ops.py +1 -1
- mindspore/ops/primitive.py +10 -3
- mindspore/ops/tensor_method.py +7 -16
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +16 -0
- mindspore/parallel/_auto_parallel_context.py +15 -5
- mindspore/parallel/_parallel_serialization.py +2 -3
- mindspore/parallel/_ps_context.py +2 -2
- mindspore/parallel/_transformer/transformer.py +4 -4
- mindspore/parallel/_utils.py +11 -5
- mindspore/parallel/auto_parallel.py +9 -23
- mindspore/parallel/checkpoint_transform.py +0 -2
- mindspore/parallel/cluster/process_entity/_api.py +1 -4
- mindspore/parallel/cluster/run.py +3 -5
- mindspore/parallel/function/reshard_func.py +5 -6
- mindspore/parallel/nn/parallel_cell_wrapper.py +3 -40
- mindspore/parallel/nn/parallel_grad_reducer.py +8 -0
- mindspore/parallel/shard.py +21 -7
- mindspore/parallel/transform_safetensors.py +4 -10
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +9 -10
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +0 -9
- mindspore/profiler/common/profiler_context.py +2 -25
- mindspore/profiler/common/profiler_meta_data.py +0 -1
- mindspore/profiler/common/profiler_op_analyse.py +6 -10
- mindspore/{ops/_op_impl/cpu/joinedstr_op.py → profiler/common/validator/__init__.py} +1 -15
- mindspore/profiler/common/validator/validate_path.py +84 -0
- mindspore/profiler/dynamic_profiler.py +46 -91
- mindspore/profiler/envprofiler.py +5 -30
- mindspore/profiler/experimental_config.py +1 -16
- mindspore/profiler/platform/cpu_profiler.py +4 -10
- mindspore/profiler/platform/npu_profiler.py +1 -1
- mindspore/profiler/profiler.py +145 -193
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/runtime/__init__.py +4 -6
- mindspore/runtime/executor.py +0 -27
- mindspore/runtime/memory.py +0 -1
- mindspore/runtime/thread_bind_core.py +1 -1
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +3 -3
- mindspore/train/amp.py +3 -0
- mindspore/train/callback/_callback.py +1 -2
- mindspore/train/callback/_checkpoint.py +8 -1
- mindspore/train/callback/_flops_collector.py +6 -10
- mindspore/train/callback/_train_fault_tolerance.py +7 -3
- mindspore/train/data_sink.py +4 -4
- mindspore/train/dataset_helper.py +5 -5
- mindspore/train/model.py +20 -4
- mindspore/train/serialization.py +15 -35
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +1 -1
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +193 -192
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +0 -1109
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/dynamic_shape/enable_dynamic.py +0 -197
- /mindspore/common/{dynamic_shape/_auto_dynamic.py → _auto_dynamic.py} +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/include/api/status.h
CHANGED

@@ -128,75 +128,37 @@ enum StatusCode : uint32_t {
 
 class MS_API Status {
  public:
-  /// \brief Constructor of Status.
   Status();
-  /// \brief Constructor of Status.
-  ///
-  /// \param[in] status_code Status code.
-  ///
-  /// \param[in] status_msg Status message.
   inline Status(enum StatusCode status_code, const std::string &status_msg = "");  // NOLINT(runtime/explicit)
-  /// \brief Constructor of Status.
   inline Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = "");
-
+
   ~Status() = default;
-
-  ///
-  /// \return Enum of status code.
+
   enum StatusCode StatusCode() const;
-  /// \brief Exchange status to string.
-  ///
-  /// \return Status code exchanged to string.
   inline std::string ToString() const;
-
-  ///
-  /// \return Line of code to get.
+
   int GetLineOfCode() const;
-  /// \brief Get file name of status.
-  ///
-  /// \return File name to get.
   inline std::string GetFileName() const;
-  /// \brief Get error description of status.
-  ///
-  /// \return Error description to get.
   inline std::string GetErrDescription() const;
-  /// \brief Get error description of status.
-  ///
-  /// \param[in] err_description Error description to be set.
   inline std::string SetErrDescription(const std::string &err_description);
-  /// \brief Status message to be set.
-  ///
-  /// \param[in] status_msg Status message to be set.
   inline void SetStatusMsg(const std::string &status_msg);
-
+
   MS_API friend std::ostream &operator<<(std::ostream &os, const Status &s);
-
+
   bool operator==(const Status &other) const;
-  /// \brief Operator ==.
   bool operator==(enum StatusCode other_code) const;
-  /// \brief Operator !=.
   bool operator!=(const Status &other) const;
-  /// \brief Operator !=.
   bool operator!=(enum StatusCode other_code) const;
-
+
   explicit operator bool() const;
-  /// \brief Operator int().
   explicit operator int() const;
-
-  ///
-  /// \return Status Code of ok.
+
   static Status OK();
-
-  ///
-  /// \return True if it is ok.
+
   bool IsOk() const;
-
-  ///
-  /// \return True if it is error.
+
   bool IsError() const;
-
-  ///
-  /// \return The code name as string type.
+
   static inline std::string CodeAsString(enum StatusCode c);
 
  private:
mindspore/include/api/types.h
CHANGED

@@ -74,7 +74,6 @@ class Allocator;
 /// \brief The MSTensor class defines a tensor in MindSpore.
 class MS_API MSTensor {
  public:
-  /// \brief Impl class of MSTensor.
   class Impl;
   /// \brief Creates a MSTensor object, whose data need to be copied before accessed by Model, must be used in pairs
   /// with DestroyTensorPtr.
@@ -162,17 +161,13 @@ class MS_API MSTensor {
   ///
   /// \param[in] tensor A MSTensor object.
   static void DestroyTensorPtr(MSTensor *tensor) noexcept;
-
+
   MSTensor();
-  /// \brief Constructor of MSTensor.
   explicit MSTensor(const std::shared_ptr<Impl> &impl);
-  /// \brief Constructor of MSTensor.
   // if malloc data, user need to free after constructing MSTensor, else memory leak.
   inline MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data,
                   size_t data_len);
-  /// \brief Constructor of MSTensor.
   explicit MSTensor(std::nullptr_t);
-  /// \brief Destructor of MSTensor.
   ~MSTensor();
 
   /// \brief Obtains the name of the MSTensor.
mindspore/include/dataset/constants.h
CHANGED

@@ -356,15 +356,6 @@ constexpr int32_t kMaxLegalPort = 65535;
 // Invalid OpenCV type should not be from 0 to 7 (opencv4/opencv2/core/hal/interface.h)
 constexpr uint8_t kCVInvalidType = 255;
 
-constexpr size_t dimension_zero = 0;
-constexpr size_t dimension_one = 1;
-constexpr size_t dimension_two = 2;
-constexpr size_t dimension_three = 3;
-constexpr size_t size_one = 1;
-constexpr size_t size_two = 2;
-constexpr size_t size_three = 3;
-constexpr size_t size_four = 4;
-
 using connection_id_type = uint64_t;
 using session_id_type = uint32_t;
 using row_id_type = int64_t;
mindspore/jpeg62.dll
CHANGED

Binary file
mindspore/mindrecord/tools/cifar10.py
CHANGED

@@ -40,8 +40,7 @@ class CifarMD5Validator:
                'data_batch_5': '482c414d41f54cd18b22e5b47cb7c3cb',
                'test_batch': '40351d587109b95175f43aff81a1287e'}
 
-
-    def calculate_md5(file_path):
+    def calculate_md5(self, file_path):
         """
         Calculate MD5 hash of a file.
 
@@ -77,7 +76,7 @@ class CifarMD5Validator:
             KeyError: If file_name is not found in md5_map.
         """
         expected_md5 = self.md5_map.get(file_name)
-        actual_md5 =
+        actual_md5 = self.calculate_md5(os.path.join(file_path, file_name))
 
         if actual_md5 is None or expected_md5 is None or actual_md5 != expected_md5:
             logger.warning(f"The MD5 value of {file_name} does not match the official CIFAR10 file."
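For context, the fixed validator computes a file's MD5 digest and compares it against the known-good CIFAR-10 digests listed above. A minimal, self-contained sketch of such a chunked MD5 computation (the chunk size and missing-file handling here are illustrative, not copied from the MindSpore source):

import hashlib
import os

def calculate_md5(file_path, chunk_size=8192):
    """Return the hex MD5 digest of a file, reading it in chunks to bound memory use."""
    if not os.path.isfile(file_path):
        return None  # a missing file is treated as a mismatch by the caller
    md5 = hashlib.md5()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()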
mindspore/mindrecord/tools/cifar10_to_mr.py
CHANGED

@@ -49,6 +49,11 @@ class Cifar10ToMR:
        >>> mindrecord_file = "/path/to/mindrecord/file"
        >>> cifar10_to_mr = Cifar10ToMR(cifar10_dir, mindrecord_file)
        >>> cifar10_to_mr.transform()
+
+    .. warning::
+        Cifar10ToMR.transform() uses `pickle` module implicitly, which is known to be insecure.
+        It is possible to construct malicious pickle data which will execute arbitrary code during unpickling.
+        Never load data that could have come from an untrusted source, or that could have been tampered with.
    """
 
    def __init__(self, source, destination):
@@ -107,11 +112,6 @@ class Cifar10ToMR:
        Note:
            Please refer to the Examples of :class:`mindspore.mindrecord.Cifar10ToMR` .
 
-        .. warning::
-            `Cifar10ToMR.transform()` uses `pickle` module implicitly, which is known to be insecure.
-            It is possible to construct malicious pickle data which will execute arbitrary code during unpickling.
-            Never load data that could have come from an untrusted source, or that could have been tampered with.
-
        Args:
            fields (list[str], optional): A list of index fields. Default: ``None`` . For index field settings,
                please refer to :func:`mindspore.mindrecord.FileWriter.add_index` .
The following binary libraries also changed between the two versions (the diff shows only "Binary file" for each, with no content diff):

mindspore/mindspore_backend_common.dll
mindspore/mindspore_backend_manager.dll
mindspore/mindspore_common.dll
mindspore/mindspore_core.dll
mindspore/mindspore_cpu_res_manager.dll
mindspore/mindspore_dump.dll
mindspore/mindspore_frontend.dll
mindspore/mindspore_glog.dll
mindspore/mindspore_memory_pool.dll
mindspore/mindspore_ms_backend.dll
mindspore/mindspore_ops.dll
mindspore/mindspore_ops_host.dll
mindspore/mindspore_ops_kernel_common.dll
mindspore/mindspore_profiler.dll
mindspore/mindspore_pyboost.dll
mindspore/mindspore_pynative.dll
mindspore/mindspore_res_manager.dll
mindspore/mindspore_runtime_pipeline.dll
mindspore/mint/distributed/__init__.py
CHANGED

@@ -37,11 +37,9 @@ from mindspore.mint.distributed.distributed import (
     get_group_rank,
     all_reduce,
     all_gather_into_tensor,
-    all_gather_into_tensor_uneven,
     all_to_all,
     all_to_all_single,
     reduce_scatter_tensor,
-    reduce_scatter_tensor_uneven,
     isend,
     irecv,
     send,
@@ -75,11 +73,9 @@ __all__ = [
     "get_group_rank",
     "all_reduce",
     "all_gather_into_tensor",
-    "all_gather_into_tensor_uneven",
     "all_to_all",
     "all_to_all_single",
     "reduce_scatter_tensor",
-    "reduce_scatter_tensor_uneven",
     "isend",
     "irecv",
     "send",
mindspore/mint/distributed/distributed.py
CHANGED

@@ -58,11 +58,9 @@ from mindspore.ops.auto_generate.gen_ops_prim import (
     dist_comm_isend_op,
     dist_comm_all_to_all_v_op,
     dist_comm_reduce_scatter_tensor_op,
-    dist_comm_reduce_scatter_tensor_uneven_op,
     dist_comm_all_to_all_v_single_op,
     dist_comm_broadcast_op,
     dist_comm_all_gather_into_tensor_op,
-    dist_comm_all_gather_into_tensor_uneven_op,
     dist_comm_irecv_op,
     dist_comm_scatter_tensor_op,
     dist_comm_gather_into_tensor_op,
@@ -991,22 +989,6 @@ def _check_all_tensor_same_dtype_and_shape(*tensor_lists):
     )
 
 
-@_primexpr
-def _check_output_shape(output, expected_shape, op_name):
-    if output.shape != expected_shape:
-        raise TypeError(
-            f"For {op_name}, the output shape should be {expected_shape}, "
-            f"but got {output.shape}.")
-
-
-@_primexpr
-def _check_output_dtype(output, expected_dtype, op_name):
-    if output.dtype != expected_dtype:
-        raise TypeError(
-            f"For {op_name}, the output dtype should be {expected_dtype}, "
-            f"but got {output.dtype}.")
-
-
 def all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False):
     """
     Reduce tensors across all devices in such a way that all deviceswill get the same final result,
@@ -1171,91 +1153,6 @@ def all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=False):
     return handle
 
 
-def all_gather_into_tensor_uneven(output, input, output_split_sizes=None, group=None, async_op=False):
-    r"""
-    Gathers and concatenates tensors across devices with uneven first dimensions.
-
-    Note:
-        - Input tensors must have identical shapes except for the first dimension.
-        - Output tensor's first dimension should equal to the sum of all devices' input first dimensions.
-
-    Args:
-        output (Tensor): Concatenated output tensor with shape :math:`(\sum_{i=0}^{N-1} x_{i1}, x_2, ..., x_R)`,
-            where N is the number of devices in the group.
-        input (Tensor): Local input tensor with shape :math:`(x_{k1}, x_2, ..., x_R)`, where k is current device's rank.
-        output_split_sizes (list[int], optional): Specifies first dimension sizes from each device.
-            Must match actual input dimensions when provided.
-            If ``None``, assumes equal split sizes across devices. Default: ``None``.
-        group (str, optional): The communication group to work on. If ``None``,
-            which means ``"hccl_world_group"`` in Ascend. Default: ``None``.
-        async_op (bool, optional): Whether this operator should be an async operator. Default: ``False``.
-
-    Returns:
-        CommHandle, CommHandle is an async work handle, if `async_op` is set to True.
-        CommHandle will be None, when `async_op` is False.
-
-    Raises:
-        ValueError: If the shape of `input` does not match the constraints of `output_split_sizes`.
-        RuntimeError: If device target is invalid, or backend is invalid, or distributed initialization fails.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        .. note::
-            Before running the following examples, you need to configure the communication environment variables.
-
-            For Ascend devices, it is recommended to use the msrun startup method
-            without any third-party or configuration file dependencies.
-            Please see the `msrun start up
-            <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
-            for more details.
-
-            This example should be run with 2 devices.
-
-        >>> import numpy as np
-        >>> import mindspore as ms
-        >>> from mindspore import ops
-        >>> from mindspore.mint.distributed import init_process_group, get_rank
-        >>> from mindspore.mint.distributed import all_gather_into_tensor_uneven
-        >>> from mindspore import Tensor
-        >>>
-        >>> ms.set_device(device_target="Ascend")
-        >>> init_process_group()
-        >>> if get_rank() == 0:
-        >>>     input_tensor = Tensor(np.ones([3, 4]).astype(np.float32))
-        >>> else:
-        >>>     input_tensor = Tensor(np.ones([2, 4]).astype(np.float32))
-        >>> out_tensor = Tensor(np.zeros([5, 4]).astype(np.float32))
-        >>> output_split_sizes = [3, 2]
-        >>> output = all_gather_into_tensor_uneven(out_tensor, input_tensor, output_split_sizes)
-        >>> print(out_tensor)
-        [[1. 1. 1. 1.]
-         [1. 1. 1. 1.]
-         [1. 1. 1. 1.]
-         [1. 1. 1. 1.]
-         [1. 1. 1. 1.]]
-    """
-    if group is None:
-        group = GlobalComm.WORLD_COMM_GROUP
-    if not isinstance(group, str):
-        raise TypeError(
-            "The argument 'group' must be type of string, "
-            "but got 'group' type : {}.".format(type(group))
-        )
-    if not isinstance(async_op, bool):
-        raise TypeError(
-            f"The argument 'async_op' must be a bool, but got {type(async_op)}."
-        )
-    group_size = get_cache_group_size(group)
-    output_split_sizes = [] if output_split_sizes is None else output_split_sizes
-    result = dist_comm_all_gather_into_tensor_uneven_op(
-        output, input, output_split_sizes, group_size, group
-    )
-    _, handle = _deal_comm_outputs(result, async_op)
-    return handle
-
-
 def reduce_scatter_tensor(output, input, op=ReduceOp.SUM, group=None, async_op=False):
     r"""
     Reduces and scatters tensors from the specified communication group and
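With `all_gather_into_tensor_uneven` removed in 2.7.0rc1, gathers in which every rank contributes the same number of rows still go through `all_gather_into_tensor`, whose signature appears in the hunk header above. A minimal sketch, assuming a 2-device Ascend group launched with msrun as in the removed docstring example:

import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.mint.distributed import init_process_group
from mindspore.mint.distributed import all_gather_into_tensor

ms.set_device(device_target="Ascend")
init_process_group()
# Every rank contributes a [2, 4] block; the output holds 2 ranks * 2 rows = 4 rows.
input_tensor = Tensor(np.ones([2, 4]).astype(np.float32))
output_tensor = Tensor(np.zeros([4, 4]).astype(np.float32))
handle = all_gather_into_tensor(output_tensor, input_tensor)  # handle is None unless async_op=True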
@@ -1346,101 +1243,6 @@ def reduce_scatter_tensor(output, input, op=ReduceOp.SUM, group=None, async_op=False):
     return handle
 
 
-def reduce_scatter_tensor_uneven(output, input, input_split_sizes=None, op=ReduceOp.SUM, group=None, async_op=False):
-    r"""
-    Reduce tensors from the specified communication group and scatter to the output tensor
-    according to `input_split_sizes`.
-
-    Note:
-        - The input tensor must have identical shape and format across all processes.
-        - The first dimension of input tensor should equal to the sum of `input_split_sizes`.
-
-    Args:
-        output(Tensor): the output tensor has the same dtype as `input` with a shape of
-            :math:`(input_split_sizes[rank], *)`, where rank is the local rank id of the device.
-        input(Tensor): The input tensor to be reduced and scattered, Expected shape :math:`(N, *)`, where `*`
-            means any number of additional dimensions. N must equal the sum of `input_split_sizes` across ranks.
-        input_split_sizes (list[int], optional): List specifying how to split the first dimension of input tensor.
-            If ``None``, splits evenly according to group size. Default: ``None``.
-        op (str, optional): Specifies an operation used for element-wise reductions,
-            One of ReduceOp: 'SUM', 'MIN', 'MAX'. Default: ``ReduceOp.SUM``.
-        group (str, optional): The communication group to work on. If ``None``, which means ``"hccl_world_group"`` in
-            Ascend. Default: ``None``.
-        async_op (bool, optional): Whether this operator should be an async operator. Default: ``False``.
-
-    Returns:
-        CommHandle, CommHandle is an async work handle, if `async_op` is set to True.
-        CommHandle will be None, when `async_op` is False.
-
-    Raises:
-        ValueError: If the shape of `output` does not match the constraints of `input_split_sizes`.
-        RuntimeError: If device target is invalid, or backend is invalid, or distributed initialization fails.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        .. note::
-            Before running the following examples, you need to configure the communication environment variables.
-
-            For Ascend devices, it is recommended to use the msrun startup method
-            without any third-party or configuration file dependencies.
-            Please see the `msrun start up
-            <https://www.mindspore.cn/tutorials/en/master/parallel/msrun_launcher.html>`_
-            for more details.
-
-            This example should be run with 2 devices.
-
-        >>> import mindspore as ms
-        >>> from mindspore import Tensor
-        >>> from mindspore.mint.distributed import init_process_group, get_rank
-        >>> from mindspore.mint.distributed import reduce_scatter_tensor_uneven
-        >>> import numpy as np
-        >>>
-        >>> ms.set_device(device_target="Ascend")
-        >>> init_process_group()
-        >>> input_tensor = Tensor(np.ones([5, 8]).astype(np.float32))
-        >>> if get_rank() == 0:
-        >>>     output_tensor = Tensor(np.ones([2, 8]).astype(np.float32))
-        >>> else:
-        >>>     output_tensor = Tensor(np.ones([3, 8]).astype(np.float32))
-        >>> input_split_sizes = [2, 3]
-        >>> output = reduce_scatter_tensor_uneven(output_tensor, input_tensor, input_split_sizes)
-        >>> print(output_tensor)
-        rank 0:
-        [[2. 2. 2. 2. 2. 2. 2. 2.]
-         [2. 2. 2. 2. 2. 2. 2. 2.]]
-        rank 1:
-        [[2. 2. 2. 2. 2. 2. 2. 2.]
-         [2. 2. 2. 2. 2. 2. 2. 2.]
-         [2. 2. 2. 2. 2. 2. 2. 2.]]
-    """
-    if not isinstance(op, str):
-        raise TypeError("For reduce_scatter_tensor_uneven, the input op type must be str")
-    if op not in ("sum", "min", "max"):
-        raise TypeError(
-            "For reduce_scatter_tensor_uneven, the input op value must be one of sum, prod, min, max"
-        )
-    if group is None:
-        group = GlobalComm.WORLD_COMM_GROUP
-    if not isinstance(group, str):
-        raise TypeError(
-            "The argument 'group' must be type of string, "
-            "but got 'group' type : {}.".format(type(group))
-        )
-    if not isinstance(async_op, bool):
-        raise TypeError(
-            f"The argument 'async_op' must be a bool, but got {type(async_op)}."
-        )
-    input_split_sizes = [] if input_split_sizes is None else input_split_sizes
-    rank_size = get_cache_group_size(group)
-    result = dist_comm_reduce_scatter_tensor_uneven_op(
-        output, input, input_split_sizes, rank_size, op, group
-    )
-    _, handle = _deal_comm_outputs(result, async_op)
-    return handle
-
-
 def reduce(tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
     """
     Reduces tensors across the processes in the specified communication group, sends the result
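Similarly, with `reduce_scatter_tensor_uneven` gone, the even-split `reduce_scatter_tensor` shown in the surrounding context remains the supported path. A minimal sketch under the same 2-device assumption:

import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.mint.distributed import init_process_group
from mindspore.mint.distributed import reduce_scatter_tensor

ms.set_device(device_target="Ascend")
init_process_group()
# The first dimension (4) is split evenly across 2 ranks; each rank receives a [2, 8] slice
# containing the element-wise sum of all ranks' corresponding rows.
input_tensor = Tensor(np.ones([4, 8]).astype(np.float32))
output_tensor = Tensor(np.zeros([2, 8]).astype(np.float32))
handle = reduce_scatter_tensor(output_tensor, input_tensor)  # handle is None unless async_op=True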
@@ -2584,7 +2386,10 @@ def all_to_all_single(output,
 
 def _check_tensor_list(tensor_list, tensor, group_size):
     """check all elements in tensor_list are type of Tensor or tuple or list"""
-
+    if not tensor_list or len(tensor_list) != group_size:
+        raise TypeError(
+            f"The argument list tensor len must be equal to group rank size, but got {len(tensor_list)}."
+        )
     if tensor.dtype != tensor_list[0].dtype:
         raise TypeError(
             f"The argument list tensor type must be equal to tensor type, but got {tensor_list[0].dtype}."
@@ -2595,17 +2400,13 @@ def _check_tensor_list(tensor_list, tensor, group_size):
     )
 
 
-def _check_group_tensor_list(tensor_list, group_size):
-    if not tensor_list or len(tensor_list) != group_size:
-        raise TypeError(
-            f"The argument list tensor len must be equal to group rank size, but got {len(tensor_list)}."
-        )
-
-
 def all_gather(tensor_list, tensor, group=None, async_op=False):
     """
     Gathers tensors from the specified communication group and returns the tensor list which is all gathered.
 
+    Note:
+        The tensors must have the same shape and format in all processes of the collection.
+
     Args:
         tensor_list (list[Tensor]): Output list.
         tensor (Tensor): The input tensor to be all gathered into tensor.
@@ -2660,7 +2461,7 @@ def all_gather(tensor_list, tensor, group=None, async_op=False):
 
     """
     _check_all_tensors(tensor_list)
-
+    _check_all_tensor_same_dtype_and_shape(tensor_list)
     if not isinstance(tensor, (Tensor, Tensor_)):
         raise TypeError("For all_gather_into_tensor, the input tensor must be tensor")
     if group is None:
@@ -2675,10 +2476,7 @@ def all_gather(tensor_list, tensor, group=None, async_op=False):
             f"The argument 'async_op' must be a bool, but got {type(async_op)}."
         )
     group_size = get_cache_group_size(group)
-
-    rank_id = get_group_rank_from_world_rank(get_rank(), group)
-    _check_output_shape(tensor, tensor_list[rank_id].shape, "all_gather")
-    _check_output_dtype(tensor, tensor_list[0].dtype, "all_gather")
+    _check_tensor_list(tensor_list, tensor, group_size)
     result = dist_comm_all_gather_op(tensor_list, tensor, group_size, group)
     _, handle = _deal_comm_outputs(result, async_op)
     return handle
@@ -2689,6 +2487,9 @@ def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
     Reduces and scatters tensors from the specified communication group and
     returns the tensor which is reduced and scattered.
 
+    Note:
+        The tensors must have the same shape and format in all processes of the collection.
+
     Args:
         output (Tensor): the output tensor.
         input_list (list[Tensor]): List of tensors to reduce and scatter.
@@ -2742,7 +2543,7 @@ def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
     """
 
     _check_all_tensors(input_list)
-
+    _check_all_tensor_same_dtype_and_shape(input_list)
     if not isinstance(output, (Tensor, Tensor_)):
         raise TypeError("For reduce_scatter, the output tensor must be tensor")
     if group is None:
@@ -2763,11 +2564,7 @@ def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
             "For reduce_scatter, the input op value must be one of sum, prod, min, max"
         )
     rank_size = get_cache_group_size(group)
-
-
-    rank_id = get_group_rank_from_world_rank(get_rank(), group)
-    _check_output_shape(output, input_list[rank_id].shape, "reduce_scatter")
-    _check_output_dtype(output, input_list[0].dtype, "reduce_scatter")
+    _check_tensor_list(input_list, output, rank_size)
     result = dist_comm_reduce_scatter_op(output, input_list, rank_size, op, group)
     _, handle = _deal_comm_outputs(result, async_op)
     return handle
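The net effect of the hunks above is that `all_gather` and `reduce_scatter` now validate their list argument through the consolidated `_check_tensor_list`: the list length must equal the group size and every element must match the reference tensor's dtype and shape, otherwise a TypeError is raised. A minimal sketch, assuming a 2-device group and that `all_gather` is importable from `mindspore.mint.distributed` like its siblings in the `__init__.py` hunk earlier:

import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.mint.distributed import init_process_group
from mindspore.mint.distributed import all_gather

ms.set_device(device_target="Ascend")
init_process_group()
x = Tensor(np.ones([2, 3]).astype(np.float32))
# One output slot per rank, each matching x's dtype and shape; a shorter list or a
# mismatched element now fails the consolidated _check_tensor_list validation.
tensor_list = [Tensor(np.zeros([2, 3]).astype(np.float32)) for _ in range(2)]
handle = all_gather(tensor_list, x)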
mindspore/mint/nn/layer/_functions.py
CHANGED

@@ -23,7 +23,7 @@ from mindspore.communication.management import get_rank, get_group_size, GlobalComm
 from mindspore.ops.auto_generate.gen_ops_prim import BatchNormReduceGrad
 from mindspore.ops.auto_generate.gen_ops_prim import BatchNormElemtGrad
 from mindspore.ops.primitive import Primitive, prim_arg_register, PrimitiveWithInfer, prim_attr_register
-from mindspore.ops.operations.comm_ops import ReduceOp, check_collective_target_dtype
+from mindspore.ops.operations.comm_ops import ReduceOp, check_hcom_group_valid, check_collective_target_dtype
 
 batch_norm_reduce_grad = BatchNormReduceGrad()
 batch_norm_elemt_grad = BatchNormElemtGrad()
@@ -71,6 +71,7 @@ class AllReduce(Primitive):
         if not isinstance(self.group, str):
             raise TypeError(f"For '{self.name}', the 'group' must be str, "
                             f"but got {type(self.group).__name__}.")
+        check_hcom_group_valid(self.group, prim_name=self.name)
         self.op = op
         self.add_prim_attr('group', self.group)
         self.add_prim_attr('fusion', 0)
mindspore/mint/nn/layer/conv.py
CHANGED

@@ -200,6 +200,8 @@ class Conv1d(_Conv):
             possible length. Extra sequence that could not complete a full stride will
             be discarded.
 
+        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation
             rate to use for dilated convolution.
             It can be a single int or a tuple/list of 1 integer.
@@ -217,8 +219,6 @@ class Conv1d(_Conv):
             - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
 
         bias (bool, optional): Whether the Conv1d layer has a bias parameter. Default: ``True`` .
-        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
-            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
 
     Variables:
@@ -393,6 +393,8 @@ class Conv2d(_Conv):
             possible height and width. Extra pixels that could not complete a full stride will
             be discarded.
 
+        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use
             for dilated convolution.
             It can be a single int or a tuple/list of 2 integers. A single int means the dilation size is the same
@@ -413,8 +415,6 @@ class Conv2d(_Conv):
             - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
 
         bias (bool, optional): Whether the Conv2d layer has a bias parameter. Default: ``True`` .
-        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
-            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
 
     Variables:
@@ -600,6 +600,8 @@ class Conv3d(_Conv):
             possible height and width. Extra pixels that could not complete a full stride will
             be discarded.
 
+        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
+            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
         dilation (Union[int, tuple[int], list[int]], optional): Controlling the space between the kernel points.
             Default: ``1`` .
         groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
@@ -613,8 +615,6 @@ class Conv3d(_Conv):
             - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
 
         bias (bool, optional): Whether the Conv3d layer has a bias parameter. Default: ``True`` .
-        padding_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
-            ``"zeros"`` , ``"reflect"`` or ``"replicate"`` . Default: ``"zeros"`` .
        dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``None``, using ``mstype.float32``.
 
     Variables:
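The relocated `padding_mode` entry documents an existing keyword of the mint convolution layers. A minimal sketch of how it is typically passed (the other argument names here follow the usual Conv2d signature and are assumed rather than taken from this diff):

import numpy as np
from mindspore import Tensor
from mindspore.mint import nn

# "reflect" pads with mirrored border values instead of zeros.
net = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1, padding_mode="reflect")
x = Tensor(np.random.randn(1, 3, 16, 16).astype(np.float32))
y = net(x)  # spatial size is preserved: padding=1 with a 3x3 kernel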
mindspore/mint/nn/layer/normalization.py
CHANGED

@@ -218,7 +218,7 @@ class BatchNorm1d(_BatchNorm):
         dim = len(shape)
         if dim != 2 and dim != 3:
             raise ValueError(
-                "expected 2D or 3D input
+                "expected 2D or 3D input (got {}D input)".format(dim)
             )
 
 
@@ -288,7 +288,7 @@ class BatchNorm2d(_BatchNorm):
         dim = len(shape)
         if dim != 4:
             raise ValueError(
-                "expected 4D input
+                "expected 4D input (got {}D input)".format(dim)
             )
 
 
@@ -356,7 +356,7 @@ class BatchNorm3d(_BatchNorm):
         dim = len(shape)
         if dim != 5:
             raise ValueError(
-                "expected 5D input
+                "expected 5D input (got {}D input)".format(dim)
             )
 
 
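The three hunks above touch the input-rank checks of the mint BatchNorm layers; on the 2.7.0rc1 side the ValueError message reports the rank of the offending input. A minimal sketch of the behaviour this implies, assuming the usual `num_features` constructor argument for `mindspore.mint.nn.BatchNorm2d`:

import numpy as np
from mindspore import Tensor
from mindspore.mint import nn

bn = nn.BatchNorm2d(num_features=3)
x = Tensor(np.ones([3, 8, 8]).astype(np.float32))  # 3D input, but BatchNorm2d expects 4D (N, C, H, W)
try:
    bn(x)
except ValueError as err:
    print(err)  # message on the rc1 side: "expected 4D input (got 3D input)"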