mindspore-2.3.0rc1-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.3.0rc2-cp38-cp38-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +20 -0
- mindspore/_extends/parse/parser.py +1 -1
- mindspore/_extends/parse/standard_method.py +6 -5
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -5
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -2
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_stub_tensor.py +1 -0
- mindspore/common/api.py +56 -4
- mindspore/common/dtype.py +5 -3
- mindspore/common/dump.py +2 -2
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +17 -6
- mindspore/common/parameter.py +7 -2
- mindspore/common/recompute.py +247 -0
- mindspore/common/sparse_tensor.py +2 -2
- mindspore/common/symbol.py +1 -1
- mindspore/common/tensor.py +74 -36
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/management.py +30 -30
- mindspore/context.py +28 -15
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +2 -2
- mindspore/dataset/audio/transforms.py +51 -51
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +3 -3
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +3 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +3 -3
- mindspore/dataset/engine/datasets_vision.py +68 -68
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +26 -26
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/transforms.py +92 -92
- mindspore/dataset/vision/utils.py +1 -1
- mindspore/experimental/optim/adadelta.py +2 -2
- mindspore/experimental/optim/adagrad.py +2 -2
- mindspore/experimental/optim/adam.py +2 -2
- mindspore/experimental/optim/adamax.py +2 -2
- mindspore/experimental/optim/adamw.py +2 -2
- mindspore/experimental/optim/asgd.py +2 -2
- mindspore/experimental/optim/lr_scheduler.py +24 -20
- mindspore/experimental/optim/nadam.py +2 -2
- mindspore/experimental/optim/optimizer.py +1 -1
- mindspore/experimental/optim/radam.py +2 -2
- mindspore/experimental/optim/rmsprop.py +2 -2
- mindspore/experimental/optim/rprop.py +2 -2
- mindspore/experimental/optim/sgd.py +2 -2
- mindspore/hal/stream.py +2 -0
- mindspore/include/mindapi/base/types.h +5 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +2 -2
- mindspore/mint/__init__.py +457 -0
- mindspore/mint/nn/__init__.py +430 -0
- mindspore/mint/nn/functional.py +424 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +186 -0
- mindspore/multiprocessing/__init__.py +4 -0
- mindspore/nn/__init__.py +3 -0
- mindspore/nn/cell.py +51 -47
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/nn/extend/layer/__init__.py +27 -0
- mindspore/nn/extend/layer/normalization.py +107 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/basic.py +109 -1
- mindspore/nn/layer/container.py +2 -2
- mindspore/nn/layer/conv.py +6 -6
- mindspore/nn/layer/embedding.py +1 -1
- mindspore/nn/layer/normalization.py +21 -43
- mindspore/nn/layer/padding.py +4 -0
- mindspore/nn/optim/ada_grad.py +2 -2
- mindspore/nn/optim/adadelta.py +1 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +7 -7
- mindspore/nn/optim/adamax.py +2 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +1 -1
- mindspore/nn/optim/lamb.py +3 -3
- mindspore/nn/optim/lars.py +1 -1
- mindspore/nn/optim/lazyadam.py +2 -2
- mindspore/nn/optim/momentum.py +2 -2
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +2 -2
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/sgd.py +2 -2
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +9 -9
- mindspore/nn/wrap/grad_reducer.py +5 -5
- mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
- mindspore/ops/_vmap/vmap_math_ops.py +27 -8
- mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
- mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
- mindspore/ops/auto_generate/gen_extend_func.py +274 -0
- mindspore/ops/auto_generate/gen_ops_def.py +889 -22
- mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
- mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
- mindspore/ops/extend/__init__.py +9 -1
- mindspore/ops/extend/array_func.py +134 -27
- mindspore/ops/extend/math_func.py +3 -3
- mindspore/ops/extend/nn_func.py +363 -2
- mindspore/ops/function/__init__.py +19 -2
- mindspore/ops/function/array_func.py +463 -439
- mindspore/ops/function/clip_func.py +7 -18
- mindspore/ops/function/grad/grad_func.py +5 -5
- mindspore/ops/function/linalg_func.py +4 -4
- mindspore/ops/function/math_func.py +260 -243
- mindspore/ops/function/nn_func.py +825 -62
- mindspore/ops/function/random_func.py +73 -4
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/function/vmap_func.py +1 -1
- mindspore/ops/functional.py +2 -2
- mindspore/ops/op_info_register.py +1 -31
- mindspore/ops/operations/__init__.py +2 -3
- mindspore/ops/operations/_grad_ops.py +2 -107
- mindspore/ops/operations/_inner_ops.py +5 -5
- mindspore/ops/operations/_sequence_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +11 -233
- mindspore/ops/operations/comm_ops.py +32 -32
- mindspore/ops/operations/custom_ops.py +7 -89
- mindspore/ops/operations/manually_defined/ops_def.py +329 -4
- mindspore/ops/operations/math_ops.py +13 -163
- mindspore/ops/operations/nn_ops.py +9 -316
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +3 -3
- mindspore/ops/primitive.py +2 -2
- mindspore/ops_generate/arg_dtype_cast.py +12 -3
- mindspore/ops_generate/arg_handler.py +24 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
- mindspore/ops_generate/gen_pyboost_func.py +13 -6
- mindspore/ops_generate/pyboost_utils.py +2 -17
- mindspore/parallel/__init__.py +3 -2
- mindspore/parallel/_auto_parallel_context.py +106 -1
- mindspore/parallel/_parallel_serialization.py +34 -2
- mindspore/parallel/_utils.py +16 -0
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/checkpoint_transform.py +249 -77
- mindspore/parallel/cluster/process_entity/_api.py +1 -1
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +1 -1
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
- mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
- mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
- mindspore/profiler/parser/ascend_op_generator.py +26 -9
- mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
- mindspore/profiler/parser/profiler_info.py +11 -1
- mindspore/profiler/profiling.py +13 -5
- mindspore/rewrite/api/node.py +12 -12
- mindspore/rewrite/api/symbol_tree.py +11 -11
- mindspore/run_check/_check_version.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +2 -2
- mindspore/train/amp.py +4 -4
- mindspore/train/anf_ir_pb2.py +8 -2
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +2 -2
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +4 -4
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +2 -2
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/dataset_helper.py +8 -3
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/mind_ir_pb2.py +22 -17
- mindspore/train/model.py +15 -15
- mindspore/train/serialization.py +18 -18
- mindspore/train/summary/summary_record.py +7 -7
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +223 -209
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/nn/extend/layer/normalization.py
ADDED
@@ -0,0 +1,107 @@
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""normalization"""
+from __future__ import absolute_import
+from __future__ import division
+
+from mindspore.ops import functional as F
+from mindspore.common.parameter import Parameter
+from mindspore.common.initializer import initializer
+from mindspore.common import dtype as mstype
+from mindspore.nn.cell import Cell
+
+__all__ = ['LayerNorm']
+
+
+class LayerNorm(Cell):
+    r"""
+    Applies Layer Normalization over a mini-batch of inputs.
+
+    Layer Normalization is widely used in recurrent neural networks. It applies
+    normalization on a mini-batch of inputs for each single training case as described
+    in the paper `Layer Normalization <https://arxiv.org/pdf/1607.06450.pdf>`_. Unlike Batch
+    Normalization, Layer Normalization performs exactly the same computation at training and
+    testing time. It is applied across all channels and pixels of each single sample rather
+    than across the batch. :math:`\gamma` and :math:`\beta` are trainable scale and shift.
+    It can be described using the following formula:
+
+    .. math::
+        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
+
+    Args:
+        normalized_shape (Union(tuple[int], list[int])): The normalized shape of `x` for LayerNorm.
+        gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\gamma` weight.
+            The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
+            ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'ones'`` .
+        beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\beta` weight.
+            The values of str refer to the function `initializer` including ``'zeros'`` , ``'ones'`` ,
+            ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'zeros'`` .
+        epsilon (float): A value added to the denominator for numerical stability (:math:`\epsilon`). Default: ``1e-5`` .
+        dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
+
+    Inputs:
+        - **x** (Tensor) - The shape is :math:`(N, *)`, where :math:`*` means any number of additional dimensions.
+
+    Outputs:
+        Tensor, the normalized and scaled offset tensor, has the same shape and data type as `x`.
+
+    Raises:
+        TypeError: If `epsilon` is not a float.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> import numpy as np
+        >>> x = ms.Tensor(np.ones([20, 5, 10, 10]), ms.float32)
+        >>> shape1 = x.shape[1:]
+        >>> m = ms.nn.extend.LayerNorm(shape1)
+        >>> output = m(x).shape
+        >>> print(output)
+        (20, 5, 10, 10)
+    """
+
+    def __init__(self,
+                 normalized_shape,
+                 gamma_init='ones',
+                 beta_init='zeros',
+                 epsilon=1e-5,
+                 dtype=mstype.float32
+                 ):
+        """Initialize LayerNorm."""
+        super(LayerNorm, self).__init__()
+        if not isinstance(normalized_shape, (tuple, list)):
+            raise TypeError(f"For '{self.cls_name}', the type of 'normalized_shape' must be tuple[int] or list[int], "
+                            f"but got {normalized_shape} and the type is {type(normalized_shape)}.")
+        if not normalized_shape:
+            raise ValueError(
+                f"Expected normalized_shape to be at least 1-dimensional, i.e., containing at "
+                f"least one element, but got normalized_shape = {normalized_shape}"
+            )
+        self.normalized_shape = normalized_shape
+        self.epsilon = epsilon
+        self.gamma = Parameter(initializer(
+            gamma_init, normalized_shape, dtype=dtype), name="gamma")
+        self.beta = Parameter(initializer(
+            beta_init, normalized_shape, dtype=dtype), name="beta")
+
+    def construct(self, input_x):
+        y = F.layer_norm(input_x, self.normalized_shape, self.gamma.astype(input_x.dtype),
+                         self.beta.astype(input_x.dtype), self.epsilon)
+        return y
+
+    def extend_repr(self):
+        return 'normalized_shape={}, gamma={}, beta={}'.format(self.normalized_shape, self.gamma, self.beta)
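Because the class is a thin wrapper over `F.layer_norm`, the formula in the docstring is easy to sanity-check outside MindSpore. A minimal numpy sketch under the docstring's shapes (everything here is illustrative, not package API):

```python
import numpy as np

def layer_norm_ref(x, normalized_shape, gamma, beta, eps=1e-5):
    # LayerNorm normalizes over the trailing `normalized_shape` axes, per sample:
    # y = (x - E[x]) / sqrt(Var[x] + eps) * gamma + beta
    axes = tuple(range(x.ndim - len(normalized_shape), x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * gamma + beta

x = np.random.randn(20, 5, 10, 10).astype(np.float32)
shape1 = x.shape[1:]
y = layer_norm_ref(x, shape1, np.ones(shape1, np.float32), np.zeros(shape1, np.float32))
print(y.shape)  # (20, 5, 10, 10), same as the docstring example
```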
mindspore/nn/extend/pooling.py
ADDED
@@ -0,0 +1,117 @@
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""pooling"""
+from __future__ import absolute_import
+
+from mindspore.ops.auto_generate.gen_ops_prim import MaxPoolWithIndices, MaxPoolWithMask
+from mindspore.nn.cell import Cell
+
+__all__ = ['MaxPool2d']
+
+
+class MaxPool2d(Cell):
+    r"""
+    Applies a 2D max pooling over an input Tensor which can be regarded as a composition of 2D planes.
+
+    Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, and MaxPool2d outputs
+    regional maxima over the :math:`(H_{in}, W_{in})` dimensions. Given kernel size
+    :math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows:
+
+    .. math::
+        \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
+        \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
+
+    .. warning::
+        Only supported on the Atlas training series.
+
+    Args:
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the max value,
+            an int number or a single-element tuple meaning height and width are both kernel_size,
+            or a tuple of two int numbers that represent height and width respectively.
+            Default: ``1`` .
+        stride (Union[int, tuple[int], None]): The distance of kernel moving, an int number or a single-element
+            tuple meaning the height and width of movement are both stride, or a tuple of two int numbers that
+            represent height and width of movement respectively.
+            Default: ``None`` , which indicates the moving step is `kernel_size` .
+        padding (Union(int, tuple[int], list[int])): Specifies the padding value of the pooling operation.
+            Default: ``0`` . `padding` can only be an integer or a tuple/list containing one or two integers. If
+            `padding` is an integer or a tuple/list containing one integer, it will be padded `padding` times in the
+            four directions of the input. If `padding` is a tuple/list containing two integers, it will be padded
+            `padding[0]` times in the up-down direction of the input and `padding[1]` times in the left-right direction
+            of the input.
+        dilation (Union(int, tuple[int])): The spacing between the elements of the kernel in convolution,
+            used to increase the receptive field of the pooling operation. If it is a tuple, it must contain one or two
+            integers. Default: ``1`` .
+        return_indices (bool): If ``True`` , the function will return both the result of max pooling and the indices of
+            the max elements. Default: ``False`` .
+        ceil_mode (bool): If ``True`` , use ceil to compute the output shape instead of floor. Default: ``False`` .
+
+    Inputs:
+        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
+
+    Outputs:
+        If `return_indices` is ``False`` , return a Tensor `output`, else return a tuple (`output`, `argmax`).
+
+        - **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`. It has the
+          same data type as `input`.
+        - **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int32.
+
+        .. math::
+            H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding[0]} - \text{dilation[0]}
+            \times (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
+
+        .. math::
+            W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding[1]} - \text{dilation[1]}
+            \times (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        ValueError: If the length of the shape of `input` is not equal to 4.
+        TypeError: If `kernel_size` , `stride` , `padding` or `dilation` is not int or tuple.
+        ValueError: If `kernel_size`, `stride` or `dilation` is less than 1.
+        ValueError: If `dilation` is not all 1.
+        ValueError: If `padding` is less than 0.
+        ValueError: If `padding` is more than half of `kernel_size`.
+        TypeError: If `ceil_mode` is not bool.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> import numpy as np
+        >>> pool = ms.nn.extend.MaxPool2d(kernel_size=3, stride=1)
+        >>> input = ms.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), ms.float32)
+        >>> output = pool(input)
+        >>> print(output.shape)
+        (1, 2, 2, 2)
+    """
+
+    def __init__(self, kernel_size=1, stride=None, padding=0, dilation=1, return_indices=False,
+                 ceil_mode=False):
+        """Initialize MaxPool2d."""
+        super(MaxPool2d, self).__init__()
+        self.return_indices = return_indices
+        strides = stride if (stride is not None) else kernel_size
+        if return_indices:
+            self.max_pool_func_ = MaxPoolWithIndices(kernel_size, strides, padding, dilation, ceil_mode)
+        else:
+            self.max_pool_func_ = MaxPoolWithMask(kernel_size, strides, padding, dilation, ceil_mode)
+
+    def construct(self, input):
+        out, indices = self.max_pool_func_(input)
+        if self.return_indices:
+            return out, indices
+        return out
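The `H_out`/`W_out` formulas above are handy to have as a standalone helper when choosing kernel, stride, and padding. A plain-Python sketch (a hypothetical helper, not part of the package):

```python
import math

def pooled_size(size_in, kernel, stride, padding=0, dilation=1, ceil_mode=False):
    # floor/ceil((size_in + 2*padding - dilation*(kernel-1) - 1) / stride) + 1,
    # matching the docstring's H_out/W_out formulas per spatial dimension.
    num = size_in + 2 * padding - dilation * (kernel - 1) - 1
    rounded = math.ceil(num / stride) if ceil_mode else math.floor(num / stride)
    return rounded + 1

# Matches the docstring example: 4x4 input, kernel 3, stride 1 -> 2x2 output.
print(pooled_size(4, kernel=3, stride=1))  # 2
```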
mindspore/nn/generator.py
ADDED
@@ -0,0 +1,297 @@
+# Copyright 2024 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Generator"""
+import os
+
+import numpy as np
+
+from mindspore import context
+from mindspore.common.parameter import Parameter
+from mindspore.nn.cell import Cell
+from mindspore.ops.operations import Assign, AssignAdd, Depend
+
+
+class Generator(Cell):
+    """
+    A generator that manages the state of random numbers and provides seed and offset for random functions.
+    When the seed and offset are fixed, the random function generates the same random sequence.
+
+    Inputs:
+        - **step** (int) - Set the step size for offset update.
+
+    Outputs:
+        Tuple consisting of the seed and offset of the generator.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindspore.nn import Generator
+        >>> import numpy as np
+        >>> np.random.seed(10)
+        >>> ms.set_context(mode=1)
+        >>> generator = Generator()
+        >>> print(generator.get_state())
+        (Tensor(shape=[], dtype=Int32, value= 0), Tensor(shape=[], dtype=Int32, value= 0))
+        >>> print(generator(12))
+        (0, 0)
+        >>> print(generator.get_state())
+        (Tensor(shape=[], dtype=Int32, value= 0), Tensor(shape=[], dtype=Int32, value= 12))
+        >>> generator.manual_seed(20)
+        >>> print(generator.get_state())
+        (Tensor(shape=[], dtype=Int32, value= 20), Tensor(shape=[], dtype=Int32, value= 0))
+        >>> print(generator.seed())
+        1165313289
+        >>> print(generator.initial_seed())
+        1165313289
+    """
+
+    def __init__(self):
+        super(Generator, self).__init__()
+        self._assign = Assign().set_device("CPU")
+        self._assign_add = AssignAdd().set_device("CPU")
+        self._depend = Depend()
+        self._seed = Parameter(0, name="seed", requires_grad=False)
+        self._offset = Parameter(0, name="offset", requires_grad=False)
+        self._seed_val = 0
+        self._offset_val = 0
+
+    def set_state(self, seed, offset=None):  # pylint: disable=redefined-outer-name
+        """
+        Sets the generator state.
+
+        Args:
+            seed (int): Seed of the generator.
+            offset (int, optional): Offset of the generator. Default: ``None`` , which means ``0``.
+        """
+        self._seed_val = int(seed)
+        self._assign(self._seed, self._seed_val)
+        if offset is None:
+            offset = 0
+        self._offset_val = int(offset)
+        self._assign(self._offset, self._offset_val)
+
+    def get_state(self):
+        """
+        Get the generator state.
+
+        Returns:
+            Tuple consisting of the seed and offset of the generator.
+        """
+        return self._seed.value(), self._offset.value()
+
+    def seed(self):  # pylint: disable=redefined-outer-name
+        """
+        Generate a random seed that can be used as the seed of the generator.
+
+        Returns:
+            Tensor, the randomly generated seed.
+        """
+        seed_ = np.random.randint(np.iinfo(np.int32).min, np.iinfo(np.int32).max)
+        self.set_state(seed_)
+        return self._seed.value()
+
+    def manual_seed(self, seed):  # pylint: disable=redefined-outer-name
+        """
+        Sets the generator seed.
+
+        Args:
+            seed (int): The seed to set.
+
+        Returns:
+            Generator, the generator itself.
+        """
+        self.set_state(seed)
+        return self
+
+    def initial_seed(self):
+        """
+        Return the initial seed of the generator.
+
+        Returns:
+            The initial seed of the generator.
+        """
+        return self._seed.value()
+
+    def construct(self, step):
+        """
+        Update the value of offset, and return the seed and the previous offset.
+
+        Args:
+            step (int): Update offset by step.
+
+        Returns:
+            Seed and offset before the update.
+        """
+        offset = self._offset.value()
+        step = self._depend(step, offset)
+        self._assign_add(self._offset, step)
+        return self._seed.value(), offset
+
+    def __call__(self, step):
+        if os.getenv("MS_JIT") != '0' and context.get_context("mode") == context.GRAPH_MODE:
+            return super().__call__(step)
+
+        offset_val = self._offset_val
+        self._offset_val += step
+        self._offset.set_data(self._offset_val)
+        return self._seed_val, offset_val
+
+
+default_generator_ = None
+
+
+def _init_default_generator():
+    global default_generator_
+    default_generator_ = Generator()
+    default_generator_.seed()
+
+
+def default_generator():
+    """
+    Return the default generator object.
+
+    When the user does not specify a generator, random operators invoke the default generator to generate
+    random numbers.
+
+    Returns:
+        The default generator.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore.nn import default_generator
+        >>> default_gen = default_generator()
+        >>> print(type(default_gen))
+        <class 'mindspore.nn.generator.Generator'>
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    return default_generator_
+
+
+def seed():  # pylint: disable=redefined-outer-name
+    """
+    Generate a random seed that can be used as the seed of the default generator.
+
+    Returns:
+        The randomly generated seed.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore.nn import seed
+        >>> np.random.seed(20)
+        >>> print(seed())
+        1663920602
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    return default_generator_.seed()
+
+
+def manual_seed(seed):  # pylint: disable=redefined-outer-name
+    """
+    Sets the default generator seed.
+
+    Args:
+        seed (int): The seed to set.
+
+    Returns:
+        The default generator itself.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore.nn import manual_seed, initial_seed
+        >>> manual_seed(13)
+        >>> print(initial_seed())
+        13
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    default_generator_.manual_seed(seed)
+
+
+def initial_seed():
+    """
+    Return the initial seed of the default generator.
+
+    Returns:
+        The initial seed of the default generator.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore.nn import manual_seed, initial_seed
+        >>> manual_seed(14)
+        >>> print(initial_seed())
+        14
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    return default_generator_.initial_seed()
+
+
+def get_rng_state():
+    """
+    Get the default generator state.
+
+    Returns:
+        Tuple consisting of the seed and offset of the default generator.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore.nn import get_rng_state
+        >>> np.random.seed(20)
+        >>> print(get_rng_state())
+        (Tensor(shape=[], dtype=Int32, value= 378518883), Tensor(shape=[], dtype=Int32, value= 0))
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    return default_generator_.get_state()
+
+
+def set_rng_state(seed, offset=None):  # pylint: disable=redefined-outer-name
+    """
+    Sets the default generator state.
+
+    Args:
+        seed (int): Seed of the default generator.
+        offset (int, optional): Offset of the default generator. Default: ``None`` , which means ``0``.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore.nn import set_rng_state, get_rng_state
+        >>> set_rng_state(10)
+        >>> print(get_rng_state())
+        (Tensor(shape=[], dtype=Int32, value= 10), Tensor(shape=[], dtype=Int32, value= 0))
+    """
+    if default_generator_ is None:
+        _init_default_generator()
+    default_generator_.set_state(seed, offset)
+
+
+__all__ = ["Generator", "default_generator", "seed", "manual_seed", "initial_seed", "set_rng_state", "get_rng_state"]
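The pattern here is the counter-based (stateless) RNG contract: the random kernel is a pure function of (seed, offset), and the generator's only job is to hand out non-overlapping offsets. A small sketch of the same idea using numpy's Philox bit generator (an illustration of the pattern, not the kernel MindSpore actually uses):

```python
import numpy as np

def stateless_uniform(shape, seed, offset):
    # Same (seed, offset) -> same values; advancing offset yields a fresh,
    # non-overlapping stream, which is exactly what Generator hands out.
    rng = np.random.Generator(np.random.Philox(key=seed, counter=offset))
    return rng.random(shape)

a = stateless_uniform((2, 2), seed=20, offset=0)
b = stateless_uniform((2, 2), seed=20, offset=0)
c = stateless_uniform((2, 2), seed=20, offset=1)
print(np.array_equal(a, b))  # True: replayed state
print(np.array_equal(a, c))  # False: offset advanced
```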
mindspore/nn/layer/basic.py
CHANGED
@@ -27,6 +27,7 @@ from mindspore.common.tensor import Tensor
 from mindspore.common.initializer import initializer, HeUniform, Uniform
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
+from mindspore.ops.function.nn_func import interpolate_ext
 from mindspore.ops.operations import _inner_ops as inner
 from mindspore.ops.primitive import constexpr, Primitive, _primexpr
 from mindspore.common.parameter import Parameter
@@ -35,10 +36,12 @@ from mindspore import _checkparam as Validator
 from mindspore.nn.cell import Cell
 from mindspore.nn.layer.activation import get_activation
 from mindspore.common._decorator import deprecated
+from mindspore.ops.auto_generate import dropout_ext_op
+from mindspore.nn.generator import default_generator

 __all__ = ['Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'Pad', 'Unfold', 'Tril', 'Triu',
            'MatrixDiag', 'MatrixDiagPart', 'MatrixSetDiag', 'L1Regularizer', 'Dropout1d',
-           'Dropout2d', 'Dropout3d', 'Upsample', 'Roll', 'Identity', 'Unflatten']
+           'Dropout2d', 'Dropout3d', 'Upsample', 'Roll', 'Identity', 'Unflatten', 'DropoutExt']


 class L1Regularizer(Cell):
@@ -201,6 +204,70 @@ class Dropout(Cell):
         return f'p={self.p}'


+class DropoutExt(Cell):
+    r"""
+    Dropout layer for the input.
+
+    Dropout is a means of regularization that reduces overfitting by preventing correlations between neuronal
+    nodes. The operator randomly sets some neuron outputs to 0 according to `p`, the probability of discarding
+    during training, and the result is multiplied by :math:`\frac{1}{1-p}` during training.
+    During inference, this layer returns the same Tensor as the input `x`.
+
+    This technique is proposed in the paper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
+    <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ and has proved effective at reducing
+    over-fitting and preventing neurons from co-adaptation. See more details in `Improving neural networks by
+    preventing co-adaptation of feature detectors
+    <https://arxiv.org/pdf/1207.0580.pdf>`_.
+
+    Note:
+        - Each channel will be zeroed out independently on every construct call.
+          Parameter `p` is the probability of an element of the input tensor being zeroed.
+
+    Args:
+        p (float): The dropout rate, greater than or equal to 0 and less than 1.
+            E.g. p=0.9 drops out 90% of input neurons. Default: ``0.5`` .
+
+    Inputs:
+        - **x** (Tensor) - The input of Dropout with data type of float16 or float32.
+
+    Outputs:
+        Tensor, output tensor with the same shape as `x`.
+
+    Raises:
+        ValueError: If `p` is not in range [0, 1).
+        ValueError: If the length of the shape of `x` is less than 1.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, nn
+        >>> import numpy as np
+        >>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
+        >>> net = nn.DropoutExt(p=0.2)
+        >>> net.set_train()
+        >>> output = net(x)
+        >>> print(output.shape)
+        (2, 2, 3)
+    """
+
+    def __init__(self, p=0.5):
+        """Initialize DropoutExt."""
+        super(DropoutExt, self).__init__()
+        self.generator = default_generator()
+        self.dropout = dropout_ext_op
+        self.p = p
+
+    def construct(self, x):
+        if not self.training or self.p == 0:
+            return x
+
+        seed, offset = self.generator(1)
+        out, _ = self.dropout(x, self.p, seed, offset)
+        return out
+
+
 class Dropout1d(Cell):
     r"""
     During training, randomly zeroes entire channels of the input tensor with probability `p`
@@ -408,6 +475,47 @@ class Upsample(Cell):
         return out


+class UpsampleExt(Cell):
+    r"""
+    For details, please refer to :func:`mindspore.mint.interpolate`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindspore import mint
+        >>> x = ms.Tensor([[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]])
+        >>> upsample = mint.Upsample(size=(5, 5))
+        >>> out = upsample(x)
+        >>> print(x.asnumpy())
+        [[[[1. 2. 3. 4.]
+           [5. 6. 7. 8.]]]]
+        >>> print(out.asnumpy())
+        [[[[1. 1. 2. 3. 4.]
+           [1. 1. 2. 3. 4.]
+           [1. 1. 2. 3. 4.]
+           [5. 5. 6. 7. 8.]
+           [5. 5. 6. 7. 8.]]]]
+        >>> print(out.shape)
+        (1, 1, 5, 5)
+    """
+
+    def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None, recompute_scale_factor=None):
+        """Initialize UpsampleExt."""
+        super(UpsampleExt, self).__init__()
+        self.size = size
+        self.scale_factor = scale_factor
+        self.mode = mode
+        self.align_corners = align_corners
+        self.recompute_scale_factor = recompute_scale_factor
+
+    def construct(self, x):
+        out = interpolate_ext(x, self.size, self.scale_factor, self.mode,
+                              self.align_corners, self.recompute_scale_factor)
+        return out
+
+
 class Flatten(Cell):
     r"""
     Flatten the input Tensor along dimensions from `start_dim` to `end_dim`.
mindspore/nn/layer/container.py
CHANGED
@@ -123,7 +123,7 @@ class _CellListBase:
 class SequentialCell(Cell):
     """
     Sequential Cell container. For more details about Cell, please refer to
-    `Cell <https://www.mindspore.cn/docs/en/
+    `Cell <https://www.mindspore.cn/docs/en/master/api_python/nn/mindspore.nn.Cell.html#mindspore.nn.Cell>`_.

     A list of Cells will be added to it in the order they are passed in the constructor.
     Alternatively, an ordered dict of cells can also be passed in.
@@ -325,7 +325,7 @@ class SequentialCell(Cell):
 class CellList(_CellListBase, Cell):
     """
     Holds Cells in a list. For more details about Cell, please refer to
-    `Cell <https://www.mindspore.cn/docs/en/
+    `Cell <https://www.mindspore.cn/docs/en/master/api_python/nn/mindspore.nn.Cell.html#mindspore.nn.Cell>`_.

     CellList can be used like a regular Python list, the Cells it contains have been initialized and
     the types of Cells it contains can not be CellDict.