mindspore 2.2.10__cp38-none-any.whl → 2.2.14__cp38-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (152)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +2 -1
  3. mindspore/_akg/akg/composite/build_module.py +95 -5
  4. mindspore/_akg/akg/topi/cpp/impl.py +1 -1
  5. mindspore/_akg/akg/tvm/_ffi/base.py +1 -1
  6. mindspore/_akg/akg/utils/composite_op_helper.py +7 -2
  7. mindspore/_akg/akg/utils/dump_ascend_meta.py +22 -3
  8. mindspore/_akg/akg/utils/util.py +18 -1
  9. mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
  10. mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
  11. mindspore/_c_mindrecord.cpython-38-aarch64-linux-gnu.so +0 -0
  12. mindspore/_extends/parse/__init__.py +3 -2
  13. mindspore/_extends/parse/parser.py +6 -1
  14. mindspore/_extends/parse/standard_method.py +12 -2
  15. mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
  16. mindspore/bin/cache_admin +0 -0
  17. mindspore/bin/cache_server +0 -0
  18. mindspore/common/_utils.py +16 -0
  19. mindspore/common/tensor.py +0 -2
  20. mindspore/communication/management.py +3 -0
  21. mindspore/context.py +34 -4
  22. mindspore/dataset/engine/cache_client.py +8 -5
  23. mindspore/dataset/engine/datasets.py +23 -0
  24. mindspore/dataset/engine/validators.py +1 -1
  25. mindspore/dataset/vision/py_transforms_util.py +2 -2
  26. mindspore/experimental/optim/lr_scheduler.py +5 -6
  27. mindspore/lib/libdnnl.so.2 +0 -0
  28. mindspore/lib/libmindspore.so +0 -0
  29. mindspore/lib/libmindspore_backend.so +0 -0
  30. mindspore/lib/libmindspore_common.so +0 -0
  31. mindspore/lib/libmindspore_core.so +0 -0
  32. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  33. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  34. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  35. mindspore/lib/libmindspore_shared_lib.so +0 -0
  36. mindspore/lib/libopencv_core.so.4.5 +0 -0
  37. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  38. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  39. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  40. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  41. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +118 -0
  42. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  43. mindspore/lib/plugin/ascend/libakg.so +0 -0
  44. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  45. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  46. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  47. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  48. mindspore/lib/plugin/cpu/libakg.so +0 -0
  49. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  50. mindspore/mindrecord/tools/cifar100_to_mr.py +49 -57
  51. mindspore/mindrecord/tools/cifar10_to_mr.py +46 -55
  52. mindspore/mindrecord/tools/csv_to_mr.py +3 -8
  53. mindspore/mindrecord/tools/mnist_to_mr.py +4 -9
  54. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -4
  55. mindspore/nn/layer/activation.py +1 -1
  56. mindspore/nn/layer/embedding.py +2 -2
  57. mindspore/nn/layer/flash_attention.py +48 -135
  58. mindspore/nn/loss/loss.py +1 -1
  59. mindspore/nn/optim/ada_grad.py +2 -2
  60. mindspore/nn/optim/sgd.py +3 -2
  61. mindspore/nn/wrap/__init__.py +4 -2
  62. mindspore/nn/wrap/cell_wrapper.py +6 -3
  63. mindspore/numpy/math_ops.py +1 -1
  64. mindspore/ops/__init__.py +3 -0
  65. mindspore/ops/_grad_experimental/grad_array_ops.py +0 -31
  66. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  67. mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
  68. mindspore/ops/_grad_experimental/grad_math_ops.py +37 -17
  69. mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
  70. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  71. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +21 -2
  72. mindspore/ops/function/array_func.py +6 -5
  73. mindspore/ops/function/debug_func.py +1 -1
  74. mindspore/ops/function/linalg_func.py +21 -11
  75. mindspore/ops/function/math_func.py +3 -0
  76. mindspore/ops/function/nn_func.py +13 -11
  77. mindspore/ops/function/parameter_func.py +2 -0
  78. mindspore/ops/function/sparse_unary_func.py +2 -2
  79. mindspore/ops/function/vmap_func.py +1 -0
  80. mindspore/ops/operations/__init__.py +5 -2
  81. mindspore/ops/operations/_embedding_cache_ops.py +1 -1
  82. mindspore/ops/operations/_grad_ops.py +3 -4
  83. mindspore/ops/operations/_inner_ops.py +56 -1
  84. mindspore/ops/operations/_quant_ops.py +4 -4
  85. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  86. mindspore/ops/operations/array_ops.py +15 -4
  87. mindspore/ops/operations/custom_ops.py +1 -1
  88. mindspore/ops/operations/debug_ops.py +1 -1
  89. mindspore/ops/operations/image_ops.py +3 -3
  90. mindspore/ops/operations/inner_ops.py +49 -0
  91. mindspore/ops/operations/math_ops.py +65 -3
  92. mindspore/ops/operations/nn_ops.py +95 -28
  93. mindspore/ops/operations/random_ops.py +2 -0
  94. mindspore/ops/operations/sparse_ops.py +4 -4
  95. mindspore/ops/silent_check.py +162 -0
  96. mindspore/parallel/__init__.py +3 -2
  97. mindspore/parallel/_auto_parallel_context.py +82 -3
  98. mindspore/parallel/_parallel_serialization.py +34 -2
  99. mindspore/parallel/_tensor.py +3 -1
  100. mindspore/parallel/_transformer/transformer.py +8 -8
  101. mindspore/parallel/checkpoint_transform.py +191 -45
  102. mindspore/profiler/parser/ascend_cluster_generator.py +111 -0
  103. mindspore/profiler/parser/ascend_communicate_generator.py +315 -0
  104. mindspore/profiler/parser/ascend_flops_generator.py +8 -2
  105. mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
  106. mindspore/profiler/parser/ascend_hccl_generator.py +2 -2
  107. mindspore/profiler/parser/ascend_msprof_exporter.py +30 -6
  108. mindspore/profiler/parser/ascend_msprof_generator.py +16 -5
  109. mindspore/profiler/parser/ascend_op_generator.py +15 -7
  110. mindspore/profiler/parser/ascend_timeline_generator.py +5 -2
  111. mindspore/profiler/parser/base_timeline_generator.py +11 -3
  112. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
  113. mindspore/profiler/parser/framework_parser.py +8 -2
  114. mindspore/profiler/parser/memory_usage_parser.py +8 -2
  115. mindspore/profiler/parser/minddata_analyzer.py +8 -2
  116. mindspore/profiler/parser/minddata_parser.py +1 -1
  117. mindspore/profiler/parser/msadvisor_analyzer.py +4 -2
  118. mindspore/profiler/parser/msadvisor_parser.py +9 -3
  119. mindspore/profiler/profiling.py +97 -25
  120. mindspore/rewrite/api/node.py +1 -1
  121. mindspore/rewrite/api/symbol_tree.py +2 -2
  122. mindspore/rewrite/parsers/for_parser.py +6 -6
  123. mindspore/rewrite/parsers/module_parser.py +4 -4
  124. mindspore/scipy/ops.py +55 -5
  125. mindspore/scipy/optimize/__init__.py +3 -2
  126. mindspore/scipy/optimize/linear_sum_assignment.py +38 -33
  127. mindspore/train/callback/_checkpoint.py +8 -8
  128. mindspore/train/callback/_landscape.py +2 -3
  129. mindspore/train/callback/_summary_collector.py +6 -7
  130. mindspore/train/dataset_helper.py +6 -0
  131. mindspore/train/model.py +17 -5
  132. mindspore/train/serialization.py +6 -1
  133. mindspore/train/summary/_writer_pool.py +1 -1
  134. mindspore/train/summary/summary_record.py +5 -6
  135. mindspore/version.py +1 -1
  136. {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/METADATA +3 -2
  137. {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/RECORD +140 -148
  138. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  139. mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
  140. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +0 -406
  141. mindspore/ops/_op_impl/_custom_op/flash_attention/constants.py +0 -41
  142. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +0 -467
  143. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +0 -563
  144. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +0 -193
  145. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +0 -435
  146. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
  147. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +0 -45
  148. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +0 -67
  149. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +0 -62
  150. {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/WHEEL +0 -0
  151. {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/entry_points.txt +0 -0
  152. {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/top_level.txt +0 -0
mindspore/scipy/optimize/linear_sum_assignment.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
+ # Copyright 2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -15,59 +15,64 @@
  """Linear Sum Assignment"""
  import sys
  from ..ops import LinearSumAssignment
- from ...common import dtype as mstype
- from ..utils import _mstype_check, _dtype_check
  from ... import Tensor


- def _linear_sum_assignment(cost_matrix, maximize, dimension_limit=Tensor(sys.maxsize)):
- """
+ def linear_sum_assignment(cost_matrix, maximize, dimension_limit=Tensor(sys.maxsize)):
+ r"""
  Solve the linear sum assignment problem.

+ The assignment problem is represented as follows:
+
+ .. math::
+ min\sum_{i}^{} \sum_{j}^{} C_{i,j} X_{i,j}
+
+ where :math:`C` is cost matrix, :math:`X_{i,j} = 1` means column :math:`j` is assigned to row :math:`i` .
+
  Args:
- cost_matrix: 2-D Input Tensor.
- The cost matrix of the bipartite graph.
- maximize: 0-D Input bool Tensor.
- Calculates a maximum weight matching if true.
- dimension_limit: 0-D Input Tensor.
- A scalar used to limit the actual size of the 1st dimension. Optimized for
- padding scenes. Default means no dimension limit.
+ cost_matrix (Tensor): 2-D cost matrix. Tensor of shape :math:`(M, N)` .
+ maximize (bool): Calculate a maximum weight matching if true, otherwise calculate a minimum weight matching.
+ dimension_limit (Tensor, optional): A scalar used to limit the actual size of the 2nd dimension of
+ ``cost_matrix``. Default is ``Tensor(sys.maxsize)``, which means no limitation. The type is 0-D int64
+ Tensor.

  Returns:
- 1-D Output Tensors with 'row_indx' and 'col_idx'. An array of row indices and
- one of corresponding column indices giving the optimal assignment. If specified
- dimension_limit, padding value at the end would be -1.
+ A tuple of tensors containing 'row_idx' and 'col_idx'.
+
+ - **row_idx** (Tensor) - Row indices of the problem. If `dimension_limit` is given, -1 would be padded at the
+ end. The shape is :math:`(N, )` , where :math:`N` is the minimum value of `cost_matrix` dimension.
+ - **col_idx** (Tensor) - Column indices of the problem. If `dimension_limit` is given, -1 would be padded at
+ the end. The shape is :math:`(N, )` , where :math:`N` is the minimum value of `cost_matrix` dimension.
+
+ Raises:
+ TypeError: If the data type of `cost_matrix` is not the type in [float16, float32, float64,
+ int8, int16, int32, int64, uint8, uint16, uint32, uint64, bool]
+ TypeError: If the type of `maximize` is not bool.
+ TypeError: If the data type of `dimension_limit` is not int64.
+ ValueError: If the rank of `cost_matrix` is not 2.
+

  Supported Platforms:
- ``CPU``
+ ``Ascend`` ``CPU``

- Examples:
+ Examples:
  >>> import mindspore as ms
  >>> import numpy as np
  >>> from mindspore import Tensor
- >>> from mindspore.scipy.optimize.linear_sum_assignment import _linear_sum_assignment as lsap
- >>> cost_matrix = Tensor(np.array([[2, 3, 3], [3, 2, 3], [3, 3, 2]])).astype("float64")
+ >>> import mindspore.scipy.optimize.linear_sum_assignment as lsap
+ >>> cost_matrix = Tensor(np.array([[2, 3, 3], [3, 2, 3], [3, 3, 2]])).astype(ms.float64)
  >>> dimension_limit = Tensor(2)
- >>> maximize = Tensor(False)
+ >>> maximize = False
  >>> a, b = lsap(cost_matrix, maximize, dimension_limit)
  >>> print(a)
- [[0 1 -1]]
+ [0 1 -1]
  >>> print(b)
- [[0 1 -1]]
+ [0 1 -1]
  >>> a, b = lsap(cost_matrix, maximize)
  >>> print(a)
- [[0 1 2]]
+ [0 1 2]
  >>> print(b)
- [[0 1 2]]
+ [0 1 2]
  """
- func_name = 'linear_sum_assignment'
- _mstype_check(func_name, cost_matrix, mstype.TensorType, 'cost_matrix')
- _mstype_check(func_name, dimension_limit,
- mstype.TensorType, 'dimension_limit')
- _mstype_check(func_name, maximize, mstype.TensorType, 'maximize')
- _dtype_check(func_name, cost_matrix, [mstype.float32, mstype.float64])
- _dtype_check(func_name, dimension_limit, [mstype.int64])
- _dtype_check(func_name, maximize, [mstype.bool_])
-
  solve = LinearSumAssignment()
  return solve(cost_matrix, dimension_limit, maximize)
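For migration, a minimal sketch (assuming the renamed function is re-exported from `mindspore.scipy.optimize`, which the `__init__.py` change in this release suggests but does not show here): the former private `_linear_sum_assignment` becomes the public `linear_sum_assignment`, `maximize` is now a plain Python bool instead of a 0-D bool Tensor, and the results are 1-D.

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.scipy.optimize import linear_sum_assignment  # assumed re-export

cost_matrix = Tensor(np.array([[2, 3, 3], [3, 2, 3], [3, 3, 2]]), ms.float64)

# 2.2.10 style: row, col = _linear_sum_assignment(cost_matrix, Tensor(False))
row, col = linear_sum_assignment(cost_matrix, False)  # minimize total cost
print(row, col)  # [0 1 2] [0 1 2], now 1-D instead of the old 2-D output
```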
mindspore/train/callback/_checkpoint.py CHANGED
@@ -205,7 +205,7 @@ class CheckpointConfig:
  Get the value of steps to save checkpoint.

  Returns:
- Int, steps to save checkpoint.
+ int, steps to save checkpoint.
  """
  return self._save_checkpoint_steps

@@ -214,7 +214,7 @@ class CheckpointConfig:
  """Get the value of _save_checkpoint_seconds.

  Returns:
- Int, seconds to save the checkpoint file.
+ int, seconds to save the checkpoint file.
  """
  return self._save_checkpoint_seconds

@@ -224,7 +224,7 @@ class CheckpointConfig:
  Get the value of maximum number of checkpoint files can be saved.

  Returns:
- Int, Maximum number of checkpoint files can be saved.
+ int, Maximum number of checkpoint files can be saved.
  """
  return self._keep_checkpoint_max

@@ -244,7 +244,7 @@ class CheckpointConfig:
  Get the value of whether to merge and save the split Tensor in the automatic parallel scenario.

  Returns:
- Bool, whether to merge and save the split Tensor in the automatic parallel scenario.
+ bool, whether to merge and save the split Tensor in the automatic parallel scenario.
  """
  return self._integrated_save

@@ -254,7 +254,7 @@ class CheckpointConfig:
  Get the value of whether asynchronous execution saves the checkpoint to a file.

  Returns:
- Bool, whether asynchronous execution saves the checkpoint to a file.
+ bool, whether asynchronous execution saves the checkpoint to a file.
  """
  return self._async_save

@@ -294,7 +294,7 @@ class CheckpointConfig:
  Get the value of information dict saved to checkpoint file.

  Returns:
- Dict, the information saved to checkpoint file.
+ dict, the information saved to checkpoint file.
  """
  return self._append_dict

@@ -304,7 +304,7 @@ class CheckpointConfig:
  Get the value of whether to save map Parameter incrementally.

  Returns:
- Bool, whether to save map Parameter incrementally.
+ bool, whether to save map Parameter incrementally.
  """
  return self._map_param_inc

@@ -313,7 +313,7 @@ class CheckpointConfig:
  Get the policy of checkpoint.

  Returns:
- Dict, the information of checkpoint policy.
+ dict, the information of checkpoint policy.
  """
  checkpoint_policy = {'save_checkpoint_steps': self.save_checkpoint_steps,
  'save_checkpoint_seconds': self.save_checkpoint_seconds,
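The hunks above only normalize the docstring type names (Int to int, Bool to bool, Dict to dict). For context, a brief illustration of the read-only properties they document (constructor values here are arbitrary):

```python
from mindspore.train import CheckpointConfig

cfg = CheckpointConfig(save_checkpoint_steps=100, keep_checkpoint_max=5, async_save=True)
print(cfg.save_checkpoint_steps)  # 100 -> int
print(cfg.keep_checkpoint_max)    # 5   -> int
print(cfg.integrated_save)        # True -> bool
print(cfg.async_save)             # True -> bool
```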
mindspore/train/callback/_landscape.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2021-2022 Huawei Technologies Co., Ltd
+ # Copyright 2021-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -170,8 +170,7 @@ class SummaryLandscape:
  It can create landscape in PCA direction or random direction by calculating loss.

  Note:
- 1. When using SummaryLandscape, you need to run the code in `if __name__ == "__main__"` .
- 2. SummaryLandscape only supports Linux systems.
+ 1. SummaryLandscape only supports Linux systems.

  Args:
  summary_dir (str): The path of summary is used to save the model weight,
mindspore/train/callback/_summary_collector.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2020-2022 Huawei Technologies Co., Ltd
+ # Copyright 2020-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -77,12 +77,11 @@ class SummaryCollector(Callback):
  SummaryCollector also enables the summary operator to collect data to summary files.

  Note:
- 1. When using SummaryCollector, you need to run the code in `if __name__ == "__main__"` .
- 2. Multiple SummaryCollector instances in callback list are not allowed.
- 3. Not all information is collected at the training phase or at the eval phase.
- 4. SummaryCollector always record the data collected by the summary operator.
- 5. SummaryCollector only supports Linux systems.
- 6. The Summary is not supported when compile source with `-s on` option.
+ 1. Multiple SummaryCollector instances in callback list are not allowed.
+ 2. Not all information is collected at the training phase or at the eval phase.
+ 3. SummaryCollector always record the data collected by the summary operator.
+ 4. SummaryCollector only supports Linux systems.
+ 5. The Summary is not supported when compile source with `-s on` option.

  Args:
  summary_dir (str): The collected data will be persisted to this directory.
mindspore/train/dataset_helper.py CHANGED
@@ -444,6 +444,11 @@ class DatasetHelper:
  # Generally, it works in dynamic shape scenarios.
  return self.iter.get_data_info()

+ # pylint: disable=missing-docstring
+ def get_mbuf_queue_size(self):
+ # In sink mode, it returns the element numbers inside mbuf channel.
+ return self.iter.get_mbuf_queue_size()
+
  # pylint: disable=missing-docstring
  def get_send_info(self, run_context):
  # In sink mode, it returns the send information of dataset at this moment.
@@ -526,6 +531,7 @@ class _DatasetIter:
  self.release = dataset.__transfer_dataset__.release
  self.continue_send = dataset.__transfer_dataset__.continue_send
  self.get_data_info = dataset.__transfer_dataset__.get_data_info
+ self.get_mbuf_queue_size = dataset.__transfer_dataset__.get_mbuf_queue_size
  self.get_send_info = dataset.__transfer_dataset__.get_send_info
  if hasattr(dataset.__transfer_dataset__, "_reset"):
  self._reset = dataset.__transfer_dataset__._reset # pylint: disable=protected-access
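A rough sketch of the new query, assuming a sink-mode pipeline (`train_dataset` is a placeholder for any `mindspore.dataset` pipeline; the method is an internal helper, so treat this as illustrative only):

```python
from mindspore.train.dataset_helper import DatasetHelper

# train_dataset: any mindspore.dataset pipeline bound to a device channel (placeholder)
helper = DatasetHelper(train_dataset, dataset_sink_mode=True, sink_size=100)
print(helper.get_mbuf_queue_size())  # 0 until elements are buffered on the device
```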
mindspore/train/model.py CHANGED
@@ -22,6 +22,7 @@ import os
  import math
  import copy
  import importlib
+ import time
  import numpy as np

  import mindspore
@@ -523,6 +524,16 @@ class Model:
  dataset_sink_mode=True,
  sink_size=sink_size)
  self._warmup_dataset(epoch, train_dataset, sink_size)
+ if train_dataset.get_init_step() > 0:
+ mbuf_size = train_dataset.__transfer_dataset__.get_mbuf_queue_size()
+ while mbuf_size == 0:
+ time.sleep(10)
+ mbuf_size = train_dataset.__transfer_dataset__.get_mbuf_queue_size()
+ if mbuf_size != 0:
+ break
+ logger.warning(f"Failover mode, waiting for dataset recover to specify step, "
+ f"current device queue size: {mbuf_size}")
+
  if context.get_auto_parallel_context("pipeline_stages") > 1 and valid_dataset:
  train_network.add_flags_recursive(is_first_iteration=True)
  for inputs in train_dataset_helper:
@@ -967,7 +978,7 @@ class Model:
  of data will be transferred one by one. The limitation of data transmission per time is 256M.

  When dataset_sink_mode is True, the `step_end` method of the instance of Callback will be called at the end
- of step in PyNative mode or will be called at the end of epoch in Graph mode.
+ of step in PyNative mode, or will be called at the end of epoch in Graph mode.

  If dataset_sink_mode is True, dataset will be bound to this model and cannot be used by other models.

@@ -1004,6 +1015,7 @@ class Model:
  Default: 0.

  Examples:
+ >>> import mindspore as ms
  >>> from mindspore import nn
  >>> from mindspore.train import Model
  >>>
@@ -1515,8 +1527,8 @@ class Model:
  [ascend_context]
  rank_table_file = [path_a](storage initial path of the rank table file)
  [execution_plan]
- [op_name1] = data_type:float16 (operator named op_name1 is set to data type Float16)
- [op_name2] = data_type:float32 (operator named op_name2 is set to data type Float32)
+ [op_name1] = data_type:float16 (operator named op_name1 is set to data type float16)
+ [op_name2] = data_type:float32 (operator named op_name2 is set to data type float32)

  When only the config_path is configured, it is done as follows:

@@ -1649,8 +1661,8 @@ class Model:
  [ascend_context]
  rank_table_file = [path_a](storage initial path of the rank table file)
  [execution_plan]
- [op_name1] = data_type:float16 (operator named op_name1 is set to data type Float16)
- [op_name2] = data_type:float32 (operator named op_name2 is set to data type Float32)
+ [op_name1] = data_type:float16 (operator named op_name1 is set to data type float16)
+ [op_name2] = data_type:float32 (operator named op_name2 is set to data type float32)

  When only the config_path is configured, it is done as follows:

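Distilled from the training hunk above, a hedged standalone sketch of the new failover wait: when training resumes from a nonzero initial step, `Model` polls the device-side queue every 10 seconds until the recovering dataset has buffered data (`ds` stands in for the bound sink-mode dataset; the helper name is invented for this sketch):

```python
import time

def wait_for_dataset_recovery(ds, poll_seconds=10):
    """Block until the device-side mbuf channel reports buffered elements."""
    size = ds.__transfer_dataset__.get_mbuf_queue_size()
    while size == 0:
        print(f"Failover mode, waiting for dataset recovery, queue size: {size}")
        time.sleep(poll_seconds)
        size = ds.__transfer_dataset__.get_mbuf_queue_size()
```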
mindspore/train/serialization.py CHANGED
@@ -176,7 +176,7 @@ def _update_param(param, new_param, strict_load):

  def _type_convert(param, new_param, strict_load):
  """Whether to convert parameter's type during load checkpoint into network."""
- float_type = (mstype.float16, mstype.float32, mstype.float64)
+ float_type = (mstype.float16, mstype.float32, mstype.float64, mstype.bfloat16)
  int_type = (mstype.int8, mstype.int16, mstype.int32, mstype.int64)
  if not strict_load and ({param.data.dtype, new_param.data.dtype}.issubset(float_type) or
  {param.data.dtype, new_param.data.dtype}.issubset(int_type)):
@@ -1560,6 +1560,8 @@ def export(net, *inputs, file_name, file_format, **kwargs):
  3. Exporting functions decorated with :func:`mindspore.jit` to mindir format is supported.
  4. When exporting a function decorated with :func:`mindspore.jit`, the function should not involve
  class properties in calculations.
+ 5. AIR format is deprecated, and will be removed in a future version, please use other format or use
+ MindSpore Lite to do offline inference.

  Args:
  net (Union[Cell, function]): MindSpore network.
@@ -1633,6 +1635,9 @@ def export(net, *inputs, file_name, file_format, **kwargs):
  supported_formats = ['AIR', 'ONNX', 'MINDIR']
  if file_format not in supported_formats:
  raise ValueError(f"For 'export', 'file_format' must be one of {supported_formats}, but got {file_format}.")
+ if file_format == 'AIR':
+ logger.warning("AIR format is deprecated, and will be removed in a future version, please use other format or "
+ "use MindSpore Lite to do offline inference")
  Validator.check_file_name_by_regular(file_name)
  logger.info("exporting model file:%s format:%s.", file_name, file_format)

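Given the new deprecation, a small sketch of the preferred route (`TinyNet` is a stand-in network invented for this example):

```python
import numpy as np
from mindspore import nn, Tensor, export

class TinyNet(nn.Cell):  # stand-in network for illustration
    def __init__(self):
        super().__init__()
        self.dense = nn.Dense(32, 10)

    def construct(self, x):
        return self.dense(x)

net = TinyNet()
dummy_input = Tensor(np.ones([1, 32]).astype(np.float32))
export(net, dummy_input, file_name="tiny", file_format="MINDIR")
# file_format="AIR" still works in 2.2.14 but now logs the deprecation warning above
```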
mindspore/train/summary/_writer_pool.py CHANGED
@@ -31,7 +31,7 @@ from mindspore.train.summary.writer import LineageWriter, SummaryWriter, ExportW

  try:
  from multiprocessing import get_context
- ctx = get_context('forkserver')
+ ctx = get_context('fork')
  except ValueError:
  import multiprocessing as ctx

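The changed pattern in isolation, for readers unfamiliar with multiprocessing start methods: request a specific start method and fall back to the module default where it is unavailable (get_context raises ValueError for an unsupported method, e.g. 'fork' on Windows). This sketch only mirrors the stdlib usage shown above:

```python
try:
    from multiprocessing import get_context
    ctx = get_context('fork')  # 2.2.14 prefers 'fork' over 'forkserver'
except ValueError:             # the requested start method is unavailable
    import multiprocessing as ctx

if __name__ == "__main__":
    p = ctx.Process(target=print, args=("summary writer worker",))
    p.start()
    p.join()
```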
mindspore/train/summary/summary_record.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2020-2022 Huawei Technologies Co., Ltd
+ # Copyright 2020-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -141,12 +141,11 @@ class SummaryRecord:
  can be added by calling add_value.

  Note:
- 1. When using SummaryRecord, you need to run the code in `if __name__ == "__main__"` .
- 2. Make sure to close the SummaryRecord at the end, otherwise the process will not exit.
+ 1. Make sure to close the SummaryRecord at the end, otherwise the process will not exit.
  Please see the Example section below to learn how to close properly in two ways.
- 3. Only one SummaryRecord instance is allowed at a time, otherwise it will cause data writing problems.
- 4. SummaryRecord only supports Linux systems.
- 5. The Summary is not supported when compile source with `-s on` option.
+ 2. Only one SummaryRecord instance is allowed at a time, otherwise it will cause data writing problems.
+ 3. SummaryRecord only supports Linux systems.
+ 4. The Summary is not supported when compile source with `-s on` option.

  Args:
  log_dir (str): The log_dir is a directory location to save the summary.
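The Note's first item mentions closing properly in two ways; a minimal sketch of both (assuming a writable ./summary_dir on Linux):

```python
from mindspore.train.summary import SummaryRecord

# Style 1: close explicitly when done
record = SummaryRecord(log_dir="./summary_dir")
record.record(step=1)
record.close()

# Style 2: a context manager closes automatically on exit
with SummaryRecord(log_dir="./summary_dir") as record:
    record.record(step=1)
```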
mindspore/version.py CHANGED
@@ -1 +1 @@
- __version__ = '2.2.10'
+ __version__ = '2.2.14'
{mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mindspore
- Version: 2.2.10
+ Version: 2.2.14
  Summary: MindSpore is a new open source deep learning training/inference framework that could be used for mobile, edge and cloud scenarios.
  Home-page: https://www.mindspore.cn
  Download-URL: https://github.com/mindspore-ai/mindspore/tags
@@ -318,10 +318,11 @@ Project stable branches will be in one of the following states:

  | **Branch** | **Status** | **Initial Release Date** | **Next Phase** | **EOL Date**|
  |------------|--------------|--------------------------|----------------------------------------|-------------|
+ | **r2.2** | Maintained | 2023-10-18 | Unmaintained <br> 2024-10-18 estimated | |
  | **r2.1** | Maintained | 2023-07-29 | Unmaintained <br> 2024-07-29 estimated | |
  | **r2.0** | Maintained | 2023-06-15 | Unmaintained <br> 2024-06-15 estimated | |
  | **r1.10** | Maintained | 2023-02-02 | Unmaintained <br> 2024-02-02 estimated | |
- | **r1.9** | Maintained | 2022-10-26 | Unmaintained <br> 2023-10-26 estimated | |
+ | **r1.9** | End Of Life | 2022-10-26 | | 2023-10-26 |
  | **r1.8** | End Of Life | 2022-07-29 | | 2023-07-29 |
  | **r1.7** | End Of Life | 2022-04-29 | | 2023-04-29 |
  | **r1.6** | End Of Life | 2022-01-29 | | 2023-01-29 |