mindspore-2.2.0-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.2.11-cp38-cp38-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (170)
  1. mindspore/.commit_id +1 -1
  2. mindspore/_akg/akg/composite/build_module.py +104 -20
  3. mindspore/_akg/akg/utils/ascend_profilier/cann_file_parser.py +76 -0
  4. mindspore/_akg/akg/utils/ascend_profilier/file_manager.py +56 -0
  5. mindspore/_akg/akg/utils/ascend_profilier/op_summary_bean.py +23 -0
  6. mindspore/_akg/akg/utils/ascend_profilier/op_summary_headers.py +8 -0
  7. mindspore/_akg/akg/utils/ascend_profilier/op_summary_parser.py +42 -0
  8. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +65 -0
  9. mindspore/_akg/akg/utils/composite_op_helper.py +7 -2
  10. mindspore/_akg/akg/utils/dump_ascend_meta.py +22 -3
  11. mindspore/_akg/akg/utils/kernel_exec.py +41 -15
  12. mindspore/_akg/akg/utils/tbe_codegen_utils.py +27 -6
  13. mindspore/_akg/akg/utils/util.py +56 -1
  14. mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
  15. mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
  16. mindspore/_checkparam.py +3 -3
  17. mindspore/_extends/graph_kernel/model/graph_split.py +84 -76
  18. mindspore/_extends/graph_kernel/splitter.py +3 -2
  19. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +83 -66
  20. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -4
  21. mindspore/_extends/parallel_compile/akg_compiler/util.py +10 -7
  22. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +2 -1
  23. mindspore/_extends/parse/__init__.py +3 -2
  24. mindspore/_extends/parse/parser.py +6 -1
  25. mindspore/_extends/parse/standard_method.py +14 -11
  26. mindspore/_extends/remote/kernel_build_server.py +2 -1
  27. mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
  28. mindspore/bin/cache_admin +0 -0
  29. mindspore/bin/cache_server +0 -0
  30. mindspore/common/_utils.py +16 -0
  31. mindspore/common/api.py +1 -1
  32. mindspore/common/auto_dynamic_shape.py +81 -85
  33. mindspore/common/dump.py +1 -1
  34. mindspore/common/tensor.py +3 -20
  35. mindspore/config/op_info.config +1 -1
  36. mindspore/context.py +11 -4
  37. mindspore/dataset/engine/cache_client.py +8 -5
  38. mindspore/dataset/engine/datasets_standard_format.py +5 -0
  39. mindspore/dataset/vision/transforms.py +21 -21
  40. mindspore/experimental/optim/adam.py +1 -1
  41. mindspore/gen_ops.py +1 -1
  42. mindspore/include/api/model.h +17 -0
  43. mindspore/include/api/status.h +8 -3
  44. mindspore/lib/libdnnl.so.2 +0 -0
  45. mindspore/lib/libmindspore.so +0 -0
  46. mindspore/lib/libmindspore_backend.so +0 -0
  47. mindspore/lib/libmindspore_common.so +0 -0
  48. mindspore/lib/libmindspore_core.so +0 -0
  49. mindspore/lib/libmindspore_glog.so.0 +0 -0
  50. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  51. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  52. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  53. mindspore/lib/libmindspore_shared_lib.so +0 -0
  54. mindspore/lib/libnnacl.so +0 -0
  55. mindspore/lib/libopencv_core.so.4.5 +0 -0
  56. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  57. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  58. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310/aic-ascend310-ops-info.json +123 -0
  59. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +123 -0
  60. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +158 -0
  61. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +37 -0
  62. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/add_dsl.py +46 -0
  63. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/add_tik.py +51 -0
  64. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +241 -0
  65. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/matmul_tik.py +212 -0
  66. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/add_dsl.py +46 -0
  67. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/add_tik.py +51 -0
  68. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +241 -0
  69. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/matmul_tik.py +212 -0
  70. mindspore/lib/plugin/ascend/custom_aicore_ops/op_proto/libop_proto.so +0 -0
  71. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  72. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  73. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +78 -80
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  75. mindspore/lib/plugin/ascend/libakg.so +0 -0
  76. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  77. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  78. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  79. mindspore/lib/plugin/cpu/libakg.so +0 -0
  80. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  81. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  82. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  83. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  84. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  85. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  86. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  87. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  88. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  89. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  90. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  91. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  92. mindspore/nn/cell.py +0 -3
  93. mindspore/nn/layer/activation.py +4 -5
  94. mindspore/nn/layer/conv.py +39 -23
  95. mindspore/nn/layer/flash_attention.py +54 -129
  96. mindspore/nn/layer/math.py +3 -7
  97. mindspore/nn/layer/rnn_cells.py +5 -5
  98. mindspore/nn/wrap/__init__.py +4 -2
  99. mindspore/nn/wrap/cell_wrapper.py +12 -3
  100. mindspore/numpy/utils_const.py +5 -5
  101. mindspore/ops/_grad_experimental/grad_array_ops.py +1 -1
  102. mindspore/ops/_grad_experimental/grad_implementations.py +2 -2
  103. mindspore/ops/_grad_experimental/grad_math_ops.py +19 -18
  104. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  105. mindspore/ops/_op_impl/aicpu/add.py +3 -3
  106. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +21 -2
  107. mindspore/ops/_utils/utils.py +2 -0
  108. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  109. mindspore/ops/composite/multitype_ops/getitem_impl.py +2 -2
  110. mindspore/ops/function/array_func.py +10 -7
  111. mindspore/ops/function/grad/grad_func.py +0 -1
  112. mindspore/ops/function/nn_func.py +98 -9
  113. mindspore/ops/function/random_func.py +2 -1
  114. mindspore/ops/op_info_register.py +24 -21
  115. mindspore/ops/operations/__init__.py +6 -2
  116. mindspore/ops/operations/_grad_ops.py +25 -6
  117. mindspore/ops/operations/_inner_ops.py +155 -23
  118. mindspore/ops/operations/array_ops.py +9 -7
  119. mindspore/ops/operations/comm_ops.py +2 -2
  120. mindspore/ops/operations/custom_ops.py +85 -68
  121. mindspore/ops/operations/inner_ops.py +26 -3
  122. mindspore/ops/operations/math_ops.py +7 -6
  123. mindspore/ops/operations/nn_ops.py +193 -49
  124. mindspore/parallel/_parallel_serialization.py +10 -3
  125. mindspore/parallel/_tensor.py +4 -1
  126. mindspore/parallel/checkpoint_transform.py +13 -2
  127. mindspore/parallel/shard.py +17 -10
  128. mindspore/profiler/common/util.py +1 -0
  129. mindspore/profiler/parser/ascend_hccl_generator.py +232 -0
  130. mindspore/profiler/parser/ascend_msprof_exporter.py +86 -43
  131. mindspore/profiler/parser/ascend_msprof_generator.py +196 -9
  132. mindspore/profiler/parser/ascend_op_generator.py +1 -1
  133. mindspore/profiler/parser/ascend_timeline_generator.py +6 -182
  134. mindspore/profiler/parser/base_timeline_generator.py +1 -1
  135. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -2
  136. mindspore/profiler/parser/framework_parser.py +1 -1
  137. mindspore/profiler/parser/profiler_info.py +19 -0
  138. mindspore/profiler/profiling.py +46 -24
  139. mindspore/rewrite/api/pattern_engine.py +1 -1
  140. mindspore/rewrite/parsers/for_parser.py +7 -7
  141. mindspore/rewrite/parsers/module_parser.py +4 -4
  142. mindspore/rewrite/symbol_tree.py +1 -4
  143. mindspore/run_check/_check_version.py +5 -3
  144. mindspore/safeguard/rewrite_obfuscation.py +52 -28
  145. mindspore/scipy/ops.py +55 -5
  146. mindspore/scipy/optimize/__init__.py +3 -2
  147. mindspore/scipy/optimize/linear_sum_assignment.py +38 -33
  148. mindspore/train/callback/_summary_collector.py +1 -1
  149. mindspore/train/dataset_helper.py +1 -0
  150. mindspore/train/model.py +2 -2
  151. mindspore/train/serialization.py +97 -11
  152. mindspore/train/summary/_summary_adapter.py +1 -1
  153. mindspore/train/summary/summary_record.py +23 -7
  154. mindspore/version.py +1 -1
  155. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/METADATA +3 -2
  156. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/RECORD +160 -151
  157. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +0 -406
  158. mindspore/ops/_op_impl/_custom_op/flash_attention/constants.py +0 -41
  159. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +0 -467
  160. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +0 -563
  161. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +0 -193
  162. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +0 -435
  163. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
  164. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +0 -45
  165. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +0 -67
  166. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +0 -62
  167. /mindspore/{ops/_op_impl/_custom_op/flash_attention → _akg/akg/utils/ascend_profilier}/__init__.py +0 -0
  168. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/WHEEL +0 -0
  169. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/entry_points.txt +0 -0
  170. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/top_level.txt +0 -0
mindspore/common/auto_dynamic_shape.py CHANGED
@@ -27,6 +27,7 @@ SHAPE_RANK_ANY = -2
 
 auto_dynamic_shepe_dict = {}
 
+
 class _AutoDynamicShapeManager:
     """
     Represents a function to manage auto identify dynamic shape.
@@ -101,14 +102,14 @@ class _AutoDynamicShapeManager:
 
     def get_compile_args_shape_without_sink(self, input_args, res_shape):
         """get compile args shape with out sink mode"""
-        for input in input_args:
-            if isinstance(input, Tensor):
-                res_shape.append(input.shape)
-            elif isinstance(input, (int, float)):
+        for arg in input_args:
+            if isinstance(arg, Tensor):
+                res_shape.append(arg.shape)
+            elif isinstance(arg, (int, float)):
                 res_shape.append([])
-            elif isinstance(input, (tuple, list)):
+            elif isinstance(arg, (tuple, list)):
                 tmp_shape = []
-                self.get_compile_args_shape_without_sink(input, tmp_shape)
+                self.get_compile_args_shape_without_sink(arg, tmp_shape)
                 res_shape.append(tmp_shape)
 
 
@@ -179,14 +180,6 @@ class _AutoDynamicShapeManager:
         self.generalize_shape_cache.append(compile_args)
 
 
-    def check_tuple_of_scalar(self, input):
-        """check tuple of scalar"""
-        for elem in input:
-            if not isinstance(elem, int):
-                return False
-        return True
-
-
     def _compare_input_args_and_cache_args(self, input_args, cache_args):
         """compare input args and cache args"""
         for (input, cache) in zip(input_args, cache_args):
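The deleted `check_tuple_of_scalar` helper had no remaining callers. For reference only (this sketch is not part of the diff), the same predicate collapses to a one-liner:

def check_tuple_of_scalar(seq):
    # True when every element of seq is an int
    return all(isinstance(elem, int) for elem in seq)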
@@ -233,23 +226,25 @@ class _AutoIdentifyDynamicShape:
 
     def _check_input_tensor_type(self, args_list, cache_list):
         """check input args type"""
-        for (input, cache) in zip(args_list, cache_list):
-            if isinstance(input, Tensor) and isinstance(cache, Tensor):
-                if input.dtype != cache.dtype:
-                    logger.debug((f'input tensor type = {input.dtype}, cache tensor type = {cache.dtype}, '
+        for (arg, cache) in zip(args_list, cache_list):
+            if isinstance(arg, Tensor) and isinstance(cache, Tensor):
+                if arg.dtype != cache.dtype:
+                    logger.debug((f'input tensor type = {arg.dtype}, cache tensor type = {cache.dtype}, '
                                   f'tensor types are not same.'))
                     return False
-            elif isinstance(input, (tuple, list)) and isinstance(cache, (tuple, list)):
-                res = self._check_input_tensor_type(input, cache)
+            elif isinstance(arg, (tuple, list)) and isinstance(cache, (tuple, list)):
+                res = self._check_input_tensor_type(arg, cache)
                 if not res:
                     return False
-            elif (isinstance(input, int) and isinstance(cache, int)) or \
-                    (isinstance(input, float) and isinstance(cache, float)):
-                if input != cache:
+            elif (isinstance(arg, int) and isinstance(cache, int)) or \
+                    (isinstance(arg, float) and isinstance(cache, float)):
+                if arg != cache:
                     return False
-            elif (isinstance(input, Tensor) and not isinstance(cache, Tensor)) or \
-                    (isinstance(input, (int, float)) and not isinstance(cache, (int, float))) or \
-                    (isinstance(input, (tuple, list))) and not isinstance(cache, (tuple, list)):
+            elif isinstance(arg, Tensor) and not isinstance(cache, Tensor):
+                return False
+            elif isinstance(arg, (int, float)) and not isinstance(cache, (int, float)):
+                return False
+            elif isinstance(arg, (tuple, list)) and not isinstance(cache, (tuple, list)):
                 return False
         return True
 
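Splitting the final compound `elif` into three separate branches is behavior-preserving but removes a reading trap: since `and` binds tighter than `or`, the old condition parsed as `A or B or (C and not D)`, not `(A or B or C) and not D`. A quick self-contained check of that precedence:

A, B, C, D = True, False, True, True
print(A or B or C and not D)    # True: parsed as A or B or (C and not D)
print((A or B or C) and not D)  # False: the grouping a reader might assume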
@@ -292,21 +287,21 @@ class _AutoIdentifyDynamicShape:
 
 
     @staticmethod
-    def _do_generalize_in_sink(input, cache, input_index, cache_index, cache_type):
+    def _do_generalize_in_sink(arg, cache, input_index, cache_index, cache_type):
         """do generalize in sink, input rank must be 2"""
-        if not input:
+        if not arg:
             raise ValueError("In sink mode, cell input can not be scalar.")
 
-        if input == cache:
+        if arg == cache:
             return cache
 
         shape_value = []
-        if len(input) != len(cache):
+        if len(arg) != len(cache):
             shape_value.append(SHAPE_RANK_ANY)
         else:
-            for _ in input:
+            for _ in arg:
                 shape_value.append(SHAPE_DIM_ANY)
-        logger.info((f'In the {cache_type} cache[{cache_index}], the {input_index}th input tensor shape is {input},'
+        logger.info((f'In the {cache_type} cache[{cache_index}], the {input_index}th input tensor shape is {arg},'
                      f'cache shape is {cache}, not equal, need generalize to {shape_value}.'))
         return shape_value
 
@@ -318,6 +313,16 @@ class _AutoIdentifyDynamicShape:
                                               is_sink_mode, aux)
 
 
+    def _generate_with_generalize_shape(self, generalize_shape_args, is_sink_mode, args_list):
+        """generate with generalize_shape """
+        new_generalize_shape, can_generalize = self._do_generalize_shape("generalize", generalize_shape_args,
+                                                                         is_sink_mode)
+        if not can_generalize:
+            return args_list
+
+        res_shape = self.auto_dynamic_shape_manager.get_compile_args_shape(new_generalize_shape, is_sink_mode)
+        logger.info((f'generalize with generalize shape cache, compile args shape = {res_shape}'))
+        return new_generalize_shape
 
     def auto_dynamic_generate_compile_args(self, args_list, is_sink_mode):
         """generate compile args in auto dynamic shape"""
@@ -331,15 +336,7 @@
         logger.debug((f'input args list shape = {res_shape}.'))
 
         # step1: find cache in real_shape_cache.
-        real_cache_number = self.auto_dynamic_shape_manager.get_real_shape_cache_number()
-        if real_cache_number < 2:
-            logger.info((f'real shape cache cap is {real_cache_number}, smaller than 2, '
-                         f'compile args shape={res_shape}.'))
-            return args_list
-
-        is_real_shape_exist = self.auto_dynamic_shape_manager.find_compile_args_in_shape_cache(args_list, "real")
-        if is_real_shape_exist:
-            logger.debug((f'find compile args in real shape cache, compile args shape={res_shape}'))
+        if self._check_real_shape_cache(res_shape, args_list):
             return args_list
 
         # step2: if can not find cache in real_shape_cache, then generate it
@@ -358,14 +355,7 @@
 
         # step 4: if can not find cache in generalize_shape_cache, then generate it again
         if not is_generalize_shape_exist:
-            new_generalize_shape, can_generalize = self._do_generalize_shape("generalize", generalize_shape_args,
-                                                                             is_sink_mode)
-            if not can_generalize:
-                return args_list
-
-            res_shape = self.auto_dynamic_shape_manager.get_compile_args_shape(new_generalize_shape, is_sink_mode)
-            logger.info((f'generalize with generalize shape cache, compile args shape = {res_shape}'))
-            return new_generalize_shape
+            return self._generate_with_generalize_shape(generalize_shape_args, is_sink_mode, args_list)
 
         res_shape = self.auto_dynamic_shape_manager.get_compile_args_shape(generalize_shape_args, is_sink_mode)
         logger.debug((f'find compile args in generalize shape cache, compile args shape={res_shape}'))
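The two hunks above extract step 4 into `_generate_with_generalize_shape` without changing the lookup flow: exact shapes are served from the real-shape cache, and misses are generalized (dimensions replaced by a wildcard) and served from the generalize cache. A toy sketch of that flow, assuming the wildcard value SHAPE_DIM_ANY = -1 (only SHAPE_RANK_ANY = -2 is visible in this diff):

SHAPE_DIM_ANY = -1  # assumed wildcard for "any dimension size"

def lookup_compile_shape(shape, real_cache, generalize_cache):
    """Serve exact shapes from real_cache; otherwise generalize and cache."""
    if shape in real_cache:
        return shape
    generalized = tuple(SHAPE_DIM_ANY for _ in shape)
    generalize_cache.add(generalized)
    return generalized

real_cache, generalize_cache = {(2, 3)}, set()
print(lookup_compile_shape((2, 3), real_cache, generalize_cache))  # (2, 3)
print(lookup_compile_shape((4, 5), real_cache, generalize_cache))  # (-1, -1)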
@@ -396,54 +386,61 @@ class _AutoIdentifyDynamicShape:
 
     def _do_generalize_one_input_shape(self, input_args, cache_args, cache_type, index, is_sink_mode):
         """do generalize shape one input by cache"""
+        def generalize_tensor(arg, cache, i):
+            if self.auto_dynamic_shape_manager.is_tensor_equal(arg, cache):
+                return arg
+
+            shape_value = []
+            if len(arg.shape) != len(cache.shape):
+                shape_value.append(SHAPE_RANK_ANY)
+            else:
+                shape_value = [SHAPE_DIM_ANY for _ in range(len(arg.shape))]
+            shape_tuple = tuple(shape_value)
+            logger.info((f'In the {cache_type} cache[{index}], the {i}th input tensor shape is {arg.shape},'
+                         f'cache shape is {cache.shape}, not equal, need generalize to {shape_tuple}.'))
+            return Tensor(shape=shape_tuple, dtype=arg.dtype)
+
+        def generalize_sequence(arg, cache, i):
+            if is_sink_mode:
+                # when is_sink_mode=True, input must be the shape of Tensor.
+                res = self._do_generalize_in_sink(arg, cache, i, index, cache_type)
+                return res
+
+            res = self._do_generalize_one_input_shape(arg, cache, cache_type, index, is_sink_mode)
+            return res
+
         generalize_one_shape = []
-        for i, (input, cache) in enumerate(zip(input_args, cache_args)):
-            if isinstance(input, Parameter) and isinstance(cache, Parameter):
-                if self.auto_dynamic_shape_manager.is_tensor_equal(input, cache):
-                    generalize_one_shape.append(input)
+        for i, (arg, cache) in enumerate(zip(input_args, cache_args)):
+            if isinstance(arg, Parameter) and isinstance(cache, Parameter):
+                if self.auto_dynamic_shape_manager.is_tensor_equal(arg, cache):
+                    generalize_one_shape.append(arg)
                     continue
-                else:
-                    logger.info("In auto dynamic shape mode, parameter must be equal, it can not be generalize.")
-                    return input_args, False
 
-            if isinstance(input, Tensor) and isinstance(cache, Tensor):
-                if self.auto_dynamic_shape_manager.is_tensor_equal(input, cache):
-                    generalize_one_shape.append(input)
-                else:
-                    shape_value = []
-                    if len(input.shape) != len(cache.shape):
-                        shape_value.append(SHAPE_RANK_ANY)
-                    else:
-                        for _ in range(len(input.shape)):
-                            shape_value.append(SHAPE_DIM_ANY)
-                    shape_tuple = tuple(shape_value)
-                    generalize_one_shape.append(Tensor(shape=shape_tuple, dtype=input.dtype))
-                    logger.info((f'In the {cache_type} cache[{index}], the {i}th input tensor shape is {input.shape},'
-                                 f'cache shape is {cache.shape}, not equal, need generalize to {shape_tuple}.'))
+                logger.info("In auto dynamic shape mode, parameter must be equal, it can not be generalize.")
+                return input_args, False
 
-            elif isinstance(input, (tuple, list)) and isinstance(cache, (tuple, list)):
-                if is_sink_mode:
-                    # when is_sink_mode=True, input must be the shape of Tensor.
-                    res = self._do_generalize_in_sink(input, cache, i, index, cache_type)
-                    generalize_one_shape.append(res)
-                else:
-                    res = self._do_generalize_one_input_shape(input, cache, cache_type, index, is_sink_mode)
-                    generalize_one_shape.append(res)
-            elif isinstance(input, int) and isinstance(cache, int):
+            if isinstance(arg, Tensor) and isinstance(cache, Tensor):
+                res = generalize_tensor(arg, cache, i)
+                generalize_one_shape.append(res)
+            elif isinstance(arg, (tuple, list)) and isinstance(cache, (tuple, list)):
+                res = generalize_sequence(arg, cache, i)
+                generalize_one_shape.append(res)
+            elif isinstance(arg, int) and isinstance(cache, int):
                 # when is_sink_mode=False, the input must may be scalar, or the value of list/tuple.
                 # is_sink_mode can not be True
-                if input == cache:
-                    generalize_one_shape.append(input)
+                if arg == cache:
+                    generalize_one_shape.append(arg)
                 else:
                     logger.info("In auto dynamic shape mode, scalar/tuple/list must be equal, it can not be " \
                                 "generalize.")
                     return input_args, False
-            elif input is None and cache is None:
-                generalize_one_shape.append(input)
+            elif arg is None and cache is None:
+                generalize_one_shape.append(arg)
 
         return generalize_one_shape, True
 
 
+
     def _do_generalize_shape(self, cache_type, input_args, is_sink_mode):
         """do generalize shape by cache"""
         shape_cache = self.auto_dynamic_shape_manager.get_cache_by_type(cache_type)
@@ -464,8 +461,7 @@
 
         keys = list(unknown_shape_dict.keys())
         keys.sort(key=lambda x: (x[0], x[1]))
-        index = keys[0]
-        return unknown_shape_dict.get(index), True
+        return unknown_shape_dict.get(keys[0]), True
 
 _auto_dynamic_shape = _AutoIdentifyDynamicShape()
 
mindspore/common/dump.py CHANGED
@@ -57,7 +57,7 @@ def set_dump(target, enabled=True):
         ``Ascend``
 
     Examples:
-        .. node::
+        .. note::
            Please set environment variable `MINDSPORE_DUMP_CONFIG` to the dump config file and set `dump_mode` field
            in dump config file to 2 before running this example.
            See `dump document <https://www.mindspore.cn/tutorials/experts/en/r2.2/debug/dump.html>`_ for details.
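The `.. node::` → `.. note::` change fixes an invalid reST directive in the `set_dump` docstring. For context, a minimal sketch of the API being documented, assuming `MINDSPORE_DUMP_CONFIG` is set as the note requires:

import mindspore as ms
from mindspore import nn

# Assumes MINDSPORE_DUMP_CONFIG points at a dump config file with
# dump_mode set to 2, as the fixed note describes.
net = nn.Dense(3, 4)
ms.set_dump(net, enabled=True)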
mindspore/common/tensor.py CHANGED
@@ -2186,7 +2186,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [0.7] [3]
         """
         if self.shape == ():
-            return (Tensor(0), self)
+            return (self, Tensor(0))
         self._init_check()
         return tensor_operator_registry.get('argmax_with_value')(self, axis, keep_dims)
 
@@ -2234,7 +2234,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [0.0] [0]
         """
         if self.shape == ():
-            return (Tensor(0), self)
+            return (self, Tensor(0))
         self._init_check()
         return tensor_operator_registry.get('argmin_with_value')(self, axis, keep_dims)
 
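Both fixes above correct the early-return for 0-d tensors: `argmax_with_value`/`argmin_with_value` document a `(value, index)` return order (see the `[0.7] [3]` example), but the scalar shortcut returned `(index, value)`. A sketch of the corrected behavior:

import mindspore as ms
from mindspore import Tensor

x = Tensor(0.7, ms.float32)       # 0-d tensor
value, index = x.argmax_with_value()
print(value, index)               # 0.7 0 -- value first, as documented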
@@ -2683,7 +2683,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def _init_check(self):
         if self.has_init:
             self.init_data()
-        return self
 
     def init_data(self, slice_index=None, shape=None, opt_shard_group=None):
         """
@@ -4631,23 +4630,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
     def imag(self):
         r"""
-        Returns a new tensor containing imaginary value of the input tensor.
-        If input tensor is real, it will return zeros.
-
-        Returns:
-            Tensor, the shape is the same as the input tensor.
-
-        Supported Platforms:
-            ``GPU`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore
-            >>> from mindspore import Tensor
-            >>> x = Tensor(np.asarray(np.complex(1.3 + 0.4j)), mindspore.complex64)
-            >>> output = x.imag()
-            >>> print(output)
-            0.4
+        For details, please refer to :func:`mindspore.ops.imag`.
         """
         self._init_check()
         return tensor_operator_registry.get('imag')(self)
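The removed example relied on `np.complex`, an alias deleted in NumPy 1.24, which is presumably why the docstring now just defers to :func:`mindspore.ops.imag`. An equivalent example without the deprecated alias (a sketch):

import mindspore as ms
from mindspore import ops, Tensor

x = Tensor([1.3 + 0.4j], ms.complex64)
print(ops.imag(x))  # [0.4]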
mindspore/config/op_info.config CHANGED
@@ -9,7 +9,7 @@
  {"op_name": "Less", "inputs": [{"index": 0, "name": "x1", "paramType": "required"}, {"index": 1, "name": "x2", "paramType": "required"}], "outputs": [{"index": 0, "name": "y", "paramType": "required"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["int8", "DefaultFormat"], ["int8", "DefaultFormat"], ["bool", "DefaultFormat"]], [["int16", "DefaultFormat"], ["int16", "DefaultFormat"], ["bool", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["bool", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["bool", "DefaultFormat"]], [["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"], ["bool", "DefaultFormat"]], [["uint16", "DefaultFormat"], ["uint16", "DefaultFormat"], ["bool", "DefaultFormat"]], [["uint32", "DefaultFormat"], ["uint32", "DefaultFormat"], ["bool", "DefaultFormat"]], [["uint64", "DefaultFormat"], ["uint64", "DefaultFormat"], ["bool", "DefaultFormat"]], [["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["bool", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["bool", "DefaultFormat"]], [["float64", "DefaultFormat"], ["float64", "DefaultFormat"], ["bool", "DefaultFormat"]]], "imply_type": "AiCPU"}
  {"op_name": "Lstsq", "inputs": [{"index": 0, "name": "matrix", "paramType": "required"}, {"index": 1, "name": "rhs", "paramType": "required"}], "outputs": [{"index": 0, "name": "y", "paramType": "required"}], "attr": [{"name": "l2_regularizer", "type": "float", "value": "0.0"}, {"name": "fast", "type": "bool", "value": "True"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float64", "DefaultFormat"], ["float64", "DefaultFormat"], ["float64", "DefaultFormat"]]], "imply_type": "AiCPU"}
  {"op_name": "LeftShift", "inputs": [{"index": 0, "name": "x1", "paramType": "required"}, {"index": 1, "name": "x2", "paramType": "required"}], "outputs": [{"index": 0, "name": "y", "paramType": "required"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["int8", "DefaultFormat"], ["int8", "DefaultFormat"], ["int8", "DefaultFormat"]], [["int16", "DefaultFormat"], ["int16", "DefaultFormat"], ["int16", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"]], [["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["uint16", "DefaultFormat"], ["uint16", "DefaultFormat"], ["uint16", "DefaultFormat"]], [["uint32", "DefaultFormat"], ["uint32", "DefaultFormat"], ["uint32", "DefaultFormat"]], [["uint64", "DefaultFormat"], ["uint64", "DefaultFormat"], ["uint64", "DefaultFormat"]]], "imply_type": "AiCPU"}
- {"op_name": "Add", "inputs": [{"index": 0, "name": "x", "paramType": "required"}, {"index": 1, "name": "y", "paramType": "required"}], "outputs": [{"index": 0, "name": "output", "paramType": "required"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float64", "DefaultFormat"], ["float64", "DefaultFormat"], ["float64", "DefaultFormat"]], [["int8", "DefaultFormat"], ["int8", "DefaultFormat"], ["int8", "DefaultFormat"]], [["int16", "DefaultFormat"], ["int16", "DefaultFormat"], ["int16", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"]], [["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["uint16", "DefaultFormat"], ["int16", "DefaultFormat"], ["int16", "DefaultFormat"]], [["uint32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["uint64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"]], [["complex64", "DefaultFormat"], ["complex64", "DefaultFormat"], ["complex64", "DefaultFormat"]], [["complex128", "DefaultFormat"], ["complex128", "DefaultFormat"], ["complex128", "DefaultFormat"]]], "imply_type": "AiCPU"}
+ {"op_name": "Add", "inputs": [{"index": 0, "name": "x", "paramType": "required"}, {"index": 1, "name": "y", "paramType": "required"}], "outputs": [{"index": 0, "name": "output", "paramType": "required"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float64", "DefaultFormat"], ["float64", "DefaultFormat"], ["float64", "DefaultFormat"]], [["int8", "DefaultFormat"], ["int8", "DefaultFormat"], ["int8", "DefaultFormat"]], [["int16", "DefaultFormat"], ["int16", "DefaultFormat"], ["int16", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"]], [["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["uint16", "DefaultFormat"], ["uint16", "DefaultFormat"], ["uint16", "DefaultFormat"]], [["uint32", "DefaultFormat"], ["uint32", "DefaultFormat"], ["uint32", "DefaultFormat"]], [["uint64", "DefaultFormat"], ["uint64", "DefaultFormat"], ["uint64", "DefaultFormat"]], [["complex64", "DefaultFormat"], ["complex64", "DefaultFormat"], ["complex64", "DefaultFormat"]], [["complex128", "DefaultFormat"], ["complex128", "DefaultFormat"], ["complex128", "DefaultFormat"]]], "imply_type": "AiCPU"}
  {"op_name": "SparseMatrixTranspose", "inputs": [{"index": 0, "name": "x_dense_shape", "paramType": "required"}, {"index": 1, "name": "x_batch_pointers", "paramType": "required"}, {"index": 2, "name": "x_row_pointers", "paramType": "required"}, {"index": 3, "name": "x_col_indices", "paramType": "required"}, {"index": 4, "name": "x_values", "paramType": "required"}], "outputs": [{"index": 0, "name": "y_dense_shape", "paramType": "required"}, {"index": 1, "name": "y_batch_pointers", "paramType": "required"}, {"index": 2, "name": "y_row_pointers", "paramType": "required"}, {"index": 3, "name": "y_col_indices", "paramType": "required"}, {"index": 4, "name": "y_values", "paramType": "required"}], "attr": [{"name": "conjugate", "type": "bool"}], "fusion_type": "OPAQUE", "dtype_format": [[["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int8", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int8", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint8", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int16", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int16", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint16", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint16", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int64", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint64", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float16", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float16", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", 
"DefaultFormat"], ["int32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float64", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["complex64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["complex64", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["complex128", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["complex128", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int8", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int8", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint8", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int16", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int16", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint16", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint16", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint32", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint64", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float16", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float16", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], 
["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float32", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float64", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["complex64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["complex64", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["complex128", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["complex128", "DefaultFormat"]]], "imply_type": "AiCPU"}
  {"op_name": "SparseMatrixNNZ", "inputs": [{"index": 0, "name": "x_dense_shape", "paramType": "required"}, {"index": 1, "name": "x_batch_pointers", "paramType": "required"}, {"index": 2, "name": "x_row_pointers", "paramType": "required"}, {"index": 3, "name": "x_col_indices", "paramType": "required"}, {"index": 4, "name": "x_values", "paramType": "required"}], "outputs": [{"index": 0, "name": "y", "paramType": "required"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int8", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint8", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int16", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["uint16", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int64", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float16", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["float64", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["bool", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["complex64", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["complex128", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int8", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint8", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int16", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint16", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int32", 
"DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float16", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float64", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["bool", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["complex64", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["complex128", "DefaultFormat"], ["int32", "DefaultFormat"]]], "imply_type": "AiCPU"}
  {"op_name": "SparseDenseCwiseMul", "inputs": [{"index": 0, "name": "x1_indices", "paramType": "required"}, {"index": 1, "name": "x1_values", "paramType": "required"}, {"index": 2, "name": "x1_sparse", "paramType": "required"}, {"index": 3, "name": "x2", "paramType": "required"}], "outputs": [{"index": 0, "name": "y", "paramType": "required"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["int64", "DefaultFormat"], ["int8", "DefaultFormat"], ["int64", "DefaultFormat"], ["int8", "DefaultFormat"], ["int8", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int16", "DefaultFormat"], ["int64", "DefaultFormat"], ["int16", "DefaultFormat"], ["int16", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"]], [["int64", "DefaultFormat"], ["uint8", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint8", "DefaultFormat"], ["uint8", "DefaultFormat"]], [["int64", "DefaultFormat"], ["uint16", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint16", "DefaultFormat"], ["uint16", "DefaultFormat"]], [["int64", "DefaultFormat"], ["uint32", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint32", "DefaultFormat"], ["uint32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["uint64", "DefaultFormat"], ["int64", "DefaultFormat"], ["uint64", "DefaultFormat"], ["uint64", "DefaultFormat"]], [["int64", "DefaultFormat"], ["float16", "DefaultFormat"], ["int64", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["int64", "DefaultFormat"], ["float32", "DefaultFormat"], ["int64", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int64", "DefaultFormat"], ["float64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float64", "DefaultFormat"], ["float64", "DefaultFormat"]], [["int64", "DefaultFormat"], ["complex64", "DefaultFormat"], ["int64", "DefaultFormat"], ["complex64", "DefaultFormat"], ["complex64", "DefaultFormat"]], [["int64", "DefaultFormat"], ["complex128", "DefaultFormat"], ["int64", "DefaultFormat"], ["complex128", "DefaultFormat"], ["complex128", "DefaultFormat"]]], "imply_type": "AiCPU"}
mindspore/context.py CHANGED
@@ -268,6 +268,8 @@ class _Context:
           "allow_mix_precision_fp16" and "allow_mix_precision_bf16".
         - jit_compile (bool): ``False`` and ``True``.
         - atomic_clean_policy (int): ``0`` and ``1``. Default: ``1`` .
+        - exception_dump (str): Enable exception dump for Ascend operators. ``"0"`` , ``"1"`` and ``"2"``.
+          Default: ``"2"`` .
         - op_precision_mode (str): config file path.
         - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file.
           If its value is None or '', it does not take effect. Default None.
@@ -280,6 +282,7 @@ class _Context:
             'atomic_clean_policy': [0, 1],
             'matmul_allow_hf32': [True, False],
             'conv_allow_hf32': [True, False],
+            'exception_dump': ["0", "1", "2"],
             'op_precision_mode': (str,),
             'parallel_speed_up_json_path': (str, None)
         }
@@ -289,6 +292,7 @@ class _Context:
             'atomic_clean_policy': self._get_ascend_config_setter('atomic_clean_policy', str),
             'matmul_allow_hf32': self._get_ascend_config_setter('matmul_allow_hf32', lambda v: "1" if v else "0"),
             'conv_allow_hf32': self._get_ascend_config_setter('conv_allow_hf32', lambda v: "1" if v else "0"),
+            'exception_dump': self._get_ascend_config_setter('exception_dump'),
             'op_precision_mode': self._set_op_precision_mode,
             'parallel_speed_up_json_path': self._set_speedup_config_path
         }
@@ -629,6 +633,7 @@ class _Context:
                 "enable_task_opt": ms_ctx_param.enable_task_opt,
                 "enable_grad_comm_opt": ms_ctx_param.enable_grad_comm_opt,
                 "interleaved_matmul_comm": ms_ctx_param.interleaved_matmul_comm,
+                "enable_opt_shard_comm_opt": ms_ctx_param.enable_opt_shard_comm_opt,
                 "interleaved_layernorm_comm": ms_ctx_param.interleaved_layernorm_comm}
             with open(speedup_config_real_path, 'r') as f:
                 speedup_config = json.load(f)
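The new `enable_opt_shard_comm_opt` key is read from the parallel speed-up JSON file alongside the existing flags. A hedged sketch of wiring it up; the file name and full schema are assumptions, only the key name appears in this diff:

import json
import mindspore as ms

# Hypothetical file; the key mirrors the ms_ctx_param flag read above.
with open("parallel_speed_up.json", "w") as f:
    json.dump({"enable_opt_shard_comm_opt": True}, f)

ms.set_context(ascend_config={"parallel_speed_up_json_path": "parallel_speed_up.json"})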
@@ -1277,12 +1282,10 @@ def set_context(**kwargs):
           memory_optimize_level is set 'O1'.
         - OFF: Turn off the memory Offload function.
 
     ascend_config (dict): Set the parameters specific to Ascend hardware platform. It is not set by default.
-        Currently, configurations except `parallel_speed_up_json_path` and `precision_mode.force_fp32` are only
-        supported on Ascend910B hardware platform. The default value of `precision_mode`, `jit_compile` and
+        The default value of `precision_mode`, `jit_compile` and
         `atomic_clean_policy` are experimental parameters, may change in the future.
 
-        - precision_mode (str): Mixed precision mode setting, on Ascend910B hardware platform, the default
-          value of training network is based on the value of CANN, and the default value of inference network
+        - precision_mode (str): Mixed precision mode setting, and the default value of inference network
          is ``force_fp16`` . The value range is as follows:
 
         - force_fp16: When the operator supports both float16 and float32, select float16 directly.
@@ -1315,6 +1318,10 @@ def set_context(**kwargs):
         - conv_allow_hf32 (bool): Whether to convert FP32 to HF32 for Conv operators. Default value: ``True``.
           This is an experimental prototype that is subject to change and/or deletion.
           For detailed information, please refer to `Ascend community <https://www.hiascend.com/>`_ .
+        - exception_dump (str): Enable exception dump for Ascend operators, providing the input and output data for
+          failing Ascend operators. The value can be ``"0"`` , ``"1"`` and ``"2"``. For ``"0"`` , exception dump is
+          turned off; for ``"1"``, all inputs and outputs will be dumped for AICore and AICPU exception operators;
+          for ``"2"``, inputs will be dumped for AICore exception operators. Default: ``"2"`` .
         - op_precision_mode (str): Path to config file of op precision mode. For detailed information, please refer
           to `Ascend community <https://www.hiascend.com/>`_ .
         - parallel_speed_up_json_path(Union[str, None]): The path to the parallel speed up json file, configuration
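Putting the new option together, a minimal sketch of enabling exception dump via `set_context`, with values as documented above:

import mindspore as ms

# "0": off; "1": dump inputs and outputs of failing AICore/AICPU ops;
# "2" (default): dump inputs of failing AICore ops.
ms.set_context(ascend_config={"exception_dump": "1"})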
mindspore/dataset/engine/cache_client.py CHANGED
@@ -23,7 +23,7 @@ from ..core.validator_helpers import type_check, check_pos_int32, check_pos_uint
 
 
 class DatasetCache:
-    """
+    r"""
     A client to interface with tensor caching service.
 
     For details, please check `Tutorial <https://www.mindspore.cn/
@@ -46,7 +46,8 @@ class DatasetCache:
         >>>
         >>> # Create a cache instance with command line `cache_admin --start` and create a session with `cache_admin -g`
         >>> # After creating cache with a valid session, get session id with command `cache_admin --list_sessions`
-        >>> session_id = subprocess.getoutput('cache_admin --list_sessions | tail -1 | awk -F " " \'{{print $1;}}\'')
+        >>> command = "cache_admin --list_sessions | tail -1 | awk -F ' ' '{{print $1;}}'"
+        >>> session_id = subprocess.getoutput(command).split('\n')[-1]
         >>> some_cache = ds.DatasetCache(session_id=int(session_id), size=0)
         >>>
         >>> dataset_dir = "/path/to/image_folder_dataset_directory"
@@ -81,18 +82,20 @@ class DatasetCache:
         self.cache_client = CacheClient(session_id, size, spilling, hostname, port, num_connections, prefetch_size)
 
     def get_stat(self):
-        """
+        r"""
         Get the statistics from a cache. After data pipeline, three types of statistics can be obtained,
         including average number of cache hits (avg_cache_sz), number of caches in memory (num_mem_cached)
         and number of caches in disk (num_disk_cached).
 
         Examples:
             >>> import os
+            >>> import subprocess
             >>> import mindspore.dataset as ds
             >>>
             >>> # In example above, we created cache with a valid session id
-            >>> id = int(os.popen('cache_admin --list_sessions | tail -1 | awk -F " " \'{{print $1;}}\'').read())
-            >>> some_cache = ds.DatasetCache(session_id=id, size=0)
+            >>> command = "cache_admin --list_sessions | tail -1 | awk -F ' ' '{{print $1;}}'"
+            >>> id = subprocess.getoutput(command).split('\n')[-1]
+            >>> some_cache = ds.DatasetCache(session_id=int(id), size=0)
             >>>
             >>> # run the dataset pipeline to trigger cache
             >>> dataset = ds.ImageFolderDataset("/path/to/image_folder_dataset_directory", cache=some_cache)
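Both docstring examples switch from `os.popen` to `subprocess.getoutput` and take only the last output line before parsing, so banner or warning lines printed by `cache_admin` cannot corrupt the session id. Outside a docstring (where the doubled braces are presumably templating escapes), the same pattern is:

import subprocess

# Parse only the last line of output, mirroring the fixed examples.
command = "cache_admin --list_sessions | tail -1 | awk -F ' ' '{print $1;}'"
session_id = int(subprocess.getoutput(command).split('\n')[-1])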
mindspore/dataset/engine/datasets_standard_format.py CHANGED
@@ -232,6 +232,9 @@ class TFRecordDataset(SourceDataset, UnionBaseDataset):
 
     The columns of generated dataset depend on the source TFRecord files.
 
+    Note:
+        'TFRecordDataset' is not support on Windows platform yet.
+
     Args:
         dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a
             pattern of files. The list will be sorted in lexicographical order.
@@ -318,6 +321,8 @@ class TFRecordDataset(SourceDataset, UnionBaseDataset):
                  cache=None, compression_type=None):
         super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
                          num_shards=num_shards, shard_id=shard_id, cache=cache)
+        if platform.system().lower() == "windows":
+            raise NotImplementedError("TFRecordDataset is not supported for windows.")
         self.dataset_files = self._find_files(dataset_files)
         self.dataset_files.sort()
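With this guard, constructing a `TFRecordDataset` on Windows now fails fast in `__init__` rather than later in the pipeline (the motivation is an inference; the diff only shows the guard). A call-site sketch:

import platform

if platform.system().lower() != "windows":
    import mindspore.dataset as ds
    dataset = ds.TFRecordDataset("/path/to/data.tfrecord")
else:
    print("TFRecordDataset is not supported on Windows in this release.")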