mindspore 2.2.0-cp38-cp38-manylinux1_x86_64.whl → 2.2.11-cp38-cp38-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (170)
  1. mindspore/.commit_id +1 -1
  2. mindspore/_akg/akg/composite/build_module.py +104 -20
  3. mindspore/_akg/akg/utils/ascend_profilier/cann_file_parser.py +76 -0
  4. mindspore/_akg/akg/utils/ascend_profilier/file_manager.py +56 -0
  5. mindspore/_akg/akg/utils/ascend_profilier/op_summary_bean.py +23 -0
  6. mindspore/_akg/akg/utils/ascend_profilier/op_summary_headers.py +8 -0
  7. mindspore/_akg/akg/utils/ascend_profilier/op_summary_parser.py +42 -0
  8. mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +65 -0
  9. mindspore/_akg/akg/utils/composite_op_helper.py +7 -2
  10. mindspore/_akg/akg/utils/dump_ascend_meta.py +22 -3
  11. mindspore/_akg/akg/utils/kernel_exec.py +41 -15
  12. mindspore/_akg/akg/utils/tbe_codegen_utils.py +27 -6
  13. mindspore/_akg/akg/utils/util.py +56 -1
  14. mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
  15. mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
  16. mindspore/_checkparam.py +3 -3
  17. mindspore/_extends/graph_kernel/model/graph_split.py +84 -76
  18. mindspore/_extends/graph_kernel/splitter.py +3 -2
  19. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +83 -66
  20. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -4
  21. mindspore/_extends/parallel_compile/akg_compiler/util.py +10 -7
  22. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +2 -1
  23. mindspore/_extends/parse/__init__.py +3 -2
  24. mindspore/_extends/parse/parser.py +6 -1
  25. mindspore/_extends/parse/standard_method.py +14 -11
  26. mindspore/_extends/remote/kernel_build_server.py +2 -1
  27. mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
  28. mindspore/bin/cache_admin +0 -0
  29. mindspore/bin/cache_server +0 -0
  30. mindspore/common/_utils.py +16 -0
  31. mindspore/common/api.py +1 -1
  32. mindspore/common/auto_dynamic_shape.py +81 -85
  33. mindspore/common/dump.py +1 -1
  34. mindspore/common/tensor.py +3 -20
  35. mindspore/config/op_info.config +1 -1
  36. mindspore/context.py +11 -4
  37. mindspore/dataset/engine/cache_client.py +8 -5
  38. mindspore/dataset/engine/datasets_standard_format.py +5 -0
  39. mindspore/dataset/vision/transforms.py +21 -21
  40. mindspore/experimental/optim/adam.py +1 -1
  41. mindspore/gen_ops.py +1 -1
  42. mindspore/include/api/model.h +17 -0
  43. mindspore/include/api/status.h +8 -3
  44. mindspore/lib/libdnnl.so.2 +0 -0
  45. mindspore/lib/libmindspore.so +0 -0
  46. mindspore/lib/libmindspore_backend.so +0 -0
  47. mindspore/lib/libmindspore_common.so +0 -0
  48. mindspore/lib/libmindspore_core.so +0 -0
  49. mindspore/lib/libmindspore_glog.so.0 +0 -0
  50. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  51. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  52. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  53. mindspore/lib/libmindspore_shared_lib.so +0 -0
  54. mindspore/lib/libnnacl.so +0 -0
  55. mindspore/lib/libopencv_core.so.4.5 +0 -0
  56. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  57. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  58. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310/aic-ascend310-ops-info.json +123 -0
  59. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +123 -0
  60. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +158 -0
  61. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +37 -0
  62. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/add_dsl.py +46 -0
  63. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/add_tik.py +51 -0
  64. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +241 -0
  65. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/matmul_tik.py +212 -0
  66. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/add_dsl.py +46 -0
  67. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/add_tik.py +51 -0
  68. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +241 -0
  69. mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/matmul_tik.py +212 -0
  70. mindspore/lib/plugin/ascend/custom_aicore_ops/op_proto/libop_proto.so +0 -0
  71. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  72. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  73. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +78 -80
  74. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  75. mindspore/lib/plugin/ascend/libakg.so +0 -0
  76. mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
  77. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  78. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  79. mindspore/lib/plugin/cpu/libakg.so +0 -0
  80. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  81. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  82. mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
  83. mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
  84. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  85. mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
  86. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  87. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  88. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  89. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  90. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  91. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  92. mindspore/nn/cell.py +0 -3
  93. mindspore/nn/layer/activation.py +4 -5
  94. mindspore/nn/layer/conv.py +39 -23
  95. mindspore/nn/layer/flash_attention.py +54 -129
  96. mindspore/nn/layer/math.py +3 -7
  97. mindspore/nn/layer/rnn_cells.py +5 -5
  98. mindspore/nn/wrap/__init__.py +4 -2
  99. mindspore/nn/wrap/cell_wrapper.py +12 -3
  100. mindspore/numpy/utils_const.py +5 -5
  101. mindspore/ops/_grad_experimental/grad_array_ops.py +1 -1
  102. mindspore/ops/_grad_experimental/grad_implementations.py +2 -2
  103. mindspore/ops/_grad_experimental/grad_math_ops.py +19 -18
  104. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  105. mindspore/ops/_op_impl/aicpu/add.py +3 -3
  106. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +21 -2
  107. mindspore/ops/_utils/utils.py +2 -0
  108. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  109. mindspore/ops/composite/multitype_ops/getitem_impl.py +2 -2
  110. mindspore/ops/function/array_func.py +10 -7
  111. mindspore/ops/function/grad/grad_func.py +0 -1
  112. mindspore/ops/function/nn_func.py +98 -9
  113. mindspore/ops/function/random_func.py +2 -1
  114. mindspore/ops/op_info_register.py +24 -21
  115. mindspore/ops/operations/__init__.py +6 -2
  116. mindspore/ops/operations/_grad_ops.py +25 -6
  117. mindspore/ops/operations/_inner_ops.py +155 -23
  118. mindspore/ops/operations/array_ops.py +9 -7
  119. mindspore/ops/operations/comm_ops.py +2 -2
  120. mindspore/ops/operations/custom_ops.py +85 -68
  121. mindspore/ops/operations/inner_ops.py +26 -3
  122. mindspore/ops/operations/math_ops.py +7 -6
  123. mindspore/ops/operations/nn_ops.py +193 -49
  124. mindspore/parallel/_parallel_serialization.py +10 -3
  125. mindspore/parallel/_tensor.py +4 -1
  126. mindspore/parallel/checkpoint_transform.py +13 -2
  127. mindspore/parallel/shard.py +17 -10
  128. mindspore/profiler/common/util.py +1 -0
  129. mindspore/profiler/parser/ascend_hccl_generator.py +232 -0
  130. mindspore/profiler/parser/ascend_msprof_exporter.py +86 -43
  131. mindspore/profiler/parser/ascend_msprof_generator.py +196 -9
  132. mindspore/profiler/parser/ascend_op_generator.py +1 -1
  133. mindspore/profiler/parser/ascend_timeline_generator.py +6 -182
  134. mindspore/profiler/parser/base_timeline_generator.py +1 -1
  135. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -2
  136. mindspore/profiler/parser/framework_parser.py +1 -1
  137. mindspore/profiler/parser/profiler_info.py +19 -0
  138. mindspore/profiler/profiling.py +46 -24
  139. mindspore/rewrite/api/pattern_engine.py +1 -1
  140. mindspore/rewrite/parsers/for_parser.py +7 -7
  141. mindspore/rewrite/parsers/module_parser.py +4 -4
  142. mindspore/rewrite/symbol_tree.py +1 -4
  143. mindspore/run_check/_check_version.py +5 -3
  144. mindspore/safeguard/rewrite_obfuscation.py +52 -28
  145. mindspore/scipy/ops.py +55 -5
  146. mindspore/scipy/optimize/__init__.py +3 -2
  147. mindspore/scipy/optimize/linear_sum_assignment.py +38 -33
  148. mindspore/train/callback/_summary_collector.py +1 -1
  149. mindspore/train/dataset_helper.py +1 -0
  150. mindspore/train/model.py +2 -2
  151. mindspore/train/serialization.py +97 -11
  152. mindspore/train/summary/_summary_adapter.py +1 -1
  153. mindspore/train/summary/summary_record.py +23 -7
  154. mindspore/version.py +1 -1
  155. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/METADATA +3 -2
  156. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/RECORD +160 -151
  157. mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +0 -406
  158. mindspore/ops/_op_impl/_custom_op/flash_attention/constants.py +0 -41
  159. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +0 -467
  160. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +0 -563
  161. mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +0 -193
  162. mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +0 -435
  163. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
  164. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +0 -45
  165. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +0 -67
  166. mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +0 -62
  167. /mindspore/{ops/_op_impl/_custom_op/flash_attention → _akg/akg/utils/ascend_profilier}/__init__.py +0 -0
  168. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/WHEEL +0 -0
  169. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/entry_points.txt +0 -0
  170. {mindspore-2.2.0.dist-info → mindspore-2.2.11.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2021 Huawei Technologies Co., Ltd
+ # Copyright 2021-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -25,6 +25,20 @@ from tbe.common.buildcfg import build_config
  from tbe.dsl import auto_schedule
  from tbe.dsl import build as tbe_build
  import tbe.common.context.op_context as op_context
+ from impl.dynamic.add import _add_check_format, _infer_shape
+
+ SHAPE = "shape"
+ FORMAT = "format"
+ DATA_TYPE = "data_type"
+ NEW_SHAPE = "new_shape"
+ ORI_SHAPE = "ori_shape"
+ ORI_FORMAT = "ori_format"
+ DST_TYPE = "dst_type"
+ DST_ORI_SHAPE = "dst_ori_shape"
+ INPUT_DESC = "input_desc"
+ OUTPUT_DESC = "output_desc"
+ ENABLE_VECTOR_2X = "enable_vector_2x"
+ ENABLE_GROUP_INPLACE = "enable_group_inplace"


  def initialize(kernel_meta_parent_dir):
@@ -49,14 +63,14 @@ def update_config(config, op_names):
  change_type_dict = {"MatMul": (True, False),
  "BatchMatMul": (True, False)}
  config["bool_storage_as_1bit"] = True
- config["enable_group_inplace"] = False
- config["enable_vector_2x"] = True
+ config[ENABLE_GROUP_INPLACE] = False
+ config[ENABLE_VECTOR_2X] = True
  for op in op_names:
  if op in bool_storage_as_1bit_oplist:
  config["bool_storage_as_1bit"] = False
  enable_group_inplace, enable_vector_2x = change_type_dict.get(op, (False, True))
- config["enable_group_inplace"] = config["enable_group_inplace"] or enable_group_inplace
- config["enable_vector_2x"] = config["enable_vector_2x"] and enable_vector_2x
+ config[ENABLE_GROUP_INPLACE] = config[ENABLE_GROUP_INPLACE] or enable_group_inplace
+ config[ENABLE_VECTOR_2X] = config[ENABLE_VECTOR_2X] and enable_vector_2x


  def add_new_shape(names, shapes, new_shapes, inputs):
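Note: the hunk above only swaps string literals for the new module-level key constants; the flag logic itself is unchanged. As a rough standalone sketch (not the packaged code; combine_flags is a hypothetical name), the two flags combine across a fused op list like this:

    # Group-inplace turns on if any op requests it; vector_2x stays on only if
    # every op tolerates it (MatMul/BatchMatMul request the former and veto the latter).
    change_type_dict = {"MatMul": (True, False), "BatchMatMul": (True, False)}

    def combine_flags(op_names):
        enable_group_inplace, enable_vector_2x = False, True
        for op in op_names:
            group_inplace, vector_2x = change_type_dict.get(op, (False, True))
            enable_group_inplace = enable_group_inplace or group_inplace
            enable_vector_2x = enable_vector_2x and vector_2x
        return enable_group_inplace, enable_vector_2x

    print(combine_flags(["Exp", "MatMul"]))  # (True, False)
    print(combine_flags(["Exp", "Abs"]))     # (False, True)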
@@ -70,11 +84,11 @@ def add_new_shape(names, shapes, new_shapes, inputs):
  continue
  if name not in inputs:
  raise RuntimeError("Can not support reshape on output tensor {}".format(name))
- if "new_shape" not in inputs[name]:
- inputs[name]["new_shape"] = new_shapes[i]
- elif new_shapes[i] != inputs[name]["new_shape"]:
+ if NEW_SHAPE not in inputs[name]:
+ inputs[name][NEW_SHAPE] = new_shapes[i]
+ elif new_shapes[i] != inputs[name][NEW_SHAPE]:
  raise RuntimeError("Find different new_shape {} and {} for {}"
- .format(inputs[name]["new_shape"], new_shapes[i], name))
+ .format(inputs[name][NEW_SHAPE], new_shapes[i], name))


  class TransShape:
@@ -93,22 +107,21 @@ class TransShape:
  if v.get("value") is not None:
  continue
  names.append(k)
- shapes.append(v["shape"])
- ori_shapes.append(v["ori_shape"] if v.get("ori_shape") else None)
- formats.append(v["format"])
- ori_formats.append(v["ori_format"])
+ shapes.append(v[SHAPE])
+ ori_shapes.append(v[ORI_SHAPE] if v.get(ORI_SHAPE) else None)
+ formats.append(v[FORMAT])
+ ori_formats.append(v[ORI_FORMAT])
  if len(shapes) == 2 and len(shapes[0]) != len(shapes[1]):
- from impl.dynamic.add import _add_check_format, _infer_shape
- format_pattern = _add_check_format({"shape": shapes[0], "format": formats[0]},
- {"shape": shapes[1], "format": formats[1]})
+ format_pattern = _add_check_format({SHAPE: shapes[0], FORMAT: formats[0]},
+ {SHAPE: shapes[1], FORMAT: formats[1]})
  ori_shape0 = ori_shapes[0] if ori_shapes[0] is not None else infer_ori_shape(
  shapes[0], formats[0], ori_formats[0])
  ori_shape1 = ori_shapes[1] if ori_shapes[1] is not None else infer_ori_shape(
  shapes[1], formats[1], ori_formats[1])
  new_shapes = [None, None]
  new_shapes[0], new_shapes[1] = _infer_shape(format_pattern,
- {"shape": shapes[0], "ori_shape": ori_shape0},
- {"shape": shapes[1], "ori_shape": ori_shape1})
+ {SHAPE: shapes[0], ORI_SHAPE: ori_shape0},
+ {SHAPE: shapes[1], ORI_SHAPE: ori_shape1})
  new_shapes[0], new_shapes[1], _ = shape_util.broadcast_shapes(new_shapes[0], new_shapes[1],
  param_name_input1="input0",
  param_name_input2="input1")
@@ -119,7 +132,7 @@
  """deal with batch_matmul."""
  for k, v in op_inputs.items():
  # batch dimension of BatchMatMul must be fused to 1D
- shape = v["shape"]
+ shape = v[SHAPE]
  if len(shape) > 5:
  new_shape = [functools.reduce(lambda x, y: x * y, shape[:-4])] + shape[-4:]
  add_new_shape(k, shape, new_shape, inputs)
@@ -135,6 +148,10 @@ class TransShape:

  def infer_ori_shape(shape, cur_format, ori_format):
  """Given current format and shape, infer the shape with ori_format."""
+
+ def _shape_error(current_shape, current_format):
+ raise ValueError("Invalid shape {} for format {}".format(current_shape, current_format))
+
  if cur_format == ori_format:
  return shape
  default_formats = ["DefaultFormat", "ND", "NCHW"]
@@ -145,7 +162,7 @@ def infer_ori_shape(shape, cur_format, ori_format):
  if cur_format == "FRACTAL_NZ" and ori_format in default_formats:
  dims = len(shape)
  if dims < 4:
- raise ValueError("Invalid shape {} for format {}".format(shape, cur_format))
+ _shape_error(shape, cur_format)
  ori_shape = shape[:dims - 4]
  m = shape[-3] * shape[-2]
  n = shape[-4] * shape[-1]
@@ -155,13 +172,13 @@

  if cur_format == "NC1HWC0" and ori_format in default_formats:
  if len(shape) != 5:
- raise ValueError("Invalid shape {} for format {}".format(shape, cur_format))
+ _shape_error(shape, cur_format)
  ori_shape = [shape[0], shape[1] * shape[4], shape[2], shape[3]]
  return ori_shape

  if cur_format == "NHWC" and ori_format in default_formats:
  if len(shape) != 4:
- raise ValueError("Invalid shape {} for format {}".format(shape, cur_format))
+ _shape_error(shape, cur_format)
  ori_shape = [shape[0], shape[3], shape[1], shape[2]]
  return ori_shape
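Note: a simplified, standalone sketch of the shape recovery that infer_ori_shape performs for the layouts touched in the two hunks above (assumed behaviour, not the packaged code):

    def infer_ori_shape_sketch(shape, cur_format):
        """Recover a default-format shape from a hardware layout."""
        if cur_format == "FRACTAL_NZ":  # (..., n1, m1, m0, n0) -> (..., m, n)
            if len(shape) < 4:
                raise ValueError("Invalid shape {} for format {}".format(shape, cur_format))
            m = shape[-3] * shape[-2]
            n = shape[-4] * shape[-1]
            return list(shape[:-4]) + [m, n]
        if cur_format == "NC1HWC0":     # (N, C1, H, W, C0) -> (N, C1*C0, H, W)
            if len(shape) != 5:
                raise ValueError("Invalid shape {} for format {}".format(shape, cur_format))
            return [shape[0], shape[1] * shape[4], shape[2], shape[3]]
        return list(shape)

    print(infer_ori_shape_sketch([8, 4, 16, 16], "FRACTAL_NZ"))   # [64, 128]
    print(infer_ori_shape_sketch([1, 2, 5, 5, 16], "NC1HWC0"))    # [1, 32, 5, 5]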
@@ -202,7 +219,7 @@ def get_input_desc(input_desc):
  res = {}
  for desc in input_desc:
  for item in desc:
- item["shape"] = [1] if not item["shape"] else item["shape"]
+ item[SHAPE] = [1] if not item[SHAPE] else item[SHAPE]
  res[item["tensor_name"]] = item
  return res
@@ -215,7 +232,7 @@ def get_inputs_tensor(input_desc, all_tensors):
  name = item["tensor_name"]
  if item.get("value") is not None:
  # const value
- all_tensors[name] = tvm.const(item["value"], item["data_type"])
+ all_tensors[name] = tvm.const(item["value"], item[DATA_TYPE])
  if all_tensors.get(name) is None:
  raise ValueError("Tensor [{}] not found.".format(name))
  inputs.append(all_tensors[name])
@@ -237,17 +254,17 @@ def get_op_attrs(op, fusion_op_name):
  op_name = op["name"]
  op_attrs = get_attr_dict(op.get("attr"))
  if op_name == "BatchMatMul":
- op_attrs["dst_type"] = op["output_desc"][0]["data_type"]
- op_attrs["dst_ori_shape"] = op["output_desc"][0].get("ori_shape")
- if op_attrs.get("dst_ori_shape") is None:
- op_attrs["dst_ori_shape"] = infer_ori_shape(op["output_desc"][0]["shape"],
- op["output_desc"][0]["format"],
- op["output_desc"][0]["ori_format"])
+ op_attrs[DST_TYPE] = op[OUTPUT_DESC][0][DATA_TYPE]
+ op_attrs[DST_ORI_SHAPE] = op[OUTPUT_DESC][0].get(ORI_SHAPE)
+ if op_attrs.get(DST_ORI_SHAPE) is None:
+ op_attrs[DST_ORI_SHAPE] = infer_ori_shape(op[OUTPUT_DESC][0][SHAPE],
+ op[OUTPUT_DESC][0][FORMAT],
+ op[OUTPUT_DESC][0][ORI_FORMAT])
  elif op_name == "MatMul":
- op_attrs["dst_type"] = op["output_desc"][0]["data_type"]
- op_attrs["dst_format"] = op["output_desc"][0]["format"]
+ op_attrs[DST_TYPE] = op[OUTPUT_DESC][0][DATA_TYPE]
+ op_attrs["dst_format"] = op[OUTPUT_DESC][0][FORMAT]
  elif op_name == "Cast":
- op_attrs["dst_type"] = op["output_desc"][0]["data_type"]
+ op_attrs[DST_TYPE] = op[OUTPUT_DESC][0][DATA_TYPE]
  op_attrs["fusion_op_name"] = fusion_op_name
  return op_attrs
@@ -256,17 +273,17 @@ def create_placeholders(inputs):
  """Create placeholders."""
  tensors = {}
  for k, v in inputs.items():
- dtype = v["data_type"]
+ dtype = v[DATA_TYPE]
  if dtype == "bool":
  dtype = "int8"
- shape = v["shape"]
- if "new_shape" in v:
- shape = v["new_shape"]
+ shape = v[SHAPE]
+ if NEW_SHAPE in v:
+ shape = v[NEW_SHAPE]
  attr = {
- "format": v.get("format"),
+ FORMAT: v.get(FORMAT),
  "sub_format": v.get("sub_format", ""),
- "ori_shape": v.get("ori_shape"),
- "ori_format": v.get("ori_format"),
+ ORI_SHAPE: v.get(ORI_SHAPE),
+ ORI_FORMAT: v.get(ORI_FORMAT),
  "addr_type": v.get("addr_type", 0),
  "valid_shape": v.get("valid_shape", []),
  "slice_offset": v.get("slice_offset", []),
@@ -276,8 +293,8 @@
  "L1_valid_size": v.get("L1_valid_size", -1),
  "range": v.get("range", [])
  }
- if attr.get("ori_shape") is None:
- attr["ori_shape"] = infer_ori_shape(v.get("shape"), v.get("format"), attr.get("ori_format"))
+ if attr.get(ORI_SHAPE) is None:
+ attr[ORI_SHAPE] = infer_ori_shape(v.get(SHAPE), v.get(FORMAT), attr.get(ORI_FORMAT))
  tensors[k] = tvm.placeholder(shape=shape, name=k, dtype=dtype, attrs=attr)
  return tensors
@@ -289,8 +306,8 @@ def same_shape(inputs):
  base_shape = -1
  for _, v in inputs.items():
  if base_shape == -1:
- base_shape = v["shape"]
- if v["shape"] != base_shape:
+ base_shape = v[SHAPE]
+ if v[SHAPE] != base_shape:
  return False
  return True
@@ -298,17 +315,17 @@
  def create_input_tensors(json_dict):
  """Create input placeholders."""
  fold_dim = True
- inputs = get_input_desc(json_dict.get("input_desc", []))
+ inputs = get_input_desc(json_dict.get(INPUT_DESC, []))
  for op in json_dict["op_desc"]:
  op_name = op["name"]
  pattern = get_op_reg_info(op_name, "pattern")
- op_inputs = get_input_desc(op.get("input_desc", []))
+ op_inputs = get_input_desc(op.get(INPUT_DESC, []))
  TransShape.run(op_name, pattern, op_inputs, inputs)
  if pattern != OpPattern.ELEMWISE or not same_shape(op_inputs):
  fold_dim = False
  if fold_dim:
  for k, v in inputs.items():
- shape = v["shape"]
+ shape = v[SHAPE]
  new_shape = [functools.reduce(lambda x, y: x * y, shape[:])]
  add_new_shape(k, shape, new_shape, inputs)
  return create_placeholders(inputs)
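Note: when every op in the fused kernel is elementwise and all inputs share one shape, create_input_tensors flattens each input to 1-D before building the placeholders. A minimal illustration of that fold:

    import functools

    shape = [2, 3, 4, 5]
    new_shape = [functools.reduce(lambda x, y: x * y, shape)]
    print(new_shape)  # [120] -- the 1-D shape recorded as the input's new_shape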
@@ -324,28 +341,28 @@ def create_fusion_op_name(op_names):


  def update_format(json_dict):
- """Some format like DefaultFormat is not recognized in TBE, need to covert these formats."""
+ """Some format like DefaultFormat is not recognized in TBE, need to convert these formats."""

  def _update_input_format(input_desc):
  for desc in input_desc:
  for item in desc:
- if item["format"] == "DefaultFormat":
- item["format"] = "ND"
- if item.get("ori_format") is None or item["ori_format"] == "DefaultFormat":
- item["ori_format"] = "NCHW"
+ if item[FORMAT] == "DefaultFormat":
+ item[FORMAT] = "ND"
+ if item.get(ORI_FORMAT) is None or item[ORI_FORMAT] == "DefaultFormat":
+ item[ORI_FORMAT] = "NCHW"

  def _update_output_format(output_desc):
  for item in output_desc:
- if item["format"] == "DefaultFormat":
- item["format"] = "ND"
- if item.get("ori_format") is None or item["ori_format"] == "DefaultFormat":
- item["ori_format"] = "NCHW"
+ if item[FORMAT] == "DefaultFormat":
+ item[FORMAT] = "ND"
+ if item.get(ORI_FORMAT) is None or item[ORI_FORMAT] == "DefaultFormat":
+ item[ORI_FORMAT] = "NCHW"

- _update_input_format(json_dict.get("input_desc", []))
- _update_output_format(json_dict["output_desc"])
+ _update_input_format(json_dict.get(INPUT_DESC, []))
+ _update_output_format(json_dict[OUTPUT_DESC])
  for op in json_dict["op_desc"]:
- _update_input_format(op.get("input_desc", []))
- _update_output_format(op["output_desc"])
+ _update_input_format(op.get(INPUT_DESC, []))
+ _update_output_format(op[OUTPUT_DESC])


  def gen_args_remap(orig_inputs_name, orig_outputs_name, inputs_name, outputs_name, inplace_names):
@@ -441,8 +458,8 @@ def build(json_str, kernel_meta_parent_dir):
  """Build kernel."""
  json_dict = json.loads(json_str)
  update_format(json_dict)
- inputs_name = get_inputs_name(json_dict.get("input_desc", []))
- outputs_name, inplace_names = get_outputs_info(json_dict["output_desc"])
+ inputs_name = get_inputs_name(json_dict.get(INPUT_DESC, []))
+ outputs_name, inplace_names = get_outputs_info(json_dict[OUTPUT_DESC])
  op_names = get_all_op_name(json_dict["op_desc"])
  fusion_op_name = create_fusion_op_name(op_names)
@@ -458,7 +475,7 @@
  for op in json_dict["op_desc"]:
  op_name = op["name"]
  # get op input tensor
- op_inputs = get_inputs_tensor(op.get("input_desc", []), all_tensors)
+ op_inputs = get_inputs_tensor(op.get(INPUT_DESC, []), all_tensors)
  # get op attrs
  op_attrs = get_op_attrs(op, fusion_op_name)
  # op compute
@@ -466,10 +483,10 @@
  # update op output tensor
  if not isinstance(op_outputs, (list, tuple)):
  op_outputs = [op_outputs]
- if len(op["output_desc"]) != len(op_outputs):
+ if len(op[OUTPUT_DESC]) != len(op_outputs):
  raise ValueError("len(op[\"output_desc\"] is not equal to the number of real output tensors in op[{}]: "
- "{} vs {}".format(op_name, len(op["output_desc"]), len(op_outputs)))
- for i, desc in enumerate(op["output_desc"]):
+ "{} vs {}".format(op_name, len(op[OUTPUT_DESC]), len(op_outputs)))
+ for i, desc in enumerate(op[OUTPUT_DESC]):
  all_tensors[desc["tensor_name"]] = op_outputs[i]

  # Collect input, output tensors
@@ -310,8 +310,8 @@ def _log(x, attrs=None):
  if base <= 0 and not math.isclose(base, -1.0, rel_tol=1e-8, abs_tol=0.0):
  raise ValueError("base must be strictly positive or -1, but got {}".format(base))
  from impl.log import log_compute
- outputDesc = {"dtype": x.dtype, "shape": x.shape}
- return log_compute(x, outputDesc, base, scale, shift, kernel_name=attrs["fusion_op_name"])
+ output_desc = {"dtype": x.dtype, "shape": x.shape}
+ return log_compute(x, output_desc, base, scale, shift, kernel_name=attrs["fusion_op_name"])


  @reg_op("Maximum", pattern=OpPattern.ELEMWISE)
@@ -350,8 +350,8 @@ def _mul(x0, x1, attrs=None):
  return tbe.dsl.vmuls(x1, x0)
  x0, x1 = _broadcast(x0, x1)
  from impl.mul import mul_compute
- outputDesc = {"dtype": x0.dtype, "shape": x0.shape}
- return mul_compute(x0, x1, outputDesc, kernel_name=attrs["fusion_op_name"])
+ output_desc = {"dtype": x0.dtype, "shape": x0.shape}
+ return mul_compute(x0, x1, output_desc, kernel_name=attrs["fusion_op_name"])


  @reg_op("Neg", pattern=OpPattern.ELEMWISE)
@@ -1,4 +1,4 @@
- # Copyright 2022 Huawei Technologies Co., Ltd
+ # Copyright 2022-2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -19,6 +19,9 @@ import shutil
  from mindspore import log as logger
  from mindspore._extends.parallel_compile.akg_compiler.tbe_topi import get_op_reg_info

+ O_SUFFIX = ".o"
+ JSON_SUFFIX = ".json"
+

  def update_attr(attr, new_attr):
  """Update new_attr to attr."""
@@ -111,8 +114,8 @@ def select_best(src_dirs, dst_dir, op_name):
  max_block_dim = 1
  max_block_dim_idx = -1
  for i, src_dir in enumerate(src_dirs):
- o_path = os.path.join(src_dir, op_name + ".o")
- json_path = os.path.join(src_dir, op_name + ".json")
+ o_path = os.path.join(src_dir, op_name + O_SUFFIX)
+ json_path = os.path.join(src_dir, op_name + JSON_SUFFIX)
  if os.path.isfile(o_path) and os.path.isfile(json_path):
  with open(json_path, 'r') as f:
  json_str = f.read()
@@ -121,10 +124,10 @@
  max_block_dim_idx = i
  max_block_dim = json_dict["blockDim"]
  if max_block_dim_idx >= 0:
- o_path = os.path.join(src_dirs[max_block_dim_idx], op_name + ".o")
- json_path = os.path.join(src_dirs[max_block_dim_idx], op_name + ".json")
- _copy_file(o_path, os.path.join(dst_dir, op_name + ".o"))
- _copy_file(json_path, os.path.join(dst_dir, op_name + ".json"))
+ o_path = os.path.join(src_dirs[max_block_dim_idx], op_name + O_SUFFIX)
+ json_path = os.path.join(src_dirs[max_block_dim_idx], op_name + JSON_SUFFIX)
+ _copy_file(o_path, os.path.join(dst_dir, op_name + O_SUFFIX))
+ _copy_file(json_path, os.path.join(dst_dir, op_name + JSON_SUFFIX))
  logger.info("{}, best compile result dir: {}".format(op_name, src_dirs[max_block_dim_idx]))
  return True
  logger.info("{}, best compile result dir not found".format(op_name))
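Note: select_best compares candidate compile-result directories by the "blockDim" value reported in each kernel's JSON and keeps the largest. A minimal sketch under that assumption (pick_best_dir is a hypothetical helper, not the packaged function):

    import json
    import os

    def pick_best_dir(src_dirs, op_name):
        best_idx, best_block_dim = -1, 1
        for i, src_dir in enumerate(src_dirs):
            json_path = os.path.join(src_dir, op_name + ".json")
            if not os.path.isfile(json_path):
                continue
            with open(json_path, "r") as f:
                block_dim = json.load(f).get("blockDim", 0)
            if block_dim > best_block_dim:
                best_idx, best_block_dim = i, block_dim
        return best_idx  # -1 means no usable compile result was found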
@@ -65,7 +65,8 @@ class LocalLock:

  def __init__(self, lock_file):
  if not os.path.exists(lock_file):
- write_to_file(lock_file)
+ if not write_to_file(lock_file):
+ raise IOError("write_to_file failed.")
  self.lock_fd = os.open(lock_file, os.O_WRONLY | os.O_CREAT, stat.S_IWUSR | stat.S_IRUSR)

  def __del__(self):
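Note: the change above makes lock-file creation fail fast instead of opening a file descriptor after a silent write failure. The same pattern in a self-contained form (create_empty_file stands in for the real write_to_file helper):

    import os
    import stat

    def create_empty_file(path):
        try:
            with open(path, "w"):
                pass
            return True
        except OSError:
            return False

    def open_lock_fd(lock_file):
        if not os.path.exists(lock_file):
            if not create_empty_file(lock_file):
                raise IOError("write_to_file failed.")
        return os.open(lock_file, os.O_WRONLY | os.O_CREAT, stat.S_IWUSR | stat.S_IRUSR)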
@@ -28,7 +28,8 @@ from .parser import (Parser, create_instance, is_supported_create_instance_type,
  convert_class_to_function, convert_cell_list_to_sequence, is_cell_list, get_obj_from_sequence,
  get_type, is_class_member_recursive, get_global_params, get_adapter_tensor_attr,
  get_obj_defined_from_obj_type, is_from_third_party_library, get_const_abs, get_const_round,
- get_const_len, is_adapter_tensor_class, is_adapter_parameter_class, convert_to_namedtuple)
+ get_const_len, is_adapter_tensor_class, is_adapter_parameter_class, convert_to_namedtuple,
+ is_module_list)

  __all__ = ['Parser', 'create_instance', 'is_supported_create_instance_type', 'generate_scope', 'get_attr_from_object',
  'get_bprop_method_of_class', 'get_class_instance_type', 'get_class_member_namespace_symbol',
@@ -41,4 +42,4 @@ __all__ = ['Parser', 'create_instance', 'is_supported_create_instance_type', 'ge
  'convert_class_to_function', 'convert_cell_list_to_sequence', 'is_cell_list', 'get_obj_from_sequence',
  'get_type', 'is_class_member_recursive', 'get_adapter_tensor_attr', 'get_obj_defined_from_obj_type',
  'is_from_third_party_library', 'get_const_abs', 'get_const_round', 'get_const_len',
- 'is_adapter_tensor_class', 'is_adapter_parameter_class', 'convert_to_namedtuple',]
+ 'is_adapter_tensor_class', 'is_adapter_parameter_class', 'convert_to_namedtuple', 'is_module_list']
@@ -493,9 +493,14 @@ def is_cell_list(obj):
  return isinstance(obj, nn.CellList)


+ def is_module_list(obj):
+ """Check if obj is nn.ModuleList"""
+ return hasattr(obj, "__cell_as_list__") and not isinstance(obj, nn.CellList)
+
+
  def convert_cell_list_to_sequence(obj):
  """Convert nn.CellList to sequence."""
- if not isinstance(obj, nn.CellList):
+ if not hasattr(obj, "__cell_as_list__"):
  raise TypeError(f"Obj should be nn.CellList, but got {obj}")
  if not hasattr(obj, "_cells"):
  raise AttributeError(f"nn.CellList is missing _cells property.")
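Note: the parser now keys on the __cell_as_list__ marker instead of an exact nn.CellList isinstance check, so ModuleList-style containers from an adapter layer can also pass through convert_cell_list_to_sequence. Toy stand-in classes (not MindSpore code) showing the duck-typing split:

    class CellList:
        __cell_as_list__ = True

    class AdapterModuleList:      # e.g. a ModuleList exposed by an adapter layer
        __cell_as_list__ = True

    def is_cell_list_like(obj):
        return hasattr(obj, "__cell_as_list__")

    def is_module_list_like(obj):
        return hasattr(obj, "__cell_as_list__") and not isinstance(obj, CellList)

    print(is_cell_list_like(CellList()), is_module_list_like(CellList()))                    # True False
    print(is_cell_list_like(AdapterModuleList()), is_module_list_like(AdapterModuleList()))  # True True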
@@ -2349,12 +2349,22 @@ def ms_iter(xs):

  def ms_next(it):
  """Implementation of `next`."""
- return it.__ms_next__()
+ return it.__ms_next__


  def hasnext(it):
  """Implementation of `hasnext`."""
- return it.__ms_hasnext__()
+ return it.__ms_hasnext__
+
+
+ def str_next(xs):
+ """Next string."""
+ return xs[0], xs[1:]
+
+
+ def str_hasnext(xs):
+ """Whether the string is empty or not."""
+ return len(xs) > 0


  @constexpr
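Note: the two new string helpers behave like a head/tail iterator over a Python string; plain-Python usage of the definitions added above:

    def str_next(xs):
        return xs[0], xs[1:]

    def str_hasnext(xs):
        return len(xs) > 0

    s, out = "abc", []
    while str_hasnext(s):
        ch, s = str_next(s)
        out.append(ch)
    print(out)  # ['a', 'b', 'c']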
@@ -2819,6 +2829,8 @@ def enumerate_(x, start=0):
  x_type = F.typeof(x)
  ret = ()
  op_name = "enumerate"
+ if isinstance(x, (int, float, bool)):
+ raise TypeError(f"For 'enumerate', the 'first input' should be tuple or list or tensor, but got {type(x)}.")
  if check_is_const_int(start, op_name, "start"):
  if check_is_tensor(x_type):
  for i in range(x.shape[0]):
@@ -3254,15 +3266,6 @@ def check_is_tensor(x):
  return False


- @constexpr
- def check_is_tuple_or_list_or_tensor(x, op_name, arg_name):
- """check whether x is list or tuple or tensor."""
- if isinstance(x, (mstype.List, mstype.Tuple, mstype.TensorType)):
- return True
- raise TypeError(
- f"For '{op_name}', the '{arg_name}' should be tuple or list or tensor, but got {x}.")
-
-
  def check_is_const_int(x, op_name, arg_name):
  """check whether x is const int."""
  if x is None:
@@ -15,7 +15,8 @@
  """kernel build server"""
  import os
  from mindspore import log as logger
- from mindspore._extends.parallel_compile.akg_compiler.akg_process import create_akg_parallel_process, create_akg_v2_parallel_process
+ from mindspore._extends.parallel_compile.akg_compiler.akg_process import create_akg_parallel_process, \
+ create_akg_v2_parallel_process


  class Messager:
mindspore/bin/cache_admin CHANGED
Binary file
@@ -116,3 +116,19 @@ def load_lib(lib_path):
  logger.warning(f'Loading {lib_path} lib error.')
  return False
  return True
+
+
+ def _jit_fallback_next_func(xs):
+ """Generate ms_next for xs"""
+ if hasattr(xs, "__next__"):
+ # Convert an iterator to tuple first.
+ xs = tuple(xs)
+ return xs[0], xs[1:]
+
+
+ def _jit_fallback_has_next_func(xs):
+ """Determine whether xs has next value"""
+ if hasattr(xs, "__next__"):
+ # Convert an iterator to tuple first.
+ xs = tuple(xs)
+ return len(xs) > 0
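Note: both fallback helpers accept either a sequence or an iterator; an iterator is first materialized into a tuple, then iteration proceeds head/tail style. Plain-Python usage of the definitions added above:

    def _jit_fallback_next_func(xs):
        if hasattr(xs, "__next__"):
            xs = tuple(xs)  # materialize an iterator before indexing
        return xs[0], xs[1:]

    def _jit_fallback_has_next_func(xs):
        if hasattr(xs, "__next__"):
            xs = tuple(xs)
        return len(xs) > 0

    xs = (1, 2, 3)
    while _jit_fallback_has_next_func(xs):
        head, xs = _jit_fallback_next_func(xs)
        print(head)  # 1, then 2, then 3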
mindspore/common/api.py CHANGED
@@ -1060,7 +1060,7 @@ def _build_broadcast_graph(broadcast_params_dict, broadcast_phase):
  _broadcast_net.phase = broadcast_phase
  broadcasted_params = _broadcast_net()
  for param_name, param in zip(broadcast_params_dict.keys(), broadcasted_params):
- broadcast_params_dict[param_name].set_data(param)
+ broadcast_params_dict.get(param_name).set_data(param)


  def _get_auto_split_param_names(parameter_layout_dict):