warp-lang 0.9.0__py3-none-win_amd64.whl → 0.11.0__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic. Click here for more details.

Files changed (315) hide show
  1. warp/__init__.py +15 -7
  2. warp/__init__.pyi +1 -0
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +22 -443
  6. warp/build_dll.py +384 -0
  7. warp/builtins.py +998 -488
  8. warp/codegen.py +1307 -739
  9. warp/config.py +5 -3
  10. warp/constants.py +6 -0
  11. warp/context.py +1291 -548
  12. warp/dlpack.py +31 -31
  13. warp/fabric.py +326 -0
  14. warp/fem/__init__.py +27 -0
  15. warp/fem/cache.py +389 -0
  16. warp/fem/dirichlet.py +181 -0
  17. warp/fem/domain.py +263 -0
  18. warp/fem/field/__init__.py +101 -0
  19. warp/fem/field/field.py +149 -0
  20. warp/fem/field/nodal_field.py +299 -0
  21. warp/fem/field/restriction.py +21 -0
  22. warp/fem/field/test.py +181 -0
  23. warp/fem/field/trial.py +183 -0
  24. warp/fem/geometry/__init__.py +19 -0
  25. warp/fem/geometry/closest_point.py +70 -0
  26. warp/fem/geometry/deformed_geometry.py +271 -0
  27. warp/fem/geometry/element.py +744 -0
  28. warp/fem/geometry/geometry.py +186 -0
  29. warp/fem/geometry/grid_2d.py +373 -0
  30. warp/fem/geometry/grid_3d.py +435 -0
  31. warp/fem/geometry/hexmesh.py +953 -0
  32. warp/fem/geometry/partition.py +376 -0
  33. warp/fem/geometry/quadmesh_2d.py +532 -0
  34. warp/fem/geometry/tetmesh.py +840 -0
  35. warp/fem/geometry/trimesh_2d.py +577 -0
  36. warp/fem/integrate.py +1616 -0
  37. warp/fem/operator.py +191 -0
  38. warp/fem/polynomial.py +213 -0
  39. warp/fem/quadrature/__init__.py +2 -0
  40. warp/fem/quadrature/pic_quadrature.py +245 -0
  41. warp/fem/quadrature/quadrature.py +294 -0
  42. warp/fem/space/__init__.py +292 -0
  43. warp/fem/space/basis_space.py +489 -0
  44. warp/fem/space/collocated_function_space.py +105 -0
  45. warp/fem/space/dof_mapper.py +236 -0
  46. warp/fem/space/function_space.py +145 -0
  47. warp/fem/space/grid_2d_function_space.py +267 -0
  48. warp/fem/space/grid_3d_function_space.py +306 -0
  49. warp/fem/space/hexmesh_function_space.py +352 -0
  50. warp/fem/space/partition.py +350 -0
  51. warp/fem/space/quadmesh_2d_function_space.py +369 -0
  52. warp/fem/space/restriction.py +160 -0
  53. warp/fem/space/shape/__init__.py +15 -0
  54. warp/fem/space/shape/cube_shape_function.py +738 -0
  55. warp/fem/space/shape/shape_function.py +103 -0
  56. warp/fem/space/shape/square_shape_function.py +611 -0
  57. warp/fem/space/shape/tet_shape_function.py +567 -0
  58. warp/fem/space/shape/triangle_shape_function.py +429 -0
  59. warp/fem/space/tetmesh_function_space.py +292 -0
  60. warp/fem/space/topology.py +295 -0
  61. warp/fem/space/trimesh_2d_function_space.py +221 -0
  62. warp/fem/types.py +77 -0
  63. warp/fem/utils.py +495 -0
  64. warp/native/array.h +164 -55
  65. warp/native/builtin.h +150 -174
  66. warp/native/bvh.cpp +75 -328
  67. warp/native/bvh.cu +406 -23
  68. warp/native/bvh.h +37 -45
  69. warp/native/clang/clang.cpp +136 -24
  70. warp/native/crt.cpp +1 -76
  71. warp/native/crt.h +111 -104
  72. warp/native/cuda_crt.h +1049 -0
  73. warp/native/cuda_util.cpp +15 -3
  74. warp/native/cuda_util.h +3 -1
  75. warp/native/cutlass/tools/library/scripts/conv2d_operation.py +463 -0
  76. warp/native/cutlass/tools/library/scripts/conv3d_operation.py +321 -0
  77. warp/native/cutlass/tools/library/scripts/gemm_operation.py +988 -0
  78. warp/native/cutlass/tools/library/scripts/generator.py +4625 -0
  79. warp/native/cutlass/tools/library/scripts/library.py +799 -0
  80. warp/native/cutlass/tools/library/scripts/manifest.py +402 -0
  81. warp/native/cutlass/tools/library/scripts/pycutlass/docs/source/conf.py +96 -0
  82. warp/native/cutlass/tools/library/scripts/pycutlass/profile/conv/conv2d_f16_sm80.py +106 -0
  83. warp/native/cutlass/tools/library/scripts/pycutlass/profile/gemm/gemm_f32_sm80.py +91 -0
  84. warp/native/cutlass/tools/library/scripts/pycutlass/setup.py +80 -0
  85. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/__init__.py +48 -0
  86. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/arguments.py +118 -0
  87. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/c_types.py +241 -0
  88. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/compiler.py +432 -0
  89. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/conv2d_operation.py +631 -0
  90. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/epilogue.py +1026 -0
  91. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/frontend.py +104 -0
  92. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/gemm_operation.py +1276 -0
  93. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/library.py +744 -0
  94. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/memory_manager.py +74 -0
  95. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/operation.py +110 -0
  96. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/parser.py +619 -0
  97. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/reduction_operation.py +398 -0
  98. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/tensor_ref.py +70 -0
  99. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/test/__init__.py +4 -0
  100. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/test/conv2d_testbed.py +646 -0
  101. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/test/gemm_grouped_testbed.py +235 -0
  102. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/test/gemm_testbed.py +557 -0
  103. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/test/profiler.py +70 -0
  104. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/type_hint.py +39 -0
  105. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/utils/__init__.py +1 -0
  106. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/utils/device.py +76 -0
  107. warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/utils/reference_model.py +255 -0
  108. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/__init__.py +0 -0
  109. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py +201 -0
  110. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py +177 -0
  111. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_dgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py +98 -0
  112. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_dgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py +95 -0
  113. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.py +163 -0
  114. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.py +187 -0
  115. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py +309 -0
  116. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py +54 -0
  117. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py +96 -0
  118. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py +107 -0
  119. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py +253 -0
  120. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py +97 -0
  121. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py +242 -0
  122. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_wgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py +96 -0
  123. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_wgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py +107 -0
  124. warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/run_all_tests.py +10 -0
  125. warp/native/cutlass/tools/library/scripts/pycutlass/test/frontend/test_frontend.py +146 -0
  126. warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/__init__.py +0 -0
  127. warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_bf16_sm80.py +96 -0
  128. warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_f16_sm80.py +447 -0
  129. warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_f32_sm80.py +146 -0
  130. warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_f64_sm80.py +102 -0
  131. warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_grouped_sm80.py +203 -0
  132. warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_s8_sm80.py +229 -0
  133. warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/run_all_tests.py +9 -0
  134. warp/native/cutlass/tools/library/scripts/pycutlass/test/unit/test_sm80.py +453 -0
  135. warp/native/cutlass/tools/library/scripts/rank_2k_operation.py +398 -0
  136. warp/native/cutlass/tools/library/scripts/rank_k_operation.py +387 -0
  137. warp/native/cutlass/tools/library/scripts/rt.py +796 -0
  138. warp/native/cutlass/tools/library/scripts/symm_operation.py +400 -0
  139. warp/native/cutlass/tools/library/scripts/trmm_operation.py +407 -0
  140. warp/native/cutlass_gemm.cu +5 -3
  141. warp/native/exports.h +1240 -949
  142. warp/native/fabric.h +228 -0
  143. warp/native/hashgrid.cpp +4 -4
  144. warp/native/hashgrid.h +22 -2
  145. warp/native/initializer_array.h +2 -2
  146. warp/native/intersect.h +22 -7
  147. warp/native/intersect_adj.h +8 -8
  148. warp/native/intersect_tri.h +13 -16
  149. warp/native/marching.cu +157 -161
  150. warp/native/mat.h +119 -19
  151. warp/native/matnn.h +2 -2
  152. warp/native/mesh.cpp +108 -83
  153. warp/native/mesh.cu +243 -6
  154. warp/native/mesh.h +1547 -458
  155. warp/native/nanovdb/NanoVDB.h +1 -1
  156. warp/native/noise.h +272 -329
  157. warp/native/quat.h +51 -8
  158. warp/native/rand.h +45 -35
  159. warp/native/range.h +6 -2
  160. warp/native/reduce.cpp +157 -0
  161. warp/native/reduce.cu +348 -0
  162. warp/native/runlength_encode.cpp +62 -0
  163. warp/native/runlength_encode.cu +46 -0
  164. warp/native/scan.cu +11 -13
  165. warp/native/scan.h +1 -0
  166. warp/native/solid_angle.h +442 -0
  167. warp/native/sort.cpp +13 -0
  168. warp/native/sort.cu +9 -1
  169. warp/native/sparse.cpp +338 -0
  170. warp/native/sparse.cu +545 -0
  171. warp/native/spatial.h +2 -2
  172. warp/native/temp_buffer.h +30 -0
  173. warp/native/vec.h +126 -24
  174. warp/native/volume.h +120 -0
  175. warp/native/warp.cpp +658 -53
  176. warp/native/warp.cu +660 -68
  177. warp/native/warp.h +112 -12
  178. warp/optim/__init__.py +1 -0
  179. warp/optim/linear.py +922 -0
  180. warp/optim/sgd.py +92 -0
  181. warp/render/render_opengl.py +392 -152
  182. warp/render/render_usd.py +11 -11
  183. warp/sim/__init__.py +2 -2
  184. warp/sim/articulation.py +385 -185
  185. warp/sim/collide.py +21 -8
  186. warp/sim/import_mjcf.py +297 -106
  187. warp/sim/import_urdf.py +389 -210
  188. warp/sim/import_usd.py +198 -97
  189. warp/sim/inertia.py +17 -18
  190. warp/sim/integrator_euler.py +14 -8
  191. warp/sim/integrator_xpbd.py +161 -19
  192. warp/sim/model.py +795 -291
  193. warp/sim/optimizer.py +2 -6
  194. warp/sim/render.py +65 -3
  195. warp/sim/utils.py +3 -0
  196. warp/sparse.py +1227 -0
  197. warp/stubs.py +665 -223
  198. warp/tape.py +66 -15
  199. warp/tests/__main__.py +3 -6
  200. warp/tests/assets/curlnoise_golden.npy +0 -0
  201. warp/tests/assets/pnoise_golden.npy +0 -0
  202. warp/tests/assets/torus.usda +105 -105
  203. warp/tests/{test_class_kernel.py → aux_test_class_kernel.py} +9 -1
  204. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -0
  205. warp/tests/{test_dependent.py → aux_test_dependent.py} +2 -2
  206. warp/tests/{test_reference.py → aux_test_reference.py} +1 -1
  207. warp/tests/aux_test_unresolved_func.py +14 -0
  208. warp/tests/aux_test_unresolved_symbol.py +14 -0
  209. warp/tests/disabled_kinematics.py +239 -0
  210. warp/tests/run_coverage_serial.py +31 -0
  211. warp/tests/test_adam.py +103 -106
  212. warp/tests/test_arithmetic.py +128 -74
  213. warp/tests/test_array.py +1497 -211
  214. warp/tests/test_array_reduce.py +150 -0
  215. warp/tests/test_atomic.py +64 -28
  216. warp/tests/test_bool.py +99 -0
  217. warp/tests/test_builtins_resolution.py +1292 -0
  218. warp/tests/test_bvh.py +75 -43
  219. warp/tests/test_closest_point_edge_edge.py +54 -57
  220. warp/tests/test_codegen.py +233 -128
  221. warp/tests/test_compile_consts.py +28 -20
  222. warp/tests/test_conditional.py +108 -24
  223. warp/tests/test_copy.py +10 -12
  224. warp/tests/test_ctypes.py +112 -88
  225. warp/tests/test_dense.py +21 -14
  226. warp/tests/test_devices.py +98 -0
  227. warp/tests/test_dlpack.py +136 -108
  228. warp/tests/test_examples.py +277 -0
  229. warp/tests/test_fabricarray.py +955 -0
  230. warp/tests/test_fast_math.py +15 -11
  231. warp/tests/test_fem.py +1271 -0
  232. warp/tests/test_fp16.py +53 -19
  233. warp/tests/test_func.py +187 -74
  234. warp/tests/test_generics.py +194 -49
  235. warp/tests/test_grad.py +180 -116
  236. warp/tests/test_grad_customs.py +176 -0
  237. warp/tests/test_hash_grid.py +52 -37
  238. warp/tests/test_import.py +10 -23
  239. warp/tests/test_indexedarray.py +577 -24
  240. warp/tests/test_intersect.py +18 -9
  241. warp/tests/test_large.py +141 -0
  242. warp/tests/test_launch.py +251 -15
  243. warp/tests/test_lerp.py +64 -65
  244. warp/tests/test_linear_solvers.py +154 -0
  245. warp/tests/test_lvalue.py +493 -0
  246. warp/tests/test_marching_cubes.py +12 -13
  247. warp/tests/test_mat.py +508 -2778
  248. warp/tests/test_mat_lite.py +115 -0
  249. warp/tests/test_mat_scalar_ops.py +2889 -0
  250. warp/tests/test_math.py +103 -9
  251. warp/tests/test_matmul.py +305 -69
  252. warp/tests/test_matmul_lite.py +410 -0
  253. warp/tests/test_mesh.py +71 -14
  254. warp/tests/test_mesh_query_aabb.py +41 -25
  255. warp/tests/test_mesh_query_point.py +325 -34
  256. warp/tests/test_mesh_query_ray.py +39 -22
  257. warp/tests/test_mlp.py +30 -22
  258. warp/tests/test_model.py +92 -89
  259. warp/tests/test_modules_lite.py +39 -0
  260. warp/tests/test_multigpu.py +88 -114
  261. warp/tests/test_noise.py +12 -11
  262. warp/tests/test_operators.py +16 -20
  263. warp/tests/test_options.py +11 -11
  264. warp/tests/test_pinned.py +17 -18
  265. warp/tests/test_print.py +32 -11
  266. warp/tests/test_quat.py +275 -129
  267. warp/tests/test_rand.py +18 -16
  268. warp/tests/test_reload.py +38 -34
  269. warp/tests/test_rounding.py +50 -43
  270. warp/tests/test_runlength_encode.py +190 -0
  271. warp/tests/test_smoothstep.py +9 -11
  272. warp/tests/test_snippet.py +143 -0
  273. warp/tests/test_sparse.py +460 -0
  274. warp/tests/test_spatial.py +276 -243
  275. warp/tests/test_streams.py +110 -85
  276. warp/tests/test_struct.py +331 -85
  277. warp/tests/test_tape.py +39 -21
  278. warp/tests/test_torch.py +118 -89
  279. warp/tests/test_transient_module.py +12 -13
  280. warp/tests/test_types.py +614 -0
  281. warp/tests/test_utils.py +494 -0
  282. warp/tests/test_vec.py +354 -1987
  283. warp/tests/test_vec_lite.py +73 -0
  284. warp/tests/test_vec_scalar_ops.py +2099 -0
  285. warp/tests/test_volume.py +457 -293
  286. warp/tests/test_volume_write.py +124 -134
  287. warp/tests/unittest_serial.py +35 -0
  288. warp/tests/unittest_suites.py +341 -0
  289. warp/tests/unittest_utils.py +568 -0
  290. warp/tests/unused_test_misc.py +71 -0
  291. warp/tests/{test_debug.py → walkthough_debug.py} +3 -17
  292. warp/thirdparty/appdirs.py +36 -45
  293. warp/thirdparty/unittest_parallel.py +549 -0
  294. warp/torch.py +72 -30
  295. warp/types.py +1744 -713
  296. warp/utils.py +360 -350
  297. warp_lang-0.11.0.dist-info/LICENSE.md +36 -0
  298. warp_lang-0.11.0.dist-info/METADATA +238 -0
  299. warp_lang-0.11.0.dist-info/RECORD +332 -0
  300. {warp_lang-0.9.0.dist-info → warp_lang-0.11.0.dist-info}/WHEEL +1 -1
  301. warp/bin/warp-clang.exp +0 -0
  302. warp/bin/warp-clang.lib +0 -0
  303. warp/bin/warp.exp +0 -0
  304. warp/bin/warp.lib +0 -0
  305. warp/tests/test_all.py +0 -215
  306. warp/tests/test_array_scan.py +0 -60
  307. warp/tests/test_base.py +0 -208
  308. warp/tests/test_unresolved_func.py +0 -7
  309. warp/tests/test_unresolved_symbol.py +0 -7
  310. warp_lang-0.9.0.dist-info/METADATA +0 -20
  311. warp_lang-0.9.0.dist-info/RECORD +0 -177
  312. /warp/tests/{test_compile_consts_dummy.py → aux_test_compile_consts_dummy.py} +0 -0
  313. /warp/tests/{test_reference_reference.py → aux_test_reference_reference.py} +0 -0
  314. /warp/tests/{test_square.py → aux_test_square.py} +0 -0
  315. {warp_lang-0.9.0.dist-info → warp_lang-0.11.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,107 @@
1
+ # test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
2
+ import pycutlass
3
+ from pycutlass import *
4
+ from pycutlass.test import *
5
+ from pycutlass.utils.device import device_cc
6
+ import unittest
7
+
8
+
9
+ @unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
10
+ class Conv2dFpropImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
11
+ def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
12
+ math_inst = MathInstruction(
13
+ instruction_shape=[16, 8, 8],
14
+ element_a=cutlass.float32, element_b=cutlass.float32,
15
+ element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
16
+ math_operation=MathOperation.multiply_add
17
+ )
18
+
19
+ A = TensorDescription(
20
+ element=math_inst.element_a,
21
+ layout=cutlass.TensorNHWC,
22
+ alignment=4)
23
+ B = TensorDescription(
24
+ element=math_inst.element_b,
25
+ layout=cutlass.TensorNHWC,
26
+ alignment=4)
27
+ C = TensorDescription(
28
+ element=cutlass.float32,
29
+ layout=cutlass.TensorNHWC,
30
+ alignment=8)
31
+
32
+ tile_description = TileDescription(
33
+ threadblock_shape=[128, 128, 16], stages=3,
34
+ warp_count=[2, 2, 1],
35
+ math_instruction=math_inst
36
+ )
37
+
38
+ epilogue_functor = LinearCombination(
39
+ C.element, C.alignment,
40
+ math_inst.element_accumulator, cutlass.float32)
41
+
42
+ operation = Conv2dOperation(
43
+ conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
44
+ arch=80, tile_description=tile_description, A=A, B=B, C=C,
45
+ stride_support=StrideSupport.Strided,
46
+ epilogue_functor=epilogue_functor,
47
+ swizzling_functor=cutlass.IdentitySwizzle1
48
+ )
49
+
50
+ self.assertTrue(test_all_conv2d(operation))
51
+
52
+ def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align2(self):
53
+ math_inst = MathInstruction(
54
+ instruction_shape=[16, 8, 8],
55
+ element_a=cutlass.float32, element_b=cutlass.float32,
56
+ element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
57
+ math_operation=MathOperation.multiply_add
58
+ )
59
+
60
+ A = TensorDescription(
61
+ element=math_inst.element_a,
62
+ layout=cutlass.TensorNHWC,
63
+ alignment=2)
64
+ B = TensorDescription(
65
+ element=math_inst.element_b,
66
+ layout=cutlass.TensorNHWC,
67
+ alignment=2)
68
+ C = TensorDescription(
69
+ element=cutlass.float32,
70
+ layout=cutlass.TensorNHWC,
71
+ alignment=8)
72
+
73
+ tile_description = TileDescription(
74
+ threadblock_shape=[128, 128, 16], stages=3,
75
+ warp_count=[2, 2, 1],
76
+ math_instruction=math_inst
77
+ )
78
+
79
+ epilogue_functor = LinearCombination(
80
+ C.element, C.alignment,
81
+ math_inst.element_accumulator, cutlass.float32)
82
+
83
+ operation = Conv2dOperation(
84
+ conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
85
+ arch=80, tile_description=tile_description, A=A, B=B, C=C,
86
+ stride_support=StrideSupport.Strided,
87
+ epilogue_functor=epilogue_functor,
88
+ swizzling_functor=cutlass.IdentitySwizzle1
89
+ )
90
+
91
+ problem_sizes = [
92
+ cutlass.conv.Conv2dProblemSize(
93
+ cutlass.Tensor4DCoord(1, 4, 4, 12),
94
+ cutlass.Tensor4DCoord(8, 3, 3, 12),
95
+ cutlass.Tensor4DCoord(0, 0, 0, 0),
96
+ cutlass.MatrixCoord(3, 3),
97
+ cutlass.MatrixCoord(1, 1),
98
+ cutlass.conv.Mode.cross_correlation,
99
+ 1, 1
100
+ )
101
+ ]
102
+
103
+ self.assertTrue(test_all_conv2d(operation, problem_sizes))
104
+
105
+ if __name__ == '__main__':
106
+ pycutlass.get_memory_pool(2**26, 2**26)
107
+ unittest.main()
@@ -0,0 +1,253 @@
1
+ # test/unit/conv/device/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
2
+ import pycutlass
3
+ from pycutlass import *
4
+ from pycutlass.test import *
5
+ from pycutlass.utils.device import device_cc
6
+ import unittest
7
+
8
+
9
+ @unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
10
+ class Conv2dStridedDgradImplicitGemmF16NHWCF16NHWCF32NHWCTensorOpF32SM80(unittest.TestCase):
11
+ def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
12
+ math_inst = MathInstruction(
13
+ instruction_shape=[16, 8, 16],
14
+ element_a=cutlass.float16, element_b=cutlass.float16,
15
+ element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
16
+ math_operation=MathOperation.multiply_add
17
+ )
18
+
19
+ A = TensorDescription(
20
+ element=math_inst.element_a,
21
+ layout=cutlass.TensorNHWC,
22
+ alignment=8)
23
+ B = TensorDescription(
24
+ element=math_inst.element_b,
25
+ layout=cutlass.TensorNHWC,
26
+ alignment=8)
27
+ C = TensorDescription(
28
+ element=cutlass.float32,
29
+ layout=cutlass.TensorNHWC,
30
+ alignment=4)
31
+
32
+ tile_description = TileDescription(
33
+ threadblock_shape=[128, 128, 32], stages=3,
34
+ warp_count=[2, 2, 1],
35
+ math_instruction=math_inst
36
+ )
37
+
38
+ epilogue_functor = LinearCombination(
39
+ C.element, C.alignment,
40
+ math_inst.element_accumulator, cutlass.float32)
41
+
42
+ operation = Conv2dOperation(
43
+ conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
44
+ arch=80, tile_description=tile_description, A=A, B=B, C=C,
45
+ stride_support=StrideSupport.Strided,
46
+ epilogue_functor=epilogue_functor,
47
+ swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
48
+ )
49
+
50
+ self.assertTrue(test_all_conv2d(operation))
51
+
52
+ def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x256_64x3_64x64x64(self):
53
+ math_inst = MathInstruction(
54
+ instruction_shape=[16, 8, 16],
55
+ element_a=cutlass.float16, element_b=cutlass.float16,
56
+ element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
57
+ math_operation=MathOperation.multiply_add
58
+ )
59
+
60
+ A = TensorDescription(
61
+ element=math_inst.element_a,
62
+ layout=cutlass.TensorNHWC,
63
+ alignment=8)
64
+ B = TensorDescription(
65
+ element=math_inst.element_b,
66
+ layout=cutlass.TensorNHWC,
67
+ alignment=8)
68
+ C = TensorDescription(
69
+ element=cutlass.float32,
70
+ layout=cutlass.TensorNHWC,
71
+ alignment=4)
72
+
73
+ tile_description = TileDescription(
74
+ threadblock_shape=[128, 256, 64], stages=3,
75
+ warp_count=[2, 4, 1],
76
+ math_instruction=math_inst
77
+ )
78
+
79
+ epilogue_functor = LinearCombination(
80
+ C.element, C.alignment,
81
+ math_inst.element_accumulator, cutlass.float32)
82
+
83
+ operation = Conv2dOperation(
84
+ conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
85
+ arch=80, tile_description=tile_description, A=A, B=B, C=C,
86
+ stride_support=StrideSupport.Strided,
87
+ epilogue_functor=epilogue_functor,
88
+ swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
89
+ )
90
+
91
+ self.assertTrue(test_all_conv2d(operation))
92
+
93
+ def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4_128x128_32x3_64x64x32(self):
94
+ math_inst = MathInstruction(
95
+ instruction_shape=[16, 8, 16],
96
+ element_a=cutlass.float16, element_b=cutlass.float16,
97
+ element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
98
+ math_operation=MathOperation.multiply_add
99
+ )
100
+
101
+ A = TensorDescription(
102
+ element=math_inst.element_a,
103
+ layout=cutlass.TensorNHWC,
104
+ alignment=4)
105
+ B = TensorDescription(
106
+ element=math_inst.element_b,
107
+ layout=cutlass.TensorNHWC,
108
+ alignment=4)
109
+ C = TensorDescription(
110
+ element=cutlass.float32,
111
+ layout=cutlass.TensorNHWC,
112
+ alignment=4)
113
+
114
+ tile_description = TileDescription(
115
+ threadblock_shape=[128, 128, 32], stages=3,
116
+ warp_count=[2, 2, 1],
117
+ math_instruction=math_inst
118
+ )
119
+
120
+ epilogue_functor = LinearCombination(
121
+ C.element, C.alignment,
122
+ math_inst.element_accumulator, cutlass.float32)
123
+
124
+ operation = Conv2dOperation(
125
+ conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
126
+ arch=80, tile_description=tile_description, A=A, B=B, C=C,
127
+ stride_support=StrideSupport.Strided,
128
+ epilogue_functor=epilogue_functor,
129
+ swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
130
+ )
131
+
132
+ problem_sizes = [
133
+ cutlass.conv.Conv2dProblemSize(
134
+ cutlass.Tensor4DCoord(1, 4, 4, 12),
135
+ cutlass.Tensor4DCoord(8, 3, 3, 12),
136
+ cutlass.Tensor4DCoord(0, 0, 0, 0),
137
+ cutlass.MatrixCoord(3, 3),
138
+ cutlass.MatrixCoord(1, 1),
139
+ cutlass.conv.Mode.cross_correlation,
140
+ 1, 1
141
+ ),
142
+ ]
143
+
144
+ self.assertTrue(test_all_conv2d(operation, problem_sizes))
145
+
146
+ def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
147
+ math_inst = MathInstruction(
148
+ instruction_shape=[16, 8, 16],
149
+ element_a=cutlass.float16, element_b=cutlass.float16,
150
+ element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
151
+ math_operation=MathOperation.multiply_add
152
+ )
153
+
154
+ A = TensorDescription(
155
+ element=math_inst.element_a,
156
+ layout=cutlass.TensorNHWC,
157
+ alignment=8)
158
+ B = TensorDescription(
159
+ element=math_inst.element_b,
160
+ layout=cutlass.TensorNHWC,
161
+ alignment=8)
162
+ C = TensorDescription(
163
+ element=cutlass.float32,
164
+ layout=cutlass.TensorNHWC,
165
+ alignment=4)
166
+
167
+ tile_description = TileDescription(
168
+ threadblock_shape=[128, 128, 32], stages=3,
169
+ warp_count=[2, 2, 1],
170
+ math_instruction=math_inst
171
+ )
172
+
173
+ epilogue_functor = LinearCombination(
174
+ C.element, C.alignment,
175
+ math_inst.element_accumulator, cutlass.float32)
176
+
177
+ operation = Conv2dOperation(
178
+ conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
179
+ arch=80, tile_description=tile_description, A=A, B=B, C=C,
180
+ stride_support=StrideSupport.Strided,
181
+ epilogue_functor=epilogue_functor,
182
+ swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
183
+ )
184
+
185
+ self.assertTrue(test_all_conv2d(operation))
186
+
187
+ def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32_align4(self):
188
+ math_inst = MathInstruction(
189
+ instruction_shape=[16, 8, 16],
190
+ element_a=cutlass.float16, element_b=cutlass.float16,
191
+ element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
192
+ math_operation=MathOperation.multiply_add
193
+ )
194
+
195
+ A = TensorDescription(
196
+ element=math_inst.element_a,
197
+ layout=cutlass.TensorNHWC,
198
+ alignment=4)
199
+ B = TensorDescription(
200
+ element=math_inst.element_b,
201
+ layout=cutlass.TensorNHWC,
202
+ alignment=4)
203
+ C = TensorDescription(
204
+ element=cutlass.float32,
205
+ layout=cutlass.TensorNHWC,
206
+ alignment=4)
207
+
208
+ tile_description = TileDescription(
209
+ threadblock_shape=[128, 128, 32], stages=3,
210
+ warp_count=[2, 2, 1],
211
+ math_instruction=math_inst
212
+ )
213
+
214
+ epilogue_functor = LinearCombination(
215
+ C.element, C.alignment,
216
+ math_inst.element_accumulator, cutlass.float32)
217
+
218
+ operation = Conv2dOperation(
219
+ conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
220
+ arch=80, tile_description=tile_description, A=A, B=B, C=C,
221
+ stride_support=StrideSupport.Strided,
222
+ epilogue_functor=epilogue_functor,
223
+ swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
224
+ )
225
+
226
+ problem_sizes = [
227
+ cutlass.conv.Conv2dProblemSize(
228
+ cutlass.Tensor4DCoord(1, 56, 56, 12),
229
+ cutlass.Tensor4DCoord(8, 1, 1, 12),
230
+ cutlass.Tensor4DCoord(0, 0, 0, 0),
231
+ cutlass.MatrixCoord(2, 2),
232
+ cutlass.MatrixCoord(1, 1),
233
+ cutlass.conv.Mode.cross_correlation,
234
+ 1, 1
235
+ ),
236
+ cutlass.conv.Conv2dProblemSize(
237
+ cutlass.Tensor4DCoord(1, 55, 55, 12),
238
+ cutlass.Tensor4DCoord(8, 1, 1, 12),
239
+ cutlass.Tensor4DCoord(0, 0, 0, 0),
240
+ cutlass.MatrixCoord(2, 2),
241
+ cutlass.MatrixCoord(1, 1),
242
+ cutlass.conv.Mode.cross_correlation,
243
+ 1, 1
244
+ ),
245
+ ]
246
+
247
+ self.assertTrue(test_all_conv2d(operation, problem_sizes))
248
+
249
+
250
+
251
+ if __name__ == '__main__':
252
+ pycutlass.get_memory_pool(2**26, 2**26)
253
+ unittest.main()
@@ -0,0 +1,97 @@
1
+ # test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
2
+ import pycutlass
3
+ from pycutlass import *
4
+ from pycutlass.test import *
5
+ from pycutlass.utils.device import device_cc
6
+ import unittest
7
+
8
+
9
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
    """Conv2d wgrad tests: f16 NHWC operands/output, f16 tensor-op accumulation, SM80.

    Python port of
    test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu

    The analytic and optimized iterator-algorithm variants use an otherwise
    identical operation configuration, so both tests delegate to a single
    private helper instead of duplicating the setup.
    """

    def _run_wgrad(self, iterator_algorithm):
        """Build the f16/f16/f16 wgrad operation and run the default conv2d sweep.

        Args:
            iterator_algorithm: cutlass.conv.IteratorAlgorithm member selecting
                the im2col iterator implementation under test.
        """
        # f16 inputs accumulated in f16 on tensor cores (16x8x16 MMA shape).
        math_inst = MathInstruction(
            instruction_shape=[16, 8, 16],
            element_a=cutlass.float16, element_b=cutlass.float16,
            element_accumulator=cutlass.float16,
            opcode_class=cutlass.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        # alignment=8 elements of f16 — presumably 128-bit vector accesses;
        # confirm against pycutlass TensorDescription docs.
        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass.TensorNHWC,
            alignment=8)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass.TensorNHWC,
            alignment=8)
        C = TensorDescription(
            element=cutlass.float16,
            layout=cutlass.TensorNHWC,
            alignment=8)

        tile_description = TileDescription(
            threadblock_shape=[128, 128, 64], stages=3,
            warp_count=[2, 2, 1],
            math_instruction=math_inst,
        )

        # Linear-combination epilogue computed in f16.
        epilogue_functor = LinearCombination(
            C.element, C.alignment, math_inst.element_accumulator,
            cutlass.float16,
        )

        operation = Conv2dOperation(
            conv_kind=cutlass.conv.Operator.wgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass.IdentitySwizzle1,
        )

        self.assertTrue(test_all_conv2d(operation))

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        self._run_wgrad(cutlass.conv.IteratorAlgorithm.analytic)

    def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
        self._run_wgrad(cutlass.conv.IteratorAlgorithm.optimized)
94
+
95
if __name__ == "__main__":
    # Reserve the pycutlass memory pool (2**26 = 64 MiB per argument —
    # presumably init/max sizes; see pycutlass.get_memory_pool), then hand
    # control to the unittest runner.
    pool_bytes = 2**26
    pycutlass.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()
@@ -0,0 +1,242 @@
1
+ # test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
2
+ import pycutlass
3
+ from pycutlass import *
4
+ from pycutlass.test import *
5
+ from pycutlass.utils.device import device_cc
6
+ import unittest
7
+
8
+
9
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    """Conv2d wgrad tests: f16 NHWC operands, f32 NHWC output, f32 tensor-op accumulation, SM80.

    Python port of
    test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
    (path comment kept as in the original source).

    All five test cases share the same operation skeleton and differ only in
    MMA instruction shape, threadblock tile, warp layout, A/B alignment,
    iterator algorithm, and (for the align4 cases) an explicit problem-size
    list — so each test delegates to one parameterized helper.
    """

    @staticmethod
    def _align4_problem_sizes():
        """Problem sizes with channel count 12 (divisible by 4, not 8) used by the alignment-4 tests."""
        return [
            # Argument order per CUTLASS Conv2dProblemSize:
            # (input NHWC, filter KRSC, padding, stride, dilation, mode,
            #  split-k slices, groups) — verify against cutlass.conv bindings.
            cutlass.conv.Conv2dProblemSize(
                cutlass.Tensor4DCoord(1, 4, 4, 12),
                cutlass.Tensor4DCoord(8, 3, 3, 12),
                cutlass.Tensor4DCoord(0, 0, 0, 0),
                cutlass.MatrixCoord(3, 3),
                cutlass.MatrixCoord(1, 1),
                cutlass.conv.Mode.cross_correlation,
                1, 1,
            ),
        ]

    def _run_wgrad(self, iterator_algorithm, instruction_shape,
                   threadblock_shape, warp_count, alignment_ab=8,
                   problem_sizes=None):
        """Build an f16/f16/f32 wgrad operation and run the conv2d test sweep.

        Args:
            iterator_algorithm: cutlass.conv.IteratorAlgorithm member to test.
            instruction_shape: tensor-op MMA shape [m, n, k].
            threadblock_shape: threadblock tile shape [m, n, k].
            warp_count: warps per threadblock along [m, n, k].
            alignment_ab: vector alignment (in elements) for the A/B operands.
            problem_sizes: optional explicit problem list; when None the
                default sweep inside test_all_conv2d is used.
        """
        math_inst = MathInstruction(
            instruction_shape=instruction_shape,
            element_a=cutlass.float16, element_b=cutlass.float16,
            element_accumulator=cutlass.float32,
            opcode_class=cutlass.OpClass.TensorOp,
            math_operation=MathOperation.multiply_add,
        )

        A = TensorDescription(
            element=math_inst.element_a,
            layout=cutlass.TensorNHWC,
            alignment=alignment_ab)
        B = TensorDescription(
            element=math_inst.element_b,
            layout=cutlass.TensorNHWC,
            alignment=alignment_ab)
        # Output is f32; alignment 4 in every configuration of this suite.
        C = TensorDescription(
            element=cutlass.float32,
            layout=cutlass.TensorNHWC,
            alignment=4)

        tile_description = TileDescription(
            threadblock_shape=threadblock_shape, stages=3,
            warp_count=warp_count,
            math_instruction=math_inst,
        )

        # Linear-combination epilogue computed in f32.
        epilogue_functor = LinearCombination(
            C.element, C.alignment,
            math_inst.element_accumulator, cutlass.float32)

        operation = Conv2dOperation(
            conv_kind=cutlass.conv.Operator.wgrad,
            iterator_algorithm=iterator_algorithm,
            arch=80, tile_description=tile_description, A=A, B=B, C=C,
            stride_support=StrideSupport.Strided,
            epilogue_functor=epilogue_functor,
            swizzling_functor=cutlass.IdentitySwizzle1,
        )

        if problem_sizes is None:
            self.assertTrue(test_all_conv2d(operation))
        else:
            self.assertTrue(test_all_conv2d(operation, problem_sizes))

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        self._run_wgrad(cutlass.conv.IteratorAlgorithm.analytic,
                        instruction_shape=[16, 8, 8],
                        threadblock_shape=[128, 128, 16],
                        warp_count=[2, 2, 1])

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        self._run_wgrad(cutlass.conv.IteratorAlgorithm.optimized,
                        instruction_shape=[16, 8, 8],
                        threadblock_shape=[128, 128, 16],
                        warp_count=[2, 2, 1])

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_64x256_32x4_64x64x32(self):
        self._run_wgrad(cutlass.conv.IteratorAlgorithm.optimized,
                        instruction_shape=[16, 8, 16],
                        threadblock_shape=[64, 256, 32],
                        warp_count=[1, 4, 1])

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
        self._run_wgrad(cutlass.conv.IteratorAlgorithm.analytic,
                        instruction_shape=[16, 8, 8],
                        threadblock_shape=[128, 128, 16],
                        warp_count=[2, 2, 1],
                        alignment_ab=4,
                        problem_sizes=self._align4_problem_sizes())

    def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
        self._run_wgrad(cutlass.conv.IteratorAlgorithm.optimized,
                        instruction_shape=[16, 8, 8],
                        threadblock_shape=[128, 128, 16],
                        warp_count=[2, 2, 1],
                        alignment_ab=4,
                        problem_sizes=self._align4_problem_sizes())
239
+
240
if __name__ == "__main__":
    # Set up the pycutlass memory pool (2**26 = 64 MiB per argument —
    # presumably init/max sizes; see pycutlass.get_memory_pool) and run all
    # tests in this module.
    pool_bytes = 2**26
    pycutlass.get_memory_pool(pool_bytes, pool_bytes)
    unittest.main()