warp-lang 1.10.0 (py3-none-macosx_11_0_arm64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of warp-lang might be problematic.
- warp/__init__.py +334 -0
- warp/__init__.pyi +5856 -0
- warp/_src/__init__.py +14 -0
- warp/_src/autograd.py +1077 -0
- warp/_src/build.py +620 -0
- warp/_src/build_dll.py +642 -0
- warp/_src/builtins.py +10555 -0
- warp/_src/codegen.py +4361 -0
- warp/_src/config.py +178 -0
- warp/_src/constants.py +59 -0
- warp/_src/context.py +8352 -0
- warp/_src/dlpack.py +464 -0
- warp/_src/fabric.py +362 -0
- warp/_src/fem/__init__.py +14 -0
- warp/_src/fem/adaptivity.py +510 -0
- warp/_src/fem/cache.py +689 -0
- warp/_src/fem/dirichlet.py +190 -0
- warp/_src/fem/domain.py +553 -0
- warp/_src/fem/field/__init__.py +131 -0
- warp/_src/fem/field/field.py +703 -0
- warp/_src/fem/field/nodal_field.py +403 -0
- warp/_src/fem/field/restriction.py +39 -0
- warp/_src/fem/field/virtual.py +1021 -0
- warp/_src/fem/geometry/__init__.py +32 -0
- warp/_src/fem/geometry/adaptive_nanogrid.py +782 -0
- warp/_src/fem/geometry/closest_point.py +99 -0
- warp/_src/fem/geometry/deformed_geometry.py +277 -0
- warp/_src/fem/geometry/element.py +854 -0
- warp/_src/fem/geometry/geometry.py +693 -0
- warp/_src/fem/geometry/grid_2d.py +478 -0
- warp/_src/fem/geometry/grid_3d.py +539 -0
- warp/_src/fem/geometry/hexmesh.py +956 -0
- warp/_src/fem/geometry/nanogrid.py +660 -0
- warp/_src/fem/geometry/partition.py +483 -0
- warp/_src/fem/geometry/quadmesh.py +597 -0
- warp/_src/fem/geometry/tetmesh.py +762 -0
- warp/_src/fem/geometry/trimesh.py +588 -0
- warp/_src/fem/integrate.py +2507 -0
- warp/_src/fem/linalg.py +385 -0
- warp/_src/fem/operator.py +398 -0
- warp/_src/fem/polynomial.py +231 -0
- warp/_src/fem/quadrature/__init__.py +17 -0
- warp/_src/fem/quadrature/pic_quadrature.py +318 -0
- warp/_src/fem/quadrature/quadrature.py +665 -0
- warp/_src/fem/space/__init__.py +248 -0
- warp/_src/fem/space/basis_function_space.py +499 -0
- warp/_src/fem/space/basis_space.py +681 -0
- warp/_src/fem/space/dof_mapper.py +253 -0
- warp/_src/fem/space/function_space.py +312 -0
- warp/_src/fem/space/grid_2d_function_space.py +179 -0
- warp/_src/fem/space/grid_3d_function_space.py +229 -0
- warp/_src/fem/space/hexmesh_function_space.py +255 -0
- warp/_src/fem/space/nanogrid_function_space.py +199 -0
- warp/_src/fem/space/partition.py +435 -0
- warp/_src/fem/space/quadmesh_function_space.py +222 -0
- warp/_src/fem/space/restriction.py +221 -0
- warp/_src/fem/space/shape/__init__.py +152 -0
- warp/_src/fem/space/shape/cube_shape_function.py +1107 -0
- warp/_src/fem/space/shape/shape_function.py +134 -0
- warp/_src/fem/space/shape/square_shape_function.py +928 -0
- warp/_src/fem/space/shape/tet_shape_function.py +829 -0
- warp/_src/fem/space/shape/triangle_shape_function.py +674 -0
- warp/_src/fem/space/tetmesh_function_space.py +270 -0
- warp/_src/fem/space/topology.py +461 -0
- warp/_src/fem/space/trimesh_function_space.py +193 -0
- warp/_src/fem/types.py +114 -0
- warp/_src/fem/utils.py +488 -0
- warp/_src/jax.py +188 -0
- warp/_src/jax_experimental/__init__.py +14 -0
- warp/_src/jax_experimental/custom_call.py +389 -0
- warp/_src/jax_experimental/ffi.py +1286 -0
- warp/_src/jax_experimental/xla_ffi.py +658 -0
- warp/_src/marching_cubes.py +710 -0
- warp/_src/math.py +416 -0
- warp/_src/optim/__init__.py +14 -0
- warp/_src/optim/adam.py +165 -0
- warp/_src/optim/linear.py +1608 -0
- warp/_src/optim/sgd.py +114 -0
- warp/_src/paddle.py +408 -0
- warp/_src/render/__init__.py +14 -0
- warp/_src/render/imgui_manager.py +291 -0
- warp/_src/render/render_opengl.py +3638 -0
- warp/_src/render/render_usd.py +939 -0
- warp/_src/render/utils.py +162 -0
- warp/_src/sparse.py +2718 -0
- warp/_src/tape.py +1208 -0
- warp/_src/thirdparty/__init__.py +0 -0
- warp/_src/thirdparty/appdirs.py +598 -0
- warp/_src/thirdparty/dlpack.py +145 -0
- warp/_src/thirdparty/unittest_parallel.py +676 -0
- warp/_src/torch.py +393 -0
- warp/_src/types.py +5888 -0
- warp/_src/utils.py +1695 -0
- warp/autograd.py +33 -0
- warp/bin/libwarp-clang.dylib +0 -0
- warp/bin/libwarp.dylib +0 -0
- warp/build.py +29 -0
- warp/build_dll.py +24 -0
- warp/codegen.py +24 -0
- warp/constants.py +24 -0
- warp/context.py +33 -0
- warp/dlpack.py +24 -0
- warp/examples/__init__.py +24 -0
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nonuniform.usd +0 -0
- warp/examples/assets/nvidia_logo.png +0 -0
- warp/examples/assets/pixel.jpg +0 -0
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/assets/square_cloth.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +389 -0
- warp/examples/benchmarks/benchmark_cloth.py +296 -0
- warp/examples/benchmarks/benchmark_cloth_cupy.py +96 -0
- warp/examples/benchmarks/benchmark_cloth_jax.py +105 -0
- warp/examples/benchmarks/benchmark_cloth_numba.py +161 -0
- warp/examples/benchmarks/benchmark_cloth_numpy.py +85 -0
- warp/examples/benchmarks/benchmark_cloth_paddle.py +94 -0
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +94 -0
- warp/examples/benchmarks/benchmark_cloth_taichi.py +120 -0
- warp/examples/benchmarks/benchmark_cloth_warp.py +153 -0
- warp/examples/benchmarks/benchmark_gemm.py +164 -0
- warp/examples/benchmarks/benchmark_interop_paddle.py +166 -0
- warp/examples/benchmarks/benchmark_interop_torch.py +166 -0
- warp/examples/benchmarks/benchmark_launches.py +301 -0
- warp/examples/benchmarks/benchmark_tile_load_store.py +103 -0
- warp/examples/benchmarks/benchmark_tile_sort.py +155 -0
- warp/examples/browse.py +37 -0
- warp/examples/core/example_cupy.py +86 -0
- warp/examples/core/example_dem.py +241 -0
- warp/examples/core/example_fluid.py +299 -0
- warp/examples/core/example_graph_capture.py +150 -0
- warp/examples/core/example_marching_cubes.py +195 -0
- warp/examples/core/example_mesh.py +180 -0
- warp/examples/core/example_mesh_intersect.py +211 -0
- warp/examples/core/example_nvdb.py +182 -0
- warp/examples/core/example_raycast.py +111 -0
- warp/examples/core/example_raymarch.py +205 -0
- warp/examples/core/example_render_opengl.py +290 -0
- warp/examples/core/example_sample_mesh.py +300 -0
- warp/examples/core/example_sph.py +411 -0
- warp/examples/core/example_spin_lock.py +93 -0
- warp/examples/core/example_torch.py +211 -0
- warp/examples/core/example_wave.py +269 -0
- warp/examples/core/example_work_queue.py +118 -0
- warp/examples/distributed/example_jacobi_mpi.py +506 -0
- warp/examples/fem/example_adaptive_grid.py +286 -0
- warp/examples/fem/example_apic_fluid.py +469 -0
- warp/examples/fem/example_burgers.py +261 -0
- warp/examples/fem/example_convection_diffusion.py +181 -0
- warp/examples/fem/example_convection_diffusion_dg.py +225 -0
- warp/examples/fem/example_darcy_ls_optimization.py +489 -0
- warp/examples/fem/example_deformed_geometry.py +172 -0
- warp/examples/fem/example_diffusion.py +196 -0
- warp/examples/fem/example_diffusion_3d.py +225 -0
- warp/examples/fem/example_diffusion_mgpu.py +225 -0
- warp/examples/fem/example_distortion_energy.py +228 -0
- warp/examples/fem/example_elastic_shape_optimization.py +387 -0
- warp/examples/fem/example_magnetostatics.py +242 -0
- warp/examples/fem/example_mixed_elasticity.py +293 -0
- warp/examples/fem/example_navier_stokes.py +263 -0
- warp/examples/fem/example_nonconforming_contact.py +300 -0
- warp/examples/fem/example_stokes.py +213 -0
- warp/examples/fem/example_stokes_transfer.py +262 -0
- warp/examples/fem/example_streamlines.py +357 -0
- warp/examples/fem/utils.py +1047 -0
- warp/examples/interop/example_jax_callable.py +146 -0
- warp/examples/interop/example_jax_ffi_callback.py +132 -0
- warp/examples/interop/example_jax_kernel.py +232 -0
- warp/examples/optim/example_diffray.py +561 -0
- warp/examples/optim/example_fluid_checkpoint.py +497 -0
- warp/examples/tile/example_tile_block_cholesky.py +502 -0
- warp/examples/tile/example_tile_cholesky.py +88 -0
- warp/examples/tile/example_tile_convolution.py +66 -0
- warp/examples/tile/example_tile_fft.py +55 -0
- warp/examples/tile/example_tile_filtering.py +113 -0
- warp/examples/tile/example_tile_matmul.py +85 -0
- warp/examples/tile/example_tile_mcgp.py +191 -0
- warp/examples/tile/example_tile_mlp.py +385 -0
- warp/examples/tile/example_tile_nbody.py +199 -0
- warp/fabric.py +24 -0
- warp/fem/__init__.py +173 -0
- warp/fem/adaptivity.py +26 -0
- warp/fem/cache.py +30 -0
- warp/fem/dirichlet.py +24 -0
- warp/fem/field/__init__.py +24 -0
- warp/fem/field/field.py +26 -0
- warp/fem/geometry/__init__.py +21 -0
- warp/fem/geometry/closest_point.py +31 -0
- warp/fem/linalg.py +38 -0
- warp/fem/operator.py +32 -0
- warp/fem/polynomial.py +29 -0
- warp/fem/space/__init__.py +22 -0
- warp/fem/space/basis_space.py +24 -0
- warp/fem/space/shape/__init__.py +68 -0
- warp/fem/space/topology.py +24 -0
- warp/fem/types.py +24 -0
- warp/fem/utils.py +32 -0
- warp/jax.py +29 -0
- warp/jax_experimental/__init__.py +29 -0
- warp/jax_experimental/custom_call.py +29 -0
- warp/jax_experimental/ffi.py +39 -0
- warp/jax_experimental/xla_ffi.py +24 -0
- warp/marching_cubes.py +24 -0
- warp/math.py +37 -0
- warp/native/array.h +1687 -0
- warp/native/builtin.h +2327 -0
- warp/native/bvh.cpp +562 -0
- warp/native/bvh.cu +826 -0
- warp/native/bvh.h +555 -0
- warp/native/clang/clang.cpp +541 -0
- warp/native/coloring.cpp +622 -0
- warp/native/crt.cpp +51 -0
- warp/native/crt.h +568 -0
- warp/native/cuda_crt.h +1058 -0
- warp/native/cuda_util.cpp +677 -0
- warp/native/cuda_util.h +313 -0
- warp/native/error.cpp +77 -0
- warp/native/error.h +36 -0
- warp/native/exports.h +2023 -0
- warp/native/fabric.h +246 -0
- warp/native/hashgrid.cpp +311 -0
- warp/native/hashgrid.cu +89 -0
- warp/native/hashgrid.h +240 -0
- warp/native/initializer_array.h +41 -0
- warp/native/intersect.h +1253 -0
- warp/native/intersect_adj.h +375 -0
- warp/native/intersect_tri.h +348 -0
- warp/native/mat.h +5189 -0
- warp/native/mathdx.cpp +93 -0
- warp/native/matnn.h +221 -0
- warp/native/mesh.cpp +266 -0
- warp/native/mesh.cu +406 -0
- warp/native/mesh.h +2097 -0
- warp/native/nanovdb/GridHandle.h +533 -0
- warp/native/nanovdb/HostBuffer.h +591 -0
- warp/native/nanovdb/NanoVDB.h +6246 -0
- warp/native/nanovdb/NodeManager.h +323 -0
- warp/native/nanovdb/PNanoVDB.h +3390 -0
- warp/native/noise.h +859 -0
- warp/native/quat.h +1664 -0
- warp/native/rand.h +342 -0
- warp/native/range.h +145 -0
- warp/native/reduce.cpp +174 -0
- warp/native/reduce.cu +363 -0
- warp/native/runlength_encode.cpp +79 -0
- warp/native/runlength_encode.cu +61 -0
- warp/native/scan.cpp +47 -0
- warp/native/scan.cu +55 -0
- warp/native/scan.h +23 -0
- warp/native/solid_angle.h +466 -0
- warp/native/sort.cpp +251 -0
- warp/native/sort.cu +286 -0
- warp/native/sort.h +35 -0
- warp/native/sparse.cpp +241 -0
- warp/native/sparse.cu +435 -0
- warp/native/spatial.h +1306 -0
- warp/native/svd.h +727 -0
- warp/native/temp_buffer.h +46 -0
- warp/native/tile.h +4124 -0
- warp/native/tile_radix_sort.h +1112 -0
- warp/native/tile_reduce.h +838 -0
- warp/native/tile_scan.h +240 -0
- warp/native/tuple.h +189 -0
- warp/native/vec.h +2199 -0
- warp/native/version.h +23 -0
- warp/native/volume.cpp +501 -0
- warp/native/volume.cu +68 -0
- warp/native/volume.h +970 -0
- warp/native/volume_builder.cu +483 -0
- warp/native/volume_builder.h +52 -0
- warp/native/volume_impl.h +70 -0
- warp/native/warp.cpp +1143 -0
- warp/native/warp.cu +4604 -0
- warp/native/warp.h +358 -0
- warp/optim/__init__.py +20 -0
- warp/optim/adam.py +24 -0
- warp/optim/linear.py +35 -0
- warp/optim/sgd.py +24 -0
- warp/paddle.py +24 -0
- warp/py.typed +0 -0
- warp/render/__init__.py +22 -0
- warp/render/imgui_manager.py +29 -0
- warp/render/render_opengl.py +24 -0
- warp/render/render_usd.py +24 -0
- warp/render/utils.py +24 -0
- warp/sparse.py +51 -0
- warp/tape.py +24 -0
- warp/tests/__init__.py +1 -0
- warp/tests/__main__.py +4 -0
- warp/tests/assets/curlnoise_golden.npy +0 -0
- warp/tests/assets/mlp_golden.npy +0 -0
- warp/tests/assets/pixel.npy +0 -0
- warp/tests/assets/pnoise_golden.npy +0 -0
- warp/tests/assets/spiky.usd +0 -0
- warp/tests/assets/test_grid.nvdb +0 -0
- warp/tests/assets/test_index_grid.nvdb +0 -0
- warp/tests/assets/test_int32_grid.nvdb +0 -0
- warp/tests/assets/test_vec_grid.nvdb +0 -0
- warp/tests/assets/torus.nvdb +0 -0
- warp/tests/assets/torus.usda +105 -0
- warp/tests/aux_test_class_kernel.py +34 -0
- warp/tests/aux_test_compile_consts_dummy.py +18 -0
- warp/tests/aux_test_conditional_unequal_types_kernels.py +29 -0
- warp/tests/aux_test_dependent.py +29 -0
- warp/tests/aux_test_grad_customs.py +29 -0
- warp/tests/aux_test_instancing_gc.py +26 -0
- warp/tests/aux_test_module_aot.py +7 -0
- warp/tests/aux_test_module_unload.py +23 -0
- warp/tests/aux_test_name_clash1.py +40 -0
- warp/tests/aux_test_name_clash2.py +40 -0
- warp/tests/aux_test_reference.py +9 -0
- warp/tests/aux_test_reference_reference.py +8 -0
- warp/tests/aux_test_square.py +16 -0
- warp/tests/aux_test_unresolved_func.py +22 -0
- warp/tests/aux_test_unresolved_symbol.py +22 -0
- warp/tests/cuda/__init__.py +0 -0
- warp/tests/cuda/test_async.py +676 -0
- warp/tests/cuda/test_conditional_captures.py +1147 -0
- warp/tests/cuda/test_ipc.py +124 -0
- warp/tests/cuda/test_mempool.py +233 -0
- warp/tests/cuda/test_multigpu.py +169 -0
- warp/tests/cuda/test_peer.py +139 -0
- warp/tests/cuda/test_pinned.py +84 -0
- warp/tests/cuda/test_streams.py +691 -0
- warp/tests/geometry/__init__.py +0 -0
- warp/tests/geometry/test_bvh.py +335 -0
- warp/tests/geometry/test_hash_grid.py +259 -0
- warp/tests/geometry/test_marching_cubes.py +294 -0
- warp/tests/geometry/test_mesh.py +318 -0
- warp/tests/geometry/test_mesh_query_aabb.py +392 -0
- warp/tests/geometry/test_mesh_query_point.py +935 -0
- warp/tests/geometry/test_mesh_query_ray.py +323 -0
- warp/tests/geometry/test_volume.py +1103 -0
- warp/tests/geometry/test_volume_write.py +346 -0
- warp/tests/interop/__init__.py +0 -0
- warp/tests/interop/test_dlpack.py +730 -0
- warp/tests/interop/test_jax.py +1673 -0
- warp/tests/interop/test_paddle.py +800 -0
- warp/tests/interop/test_torch.py +1001 -0
- warp/tests/run_coverage_serial.py +39 -0
- warp/tests/test_adam.py +162 -0
- warp/tests/test_arithmetic.py +1096 -0
- warp/tests/test_array.py +3756 -0
- warp/tests/test_array_reduce.py +156 -0
- warp/tests/test_assert.py +303 -0
- warp/tests/test_atomic.py +336 -0
- warp/tests/test_atomic_bitwise.py +209 -0
- warp/tests/test_atomic_cas.py +312 -0
- warp/tests/test_bool.py +220 -0
- warp/tests/test_builtins_resolution.py +732 -0
- warp/tests/test_closest_point_edge_edge.py +327 -0
- warp/tests/test_codegen.py +974 -0
- warp/tests/test_codegen_instancing.py +1495 -0
- warp/tests/test_compile_consts.py +215 -0
- warp/tests/test_conditional.py +298 -0
- warp/tests/test_context.py +35 -0
- warp/tests/test_copy.py +319 -0
- warp/tests/test_ctypes.py +618 -0
- warp/tests/test_dense.py +73 -0
- warp/tests/test_devices.py +127 -0
- warp/tests/test_enum.py +136 -0
- warp/tests/test_examples.py +424 -0
- warp/tests/test_fabricarray.py +998 -0
- warp/tests/test_fast_math.py +72 -0
- warp/tests/test_fem.py +2204 -0
- warp/tests/test_fixedarray.py +229 -0
- warp/tests/test_fp16.py +136 -0
- warp/tests/test_func.py +501 -0
- warp/tests/test_future_annotations.py +100 -0
- warp/tests/test_generics.py +656 -0
- warp/tests/test_grad.py +893 -0
- warp/tests/test_grad_customs.py +339 -0
- warp/tests/test_grad_debug.py +341 -0
- warp/tests/test_implicit_init.py +411 -0
- warp/tests/test_import.py +45 -0
- warp/tests/test_indexedarray.py +1140 -0
- warp/tests/test_intersect.py +103 -0
- warp/tests/test_iter.py +76 -0
- warp/tests/test_large.py +177 -0
- warp/tests/test_launch.py +411 -0
- warp/tests/test_lerp.py +151 -0
- warp/tests/test_linear_solvers.py +223 -0
- warp/tests/test_lvalue.py +427 -0
- warp/tests/test_map.py +526 -0
- warp/tests/test_mat.py +3515 -0
- warp/tests/test_mat_assign_copy.py +178 -0
- warp/tests/test_mat_constructors.py +573 -0
- warp/tests/test_mat_lite.py +122 -0
- warp/tests/test_mat_scalar_ops.py +2913 -0
- warp/tests/test_math.py +212 -0
- warp/tests/test_module_aot.py +287 -0
- warp/tests/test_module_hashing.py +258 -0
- warp/tests/test_modules_lite.py +70 -0
- warp/tests/test_noise.py +252 -0
- warp/tests/test_operators.py +299 -0
- warp/tests/test_options.py +129 -0
- warp/tests/test_overwrite.py +551 -0
- warp/tests/test_print.py +408 -0
- warp/tests/test_quat.py +2653 -0
- warp/tests/test_quat_assign_copy.py +145 -0
- warp/tests/test_rand.py +339 -0
- warp/tests/test_reload.py +303 -0
- warp/tests/test_rounding.py +157 -0
- warp/tests/test_runlength_encode.py +196 -0
- warp/tests/test_scalar_ops.py +133 -0
- warp/tests/test_smoothstep.py +108 -0
- warp/tests/test_snippet.py +318 -0
- warp/tests/test_sparse.py +845 -0
- warp/tests/test_spatial.py +2859 -0
- warp/tests/test_spatial_assign_copy.py +160 -0
- warp/tests/test_special_values.py +361 -0
- warp/tests/test_static.py +640 -0
- warp/tests/test_struct.py +901 -0
- warp/tests/test_tape.py +242 -0
- warp/tests/test_transient_module.py +93 -0
- warp/tests/test_triangle_closest_point.py +192 -0
- warp/tests/test_tuple.py +361 -0
- warp/tests/test_types.py +615 -0
- warp/tests/test_utils.py +594 -0
- warp/tests/test_vec.py +1408 -0
- warp/tests/test_vec_assign_copy.py +143 -0
- warp/tests/test_vec_constructors.py +325 -0
- warp/tests/test_vec_lite.py +80 -0
- warp/tests/test_vec_scalar_ops.py +2327 -0
- warp/tests/test_verify_fp.py +100 -0
- warp/tests/test_version.py +75 -0
- warp/tests/tile/__init__.py +0 -0
- warp/tests/tile/test_tile.py +1519 -0
- warp/tests/tile/test_tile_atomic_bitwise.py +403 -0
- warp/tests/tile/test_tile_cholesky.py +608 -0
- warp/tests/tile/test_tile_load.py +724 -0
- warp/tests/tile/test_tile_mathdx.py +156 -0
- warp/tests/tile/test_tile_matmul.py +179 -0
- warp/tests/tile/test_tile_mlp.py +400 -0
- warp/tests/tile/test_tile_reduce.py +950 -0
- warp/tests/tile/test_tile_shared_memory.py +376 -0
- warp/tests/tile/test_tile_sort.py +121 -0
- warp/tests/tile/test_tile_view.py +173 -0
- warp/tests/unittest_serial.py +47 -0
- warp/tests/unittest_suites.py +430 -0
- warp/tests/unittest_utils.py +469 -0
- warp/tests/walkthrough_debug.py +95 -0
- warp/torch.py +24 -0
- warp/types.py +51 -0
- warp/utils.py +31 -0
- warp_lang-1.10.0.dist-info/METADATA +459 -0
- warp_lang-1.10.0.dist-info/RECORD +468 -0
- warp_lang-1.10.0.dist-info/WHEEL +5 -0
- warp_lang-1.10.0.dist-info/licenses/LICENSE.md +176 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/Gaia-LICENSE.txt +6 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/appdirs-LICENSE.txt +22 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/asset_pixel_jpg-LICENSE.txt +3 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/cuda-LICENSE.txt +1582 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/dlpack-LICENSE.txt +201 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/fp16-LICENSE.txt +28 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/libmathdx-LICENSE.txt +220 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/llvm-LICENSE.txt +279 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/moller-LICENSE.txt +16 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/nanovdb-LICENSE.txt +2 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/nvrtc-LICENSE.txt +1592 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/svd-LICENSE.txt +23 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/unittest_parallel-LICENSE.txt +21 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/usd-LICENSE.txt +213 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/windingnumber-LICENSE.txt +21 -0
- warp_lang-1.10.0.dist-info/top_level.txt +1 -0
warp/native/vec.h
ADDED
|
@@ -0,0 +1,2199 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
3
|
+
* SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
*
|
|
5
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
6
|
+
* you may not use this file except in compliance with the License.
|
|
7
|
+
* You may obtain a copy of the License at
|
|
8
|
+
*
|
|
9
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
10
|
+
*
|
|
11
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
12
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
13
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
14
|
+
* See the License for the specific language governing permissions and
|
|
15
|
+
* limitations under the License.
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
#pragma once
|
|
19
|
+
|
|
20
|
+
#include "initializer_array.h"
|
|
21
|
+
|
|
22
|
+
namespace wp
|
|
23
|
+
{
|
|
24
|
+
|
|
25
|
+
template<unsigned Length, typename Type>
|
|
26
|
+
struct vec_t
|
|
27
|
+
{
|
|
28
|
+
Type c[Length < 1 ? 1 : Length];
|
|
29
|
+
|
|
30
|
+
inline CUDA_CALLABLE vec_t()
|
|
31
|
+
: c()
|
|
32
|
+
{}
|
|
33
|
+
|
|
34
|
+
inline CUDA_CALLABLE vec_t(Type s)
|
|
35
|
+
{
|
|
36
|
+
for( unsigned i=0; i < Length; ++i )
|
|
37
|
+
{
|
|
38
|
+
c[i] = s;
|
|
39
|
+
}
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
template <typename OtherType>
|
|
43
|
+
inline explicit CUDA_CALLABLE vec_t(const vec_t<Length, OtherType>& other)
|
|
44
|
+
{
|
|
45
|
+
for( unsigned i=0; i < Length; ++i )
|
|
46
|
+
{
|
|
47
|
+
c[i] = static_cast<Type>(other[i]);
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
inline CUDA_CALLABLE vec_t(Type x, Type y)
|
|
52
|
+
{
|
|
53
|
+
assert(Length == 2);
|
|
54
|
+
c[0]=x;
|
|
55
|
+
c[1]=y;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
inline CUDA_CALLABLE vec_t(Type x, Type y, Type z)
|
|
59
|
+
{
|
|
60
|
+
assert(Length == 3);
|
|
61
|
+
c[0]=x;
|
|
62
|
+
c[1]=y;
|
|
63
|
+
c[2]=z;
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
inline CUDA_CALLABLE vec_t(Type x, Type y, Type z, Type w)
|
|
68
|
+
{
|
|
69
|
+
assert(Length == 4);
|
|
70
|
+
c[0]=x;
|
|
71
|
+
c[1]=y;
|
|
72
|
+
c[2]=z;
|
|
73
|
+
c[3]=w;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
inline CUDA_CALLABLE vec_t(const initializer_array<Length, Type> &l)
|
|
77
|
+
{
|
|
78
|
+
for( unsigned i=0; i < Length; ++i )
|
|
79
|
+
{
|
|
80
|
+
c[i] = l[i];
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
// special screw vector constructor for spatial_vectors:
|
|
85
|
+
inline CUDA_CALLABLE vec_t(vec_t<3,Type> w, vec_t<3,Type> v)
|
|
86
|
+
{
|
|
87
|
+
c[0] = w[0];
|
|
88
|
+
c[1] = w[1];
|
|
89
|
+
c[2] = w[2];
|
|
90
|
+
c[3] = v[0];
|
|
91
|
+
c[4] = v[1];
|
|
92
|
+
c[5] = v[2];
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
inline CUDA_CALLABLE Type operator[](int index) const
|
|
96
|
+
{
|
|
97
|
+
assert(index < Length);
|
|
98
|
+
return c[index];
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
inline CUDA_CALLABLE Type& operator[](int index)
|
|
102
|
+
{
|
|
103
|
+
assert(index < Length);
|
|
104
|
+
return c[index];
|
|
105
|
+
}
|
|
106
|
+
};
|
|
107
|
+
|
|
108
|
+
using vec2b = vec_t<2,int8>;
|
|
109
|
+
using vec3b = vec_t<3,int8>;
|
|
110
|
+
using vec4b = vec_t<4,int8>;
|
|
111
|
+
using vec2ub = vec_t<2,uint8>;
|
|
112
|
+
using vec3ub = vec_t<3,uint8>;
|
|
113
|
+
using vec4ub = vec_t<4,uint8>;
|
|
114
|
+
|
|
115
|
+
using vec2s = vec_t<2,int16>;
|
|
116
|
+
using vec3s = vec_t<3,int16>;
|
|
117
|
+
using vec4s = vec_t<4,int16>;
|
|
118
|
+
using vec2us = vec_t<2,uint16>;
|
|
119
|
+
using vec3us = vec_t<3,uint16>;
|
|
120
|
+
using vec4us = vec_t<4,uint16>;
|
|
121
|
+
|
|
122
|
+
using vec2i = vec_t<2,int32>;
|
|
123
|
+
using vec3i = vec_t<3,int32>;
|
|
124
|
+
using vec4i = vec_t<4,int32>;
|
|
125
|
+
using vec2ui = vec_t<2,uint32>;
|
|
126
|
+
using vec3ui = vec_t<3,uint32>;
|
|
127
|
+
using vec4ui = vec_t<4,uint32>;
|
|
128
|
+
|
|
129
|
+
using vec2l = vec_t<2,int64>;
|
|
130
|
+
using vec3l = vec_t<3,int64>;
|
|
131
|
+
using vec4l = vec_t<4,int64>;
|
|
132
|
+
using vec2ul = vec_t<2,uint64>;
|
|
133
|
+
using vec3ul = vec_t<3,uint64>;
|
|
134
|
+
using vec4ul = vec_t<4,uint64>;
|
|
135
|
+
|
|
136
|
+
using vec2h = vec_t<2,half>;
|
|
137
|
+
using vec3h = vec_t<3,half>;
|
|
138
|
+
using vec4h = vec_t<4,half>;
|
|
139
|
+
|
|
140
|
+
using vec2 = vec_t<2,float>;
|
|
141
|
+
using vec3 = vec_t<3,float>;
|
|
142
|
+
using vec4 = vec_t<4,float>;
|
|
143
|
+
|
|
144
|
+
using vec2f = vec_t<2,float>;
|
|
145
|
+
using vec3f = vec_t<3,float>;
|
|
146
|
+
using vec4f = vec_t<4,float>;
|
|
147
|
+
|
|
148
|
+
using vec2d = vec_t<2,double>;
|
|
149
|
+
using vec3d = vec_t<3,double>;
|
|
150
|
+
using vec4d = vec_t<4,double>;
|
|
151
|
+
|
|
152
|
+
// Type trait to detect if a type is a vec_t
|
|
153
|
+
template<typename T>
|
|
154
|
+
struct is_vector {
|
|
155
|
+
static constexpr bool value = false;
|
|
156
|
+
};
|
|
157
|
+
|
|
158
|
+
template<unsigned Length, typename Type>
|
|
159
|
+
struct is_vector<vec_t<Length, Type>> {
|
|
160
|
+
static constexpr bool value = true;
|
|
161
|
+
};
|
|
162
|
+
|
|
163
|
+
template<unsigned Length, typename Type>
|
|
164
|
+
inline CUDA_CALLABLE vec_t<Length, Type> operator - (const vec_t<Length, Type>& x)
|
|
165
|
+
{
|
|
166
|
+
vec_t<Length, Type> ret;
|
|
167
|
+
for(unsigned i=0; i < Length; ++i)
|
|
168
|
+
{
|
|
169
|
+
ret[i] = -x[i];
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
return ret;
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
template<unsigned Length, typename Type>
|
|
176
|
+
CUDA_CALLABLE inline vec_t<Length, Type> pos(const vec_t<Length, Type>& x)
|
|
177
|
+
{
|
|
178
|
+
return x;
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
template<unsigned Length, typename Type>
|
|
182
|
+
CUDA_CALLABLE inline vec_t<Length, Type> neg(const vec_t<Length, Type>& x)
|
|
183
|
+
{
|
|
184
|
+
return -x;
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
template<typename Type>
|
|
188
|
+
CUDA_CALLABLE inline vec_t<3, Type> neg(const vec_t<3, Type>& x)
|
|
189
|
+
{
|
|
190
|
+
return vec_t<3, Type>(-x.c[0], -x.c[1], -x.c[2]);
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
template<typename Type>
|
|
194
|
+
CUDA_CALLABLE inline vec_t<2, Type> neg(const vec_t<2, Type>& x)
|
|
195
|
+
{
|
|
196
|
+
return vec_t<2, Type>(-x.c[0], -x.c[1]);
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
template<unsigned Length, typename Type>
|
|
200
|
+
CUDA_CALLABLE inline void adj_neg(const vec_t<Length, Type>& x, vec_t<Length, Type>& adj_x, const vec_t<Length, Type>& adj_ret)
|
|
201
|
+
{
|
|
202
|
+
adj_x -= adj_ret;
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
// equality:
|
|
206
|
+
template<unsigned Length, typename Type>
|
|
207
|
+
inline CUDA_CALLABLE bool operator ==(const vec_t<Length, Type>& a, const vec_t<Length, Type>& b)
|
|
208
|
+
{
|
|
209
|
+
for( unsigned i=0; i < Length; ++i )
|
|
210
|
+
{
|
|
211
|
+
if(a[i] != b[i])
|
|
212
|
+
{
|
|
213
|
+
return false;
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
return true;
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
// scalar multiplication:
|
|
220
|
+
template<unsigned Length, typename Type>
|
|
221
|
+
inline CUDA_CALLABLE vec_t<Length, Type> mul(vec_t<Length, Type> a, Type s)
|
|
222
|
+
{
|
|
223
|
+
vec_t<Length, Type> ret;
|
|
224
|
+
for( unsigned i=0; i < Length; ++i )
|
|
225
|
+
{
|
|
226
|
+
ret[i] = a[i] * s;
|
|
227
|
+
}
|
|
228
|
+
return ret;
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
template<typename Type>
|
|
232
|
+
inline CUDA_CALLABLE vec_t<3, Type> mul(vec_t<3, Type> a, Type s)
|
|
233
|
+
{
|
|
234
|
+
return vec_t<3, Type>(a.c[0]*s,a.c[1]*s,a.c[2]*s);
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
template<typename Type>
|
|
238
|
+
inline CUDA_CALLABLE vec_t<2, Type> mul(vec_t<2, Type> a, Type s)
|
|
239
|
+
{
|
|
240
|
+
return vec_t<2, Type>(a.c[0]*s,a.c[1]*s);
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
template<unsigned Length, typename Type>
|
|
244
|
+
inline CUDA_CALLABLE vec_t<Length, Type> mul(Type s, vec_t<Length, Type> a)
|
|
245
|
+
{
|
|
246
|
+
return mul(a, s);
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
template<unsigned Length, typename Type>
|
|
250
|
+
inline CUDA_CALLABLE vec_t<Length, Type> operator*(Type s, vec_t<Length, Type> a)
|
|
251
|
+
{
|
|
252
|
+
return mul(a, s);
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
template<unsigned Length, typename Type>
|
|
256
|
+
inline CUDA_CALLABLE vec_t<Length, Type> operator*(vec_t<Length, Type> a, Type s)
|
|
257
|
+
{
|
|
258
|
+
return mul(a, s);
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
// component wise multiplication:
|
|
263
|
+
template<unsigned Length, typename Type>
|
|
264
|
+
inline CUDA_CALLABLE vec_t<Length, Type> cw_mul(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
265
|
+
{
|
|
266
|
+
vec_t<Length, Type> ret;
|
|
267
|
+
for( unsigned i=0; i < Length; ++i )
|
|
268
|
+
{
|
|
269
|
+
ret[i] = a[i] * b[i];
|
|
270
|
+
}
|
|
271
|
+
return ret;
|
|
272
|
+
}
|
|
273
|
+
|
|
274
|
+
// division
|
|
275
|
+
template<unsigned Length, typename Type>
|
|
276
|
+
inline CUDA_CALLABLE vec_t<Length, Type> div(vec_t<Length, Type> a, Type s)
|
|
277
|
+
{
|
|
278
|
+
vec_t<Length, Type> ret;
|
|
279
|
+
for( unsigned i=0; i < Length; ++i )
|
|
280
|
+
{
|
|
281
|
+
ret[i] = a[i] / s;
|
|
282
|
+
}
|
|
283
|
+
return ret;
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
template<typename Type>
|
|
287
|
+
inline CUDA_CALLABLE vec_t<3, Type> div(vec_t<3, Type> a, Type s)
|
|
288
|
+
{
|
|
289
|
+
return vec_t<3, Type>(a.c[0]/s,a.c[1]/s,a.c[2]/s);
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
template<typename Type>
|
|
293
|
+
inline CUDA_CALLABLE vec_t<2, Type> div(vec_t<2, Type> a, Type s)
|
|
294
|
+
{
|
|
295
|
+
return vec_t<2, Type>(a.c[0]/s,a.c[1]/s);
|
|
296
|
+
}
|
|
297
|
+
|
|
298
|
+
template<unsigned Length, typename Type>
|
|
299
|
+
inline CUDA_CALLABLE vec_t<Length, Type> div(Type s, vec_t<Length, Type> a)
|
|
300
|
+
{
|
|
301
|
+
vec_t<Length, Type> ret;
|
|
302
|
+
for (unsigned i=0; i < Length; ++i)
|
|
303
|
+
{
|
|
304
|
+
ret[i] = s / a[i];
|
|
305
|
+
}
|
|
306
|
+
return ret;
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
template<typename Type>
|
|
310
|
+
inline CUDA_CALLABLE vec_t<3, Type> div(Type s, vec_t<3, Type> a)
|
|
311
|
+
{
|
|
312
|
+
return vec_t<3, Type>(s/a.c[0],s/a.c[1],s/a.c[2]);
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
template<typename Type>
|
|
316
|
+
inline CUDA_CALLABLE vec_t<2, Type> div(Type s, vec_t<2, Type> a)
|
|
317
|
+
{
|
|
318
|
+
return vec_t<2, Type>(s/a.c[0],s/a.c[1]);
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
template<unsigned Length, typename Type>
|
|
322
|
+
inline CUDA_CALLABLE vec_t<Length, Type> operator / (vec_t<Length, Type> a, Type s)
|
|
323
|
+
{
|
|
324
|
+
return div(a,s);
|
|
325
|
+
}
|
|
326
|
+
|
|
327
|
+
template<unsigned Length, typename Type>
|
|
328
|
+
inline CUDA_CALLABLE vec_t<Length, Type> operator / (Type s, vec_t<Length, Type> a)
|
|
329
|
+
{
|
|
330
|
+
return div(s, a);
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
// component wise division
|
|
334
|
+
template<unsigned Length, typename Type>
|
|
335
|
+
inline CUDA_CALLABLE vec_t<Length, Type> cw_div(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
336
|
+
{
|
|
337
|
+
vec_t<Length, Type> ret;
|
|
338
|
+
for( unsigned i=0; i < Length; ++i )
|
|
339
|
+
{
|
|
340
|
+
ret[i] = a[i] / b[i];
|
|
341
|
+
}
|
|
342
|
+
return ret;
|
|
343
|
+
}
|
|
344
|
+
|
|
345
|
+
// addition
|
|
346
|
+
template<unsigned Length, typename Type>
|
|
347
|
+
inline CUDA_CALLABLE vec_t<Length, Type> add(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
348
|
+
{
|
|
349
|
+
vec_t<Length, Type> ret;
|
|
350
|
+
for( unsigned i=0; i < Length; ++i )
|
|
351
|
+
{
|
|
352
|
+
ret[i] = a[i] + b[i];
|
|
353
|
+
}
|
|
354
|
+
return ret;
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
template<typename Type>
|
|
358
|
+
inline CUDA_CALLABLE vec_t<2, Type> add(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
359
|
+
{
|
|
360
|
+
return vec_t<2, Type>( a.c[0] + b.c[0], a.c[1] + b.c[1]);
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
template<typename Type>
|
|
364
|
+
inline CUDA_CALLABLE vec_t<3, Type> add(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
365
|
+
{
|
|
366
|
+
return vec_t<3, Type>( a.c[0] + b.c[0], a.c[1] + b.c[1], a.c[2] + b.c[2]);
|
|
367
|
+
}
|
|
368
|
+
|
|
369
|
+
// subtraction
|
|
370
|
+
template<unsigned Length, typename Type>
|
|
371
|
+
inline CUDA_CALLABLE vec_t<Length, Type> sub(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
372
|
+
{
|
|
373
|
+
vec_t<Length, Type> ret;
|
|
374
|
+
for( unsigned i=0; i < Length; ++i )
|
|
375
|
+
{
|
|
376
|
+
ret[i] = Type(a[i] - b[i]);
|
|
377
|
+
}
|
|
378
|
+
return ret;
|
|
379
|
+
}
|
|
380
|
+
|
|
381
|
+
template<typename Type>
|
|
382
|
+
inline CUDA_CALLABLE vec_t<2, Type> sub(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
383
|
+
{
|
|
384
|
+
return vec_t<2, Type>( a.c[0] - b.c[0], a.c[1] - b.c[1]);
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
template<typename Type>
|
|
388
|
+
inline CUDA_CALLABLE vec_t<3, Type> sub(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
389
|
+
{
|
|
390
|
+
return vec_t<3, Type>( a.c[0] - b.c[0], a.c[1] - b.c[1], a.c[2] - b.c[2]);
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
// modulo
|
|
394
|
+
template<unsigned Length, typename Type>
|
|
395
|
+
inline CUDA_CALLABLE vec_t<Length, Type> mod(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
396
|
+
{
|
|
397
|
+
vec_t<Length, Type> ret;
|
|
398
|
+
for (unsigned i=0; i < Length; ++i)
|
|
399
|
+
{
|
|
400
|
+
ret[i] = mod(a[i], b[i]);
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
return ret;
|
|
404
|
+
}
|
|
405
|
+
|
|
406
|
+
template<typename Type>
|
|
407
|
+
inline CUDA_CALLABLE vec_t<2, Type> mod(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
408
|
+
{
|
|
409
|
+
return vec_t<2, Type>(mod(a.c[0], b.c[0]), mod(a.c[1], b.c[1]));
|
|
410
|
+
}
|
|
411
|
+
|
|
412
|
+
template<typename Type>
|
|
413
|
+
inline CUDA_CALLABLE vec_t<3, Type> mod(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
414
|
+
{
|
|
415
|
+
return vec_t<3, Type>(mod(a.c[0], b.c[0]), mod(a.c[1], b.c[1]), mod(a.c[2], b.c[2]));
|
|
416
|
+
}
|
|
417
|
+
|
|
418
|
+
// bitwise AND
|
|
419
|
+
template<unsigned Length, typename Type>
|
|
420
|
+
inline CUDA_CALLABLE vec_t<Length, Type> bit_and(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
421
|
+
{
|
|
422
|
+
vec_t<Length, Type> ret;
|
|
423
|
+
for( unsigned i=0; i < Length; ++i )
|
|
424
|
+
{
|
|
425
|
+
ret[i] = Type(a[i] & b[i]);
|
|
426
|
+
}
|
|
427
|
+
return ret;
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
template<typename Type>
|
|
431
|
+
inline CUDA_CALLABLE vec_t<2, Type> bit_and(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
432
|
+
{
|
|
433
|
+
return vec_t<2, Type>( a.c[0] & b.c[0], a.c[1] & b.c[1]);
|
|
434
|
+
}
|
|
435
|
+
|
|
436
|
+
template<typename Type>
|
|
437
|
+
inline CUDA_CALLABLE vec_t<3, Type> bit_and(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
438
|
+
{
|
|
439
|
+
return vec_t<3, Type>( a.c[0] & b.c[0], a.c[1] & b.c[1], a.c[2] & b.c[2]);
|
|
440
|
+
}
|
|
441
|
+
|
|
442
|
+
// bitwise OR
|
|
443
|
+
template<unsigned Length, typename Type>
|
|
444
|
+
inline CUDA_CALLABLE vec_t<Length, Type> bit_or(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
445
|
+
{
|
|
446
|
+
vec_t<Length, Type> ret;
|
|
447
|
+
for( unsigned i=0; i < Length; ++i )
|
|
448
|
+
{
|
|
449
|
+
ret[i] = Type(a[i] | b[i]);
|
|
450
|
+
}
|
|
451
|
+
return ret;
|
|
452
|
+
}
|
|
453
|
+
|
|
454
|
+
template<typename Type>
|
|
455
|
+
inline CUDA_CALLABLE vec_t<2, Type> bit_or(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
456
|
+
{
|
|
457
|
+
return vec_t<2, Type>( a.c[0] | b.c[0], a.c[1] | b.c[1]);
|
|
458
|
+
}
|
|
459
|
+
|
|
460
|
+
template<typename Type>
|
|
461
|
+
inline CUDA_CALLABLE vec_t<3, Type> bit_or(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
462
|
+
{
|
|
463
|
+
return vec_t<3, Type>( a.c[0] | b.c[0], a.c[1] | b.c[1], a.c[2] | b.c[2]);
|
|
464
|
+
}
|
|
465
|
+
|
|
466
|
+
// bitwise XOR
|
|
467
|
+
template<unsigned Length, typename Type>
|
|
468
|
+
inline CUDA_CALLABLE vec_t<Length, Type> bit_xor(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
469
|
+
{
|
|
470
|
+
vec_t<Length, Type> ret;
|
|
471
|
+
for( unsigned i=0; i < Length; ++i )
|
|
472
|
+
{
|
|
473
|
+
ret[i] = Type(a[i] ^ b[i]);
|
|
474
|
+
}
|
|
475
|
+
return ret;
|
|
476
|
+
}
|
|
477
|
+
|
|
478
|
+
template<typename Type>
|
|
479
|
+
inline CUDA_CALLABLE vec_t<2, Type> bit_xor(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
480
|
+
{
|
|
481
|
+
return vec_t<2, Type>( a.c[0] ^ b.c[0], a.c[1] ^ b.c[1]);
|
|
482
|
+
}
|
|
483
|
+
|
|
484
|
+
template<typename Type>
|
|
485
|
+
inline CUDA_CALLABLE vec_t<3, Type> bit_xor(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
486
|
+
{
|
|
487
|
+
return vec_t<3, Type>( a.c[0] ^ b.c[0], a.c[1] ^ b.c[1], a.c[2] ^ b.c[2]);
|
|
488
|
+
}
|
|
489
|
+
|
|
490
|
+
// left shift
|
|
491
|
+
template<unsigned Length, typename Type>
|
|
492
|
+
inline CUDA_CALLABLE vec_t<Length, Type> lshift(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
493
|
+
{
|
|
494
|
+
vec_t<Length, Type> ret;
|
|
495
|
+
for( unsigned i=0; i < Length; ++i )
|
|
496
|
+
{
|
|
497
|
+
ret[i] = Type(a[i] << b[i]);
|
|
498
|
+
}
|
|
499
|
+
return ret;
|
|
500
|
+
}
|
|
501
|
+
|
|
502
|
+
template<typename Type>
|
|
503
|
+
inline CUDA_CALLABLE vec_t<2, Type> lshift(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
504
|
+
{
|
|
505
|
+
return vec_t<2, Type>( a.c[0] << b.c[0], a.c[1] << b.c[1]);
|
|
506
|
+
}
|
|
507
|
+
|
|
508
|
+
template<typename Type>
|
|
509
|
+
inline CUDA_CALLABLE vec_t<3, Type> lshift(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
510
|
+
{
|
|
511
|
+
return vec_t<3, Type>( a.c[0] << b.c[0], a.c[1] << b.c[1], a.c[2] << b.c[2]);
|
|
512
|
+
}
|
|
513
|
+
|
|
514
|
+
// right shift
|
|
515
|
+
template<unsigned Length, typename Type>
|
|
516
|
+
inline CUDA_CALLABLE vec_t<Length, Type> rshift(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
517
|
+
{
|
|
518
|
+
vec_t<Length, Type> ret;
|
|
519
|
+
for( unsigned i=0; i < Length; ++i )
|
|
520
|
+
{
|
|
521
|
+
ret[i] = Type(a[i] >> b[i]);
|
|
522
|
+
}
|
|
523
|
+
return ret;
|
|
524
|
+
}
|
|
525
|
+
|
|
526
|
+
template<typename Type>
|
|
527
|
+
inline CUDA_CALLABLE vec_t<2, Type> rshift(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
528
|
+
{
|
|
529
|
+
return vec_t<2, Type>( a.c[0] >> b.c[0], a.c[1] >> b.c[1]);
|
|
530
|
+
}
|
|
531
|
+
|
|
532
|
+
template<typename Type>
|
|
533
|
+
inline CUDA_CALLABLE vec_t<3, Type> rshift(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
534
|
+
{
|
|
535
|
+
return vec_t<3, Type>( a.c[0] >> b.c[0], a.c[1] >> b.c[1], a.c[2] >> b.c[2]);
|
|
536
|
+
}
|
|
537
|
+
|
|
538
|
+
// invert
|
|
539
|
+
template<unsigned Length, typename Type>
|
|
540
|
+
inline CUDA_CALLABLE vec_t<Length,Type> invert(vec_t<Length,Type> v)
|
|
541
|
+
{
|
|
542
|
+
vec_t<Length,Type> ret;
|
|
543
|
+
for (unsigned i=0; i < Length; ++i)
|
|
544
|
+
{
|
|
545
|
+
ret[i] = ~v[i];
|
|
546
|
+
}
|
|
547
|
+
|
|
548
|
+
return ret;
|
|
549
|
+
}
|
|
550
|
+
|
|
551
|
+
// dot product:
|
|
552
|
+
template<unsigned Length, typename Type>
|
|
553
|
+
inline CUDA_CALLABLE Type dot(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
554
|
+
{
|
|
555
|
+
Type ret(0);
|
|
556
|
+
for( unsigned i=0; i < Length; ++i )
|
|
557
|
+
{
|
|
558
|
+
ret += a[i] * b[i];
|
|
559
|
+
}
|
|
560
|
+
return ret;
|
|
561
|
+
}
|
|
562
|
+
|
|
563
|
+
template<typename Type>
|
|
564
|
+
inline CUDA_CALLABLE Type dot(vec_t<2, Type> a, vec_t<2, Type> b)
|
|
565
|
+
{
|
|
566
|
+
return a.c[0] * b.c[0] + a.c[1] * b.c[1];
|
|
567
|
+
}
|
|
568
|
+
|
|
569
|
+
template<typename Type>
|
|
570
|
+
inline CUDA_CALLABLE Type dot(vec_t<3, Type> a, vec_t<3, Type> b)
|
|
571
|
+
{
|
|
572
|
+
return a.c[0] * b.c[0] + a.c[1] * b.c[1] + a.c[2] * b.c[2];
|
|
573
|
+
}
|
|
574
|
+
|
|
575
|
+
template<unsigned Length, typename Type>
|
|
576
|
+
inline CUDA_CALLABLE Type tensordot(vec_t<Length, Type> a, vec_t<Length, Type> b)
|
|
577
|
+
{
|
|
578
|
+
// corresponds to `np.tensordot()` with all axes being contracted
|
|
579
|
+
return dot(a, b);
|
|
580
|
+
}
|
|
581
|
+
|
|
582
|
+
|
|
583
|
+
template<unsigned Length, typename Type>
|
|
584
|
+
inline CUDA_CALLABLE Type extract(const vec_t<Length, Type> & a, int idx)
|
|
585
|
+
{
|
|
586
|
+
#ifndef NDEBUG
|
|
587
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
588
|
+
{
|
|
589
|
+
printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
590
|
+
assert(0);
|
|
591
|
+
}
|
|
592
|
+
#endif
|
|
593
|
+
|
|
594
|
+
if (idx < 0)
|
|
595
|
+
{
|
|
596
|
+
idx += Length;
|
|
597
|
+
}
|
|
598
|
+
|
|
599
|
+
return a[idx];
|
|
600
|
+
}
|
|
601
|
+
|
|
602
|
+
template<unsigned SliceLength, unsigned Length, typename Type>
|
|
603
|
+
inline CUDA_CALLABLE vec_t<SliceLength, Type> extract(const vec_t<Length, Type> & a, slice_t slice)
|
|
604
|
+
{
|
|
605
|
+
vec_t<SliceLength, Type> ret;
|
|
606
|
+
|
|
607
|
+
assert(slice.start >= 0 && slice.start <= (int)Length);
|
|
608
|
+
assert(slice.stop >= -1 && slice.stop <= (int)Length);
|
|
609
|
+
assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
|
|
610
|
+
assert(slice_get_length(slice) == SliceLength);
|
|
611
|
+
|
|
612
|
+
bool is_reversed = slice.step < 0;
|
|
613
|
+
|
|
614
|
+
int ii = 0;
|
|
615
|
+
for (
|
|
616
|
+
int i = slice.start;
|
|
617
|
+
is_reversed ? (i > slice.stop) : (i < slice.stop);
|
|
618
|
+
i += slice.step
|
|
619
|
+
)
|
|
620
|
+
{
|
|
621
|
+
ret[ii] = a[i];
|
|
622
|
+
++ii;
|
|
623
|
+
}
|
|
624
|
+
|
|
625
|
+
assert(ii == SliceLength);
|
|
626
|
+
return ret;
|
|
627
|
+
}
|
|
628
|
+
|
|
629
|
+
template<unsigned Length, typename Type>
|
|
630
|
+
inline CUDA_CALLABLE Type* index(vec_t<Length, Type>& v, int idx)
|
|
631
|
+
{
|
|
632
|
+
#ifndef NDEBUG
|
|
633
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
634
|
+
{
|
|
635
|
+
printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
636
|
+
assert(0);
|
|
637
|
+
}
|
|
638
|
+
#endif
|
|
639
|
+
|
|
640
|
+
if (idx < 0)
|
|
641
|
+
{
|
|
642
|
+
idx += Length;
|
|
643
|
+
}
|
|
644
|
+
|
|
645
|
+
return &v[idx];
|
|
646
|
+
}
|
|
647
|
+
|
|
648
|
+
template<unsigned Length, typename Type>
|
|
649
|
+
inline CUDA_CALLABLE Type* indexref(vec_t<Length, Type>* v, int idx)
|
|
650
|
+
{
|
|
651
|
+
#ifndef NDEBUG
|
|
652
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
653
|
+
{
|
|
654
|
+
printf("vec store %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
655
|
+
assert(0);
|
|
656
|
+
}
|
|
657
|
+
#endif
|
|
658
|
+
|
|
659
|
+
if (idx < 0)
|
|
660
|
+
{
|
|
661
|
+
idx += Length;
|
|
662
|
+
}
|
|
663
|
+
|
|
664
|
+
return &((*v)[idx]);
|
|
665
|
+
}
|
|
666
|
+
|
|
667
|
+
template<unsigned Length, typename Type>
|
|
668
|
+
inline CUDA_CALLABLE void adj_index(vec_t<Length, Type>& v, int idx,
|
|
669
|
+
vec_t<Length, Type>& adj_v, int adj_idx, const Type& adj_value)
|
|
670
|
+
{
|
|
671
|
+
// nop
|
|
672
|
+
}
|
|
673
|
+
|
|
674
|
+
|
|
675
|
+
template<unsigned Length, typename Type>
|
|
676
|
+
inline CUDA_CALLABLE void adj_indexref(vec_t<Length, Type>* v, int idx,
|
|
677
|
+
vec_t<Length, Type>& adj_v, int adj_idx, const Type& adj_value)
|
|
678
|
+
{
|
|
679
|
+
// nop
|
|
680
|
+
}
|
|
681
|
+
|
|
682
|
+
|
|
683
|
+
template<unsigned Length, typename Type>
|
|
684
|
+
inline CUDA_CALLABLE void add_inplace(vec_t<Length, Type>& v, int idx, Type value)
|
|
685
|
+
{
|
|
686
|
+
#ifndef NDEBUG
|
|
687
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
688
|
+
{
|
|
689
|
+
printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
690
|
+
assert(0);
|
|
691
|
+
}
|
|
692
|
+
#endif
|
|
693
|
+
|
|
694
|
+
if (idx < 0)
|
|
695
|
+
{
|
|
696
|
+
idx += Length;
|
|
697
|
+
}
|
|
698
|
+
|
|
699
|
+
v[idx] += value;
|
|
700
|
+
}
|
|
701
|
+
|
|
702
|
+
|
|
703
|
+
template<unsigned SliceLength, unsigned Length, typename Type>
|
|
704
|
+
inline CUDA_CALLABLE void add_inplace(vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a)
|
|
705
|
+
{
|
|
706
|
+
assert(slice.start >= 0 && slice.start <= (int)Length);
|
|
707
|
+
assert(slice.stop >= -1 && slice.stop <= (int)Length);
|
|
708
|
+
assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
|
|
709
|
+
assert(slice_get_length(slice) == SliceLength);
|
|
710
|
+
|
|
711
|
+
bool is_reversed = slice.step < 0;
|
|
712
|
+
|
|
713
|
+
int ii = 0;
|
|
714
|
+
for (
|
|
715
|
+
int i = slice.start;
|
|
716
|
+
is_reversed ? (i > slice.stop) : (i < slice.stop);
|
|
717
|
+
i += slice.step
|
|
718
|
+
)
|
|
719
|
+
{
|
|
720
|
+
v[i] += a[ii];
|
|
721
|
+
++ii;
|
|
722
|
+
}
|
|
723
|
+
|
|
724
|
+
assert(ii == SliceLength);
|
|
725
|
+
}
|
|
726
|
+
|
|
727
|
+
|
|
728
|
+
template<unsigned Length, typename Type>
|
|
729
|
+
inline CUDA_CALLABLE void adj_add_inplace(vec_t<Length, Type>& v, int idx, Type value,
|
|
730
|
+
vec_t<Length, Type>& adj_v, int adj_idx, Type& adj_value)
|
|
731
|
+
{
|
|
732
|
+
#ifndef NDEBUG
|
|
733
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
734
|
+
{
|
|
735
|
+
printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
736
|
+
assert(0);
|
|
737
|
+
}
|
|
738
|
+
#endif
|
|
739
|
+
|
|
740
|
+
if (idx < 0)
|
|
741
|
+
{
|
|
742
|
+
idx += Length;
|
|
743
|
+
}
|
|
744
|
+
|
|
745
|
+
adj_value += adj_v[idx];
|
|
746
|
+
}
|
|
747
|
+
|
|
748
|
+
|
|
749
|
+
template<unsigned SliceLength, unsigned Length, typename Type>
|
|
750
|
+
inline CUDA_CALLABLE void adj_add_inplace(
|
|
751
|
+
const vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a,
|
|
752
|
+
vec_t<Length, Type>& adj_v, slice_t& adj_slice, vec_t<SliceLength, Type>& adj_a
|
|
753
|
+
)
|
|
754
|
+
{
|
|
755
|
+
assert(slice.start >= 0 && slice.start <= (int)Length);
|
|
756
|
+
assert(slice.stop >= -1 && slice.stop <= (int)Length);
|
|
757
|
+
assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
|
|
758
|
+
assert(slice_get_length(slice) == SliceLength);
|
|
759
|
+
|
|
760
|
+
bool is_reversed = slice.step < 0;
|
|
761
|
+
|
|
762
|
+
int ii = 0;
|
|
763
|
+
for (
|
|
764
|
+
int i = slice.start;
|
|
765
|
+
is_reversed ? (i > slice.stop) : (i < slice.stop);
|
|
766
|
+
i += slice.step
|
|
767
|
+
)
|
|
768
|
+
{
|
|
769
|
+
adj_a[ii] += adj_v[i];
|
|
770
|
+
++ii;
|
|
771
|
+
}
|
|
772
|
+
|
|
773
|
+
assert(ii == SliceLength);
|
|
774
|
+
}
|
|
775
|
+
|
|
776
|
+
|
|
777
|
+
template<unsigned Length, typename Type>
|
|
778
|
+
inline CUDA_CALLABLE void sub_inplace(vec_t<Length, Type>& v, int idx, Type value)
|
|
779
|
+
{
|
|
780
|
+
#ifndef NDEBUG
|
|
781
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
782
|
+
{
|
|
783
|
+
printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
784
|
+
assert(0);
|
|
785
|
+
}
|
|
786
|
+
#endif
|
|
787
|
+
|
|
788
|
+
if (idx < 0)
|
|
789
|
+
{
|
|
790
|
+
idx += Length;
|
|
791
|
+
}
|
|
792
|
+
|
|
793
|
+
v[idx] -= value;
|
|
794
|
+
}
|
|
795
|
+
|
|
796
|
+
|
|
797
|
+
template<unsigned SliceLength, unsigned Length, typename Type>
|
|
798
|
+
inline CUDA_CALLABLE void sub_inplace(vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a)
|
|
799
|
+
{
|
|
800
|
+
assert(slice.start >= 0 && slice.start <= (int)Length);
|
|
801
|
+
assert(slice.stop >= -1 && slice.stop <= (int)Length);
|
|
802
|
+
assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
|
|
803
|
+
assert(slice_get_length(slice) == SliceLength);
|
|
804
|
+
|
|
805
|
+
bool is_reversed = slice.step < 0;
|
|
806
|
+
|
|
807
|
+
int ii = 0;
|
|
808
|
+
for (
|
|
809
|
+
int i = slice.start;
|
|
810
|
+
is_reversed ? (i > slice.stop) : (i < slice.stop);
|
|
811
|
+
i += slice.step
|
|
812
|
+
)
|
|
813
|
+
{
|
|
814
|
+
v[i] -= a[ii];
|
|
815
|
+
++ii;
|
|
816
|
+
}
|
|
817
|
+
|
|
818
|
+
assert(ii == SliceLength);
|
|
819
|
+
}
|
|
820
|
+
|
|
821
|
+
|
|
822
|
+
template<unsigned Length, typename Type>
|
|
823
|
+
inline CUDA_CALLABLE void adj_sub_inplace(vec_t<Length, Type>& v, int idx, Type value,
|
|
824
|
+
vec_t<Length, Type>& adj_v, int adj_idx, Type& adj_value)
|
|
825
|
+
{
|
|
826
|
+
#ifndef NDEBUG
|
|
827
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
828
|
+
{
|
|
829
|
+
printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
830
|
+
assert(0);
|
|
831
|
+
}
|
|
832
|
+
#endif
|
|
833
|
+
|
|
834
|
+
if (idx < 0)
|
|
835
|
+
{
|
|
836
|
+
idx += Length;
|
|
837
|
+
}
|
|
838
|
+
|
|
839
|
+
adj_value -= adj_v[idx];
|
|
840
|
+
}
|
|
841
|
+
|
|
842
|
+
|
|
843
|
+
template<unsigned SliceLength, unsigned Length, typename Type>
|
|
844
|
+
inline CUDA_CALLABLE void adj_sub_inplace(
|
|
845
|
+
const vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a,
|
|
846
|
+
vec_t<Length, Type>& adj_v, slice_t& adj_slice, vec_t<SliceLength, Type>& adj_a
|
|
847
|
+
)
|
|
848
|
+
{
|
|
849
|
+
assert(slice.start >= 0 && slice.start <= (int)Length);
|
|
850
|
+
assert(slice.stop >= -1 && slice.stop <= (int)Length);
|
|
851
|
+
assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
|
|
852
|
+
assert(slice_get_length(slice) == SliceLength);
|
|
853
|
+
|
|
854
|
+
bool is_reversed = slice.step < 0;
|
|
855
|
+
|
|
856
|
+
int ii = 0;
|
|
857
|
+
for (
|
|
858
|
+
int i = slice.start;
|
|
859
|
+
is_reversed ? (i > slice.stop) : (i < slice.stop);
|
|
860
|
+
i += slice.step
|
|
861
|
+
)
|
|
862
|
+
{
|
|
863
|
+
adj_a[ii] -= adj_v[i];
|
|
864
|
+
++ii;
|
|
865
|
+
}
|
|
866
|
+
|
|
867
|
+
assert(ii == SliceLength);
|
|
868
|
+
}
|
|
869
|
+
|
|
870
|
+
|
|
871
|
+
template<unsigned Length, typename Type>
|
|
872
|
+
inline CUDA_CALLABLE void bit_and_inplace(vec_t<Length, Type>& v, int idx, Type value)
|
|
873
|
+
{
|
|
874
|
+
#ifndef NDEBUG
|
|
875
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
876
|
+
{
|
|
877
|
+
printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
878
|
+
assert(0);
|
|
879
|
+
}
|
|
880
|
+
#endif
|
|
881
|
+
|
|
882
|
+
if (idx < 0)
|
|
883
|
+
{
|
|
884
|
+
idx += Length;
|
|
885
|
+
}
|
|
886
|
+
|
|
887
|
+
v[idx] &= value;
|
|
888
|
+
}
|
|
889
|
+
|
|
890
|
+
|
|
891
|
+
template<unsigned SliceLength, unsigned Length, typename Type>
|
|
892
|
+
inline CUDA_CALLABLE void bit_and_inplace(vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a)
|
|
893
|
+
{
|
|
894
|
+
assert(slice.start >= 0 && slice.start <= (int)Length);
|
|
895
|
+
assert(slice.stop >= -1 && slice.stop <= (int)Length);
|
|
896
|
+
assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
|
|
897
|
+
assert(slice_get_length(slice) == SliceLength);
|
|
898
|
+
|
|
899
|
+
bool is_reversed = slice.step < 0;
|
|
900
|
+
|
|
901
|
+
int ii = 0;
|
|
902
|
+
for (
|
|
903
|
+
int i = slice.start;
|
|
904
|
+
is_reversed ? (i > slice.stop) : (i < slice.stop);
|
|
905
|
+
i += slice.step
|
|
906
|
+
)
|
|
907
|
+
{
|
|
908
|
+
v[i] &= a[ii];
|
|
909
|
+
++ii;
|
|
910
|
+
}
|
|
911
|
+
|
|
912
|
+
assert(ii == SliceLength);
|
|
913
|
+
}
|
|
914
|
+
|
|
915
|
+
|
|
916
|
+
template<unsigned Length, typename Type>
|
|
917
|
+
inline CUDA_CALLABLE void adj_bit_and_inplace(
|
|
918
|
+
vec_t<Length, Type>& v, int idx, Type value,
|
|
919
|
+
vec_t<Length, Type>& adj_v, int adj_idx, Type& adj_value
|
|
920
|
+
) {}
|
|
921
|
+
|
|
922
|
+
|
|
923
|
+
template<unsigned SliceLength, unsigned Length, typename Type>
|
|
924
|
+
inline CUDA_CALLABLE void adj_bit_and_inplace(
|
|
925
|
+
const vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a,
|
|
926
|
+
vec_t<Length, Type>& adj_v, slice_t& adj_slice, vec_t<SliceLength, Type>& adj_a
|
|
927
|
+
) {}
|
|
928
|
+
|
|
929
|
+
|
|
930
|
+
template<unsigned Length, typename Type>
|
|
931
|
+
inline CUDA_CALLABLE void bit_or_inplace(vec_t<Length, Type>& v, int idx, Type value)
|
|
932
|
+
{
|
|
933
|
+
#ifndef NDEBUG
|
|
934
|
+
if (idx < -(int)Length || idx >= (int)Length)
|
|
935
|
+
{
|
|
936
|
+
printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
|
|
937
|
+
assert(0);
|
|
938
|
+
}
|
|
939
|
+
#endif
|
|
940
|
+
|
|
941
|
+
if (idx < 0)
|
|
942
|
+
{
|
|
943
|
+
idx += Length;
|
|
944
|
+
}
|
|
945
|
+
|
|
946
|
+
v[idx] |= value;
|
|
947
|
+
}
|
|
948
|
+
|
|
949
|
+
|
|
950
|
+
template<unsigned SliceLength, unsigned Length, typename Type>
|
|
951
|
+
inline CUDA_CALLABLE void bit_or_inplace(vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a)
|
|
952
|
+
{
|
|
953
|
+
assert(slice.start >= 0 && slice.start <= (int)Length);
|
|
954
|
+
assert(slice.stop >= -1 && slice.stop <= (int)Length);
|
|
955
|
+
assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
|
|
956
|
+
assert(slice_get_length(slice) == SliceLength);
|
|
957
|
+
|
|
958
|
+
bool is_reversed = slice.step < 0;
|
|
959
|
+
|
|
960
|
+
int ii = 0;
|
|
961
|
+
for (
|
|
962
|
+
int i = slice.start;
|
|
963
|
+
is_reversed ? (i > slice.stop) : (i < slice.stop);
|
|
964
|
+
i += slice.step
|
|
965
|
+
)
|
|
966
|
+
{
|
|
967
|
+
v[i] |= a[ii];
|
|
968
|
+
++ii;
|
|
969
|
+
}
|
|
970
|
+
|
|
971
|
+
    assert(ii == SliceLength);
}


template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_bit_or_inplace(
    vec_t<Length, Type>& v, int idx, Type value,
    vec_t<Length, Type>& adj_v, int adj_idx, Type& adj_value
) {}


template<unsigned SliceLength, unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_bit_or_inplace(
    const vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a,
    vec_t<Length, Type>& adj_v, slice_t& adj_slice, vec_t<SliceLength, Type>& adj_a
) {}


template<unsigned Length, typename Type>
inline CUDA_CALLABLE void bit_xor_inplace(vec_t<Length, Type>& v, int idx, Type value)
{
#ifndef NDEBUG
    if (idx < -(int)Length || idx >= (int)Length)
    {
        printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
        assert(0);
    }
#endif

    if (idx < 0)
    {
        idx += Length;
    }

    v[idx] ^= value;
}


template<unsigned SliceLength, unsigned Length, typename Type>
inline CUDA_CALLABLE void bit_xor_inplace(vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a)
{
    assert(slice.start >= 0 && slice.start <= (int)Length);
    assert(slice.stop >= -1 && slice.stop <= (int)Length);
    assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
    assert(slice_get_length(slice) == SliceLength);

    bool is_reversed = slice.step < 0;

    int ii = 0;
    for (
        int i = slice.start;
        is_reversed ? (i > slice.stop) : (i < slice.stop);
        i += slice.step
    )
    {
        v[i] ^= a[ii];
        ++ii;
    }

    assert(ii == SliceLength);
}


template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_bit_xor_inplace(
    vec_t<Length, Type>& v, int idx, Type value,
    vec_t<Length, Type>& adj_v, int adj_idx, Type& adj_value
) {}


template<unsigned SliceLength, unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_bit_xor_inplace(
    const vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a,
    vec_t<Length, Type>& adj_v, slice_t& adj_slice, vec_t<SliceLength, Type>& adj_a
) {}


template<unsigned Length, typename Type>
inline CUDA_CALLABLE void assign_inplace(vec_t<Length, Type>& v, int idx, Type value)
{
#ifndef NDEBUG
    if (idx < -(int)Length || idx >= (int)Length)
    {
        printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
        assert(0);
    }
#endif

    if (idx < 0)
    {
        idx += Length;
    }

    v[idx] = value;
}

template<unsigned SliceLength, unsigned Length, typename Type>
inline CUDA_CALLABLE void assign_inplace(vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a)
{
    assert(slice.start >= 0 && slice.start <= (int)Length);
    assert(slice.stop >= -1 && slice.stop <= (int)Length);
    assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
    assert(slice_get_length(slice) == SliceLength);

    bool is_reversed = slice.step < 0;

    int ii = 0;
    for (
        int i = slice.start;
        is_reversed ? (i > slice.stop) : (i < slice.stop);
        i += slice.step
    )
    {
        v[i] = a[ii];
        ++ii;
    }

    assert(ii == SliceLength);
}
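// Illustrative note (not part of the original header): the slice variants all
// follow the loop pattern above, walking from slice.start toward slice.stop in
// increments of slice.step, so a negative step visits indices in reverse.
// A minimal sketch, assuming slice_t carries the {start, stop, step} fields
// used by these overloads (the field layout is an assumption here):
//
//     vec_t<5, float> v;
//     vec_t<3, float> a;          // values to scatter into v
//     slice_t s;                  // hypothetical: start=4, stop=-1, step=-2
//     assign_inplace<3>(v, s, a); // writes v[4]=a[0], v[2]=a[1], v[0]=a[2]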

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_assign_inplace(vec_t<Length, Type>& v, int idx, Type value, vec_t<Length, Type>& adj_v, int& adj_idx, Type& adj_value)
{
#ifndef NDEBUG
    if (idx < -(int)Length || idx >= (int)Length)
    {
        printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
        assert(0);
    }
#endif

    if (idx < 0)
    {
        idx += Length;
    }

    adj_value += adj_v[idx];
}

template<unsigned SliceLength, unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_assign_inplace(
    const vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a,
    vec_t<Length, Type>& adj_v, slice_t& adj_slice, vec_t<SliceLength, Type>& adj_a
)
{
    assert(slice.start >= 0 && slice.start <= (int)Length);
    assert(slice.stop >= -1 && slice.stop <= (int)Length);
    assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
    assert(slice_get_length(slice) == SliceLength);

    bool is_reversed = slice.step < 0;

    int ii = 0;
    for (
        int i = slice.start;
        is_reversed ? (i > slice.stop) : (i < slice.stop);
        i += slice.step
    )
    {
        adj_a[ii] += adj_v[i];
        ++ii;
    }

    assert(ii == SliceLength);
}


template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> assign_copy(vec_t<Length, Type>& v, int idx, Type value)
{
#ifndef NDEBUG
    if (idx < -(int)Length || idx >= (int)Length)
    {
        printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
        assert(0);
    }
#endif

    if (idx < 0)
    {
        idx += Length;
    }

    vec_t<Length, Type> ret(v);
    ret[idx] = value;
    return ret;
}

template<unsigned SliceLength, unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> assign_copy(vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a)
{
    vec_t<Length, Type> ret(v);
    assign_inplace<SliceLength>(ret, slice, a);
    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_assign_copy(vec_t<Length, Type>& v, int idx, Type value, vec_t<Length, Type>& adj_v, int& adj_idx, Type& adj_value, const vec_t<Length, Type>& adj_ret)
{
#ifndef NDEBUG
    if (idx < -(int)Length || idx >= (int)Length)
    {
        printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
        assert(0);
    }
#endif

    if (idx < 0)
    {
        idx += Length;
    }

    adj_value += adj_ret[idx];
    for(unsigned i=0; i < Length; ++i)
    {
        if (i != idx)
            adj_v[i] += adj_ret[i];
    }
}

template<unsigned SliceLength, unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_assign_copy(
    vec_t<Length, Type>& v, slice_t slice, const vec_t<SliceLength, Type> &a,
    vec_t<Length, Type>& adj_v, slice_t& adj_slice, vec_t<SliceLength, Type>& adj_a,
    const vec_t<Length, Type>& adj_ret)
{
    assert(slice.start >= 0 && slice.start <= (int)Length);
    assert(slice.stop >= -1 && slice.stop <= (int)Length);
    assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
    assert(slice_get_length(slice) == SliceLength);

    bool is_reversed = slice.step < 0;

    int ii = 0;
    for (int i = 0; i < Length; ++i)
    {
        bool in_slice = is_reversed
            ? (i <= slice.start && i > slice.stop && (slice.start - i) % (-slice.step) == 0)
            : (i >= slice.start && i < slice.stop && (i - slice.start) % slice.step == 0);

        if (!in_slice)
        {
            adj_v[i] += adj_ret[i];
        }
        else
        {
            adj_a[ii] += adj_ret[i];
            ++ii;
        }
    }

    assert(ii == SliceLength);
}
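// Illustrative note (not part of the original header): the adjoint above routes
// the incoming gradient adj_ret by slice membership. Positions that were
// overwritten by the copied slice send their gradient to adj_a (the assigned
// values, in slice order), while every other position passes its gradient
// through to adj_v unchanged. For example, with Length = 4 and a slice covering
// indices {1, 3}:
//
//     adj_v receives adj_ret[0] and adj_ret[2]
//     adj_a receives adj_ret[1] and adj_ret[3]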

template<unsigned Length, typename Type>
inline CUDA_CALLABLE Type length(vec_t<Length, Type> a)
{
    return sqrt(dot(a, a));
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE Type length_sq(vec_t<Length, Type> a)
{
    return dot(a, a);
}


template<typename Type>
inline CUDA_CALLABLE Type length(vec_t<2, Type> a)
{
    return sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1]);
}

template<typename Type>
inline CUDA_CALLABLE Type length(vec_t<3, Type> a)
{
    return sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1] + a.c[2] * a.c[2]);
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> normalize(vec_t<Length, Type> a)
{
    Type l = length(a);
    if (l > Type(kEps))
        return div(a,l);
    else
        return vec_t<Length, Type>();
}

template<typename Type>
inline CUDA_CALLABLE vec_t<2, Type> normalize(vec_t<2, Type> a)
{
    Type l = sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1]);
    if (l > Type(kEps))
        return vec_t<2, Type>(a.c[0]/l,a.c[1]/l);
    else
        return vec_t<2, Type>();
}

template<typename Type>
inline CUDA_CALLABLE vec_t<3, Type> normalize(vec_t<3, Type> a)
{
    Type l = sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1] + a.c[2] * a.c[2]);
    if (l > Type(kEps))
        return vec_t<3, Type>(a.c[0]/l,a.c[1]/l,a.c[2]/l);
    else
        return vec_t<3, Type>();
}
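// Illustrative note (not part of the original header): normalize() guards the
// division with kEps, so a vector whose length is at or below the threshold
// maps to the default (zero) vector instead of producing inf/nan components:
//
//     normalize(vec_t<3, float>(0.0f, 0.0f, 0.0f));  // returns (0, 0, 0)
//     normalize(vec_t<3, float>(3.0f, 0.0f, 4.0f));  // returns (0.6, 0, 0.8)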


template<typename Type>
inline CUDA_CALLABLE vec_t<3,Type> cross(vec_t<3,Type> a, vec_t<3,Type> b)
{
    return {
        Type(a[1]*b[2] - a[2]*b[1]),
        Type(a[2]*b[0] - a[0]*b[2]),
        Type(a[0]*b[1] - a[1]*b[0])
    };
}


template<unsigned Length, typename Type>
inline bool CUDA_CALLABLE isfinite(vec_t<Length, Type> x)
{
    for( unsigned i=0; i < Length; ++i )
    {
        if(!isfinite(x[i]))
        {
            return false;
        }
    }
    return true;
}

template<unsigned Length, typename Type>
inline bool CUDA_CALLABLE isnan(vec_t<Length, Type> x)
{
    for( unsigned i=0; i < Length; ++i )
    {
        if(isnan(x[i]))
        {
            return true;
        }
    }
    return false;
}

template<unsigned Length, typename Type>
inline bool CUDA_CALLABLE isinf(vec_t<Length, Type> x)
{
    for( unsigned i=0; i < Length; ++i )
    {
        if(isinf(x[i]))
        {
            return true;
        }
    }
    return false;
}

// These two functions seem to compile very slowly
template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length,Type> min(vec_t<Length,Type> a, vec_t<Length,Type> b)
{
    vec_t<Length,Type> ret;
    for( unsigned i=0; i < Length; ++i )
    {
        ret[i] = a[i] < b[i] ? a[i] : b[i];
    }
    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length,Type> max(vec_t<Length,Type> a, vec_t<Length,Type> b)
{
    vec_t<Length,Type> ret;
    for( unsigned i=0; i < Length; ++i )
    {
        ret[i] = a[i] > b[i] ? a[i] : b[i];
    }
    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE Type min(vec_t<Length,Type> v)
{
    Type ret = v[0];
    for( unsigned i=1; i < Length; ++i )
    {
        if (v[i] < ret)
            ret = v[i];
    }
    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE Type max(vec_t<Length,Type> v)
{
    Type ret = v[0];
    for( unsigned i=1; i < Length; ++i )
    {
        if (v[i] > ret)
            ret = v[i];
    }
    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE unsigned argmin(vec_t<Length,Type> v)
{
    unsigned ret = 0;
    for( unsigned i=1; i < Length; ++i )
    {
        if (v[i] < v[ret])
            ret = i;
    }
    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE unsigned argmax(vec_t<Length,Type> v)
{
    unsigned ret = 0;
    for( unsigned i=1; i < Length; ++i )
    {
        if (v[i] > v[ret])
            ret = i;
    }
    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length,Type> abs(vec_t<Length,Type> v)
{
    vec_t<Length,Type> ret;
    for (unsigned i=0; i < Length; ++i)
    {
        ret[i] = abs(v[i]);
    }

    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length,Type> sign(vec_t<Length,Type> v)
{
    vec_t<Length,Type> ret;
    for (unsigned i=0; i < Length; ++i)
    {
        ret[i] = v[i] < Type(0) ? Type(-1) : Type(1);
    }

    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void expect_near(const vec_t<Length, Type>& actual, const vec_t<Length, Type>& expected, const Type& tolerance)
{
    Type diff(0);
    for(size_t i=0; i<Length; ++i)
    {
        diff = max(diff,abs(actual[i] - expected[i]));
    }
    if (diff > tolerance)
    {
        printf("Error, expect_near() failed with tolerance "); print(tolerance);
        printf(" Expected: "); print(expected);
        printf(" Actual: "); print(actual);
        printf(" Max absolute difference: "); print(diff);
    }
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_expect_near(const vec_t<Length, Type>& actual, const vec_t<Length, Type>& expected, Type tolerance, vec_t<Length, Type>& adj_actual, vec_t<Length, Type>& adj_expected, Type adj_tolerance)
{
    // nop
}

// adjoint for the initializer_array constructor:
template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_vec_t(const initializer_array<Length, Type> &cmps, const initializer_array<Length, Type*> &adj_cmps, const vec_t<Length, Type>& adj_ret)
{
    for(unsigned i=0; i < Length; ++i)
    {
        *(adj_cmps[i]) += adj_ret[i];
    }
}


// adjoint for the component constructors:
template<typename Type>
inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type &adj_cmpx, Type &adj_cmpy, const vec_t<2, Type>& adj_ret)
{
    adj_cmpx += adj_ret.c[0];
    adj_cmpy += adj_ret.c[1];
}

template<typename Type>
inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type cmpz, Type &adj_cmpx, Type &adj_cmpy, Type &adj_cmpz, const vec_t<3, Type>& adj_ret)
{
    adj_cmpx += adj_ret.c[0];
    adj_cmpy += adj_ret.c[1];
    adj_cmpz += adj_ret.c[2];
}

template<typename Type>
inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type cmpz, Type cmpw, Type &adj_cmpx, Type &adj_cmpy, Type &adj_cmpz, Type &adj_cmpw, const vec_t<4, Type>& adj_ret)
{
    adj_cmpx += adj_ret.c[0];
    adj_cmpy += adj_ret.c[1];
    adj_cmpz += adj_ret.c[2];
    adj_cmpw += adj_ret.c[3];
}

// adjoint for the constant constructor:
template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_vec_t(Type s, Type& adj_s, const vec_t<Length, Type>& adj_ret)
{
    for( unsigned i=0; i < Length; ++i )
    {
        adj_s += adj_ret[i];
    }
}

// adjoint for the casting constructor
template<unsigned Length, typename Type, typename OtherType>
inline CUDA_CALLABLE void adj_vec_t(const vec_t<Length, OtherType>& other, vec_t<Length, OtherType>& adj_other, const vec_t<Length, Type>& adj_ret)
{
    for( unsigned i=0; i < Length; ++i )
    {
        adj_other[i] += static_cast<OtherType>(adj_ret[i]);
    }
}

template<typename Type>
CUDA_CALLABLE inline void adj_vec_t(const vec_t<3,Type>& w, const vec_t<3,Type>& v, vec_t<3,Type>& adj_w, vec_t<3,Type>& adj_v, const vec_t<6,Type>& adj_ret)
{
    adj_w[0] += adj_ret[0];
    adj_w[1] += adj_ret[1];
    adj_w[2] += adj_ret[2];
    adj_v[0] += adj_ret[3];
    adj_v[1] += adj_ret[4];
    adj_v[2] += adj_ret[5];
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_mul(vec_t<Length, Type> a, Type s, vec_t<Length, Type>& adj_a, Type& adj_s, const vec_t<Length, Type>& adj_ret)
{
    for( unsigned i=0; i < Length; ++i )
    {
        adj_a[i] += s*adj_ret[i];
    }

    adj_s += dot(a, adj_ret);

#if FP_CHECK
    if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
    {
        // \TODO: How shall we implement this error message?
        //printf("adj_mul((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
        assert(0);
    }
#endif
}
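// Illustrative note (not part of the original header): for y = a*s the
// reverse-mode rules applied above are adj_a[i] += s * adj_y[i] and
// adj_s += dot(a, adj_y). A worked example with a = (1, 2, 3), s = 2 and
// adj_y = (1, 1, 1):
//
//     adj_a += (2, 2, 2)
//     adj_s += 1*1 + 2*1 + 3*1 = 6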

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_mul(Type s, vec_t<Length, Type> a, Type& adj_s, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
{
    adj_mul(a, s, adj_a, adj_s, adj_ret);
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_cw_mul(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
    adj_a += cw_mul(b, adj_ret);
    adj_b += cw_mul(a, adj_ret);
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_div(vec_t<Length, Type> a, Type s, vec_t<Length, Type>& adj_a, Type& adj_s, const vec_t<Length, Type>& adj_ret)
{

    adj_s -= dot(a , adj_ret)/ (s * s); // - a / s^2

    for( unsigned i=0; i < Length; ++i )
    {
        adj_a[i] += adj_ret[i] / s;
    }

#if FP_CHECK
    if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
    {
        // \TODO: How shall we implement this error message?
        // printf("adj_div((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
        assert(0);
    }
#endif
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_div(Type s, vec_t<Length, Type> a, Type& adj_s, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
{

    for (unsigned i=0; i < Length; ++i)
    {
        Type inv = Type(1) / a[i];
        adj_a[i] -= s * adj_ret[i] * inv * inv;
        adj_s += adj_ret[i] * inv;
    }

#if FP_CHECK
    if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
    {
        // \TODO: How shall we implement this error message?
        // printf("adj_div((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
        assert(0);
    }
#endif
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_cw_div(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& ret, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret) {
    adj_a += cw_div(adj_ret, b);
    adj_b -= cw_mul(adj_ret, cw_div(ret, b));
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_add(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
    adj_a += adj_ret;
    adj_b += adj_ret;
}

template<typename Type>
inline CUDA_CALLABLE void adj_add(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
{
    adj_a.c[0] += adj_ret.c[0];
    adj_a.c[1] += adj_ret.c[1];
    adj_b.c[0] += adj_ret.c[0];
    adj_b.c[1] += adj_ret.c[1];
}

template<typename Type>
inline CUDA_CALLABLE void adj_add(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
{
    adj_a.c[0] += adj_ret.c[0];
    adj_a.c[1] += adj_ret.c[1];
    adj_a.c[2] += adj_ret.c[2];
    adj_b.c[0] += adj_ret.c[0];
    adj_b.c[1] += adj_ret.c[1];
    adj_b.c[2] += adj_ret.c[2];
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_sub(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
    adj_a += adj_ret;
    adj_b -= adj_ret;
}

template<typename Type>
inline CUDA_CALLABLE void adj_sub(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
{
    adj_a.c[0] += adj_ret.c[0];
    adj_a.c[1] += adj_ret.c[1];
    adj_b.c[0] -= adj_ret.c[0];
    adj_b.c[1] -= adj_ret.c[1];
}

template<typename Type>
inline CUDA_CALLABLE void adj_sub(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
{
    adj_a.c[0] += adj_ret.c[0];
    adj_a.c[1] += adj_ret.c[1];
    adj_a.c[2] += adj_ret.c[2];
    adj_b.c[0] -= adj_ret.c[0];
    adj_b.c[1] -= adj_ret.c[1];
    adj_b.c[2] -= adj_ret.c[2];
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_mod(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_mod(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_mod(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
{
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_bit_and(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_bit_and(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_bit_and(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
{
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_bit_or(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_bit_or(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_bit_or(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
{
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_bit_xor(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_bit_xor(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_bit_xor(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
{
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_lshift(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_lshift(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_lshift(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
{
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_rshift(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_rshift(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
{
}

template<typename Type>
inline CUDA_CALLABLE void adj_rshift(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
{
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_invert(
    const vec_t<Length,Type>& v,
    vec_t<Length,Type>& adj_v,
    const vec_t<Length,Type>& adj_ret
)
{
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_dot(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const Type adj_ret)
{
    adj_a += b*adj_ret;
    adj_b += a*adj_ret;

#if FP_CHECK
    if (!isfinite(a) || !isfinite(b) || !isfinite(adj_a) || !isfinite(adj_b) || !isfinite(adj_ret))
    {
        // \TODO: How shall we implement this error message?
        //printf("adj_dot((%f %f %f %f), (%f %f %f %f), (%f %f %f %f), (%f %f %f %f), %f)\n", a.x, a.y, a.z, a.w, b.x, b.y, b.z, b.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_b.x, adj_b.y, adj_b.z, adj_b.w, adj_ret);
        assert(0);
    }
#endif
}



template<typename Type>
inline CUDA_CALLABLE void adj_dot(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const Type adj_ret)
{
    adj_a.c[0] += b.c[0]*adj_ret;
    adj_a.c[1] += b.c[1]*adj_ret;

    adj_b.c[0] += a.c[0]*adj_ret;
    adj_b.c[1] += a.c[1]*adj_ret;
}

template<typename Type>
inline CUDA_CALLABLE void adj_dot(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const Type adj_ret)
{
    adj_a.c[0] += b.c[0]*adj_ret;
    adj_a.c[1] += b.c[1]*adj_ret;
    adj_a.c[2] += b.c[2]*adj_ret;

    adj_b.c[0] += a.c[0]*adj_ret;
    adj_b.c[1] += a.c[1]*adj_ret;
    adj_b.c[2] += a.c[2]*adj_ret;
}


template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_extract(const vec_t<Length, Type> & a, int idx, vec_t<Length, Type> & adj_a, int & adj_idx, Type & adj_ret)
{
#ifndef NDEBUG
    if (idx < -(int)Length || idx >= (int)Length)
    {
        printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
        assert(0);
    }
#endif

    if (idx < 0)
    {
        idx += Length;
    }

    adj_a[idx] += adj_ret;
}

template<unsigned SliceLength, unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_extract(
    const vec_t<Length, Type>& a, slice_t slice,
    vec_t<Length, Type>& adj_a, slice_t& adj_slice,
    const vec_t<SliceLength, Type>& adj_ret
)
{
    assert(slice.start >= 0 && slice.start <= (int)Length);
    assert(slice.stop >= -1 && slice.stop <= (int)Length);
    assert(slice.step != 0 && slice.step < 0 ? slice.start >= slice.stop : slice.start <= slice.stop);
    assert(slice_get_length(slice) == SliceLength);

    bool is_reversed = slice.step < 0;

    int ii = 0;
    for (
        int i = slice.start;
        is_reversed ? (i > slice.stop) : (i < slice.stop);
        i += slice.step
    )
    {
        adj_a[i] += adj_ret[ii];
        ++ii;
    }

    assert(ii == SliceLength);
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_length(vec_t<Length, Type> a, Type ret, vec_t<Length, Type>& adj_a, const Type adj_ret)
{
    if (ret > Type(kEps))
    {
        adj_a += div(a, ret) * adj_ret;
    }

#if FP_CHECK
    if (!isfinite(adj_a))
    {
        // \TODO: How shall we implement this error message?
        //printf("%s:%d - adj_length((%f %f %f %f), (%f %f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret);
        assert(0);
    }
#endif
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_length_sq(vec_t<Length, Type> a, vec_t<Length, Type>& adj_a, const Type adj_ret)
{
    adj_a += Type(2.0)*a*adj_ret;

#if FP_CHECK
    if (!isfinite(adj_a))
    {
        // \TODO: How shall we implement this error message?
        //printf("%s:%d - adj_length((%f %f %f %f), (%f %f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret);
        assert(0);
    }
#endif
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_normalize(vec_t<Length, Type> a, vec_t<Length, Type>& ret, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
{
    Type d = length(a);

    if (d > Type(kEps))
    {
        Type invd = Type(1.0f)/d;

        adj_a += (adj_ret*invd - ret*(dot(ret, adj_ret))*invd);

#if FP_CHECK
        if (!isfinite(adj_a))
        {
            // \TODO: How shall we implement this error message?
            //printf("%s:%d - adj_normalize((%f %f %f %f), (%f %f %f %f), (%f, %f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
            assert(0);
        }
#endif
    }
}
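// Illustrative note (not part of the original header): with n = a/|a| the
// update above is adj_a += (adj_ret - n * dot(n, adj_ret)) / |a|, i.e. the
// incoming gradient projected onto the plane perpendicular to n and scaled by
// 1/|a|, which matches the Jacobian of normalization, (I - n n^T) / |a|.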

template<typename Type>
inline CUDA_CALLABLE void adj_cross(vec_t<3,Type> a, vec_t<3,Type> b, vec_t<3,Type>& adj_a, vec_t<3,Type>& adj_b, const vec_t<3,Type>& adj_ret)
{
    // todo: sign check
    adj_a += cross(b, adj_ret);
    adj_b -= cross(a, adj_ret);
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_isfinite(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret)
{

}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_isnan(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret)
{

}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_isinf(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret)
{

}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_min(const vec_t<Length,Type> &a, const vec_t<Length,Type> &b, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, const vec_t<Length,Type> &adj_ret)
{
    for( unsigned i=0; i < Length; ++i )
    {
        if (a[i] < b[i])
            adj_a[i] += adj_ret[i];
        else
            adj_b[i] += adj_ret[i];
    }
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_max(const vec_t<Length,Type> &a, const vec_t<Length,Type> &b, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, const vec_t<Length,Type> &adj_ret)
{
    for( unsigned i=0; i < Length; ++i )
    {
        if (a[i] > b[i])
            adj_a[i] += adj_ret[i];
        else
            adj_b[i] += adj_ret[i];
    }
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_min(const vec_t<Length,Type> &v, vec_t<Length,Type>& adj_v, const Type &adj_ret)
{
    unsigned i = argmin(v);
    adj_v[i] += adj_ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_max(const vec_t<Length,Type> &v, vec_t<Length,Type>& adj_v, const Type &adj_ret)
{
    unsigned i = argmax(v);
    adj_v[i] += adj_ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_abs(
    const vec_t<Length,Type>& v,
    vec_t<Length,Type>& adj_v,
    const vec_t<Length,Type>& adj_ret
)
{
    for (unsigned i=0; i < Length; ++i)
    {
        if (v[i] < Type(0))
        {
            adj_v[i] -= adj_ret[i];
        }
        else
        {
            adj_v[i] += adj_ret[i];
        }
    }
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_sign(
    const vec_t<Length,Type>& v,
    vec_t<Length,Type>& adj_v,
    const vec_t<Length,Type>& adj_ret
)
{
    for (unsigned i=0; i < Length; ++i)
    {
        if (v[i] < Type(0))
        {
            adj_v[i] -= adj_ret[i];
        }
        else
        {
            adj_v[i] += adj_ret[i];
        }
    }
}

// Do I need to specialize these for different lengths?
template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> atomic_add(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
{
    vec_t<Length, Type> ret;
    for( unsigned i=0; i < Length; ++i )
    {
        ret[i] = atomic_add(&(addr -> c[i]), value[i]);
    }

    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> atomic_min(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
{
    vec_t<Length, Type> ret;
    for( unsigned i=0; i < Length; ++i )
    {
        ret[i] = atomic_min(&(addr -> c[i]), value[i]);
    }

    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> atomic_max(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
{
    vec_t<Length, Type> ret;
    for( unsigned i=0; i < Length; ++i )
    {
        ret[i] = atomic_max(&(addr -> c[i]), value[i]);
    }

    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> atomic_and(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
{
    vec_t<Length, Type> ret;
    for( unsigned i=0; i < Length; ++i )
    {
        ret[i] = atomic_and(&(addr -> c[i]), value[i]);
    }

    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> atomic_or(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
{
    vec_t<Length, Type> ret;
    for( unsigned i=0; i < Length; ++i )
    {
        ret[i] = atomic_or(&(addr -> c[i]), value[i]);
    }

    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE vec_t<Length, Type> atomic_xor(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
{
    vec_t<Length, Type> ret;
    for( unsigned i=0; i < Length; ++i )
    {
        ret[i] = atomic_xor(&(addr -> c[i]), value[i]);
    }

    return ret;
}

template<unsigned Length, typename Type>
inline CUDA_CALLABLE void adj_atomic_minmax(
    vec_t<Length,Type> *addr,
    vec_t<Length,Type> *adj_addr,
    const vec_t<Length,Type> &value,
    vec_t<Length,Type> &adj_value)
{
    for (unsigned i=0; i < Length; ++i)
        adj_atomic_minmax(&(addr->c[i]), &(adj_addr->c[i]), value[i], adj_value[i]);
}

// ok, the original implementation of this didn't take the absolute values.
// I wouldn't consider this expected behavior. It looks like it's only
// being used for bounding boxes at the moment, where this doesn't matter,
// but you often use it for ray tracing where it does. Not sure if the
// fabs() incurs a performance hit...
template<unsigned Length, typename Type>
CUDA_CALLABLE inline int longest_axis(const vec_t<Length, Type>& v)
{
    Type lmax = abs(v[0]);
    int ret(0);
    for( unsigned i=1; i < Length; ++i )
    {
        Type l = abs(v[i]);
        if( l > lmax )
        {
            ret = i;
            lmax = l;
        }
    }
    return ret;
}
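// Illustrative note (not part of the original header): taking abs() changes the
// result for vectors with dominant negative components, e.g. v = (-5, 1, 2)
// gives longest_axis(v) == 0 with the abs(), whereas comparing raw components
// would pick axis 2.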

template<unsigned Length, typename Type>
CUDA_CALLABLE inline vec_t<Length,Type> lerp(const vec_t<Length,Type>& a, const vec_t<Length,Type>& b, Type t)
{
    return a*(Type(1)-t) + b*t;
}

template<unsigned Length, typename Type>
CUDA_CALLABLE inline void adj_lerp(const vec_t<Length,Type>& a, const vec_t<Length,Type>& b, Type t, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, Type& adj_t, const vec_t<Length,Type>& adj_ret)
{
    adj_a += adj_ret*(Type(1)-t);
    adj_b += adj_ret*t;
    adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret);
}
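// Illustrative note (not part of the original header): lerp(a, b, t) = a*(1-t) + b*t,
// so d/da = (1-t), d/db = t and d/dt = b - a; the adj_t update above is therefore
// dot(b - a, adj_ret), written as tensordot(b, adj_ret) - tensordot(a, adj_ret).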

// for integral types we do not accumulate gradients
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int8>* buf, const vec_t<Length, int8> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint8>* buf, const vec_t<Length, uint8> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int16>* buf, const vec_t<Length, int16> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint16>* buf, const vec_t<Length, uint16> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int32>* buf, const vec_t<Length, int32> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint32>* buf, const vec_t<Length, uint32> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int64>* buf, const vec_t<Length, int64> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint64>* buf, const vec_t<Length, uint64> &value) { }

// for bitwise operations we do not accumulate gradients
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_and(vec_t<Length, int8>* buf, const vec_t<Length, int8> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_and(vec_t<Length, uint8>* buf, const vec_t<Length, uint8> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_and(vec_t<Length, int16>* buf, const vec_t<Length, int16> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_and(vec_t<Length, uint16>* buf, const vec_t<Length, uint16> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_and(vec_t<Length, int32>* buf, const vec_t<Length, int32> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_and(vec_t<Length, uint32>* buf, const vec_t<Length, uint32> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_and(vec_t<Length, int64>* buf, const vec_t<Length, int64> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_and(vec_t<Length, uint64>* buf, const vec_t<Length, uint64> &value) { }

template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_or(vec_t<Length, int8>* buf, const vec_t<Length, int8> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_or(vec_t<Length, uint8>* buf, const vec_t<Length, uint8> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_or(vec_t<Length, int16>* buf, const vec_t<Length, int16> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_or(vec_t<Length, uint16>* buf, const vec_t<Length, uint16> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_or(vec_t<Length, int32>* buf, const vec_t<Length, int32> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_or(vec_t<Length, uint32>* buf, const vec_t<Length, uint32> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_or(vec_t<Length, int64>* buf, const vec_t<Length, int64> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_or(vec_t<Length, uint64>* buf, const vec_t<Length, uint64> &value) { }

template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_xor(vec_t<Length, int8>* buf, const vec_t<Length, int8> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_xor(vec_t<Length, uint8>* buf, const vec_t<Length, uint8> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_xor(vec_t<Length, int16>* buf, const vec_t<Length, int16> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_xor(vec_t<Length, uint16>* buf, const vec_t<Length, uint16> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_xor(vec_t<Length, int32>* buf, const vec_t<Length, int32> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_xor(vec_t<Length, uint32>* buf, const vec_t<Length, uint32> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_xor(vec_t<Length, int64>* buf, const vec_t<Length, int64> &value) { }
template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_xor(vec_t<Length, uint64>* buf, const vec_t<Length, uint64> &value) { }


// adjoints for some of the constructors, used in intersect.h
inline CUDA_CALLABLE void adj_vec2(float x, float y, float& adj_x, float& adj_y, const vec2& adj_ret)
{
    adj_x += adj_ret[0];
    adj_y += adj_ret[1];
}

inline CUDA_CALLABLE void adj_vec3(float x, float y, float z, float& adj_x, float& adj_y, float& adj_z, const vec3& adj_ret)
{
    adj_x += adj_ret[0];
    adj_y += adj_ret[1];
    adj_z += adj_ret[2];
}

inline CUDA_CALLABLE void adj_vec4(float x, float y, float z, float w, float& adj_x, float& adj_y, float& adj_z, float& adj_w, const vec4& adj_ret)
{
    adj_x += adj_ret[0];
    adj_y += adj_ret[1];
    adj_z += adj_ret[2];
    adj_w += adj_ret[3];
}

inline CUDA_CALLABLE void adj_vec3(float s, float& adj_s, const vec3& adj_ret)
{
    adj_vec_t(s, adj_s, adj_ret);
}

inline CUDA_CALLABLE void adj_vec4(float s, float& adj_s, const vec4& adj_ret)
{
    adj_vec_t(s, adj_s, adj_ret);
}

template<unsigned Length, typename Type>
CUDA_CALLABLE inline int len(const vec_t<Length, Type>& x)
{
    return Length;
}

template<unsigned Length, typename Type>
CUDA_CALLABLE inline void adj_len(const vec_t<Length, Type>& x, vec_t<Length, Type>& adj_x, const int& adj_ret)
{
}

} // namespace wp