multipers 2.2.3__cp310-cp310-win_amd64.whl → 2.3.0__cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of multipers might be problematic. Click here for more details.

Files changed (182) hide show
  1. multipers/__init__.py +33 -31
  2. multipers/_signed_measure_meta.py +430 -430
  3. multipers/_slicer_meta.py +211 -212
  4. multipers/data/MOL2.py +458 -458
  5. multipers/data/UCR.py +18 -18
  6. multipers/data/graphs.py +466 -466
  7. multipers/data/immuno_regions.py +27 -27
  8. multipers/data/pytorch2simplextree.py +90 -90
  9. multipers/data/shape3d.py +101 -101
  10. multipers/data/synthetic.py +113 -111
  11. multipers/distances.py +198 -198
  12. multipers/filtration_conversions.pxd.tp +84 -84
  13. multipers/filtrations/__init__.py +18 -0
  14. multipers/filtrations/filtrations.py +289 -0
  15. multipers/filtrations.pxd +224 -224
  16. multipers/function_rips.cp310-win_amd64.pyd +0 -0
  17. multipers/function_rips.pyx +105 -105
  18. multipers/grids.cp310-win_amd64.pyd +0 -0
  19. multipers/grids.pyx +350 -350
  20. multipers/gudhi/Persistence_slices_interface.h +132 -132
  21. multipers/gudhi/Simplex_tree_interface.h +239 -245
  22. multipers/gudhi/Simplex_tree_multi_interface.h +516 -561
  23. multipers/gudhi/cubical_to_boundary.h +59 -59
  24. multipers/gudhi/gudhi/Bitmap_cubical_complex.h +450 -450
  25. multipers/gudhi/gudhi/Bitmap_cubical_complex_base.h +1070 -1070
  26. multipers/gudhi/gudhi/Bitmap_cubical_complex_periodic_boundary_conditions_base.h +579 -579
  27. multipers/gudhi/gudhi/Debug_utils.h +45 -45
  28. multipers/gudhi/gudhi/Fields/Multi_field.h +484 -484
  29. multipers/gudhi/gudhi/Fields/Multi_field_operators.h +455 -455
  30. multipers/gudhi/gudhi/Fields/Multi_field_shared.h +450 -450
  31. multipers/gudhi/gudhi/Fields/Multi_field_small.h +531 -531
  32. multipers/gudhi/gudhi/Fields/Multi_field_small_operators.h +507 -507
  33. multipers/gudhi/gudhi/Fields/Multi_field_small_shared.h +531 -531
  34. multipers/gudhi/gudhi/Fields/Z2_field.h +355 -355
  35. multipers/gudhi/gudhi/Fields/Z2_field_operators.h +376 -376
  36. multipers/gudhi/gudhi/Fields/Zp_field.h +420 -420
  37. multipers/gudhi/gudhi/Fields/Zp_field_operators.h +400 -400
  38. multipers/gudhi/gudhi/Fields/Zp_field_shared.h +418 -418
  39. multipers/gudhi/gudhi/Flag_complex_edge_collapser.h +337 -337
  40. multipers/gudhi/gudhi/Matrix.h +2107 -2107
  41. multipers/gudhi/gudhi/Multi_critical_filtration.h +1038 -1038
  42. multipers/gudhi/gudhi/Multi_persistence/Box.h +171 -171
  43. multipers/gudhi/gudhi/Multi_persistence/Line.h +282 -282
  44. multipers/gudhi/gudhi/Off_reader.h +173 -173
  45. multipers/gudhi/gudhi/One_critical_filtration.h +1432 -1431
  46. multipers/gudhi/gudhi/Persistence_matrix/Base_matrix.h +769 -769
  47. multipers/gudhi/gudhi/Persistence_matrix/Base_matrix_with_column_compression.h +686 -686
  48. multipers/gudhi/gudhi/Persistence_matrix/Boundary_matrix.h +842 -842
  49. multipers/gudhi/gudhi/Persistence_matrix/Chain_matrix.h +1350 -1350
  50. multipers/gudhi/gudhi/Persistence_matrix/Id_to_index_overlay.h +1105 -1105
  51. multipers/gudhi/gudhi/Persistence_matrix/Position_to_index_overlay.h +859 -859
  52. multipers/gudhi/gudhi/Persistence_matrix/RU_matrix.h +910 -910
  53. multipers/gudhi/gudhi/Persistence_matrix/allocators/entry_constructors.h +139 -139
  54. multipers/gudhi/gudhi/Persistence_matrix/base_pairing.h +230 -230
  55. multipers/gudhi/gudhi/Persistence_matrix/base_swap.h +211 -211
  56. multipers/gudhi/gudhi/Persistence_matrix/boundary_cell_position_to_id_mapper.h +60 -60
  57. multipers/gudhi/gudhi/Persistence_matrix/boundary_face_position_to_id_mapper.h +60 -60
  58. multipers/gudhi/gudhi/Persistence_matrix/chain_pairing.h +136 -136
  59. multipers/gudhi/gudhi/Persistence_matrix/chain_rep_cycles.h +190 -190
  60. multipers/gudhi/gudhi/Persistence_matrix/chain_vine_swap.h +616 -616
  61. multipers/gudhi/gudhi/Persistence_matrix/columns/chain_column_extra_properties.h +150 -150
  62. multipers/gudhi/gudhi/Persistence_matrix/columns/column_dimension_holder.h +106 -106
  63. multipers/gudhi/gudhi/Persistence_matrix/columns/column_utilities.h +219 -219
  64. multipers/gudhi/gudhi/Persistence_matrix/columns/entry_types.h +327 -327
  65. multipers/gudhi/gudhi/Persistence_matrix/columns/heap_column.h +1140 -1140
  66. multipers/gudhi/gudhi/Persistence_matrix/columns/intrusive_list_column.h +934 -934
  67. multipers/gudhi/gudhi/Persistence_matrix/columns/intrusive_set_column.h +934 -934
  68. multipers/gudhi/gudhi/Persistence_matrix/columns/list_column.h +980 -980
  69. multipers/gudhi/gudhi/Persistence_matrix/columns/naive_vector_column.h +1092 -1092
  70. multipers/gudhi/gudhi/Persistence_matrix/columns/row_access.h +192 -192
  71. multipers/gudhi/gudhi/Persistence_matrix/columns/set_column.h +921 -921
  72. multipers/gudhi/gudhi/Persistence_matrix/columns/small_vector_column.h +1093 -1093
  73. multipers/gudhi/gudhi/Persistence_matrix/columns/unordered_set_column.h +1012 -1012
  74. multipers/gudhi/gudhi/Persistence_matrix/columns/vector_column.h +1244 -1244
  75. multipers/gudhi/gudhi/Persistence_matrix/matrix_dimension_holders.h +186 -186
  76. multipers/gudhi/gudhi/Persistence_matrix/matrix_row_access.h +164 -164
  77. multipers/gudhi/gudhi/Persistence_matrix/ru_pairing.h +156 -156
  78. multipers/gudhi/gudhi/Persistence_matrix/ru_rep_cycles.h +376 -376
  79. multipers/gudhi/gudhi/Persistence_matrix/ru_vine_swap.h +540 -540
  80. multipers/gudhi/gudhi/Persistent_cohomology/Field_Zp.h +118 -118
  81. multipers/gudhi/gudhi/Persistent_cohomology/Multi_field.h +173 -173
  82. multipers/gudhi/gudhi/Persistent_cohomology/Persistent_cohomology_column.h +128 -128
  83. multipers/gudhi/gudhi/Persistent_cohomology.h +745 -745
  84. multipers/gudhi/gudhi/Points_off_io.h +171 -171
  85. multipers/gudhi/gudhi/Simple_object_pool.h +69 -69
  86. multipers/gudhi/gudhi/Simplex_tree/Simplex_tree_iterators.h +463 -463
  87. multipers/gudhi/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h +83 -83
  88. multipers/gudhi/gudhi/Simplex_tree/Simplex_tree_siblings.h +106 -106
  89. multipers/gudhi/gudhi/Simplex_tree/Simplex_tree_star_simplex_iterators.h +277 -277
  90. multipers/gudhi/gudhi/Simplex_tree/hooks_simplex_base.h +62 -62
  91. multipers/gudhi/gudhi/Simplex_tree/indexing_tag.h +27 -27
  92. multipers/gudhi/gudhi/Simplex_tree/serialization_utils.h +62 -62
  93. multipers/gudhi/gudhi/Simplex_tree/simplex_tree_options.h +157 -157
  94. multipers/gudhi/gudhi/Simplex_tree.h +2794 -2794
  95. multipers/gudhi/gudhi/Simplex_tree_multi.h +152 -163
  96. multipers/gudhi/gudhi/distance_functions.h +62 -62
  97. multipers/gudhi/gudhi/graph_simplicial_complex.h +104 -104
  98. multipers/gudhi/gudhi/persistence_interval.h +253 -253
  99. multipers/gudhi/gudhi/persistence_matrix_options.h +170 -170
  100. multipers/gudhi/gudhi/reader_utils.h +367 -367
  101. multipers/gudhi/mma_interface_coh.h +256 -255
  102. multipers/gudhi/mma_interface_h0.h +223 -231
  103. multipers/gudhi/mma_interface_matrix.h +284 -282
  104. multipers/gudhi/naive_merge_tree.h +536 -575
  105. multipers/gudhi/scc_io.h +310 -289
  106. multipers/gudhi/truc.h +890 -888
  107. multipers/io.cp310-win_amd64.pyd +0 -0
  108. multipers/io.pyx +711 -711
  109. multipers/ml/accuracies.py +90 -90
  110. multipers/ml/convolutions.py +520 -520
  111. multipers/ml/invariants_with_persistable.py +79 -79
  112. multipers/ml/kernels.py +176 -176
  113. multipers/ml/mma.py +713 -714
  114. multipers/ml/one.py +472 -472
  115. multipers/ml/point_clouds.py +352 -346
  116. multipers/ml/signed_measures.py +1589 -1589
  117. multipers/ml/sliced_wasserstein.py +461 -461
  118. multipers/ml/tools.py +113 -113
  119. multipers/mma_structures.cp310-win_amd64.pyd +0 -0
  120. multipers/mma_structures.pxd +127 -127
  121. multipers/mma_structures.pyx +4 -4
  122. multipers/mma_structures.pyx.tp +1085 -1085
  123. multipers/multi_parameter_rank_invariant/diff_helpers.h +84 -93
  124. multipers/multi_parameter_rank_invariant/euler_characteristic.h +97 -97
  125. multipers/multi_parameter_rank_invariant/function_rips.h +322 -322
  126. multipers/multi_parameter_rank_invariant/hilbert_function.h +769 -769
  127. multipers/multi_parameter_rank_invariant/persistence_slices.h +148 -148
  128. multipers/multi_parameter_rank_invariant/rank_invariant.h +369 -369
  129. multipers/multiparameter_edge_collapse.py +41 -41
  130. multipers/multiparameter_module_approximation/approximation.h +2296 -2295
  131. multipers/multiparameter_module_approximation/combinatory.h +129 -129
  132. multipers/multiparameter_module_approximation/debug.h +107 -107
  133. multipers/multiparameter_module_approximation/format_python-cpp.h +286 -286
  134. multipers/multiparameter_module_approximation/heap_column.h +238 -238
  135. multipers/multiparameter_module_approximation/images.h +79 -79
  136. multipers/multiparameter_module_approximation/list_column.h +174 -174
  137. multipers/multiparameter_module_approximation/list_column_2.h +232 -232
  138. multipers/multiparameter_module_approximation/ru_matrix.h +347 -347
  139. multipers/multiparameter_module_approximation/set_column.h +135 -135
  140. multipers/multiparameter_module_approximation/structure_higher_dim_barcode.h +36 -36
  141. multipers/multiparameter_module_approximation/unordered_set_column.h +166 -166
  142. multipers/multiparameter_module_approximation/utilities.h +403 -419
  143. multipers/multiparameter_module_approximation/vector_column.h +223 -223
  144. multipers/multiparameter_module_approximation/vector_matrix.h +331 -331
  145. multipers/multiparameter_module_approximation/vineyards.h +464 -464
  146. multipers/multiparameter_module_approximation/vineyards_trajectories.h +649 -649
  147. multipers/multiparameter_module_approximation.cp310-win_amd64.pyd +0 -0
  148. multipers/multiparameter_module_approximation.pyx +216 -217
  149. multipers/pickle.py +90 -53
  150. multipers/plots.py +342 -334
  151. multipers/point_measure.cp310-win_amd64.pyd +0 -0
  152. multipers/point_measure.pyx +322 -320
  153. multipers/simplex_tree_multi.cp310-win_amd64.pyd +0 -0
  154. multipers/simplex_tree_multi.pxd +133 -133
  155. multipers/simplex_tree_multi.pyx +18 -15
  156. multipers/simplex_tree_multi.pyx.tp +1939 -1935
  157. multipers/slicer.cp310-win_amd64.pyd +0 -0
  158. multipers/slicer.pxd +81 -20
  159. multipers/slicer.pxd.tp +215 -214
  160. multipers/slicer.pyx +1091 -308
  161. multipers/slicer.pyx.tp +924 -914
  162. multipers/tensor/tensor.h +672 -672
  163. multipers/tensor.pxd +13 -13
  164. multipers/test.pyx +44 -44
  165. multipers/tests/__init__.py +57 -57
  166. multipers/torch/diff_grids.py +217 -217
  167. multipers/torch/rips_density.py +310 -304
  168. {multipers-2.2.3.dist-info → multipers-2.3.0.dist-info}/LICENSE +21 -21
  169. {multipers-2.2.3.dist-info → multipers-2.3.0.dist-info}/METADATA +21 -11
  170. multipers-2.3.0.dist-info/RECORD +182 -0
  171. multipers/tests/test_diff_helper.py +0 -73
  172. multipers/tests/test_hilbert_function.py +0 -82
  173. multipers/tests/test_mma.py +0 -83
  174. multipers/tests/test_point_clouds.py +0 -49
  175. multipers/tests/test_python-cpp_conversion.py +0 -82
  176. multipers/tests/test_signed_betti.py +0 -181
  177. multipers/tests/test_signed_measure.py +0 -89
  178. multipers/tests/test_simplextreemulti.py +0 -221
  179. multipers/tests/test_slicer.py +0 -221
  180. multipers-2.2.3.dist-info/RECORD +0 -189
  181. {multipers-2.2.3.dist-info → multipers-2.3.0.dist-info}/WHEEL +0 -0
  182. {multipers-2.2.3.dist-info → multipers-2.3.0.dist-info}/top_level.txt +0 -0
multipers/tensor/tensor.h CHANGED
@@ -1,672 +1,672 @@
1
- #pragma once
2
-
3
- #include <cassert>
4
- #include <cstddef>
5
- #include <iostream>
6
- #include <numeric>
7
- #include <vector>
8
-
9
- // TODO : sparse version, current operator[] is already a hash.
10
- namespace tensor {
11
-
12
- template <typename indices_type>
13
- inline std::vector<indices_type> compute_backward_cumprod(const std::vector<indices_type> &resolution) {
14
- constexpr bool verbose = false;
15
- std::vector<indices_type> cum_prod_resolution_(resolution.size());
16
- cum_prod_resolution_.back() = 1;
17
- for (auto i = resolution.size() - 1; i > 0; i--) {
18
- // std::cout << i << " " << cum_prod_resolution_.size() << std::endl;
19
- cum_prod_resolution_[i - 1] = resolution[i] * cum_prod_resolution_[i];
20
- }
21
- if constexpr (verbose) {
22
- std::cout << "Cum resolution ";
23
- for (auto c : cum_prod_resolution_) std::cout << c << " ";
24
- std::cout << std::endl;
25
- }
26
- return cum_prod_resolution_;
27
- }
28
-
29
- template <typename dtype, typename indices_type>
30
- class static_tensor_view { // Python handles the construction - destruction of
31
- // the data,
32
- public:
33
- using sparse_type = std::pair<std::vector<std::vector<indices_type>>, std::vector<dtype>>;
34
- static_tensor_view();
35
-
36
- static_tensor_view(dtype *data_ptr, const std::vector<indices_type> &resolution)
37
- : data_ptr_(data_ptr),
38
- size_(resolution.size() == 0
39
- ? 0
40
- : std::accumulate(begin(resolution), end(resolution), 1, std::multiplies<indices_type>())),
41
- resolution_(resolution)
42
- // cum_prod_resolution_(compute_backward_cumprod(resolution))
43
- {
44
- // cum_prod_resolution_ = std::vector<std::size_t>(resolution.size());
45
- // std::size_t last = 1;
46
- // for (auto i = resolution.size() -1; i > 0; i--){
47
- // last *=resolution[i];
48
- // // std::cout << i << " " << cum_prod_resolution_.size() << std::endl;
49
- // cum_prod_resolution_[resolution.size()-1 - i] = last;
50
- // }
51
- // cum_prod_resolution_.back() = 1;
52
- cum_prod_resolution_ = std::move(compute_backward_cumprod(resolution));
53
- };
54
-
55
- // dtype[]& data_ref(){
56
- // return *data_ptr;
57
- // }
58
- inline std::size_t size() const { return size_; }
59
-
60
- inline bool empty() const { return size_ == 0; }
61
-
62
- inline dtype &data_back() const { return *(data_ptr_ + size_ - 1); }
63
-
64
- inline std::size_t ndim() const { return resolution_.size(); }
65
-
66
- template <class oned_array_like = std::initializer_list<indices_type>>
67
- inline dtype &operator[](const oned_array_like &coordinates) const {
68
- const bool check = false;
69
- dtype *data_index = data_ptr_;
70
- /* 0; // max is 4*10^9, should be fine. just put an assert in python. */
71
-
72
- if constexpr (check) {
73
- if (coordinates.size() != resolution_.size()) {
74
- auto it = coordinates.begin();
75
- for (size_t i = 0u; i < coordinates.size(); i++) std::cerr << *(it++) << "/" << resolution_[i] << ", ";
76
- std::cerr << ")" << std::endl;
77
- throw std::invalid_argument("Invalid coordinate dimension.");
78
- }
79
- // for (auto [ci, cum_res, res] : std::views::zip(coordinates,
80
- // cum_prod_resolution_, resolution_)){ // NIK Apple clang for
81
- // (indices_type i : std::views::iota(0,coordinates.size())){
82
- auto it = coordinates.begin();
83
- for (size_t i = 0u; i < coordinates.size(); i++) {
84
- auto &ci = *(it++);
85
- auto cum_res = cum_prod_resolution_[i];
86
- auto res = resolution_[i];
87
- if (ci >= res) [[unlikely]] {
88
- std::cerr << "Crash log. Coordinates : (";
89
- auto it = coordinates.begin();
90
- for (auto i = 0u; i < coordinates.size(); i++) std::cerr << *(it++) << "/" << resolution_[i] << ", ";
91
- // for (auto [c, r] : std::views::zip(coordinates, resolution_))
92
- // std::cerr << c << "/" << r << ", "; // NIK APPLE CLANG
93
- std::cerr << ")" << std::endl;
94
- throw std::invalid_argument("Illegal coordinates.");
95
- }
96
- data_index += ci * cum_res;
97
- }
98
- if (data_index >= this->size()) [[unlikely]] {
99
- std::cerr << "Crash log. Coordinates : (";
100
- auto it = coordinates.begin();
101
- for (size_t i = 0u; i < coordinates.size(); i++) std::cerr << *(it++) << "/" << resolution_[i] << ", ";
102
- std::cerr << ")" << std::endl;
103
- throw std::invalid_argument("Internal error : asked data " + std::to_string(data_index) + "/" +
104
- std::to_string(this->size()));
105
- }
106
- // std::cout << data_index << " " << this->size() << std::endl;
107
- // std::cout << data_index << "/" << this->size() << std::endl;
108
- } else {
109
- // for (auto [ci, cum_res] : std::views::zip(coordinates,
110
- // cum_prod_resolution_)){ // small so i'm not sure reduce can be
111
- // efficient here // NIK Apple clang data_index += ci*cum_res;
112
- // }
113
-
114
- auto coord_ptr = coordinates.begin();
115
- auto cum_res_ptr = cum_prod_resolution_.begin();
116
- for (; coord_ptr != coordinates.end(); ++coord_ptr, ++cum_res_ptr) {
117
- data_index += (*coord_ptr) * (*cum_res_ptr);
118
- }
119
- }
120
- /* return *(data_ptr_ + data_index); */
121
- return *data_index;
122
- }
123
-
124
- template <typename idx_type>
125
- inline dtype &data_at_index(idx_type i) {
126
- return *(data_ptr_ + i);
127
- }
128
-
129
- template <typename indice_type_like>
130
- inline std::vector<indices_type> data_index_inverse(indice_type_like data_index,
131
- const std::vector<bool> &flip_axes = {}) const {
132
- std::vector<indices_type> coordinates(resolution_.size());
133
- int data_index_ = data_index;
134
- for (int parameter = static_cast<int>(coordinates.size()) - 1; parameter >= 0; parameter--) {
135
- auto [q, r] = std::div(data_index_, static_cast<int>(resolution_[parameter]));
136
- if (static_cast<int>(flip_axes.size()) > parameter && flip_axes[parameter])
137
- coordinates[parameter] = resolution_[parameter] - r;
138
- else
139
- coordinates[parameter] = r;
140
- data_index_ = q;
141
- }
142
- return coordinates;
143
- }
144
-
145
- // friend std::ostream& operator<<(std::ostream& stream, const
146
- // static_tensor_view<dtype,indices_type>& truc){
147
- // stream << "[";
148
- // for(indices_type i = 0; i < truc.size()-1; i++){
149
- // stream << *(truc.data_ptr_ + i) << ", ";
150
- // }
151
- // if(!truc.empty()) stream << truc.data_back();
152
- // stream << "]";
153
- // stream << "\n resolution : ";
154
- // for(indices_type i = 0; i < truc.resolution_.size(); i++){
155
- // stream << truc.resolution_[i] << ", ";
156
- // }
157
- // stream << "\n cum resolution : ";
158
- // for(indices_type i = 0; i < truc.cum_prod_resolution_.size(); i++){
159
- // stream << truc.cum_prod_resolution_[i] << ", ";
160
- // }
161
- // return stream;
162
- // }
163
-
164
- friend std::ostream &operator<<(std::ostream &stream, const static_tensor_view<dtype, indices_type> &truc) {
165
- // constexpr bool verbose = false;
166
- for (auto parameter = 0u; parameter < truc.ndim(); parameter++) stream << "[";
167
- // iterate over data, update coordinates in a vector, and print if in free
168
- // coords i.e. add one to last coord, modulo if greater, and propagate to
169
- // the next
170
- std::vector<indices_type> coordinates(truc.ndim()); /// 0,...,0
171
- for (auto i = 0u; i < truc.size() - 1; i++) {
172
- stream << truc.data_at(i);
173
-
174
- // for (indices_type parameter =0; parameter < coordinates.size();
175
- // parameter++){ stream << coordinates[parameter];
176
- // }
177
- // stream << "\n";
178
- coordinates[0]++;
179
- indices_type parameter = 0;
180
- for (; parameter < static_cast<int>(coordinates.size()) - 1; ++parameter) {
181
- if (coordinates[parameter] < truc.get_resolution()[parameter]) {
182
- // stream << ", ";
183
- // if (parameter == 1)
184
- // stream << "\n";
185
- break;
186
- }
187
- // for (indices_type i =0; i < parameter; i++)
188
- // stream << ";";
189
- // for (indices_type i =0; i < parameter+1; i++)
190
- // stream << "]";
191
- // stream << ", ";
192
- // for (indices_type i =0; i < parameter; i++)
193
- // stream << "[";
194
- coordinates[parameter] = 0; // 1 by 1 so should be fine not doing mods
195
- coordinates[parameter + 1]++;
196
- }
197
- if (parameter == 1)
198
- stream << "],\n [";
199
- else {
200
- for (indices_type i = 0; i < parameter; i++) stream << "]";
201
- stream << ", ";
202
- for (indices_type i = 0; i < parameter; i++) stream << "[";
203
- }
204
- }
205
-
206
- stream << truc.data_back();
207
- for (auto parameter = 0u; parameter < truc.ndim(); parameter++) stream << "]";
208
- return stream;
209
- }
210
-
211
- // template<class
212
- // twod_array_like=std::initializer_list<std::initializer_list<indices_type>>>
213
- // static_tensor_view_view<dtype,indices_type> view(twod_array_like
214
- // coordinates){ auto out = static_tensor_view_view(data_ptr_,
215
- // resolution_); out.free_coordinates = coordinates; return out;
216
- // }
217
- inline const std::vector<indices_type> &get_resolution() const { return resolution_; }
218
-
219
- inline const std::vector<indices_type> &get_cum_resolution() const { return cum_prod_resolution_; }
220
-
221
- template <typename indice_type_like>
222
- inline dtype &data_at(indice_type_like i) const {
223
- return *(data_ptr_ + i);
224
- }
225
-
226
- void differentiate(indices_type axis);
227
-
228
- inline sparse_type sparsify(const std::vector<bool> &flip_axes = {}, bool verbose = false) const {
229
- std::vector<std::vector<indices_type>> coordinates;
230
- std::vector<dtype> values;
231
- // for (indices_type i = 0; i < static_cast<indices_type>(this->size());
232
- // i++){
233
- for (auto i = 0u; i < this->size(); i++) {
234
- auto stuff = this->data_at(i);
235
- if (stuff == 0) [[likely]] // as this is sparse
236
- continue;
237
- coordinates.push_back(this->data_index_inverse(i, flip_axes));
238
- values.push_back(stuff);
239
- }
240
- if (verbose) [[unlikely]] {
241
- // for (auto [pt,w] : std::views::zip(coordinates, values)){ NIK apple
242
- // clang
243
- for (auto i = 0u; i < coordinates.size(); i++) {
244
- for (const auto &v : coordinates[i]) std::cout << v << " ";
245
- std::cout << "| " << values[i] << std::endl;
246
- }
247
- }
248
- return {coordinates, values};
249
- }
250
-
251
- // template<class oned_array_like=std::initializer_list<indices_type>>
252
- void _rec_add_cone(const std::vector<indices_type> &basepoint,
253
- dtype value,
254
- std::vector<indices_type> &coordinates,
255
- int _rec_parameter) const {
256
- if (_rec_parameter < 0) {
257
- (*this)[coordinates] += value;
258
- return;
259
- }
260
- for (indices_type c = basepoint[_rec_parameter]; c < this->get_resolution()[_rec_parameter]; c++) {
261
- coordinates[_rec_parameter] = c;
262
- this->_rec_add_cone(basepoint, value, coordinates, _rec_parameter - 1);
263
- }
264
- }
265
-
266
- inline void add_cone(const std::vector<indices_type> &basepoint, dtype value) const {
267
- constexpr const bool check = false;
268
- constexpr const bool verbose = false;
269
- if constexpr (check) {
270
- if (basepoint.size() != this->ndim()) throw std::logic_error("Invalid coordinate for cone");
271
- }
272
- if constexpr (verbose) {
273
- std::cout << "Adding cone ";
274
- for (auto b : basepoint) std::cout << b << " ,";
275
- std::cout << std::endl;
276
- }
277
- std::vector<indices_type> temp_container(this->ndim());
278
- this->_rec_add_cone(basepoint, value, temp_container, static_cast<int>(this->ndim()) - 1);
279
- }
280
-
281
- // template<class oned_array_like=std::initializer_list<indices_type>>
282
- void _rec_add_cone_boundary(const std::vector<indices_type> &basepoint,
283
- dtype value,
284
- std::vector<indices_type> &coordinates,
285
- int _rec_parameter) const {
286
- if (_rec_parameter < 0) {
287
- (*this)[coordinates] += value;
288
- return;
289
- }
290
-
291
- // for (auto c=basepoint[_rec_parameter];
292
- // c<this->get_resolution()[_rec_parameter]; c++){
293
- // coordinates[_rec_parameter] = c;
294
- // this->_rec_add_cone(basepoint, value, coordinates, _rec_parameter-1);
295
- // }
296
-
297
- coordinates[_rec_parameter] = basepoint[_rec_parameter];
298
- this->_rec_add_cone_boundary(std::vector<indices_type>(basepoint), value, coordinates, _rec_parameter - 1);
299
-
300
- coordinates[_rec_parameter] = this->get_resolution()[_rec_parameter] - 1;
301
- this->_rec_add_cone_boundary(basepoint, -value, coordinates, _rec_parameter - 1);
302
- }
303
-
304
- inline void add_cone_boundary(const std::vector<indices_type> &basepoint, dtype value) const {
305
- const bool check = false;
306
- if constexpr (check) {
307
- if (basepoint.size() != this->ndim()) throw std::logic_error("Invalid coordinate for cone boundary");
308
- }
309
- std::vector<indices_type> temp_container(this->ndim());
310
- this->_rec_add_cone_boundary(basepoint, value, temp_container, static_cast<int>(this->ndim()) - 1);
311
- }
312
-
313
- public:
314
- private:
315
- dtype *data_ptr_;
316
- std::size_t size_;
317
- std::vector<indices_type> resolution_;
318
- std::vector<indices_type> cum_prod_resolution_;
319
- // std::vector<std::vector<indices_types>> fixed_coordinates; // in child
320
- };
321
-
322
- template <typename dtype, typename indices_type>
323
- class static_tensor_view_view
324
- : public static_tensor_view<dtype, indices_type> { // i'm not sure this class is very efficient.
325
- public:
326
- using base = static_tensor_view<dtype, indices_type>;
327
-
328
- static_tensor_view_view(dtype *data_ptr,
329
- const std::vector<indices_type> &resolution,
330
- const std::vector<std::vector<indices_type>> &free_coordinates,
331
- bool use_sparse = true)
332
- : base(data_ptr, resolution),
333
- resolution_view(this->compute_resolution(free_coordinates))
334
- // free_coordinates(free_coordinates)
335
- {
336
- this->compute_ptrs(free_coordinates, use_sparse);
337
- };
338
-
339
- static_tensor_view_view(const static_tensor_view<dtype, indices_type> &parent,
340
- const std::vector<std::vector<indices_type>> &free_coordinates,
341
- bool use_sparse = true)
342
- : base(parent),
343
- resolution_view(this->compute_resolution(free_coordinates))
344
- // free_coordinates(free_coordinates)
345
- {
346
- this->compute_ptrs(free_coordinates, use_sparse);
347
- };
348
-
349
- inline bool is_float(const std::vector<indices_type> &resolution) const {
350
- indices_type dim = this->dimension();
351
- for (indices_type i = 0; i < dim; i++)
352
- if (resolution[i] > 1) return false;
353
- return true;
354
- }
355
-
356
- inline bool is_float() const { return this->is_float(this->resolution_view); }
357
-
358
- template <class oned_array_like = std::initializer_list<indices_type>>
359
- inline bool is_in_view(const oned_array_like &coordinates,
360
- const std::vector<std::vector<indices_type>> &free_coordinates) {
361
- assert(coordinates.size() == this->ndim());
362
- auto it = coordinates.begin();
363
- for (indices_type parameter = 0; parameter < static_cast<indices_type>(this->ndim()); ++parameter) {
364
- const auto &x = *it;
365
- it++;
366
- for (auto stuff : free_coordinates[parameter]) {
367
- if (stuff < x)
368
- continue;
369
- else if (stuff == x)
370
- break;
371
- else
372
- return false;
373
- }
374
- if (x > free_coordinates[parameter].back()) return false;
375
- }
376
- return true;
377
- }
378
-
379
- std::size_t _size() const { // for construction
380
- std::size_t out = 1;
381
- for (const auto &r : resolution_view) out *= r;
382
- return out;
383
- }
384
-
385
- std::size_t size() const { return ptrs.size(); }
386
-
387
- std::vector<indices_type> compute_resolution(const std::vector<std::vector<indices_type>> &free_coordinates) {
388
- std::vector<indices_type> out(free_coordinates.size());
389
- // for (auto [s, stuff] : std::views::zip(out, free_coordinates)) s =
390
- // stuff.size(); // NIK apple clang
391
- for (auto i = 0u; i < free_coordinates.size(); i++) out[i] = free_coordinates[i].size();
392
- return out;
393
- }
394
-
395
- void compute_ptrs_dense(const std::vector<std::vector<indices_type>> &free_coordinates) { // todo redo from
396
- // DO NOT USE
397
- constexpr bool verbose = false;
398
- std::vector<dtype *> out(this->_size());
399
- std::vector<indices_type> coordinates(this->ndim()); /// 0,...,0
400
- std::size_t count = 0;
401
-
402
- for (int i = 0; i < static_cast<int>(static_tensor_view<dtype, indices_type>::size()) - 1; i++) {
403
- if constexpr (verbose) {
404
- std::cout << "Coordinate : ";
405
- for (auto x : coordinates) std::cout << x << " ";
406
- if (this->is_in_view(coordinates, free_coordinates))
407
- std::cout << " in view";
408
- else
409
- std::cout << "not in view";
410
- std::cout << std::endl;
411
- }
412
-
413
- if (this->is_in_view(coordinates, free_coordinates)) {
414
- out[count] = &this->data_at(i);
415
- count++;
416
- }
417
- coordinates.back()++;
418
- for (indices_type parameter = coordinates.size() - 1; parameter > 0; parameter--) {
419
- if (coordinates[parameter] < this->get_resolution()[parameter]) {
420
- break;
421
- }
422
- for (indices_type i = parameter; i < static_cast<indices_type>(coordinates.size()); i++)
423
- coordinates[i] = 0; // 1 by 1 so should be fine not doing mods
424
- coordinates[parameter - 1]++;
425
- }
426
- }
427
- if (this->is_in_view(coordinates, free_coordinates)) {
428
- out[count] = &this->data_back();
429
- count++;
430
- }
431
- // assert(count == this->size());
432
- ptrs.swap(out);
433
- }
434
-
435
- inline void compute_ptrs_sparse(const std::vector<std::vector<indices_type>> &free_coordinates,
436
- std::vector<indices_type> _rec_coordinates_begin = {}) { // todo redo from
437
- constexpr bool verbose = false;
438
- if (_rec_coordinates_begin.size() == 0) ptrs.reserve(this->_size());
439
- indices_type parameter = _rec_coordinates_begin.size();
440
- if (parameter == static_cast<indices_type>(this->ndim())) {
441
- auto &value = tensor::static_tensor_view<dtype, indices_type>::operator[](
442
- _rec_coordinates_begin); // calling [] is not efficient, but not
443
- // bottleneck
444
- if constexpr (verbose) {
445
- std::cout << "Adding coordinates ";
446
- for (auto c : _rec_coordinates_begin) std::cout << c << " ";
447
- std::cout << " of value " << value;
448
- std::cout << std::endl;
449
- }
450
- ptrs.push_back(&value);
451
- return;
452
- }
453
- _rec_coordinates_begin.reserve(this->ndim());
454
- _rec_coordinates_begin.resize(parameter + 1);
455
- for (indices_type coord : free_coordinates[parameter]) {
456
- _rec_coordinates_begin.back() = coord;
457
- compute_ptrs_sparse(free_coordinates, _rec_coordinates_begin);
458
- }
459
- return;
460
- }
461
-
462
- inline void compute_ptrs(const std::vector<std::vector<indices_type>> &free_coordinates, bool use_sparse = true) {
463
- if (use_sparse)
464
- compute_ptrs_sparse(free_coordinates);
465
- else
466
- compute_ptrs_dense(free_coordinates);
467
- }
468
-
469
- inline void shift_coordinate(indices_type idx, indices_type shift_value) {
470
- // resolution stays the same,
471
- auto to_add = this->get_cum_resolution()[idx] * shift_value;
472
- for (auto &ptr : this->ptrs) ptr += to_add;
473
- }
474
-
475
- // constant additions
476
- inline void operator+=(dtype x) {
477
- // if (ptrs.empty()) this->compute_ptrs_dense();
478
- for (auto stuff : ptrs) *stuff += x;
479
- return;
480
- }
481
-
482
- inline void operator-=(dtype x) {
483
- // if (ptrs.empty()) this->compute_ptrs_dense();
484
- for (auto stuff : ptrs) *stuff -= x;
485
- return;
486
- }
487
-
488
- inline void operator*=(dtype x) {
489
- // if (ptrs.empty()) this->compute_ptrs_dense();
490
- for (auto stuff : ptrs) *stuff *= x;
491
- return;
492
- }
493
-
494
- inline void operator/=(dtype x) {
495
- // if (ptrs.empty()) this->compute_ptrs_dense();
496
- for (auto stuff : ptrs) *stuff /= x;
497
- return;
498
- }
499
-
500
- inline void operator=(dtype x) {
501
- for (auto stuff : ptrs) *stuff = x;
502
- return;
503
- }
504
-
505
- inline void operator=(const static_tensor_view_view<dtype, indices_type> &x) {
506
- assert(this->size() == x.size());
507
- this->ptrs = x.ptrs;
508
- return;
509
- }
510
-
511
- inline void swap(static_tensor_view_view<dtype, indices_type> &x) {
512
- this->ptrs.swap(x.ptrs);
513
- return;
514
- }
515
-
516
- // retrieves data from ptrs
517
- inline void operator+=(const static_tensor_view_view<dtype, indices_type> &x) {
518
- std::size_t num_data = this->size();
519
- assert(num_data == x.size());
520
- for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] += *x[idx];
521
- return;
522
- }
523
-
524
- inline void operator-=(const static_tensor_view_view<dtype, indices_type> &x) {
525
- std::size_t num_data = this->size();
526
- assert(num_data == x.size());
527
- for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] -= *x[idx];
528
- return;
529
- }
530
-
531
- inline void operator*=(const static_tensor_view_view<dtype, indices_type> &x) {
532
- std::size_t num_data = this->size();
533
- assert(num_data == x.size());
534
- for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] *= *x[idx];
535
- return;
536
- }
537
-
538
- inline void operator/=(const static_tensor_view_view<dtype, indices_type> &x) {
539
- std::size_t num_data = this->size();
540
- assert(num_data == x.size());
541
- for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] /= *x[idx];
542
- return;
543
- }
544
-
545
- // Default array_like template
546
- template <typename array_like = std::initializer_list<dtype>>
547
- inline void operator+=(const array_like &x) {
548
- std::size_t num_data = this->size();
549
- assert(num_data == x.size());
550
- for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] += *(x.begin() + idx);
551
- return;
552
- }
553
-
554
- template <typename array_like = std::initializer_list<dtype>>
555
- inline void operator-=(const array_like &x) {
556
- std::size_t num_data = this->size();
557
- assert(num_data == x.size());
558
- for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] -= *(x.begin() + idx);
559
- return;
560
- }
561
-
562
- template <typename array_like = std::initializer_list<dtype>>
563
- inline void operator*=(const array_like &x) {
564
- std::size_t num_data = this->size();
565
- assert(num_data == x.size());
566
- for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] *= *(x.begin() + idx);
567
- return;
568
- }
569
-
570
- template <typename array_like = std::initializer_list<dtype>>
571
- inline void operator/=(const array_like &x) {
572
- std::size_t num_data = this->size();
573
- assert(num_data == x.size());
574
- for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] /= *(x.begin() + idx);
575
- return;
576
- }
577
-
578
- // void compute_cum_res(){
579
- // if (cum_resolution_view.size() == 0){
580
-
581
- // cum_resolution_view =
582
- // compute_backward_cumprod(this->resolution_view);
583
- // }
584
- // }
585
- template <typename T = std::initializer_list<indices_type>>
586
- inline dtype &operator[](T coords) {
587
- throw std::logic_error("Not yet implemented");
588
- // this->compute_cum_res();
589
- // assert(this->cum_resolution_view.size() == coords.size());
590
- // std::size_t data_index = 0;
591
- // // for (indices_type i = 0, auto coords_it = coords.begin(); coords_it !=
592
- // coords.end(); coords_it++, i++)
593
- // // {data_index += (*(coords_it))*cum_resolution_view[i];};
594
- // for (auto [c, cr] : std::views::zip(coords, cum_resolution_view))
595
- // data_index += c*cr;
596
- // std::cout << ptrs.size() << " vs " << data_index << std::endl;
597
- // return *ptrs[data_index];
598
- }
599
-
600
- void print_data() const {
601
- std::cout << "[";
602
- for (auto stuff : ptrs) std::cout << *stuff << " ";
603
- std::cout << "]\n";
604
- }
605
-
606
- inline std::vector<dtype> copy_data() {
607
- std::vector<dtype> out(ptrs.size());
608
- for (auto i = 0u; i < ptrs.size(); i++) out[i] = *ptrs[i];
609
- return out;
610
- }
611
-
612
- public:
613
- // juste besoin de la resolution, avec les ptrs : ok pour l'affichage
614
- // const std::vector<std::vector<indices_type>> free_coordinates; // for each
615
- // parameter, the fixed indices, TODO:REMOVE
616
- const std::vector<indices_type> resolution_view;
617
-
618
- private:
619
- std::vector<dtype *> ptrs;
620
- // std::vector<std::size_t> cum_resolution_view; // not really useful.
621
- };
622
-
623
- template <typename dtype, typename indices_type>
624
- void inline static_tensor_view<dtype, indices_type>::differentiate(indices_type axis) {
625
- std::vector<std::vector<indices_type>> free_coordinates(this->ndim());
626
-
627
- // initialize free_coordinates of the view, full coordinates on each axis
628
- // exept for axis on which we iterate
629
- for (auto i = 0u; i < free_coordinates.size(); i++) {
630
- if (static_cast<indices_type>(i) == axis) continue;
631
- free_coordinates[i] = std::vector<indices_type>(this->get_resolution()[i]);
632
- for (auto j = 0u; j < free_coordinates[i].size(); j++) { // TODO optimize
633
- free_coordinates[i][j] = j;
634
- }
635
- }
636
- // iterate over coordinate of this axis with ab -> b-a -> ab=b[newslice]
637
- free_coordinates[axis] = {{0}};
638
- static_tensor_view_view<dtype, indices_type> x_i(*this, free_coordinates);
639
- std::vector<dtype> a, b;
640
- a = x_i.copy_data();
641
- for (indices_type h = 1; h < this->get_resolution()[axis]; h++) {
642
- free_coordinates[axis] = {{h}};
643
- // x_i = static_tensor_view_view<dtype,
644
- // indices_type>(*this,free_coordinates);
645
- x_i.shift_coordinate(axis, 1);
646
- b = std::move(x_i.copy_data());
647
- x_i -= a;
648
- a.swap(b);
649
- }
650
- }
651
-
652
- template <typename T>
653
- std::vector<std::vector<T>> cart_product(const std::vector<std::vector<T>> &v) {
654
- std::vector<std::vector<T>> s = {{}};
655
- for (const auto &u : v) {
656
- std::vector<std::vector<T>> r;
657
- for (const auto &x : s) {
658
- for (const auto y : u) {
659
- r.push_back(x);
660
- r.back().push_back(y);
661
- }
662
- }
663
- s = std::move(r);
664
- }
665
- for (const auto &truc : s) {
666
- for (const auto &machin : truc) std::cout << machin << ", ";
667
- std::cout << "\n";
668
- }
669
- return s;
670
- }
671
-
672
- } // namespace tensor
1
+ #pragma once
2
+
3
+ #include <cassert>
4
+ #include <cstddef>
5
+ #include <iostream>
6
+ #include <numeric>
7
+ #include <vector>
8
+
9
+ // TODO : sparse version, current operator[] is already a hash.
10
+ namespace tensor {
11
+
12
+ template <typename indices_type>
13
+ inline std::vector<indices_type> compute_backward_cumprod(const std::vector<indices_type> &resolution) {
14
+ constexpr bool verbose = false;
15
+ std::vector<indices_type> cum_prod_resolution_(resolution.size());
16
+ cum_prod_resolution_.back() = 1;
17
+ for (auto i = resolution.size() - 1; i > 0; i--) {
18
+ // std::cout << i << " " << cum_prod_resolution_.size() << std::endl;
19
+ cum_prod_resolution_[i - 1] = resolution[i] * cum_prod_resolution_[i];
20
+ }
21
+ if constexpr (verbose) {
22
+ std::cout << "Cum resolution ";
23
+ for (auto c : cum_prod_resolution_) std::cout << c << " ";
24
+ std::cout << std::endl;
25
+ }
26
+ return cum_prod_resolution_;
27
+ }
28
+
29
+ template <typename dtype, typename indices_type>
30
+ class static_tensor_view { // Python handles the construction - destruction of
31
+ // the data,
32
+ public:
33
+ using sparse_type = std::pair<std::vector<std::vector<indices_type>>, std::vector<dtype>>;
34
+ static_tensor_view();
35
+
36
+ static_tensor_view(dtype *data_ptr, const std::vector<indices_type> &resolution)
37
+ : data_ptr_(data_ptr),
38
+ size_(resolution.size() == 0
39
+ ? 0
40
+ : std::accumulate(begin(resolution), end(resolution), 1, std::multiplies<indices_type>())),
41
+ resolution_(resolution)
42
+ // cum_prod_resolution_(compute_backward_cumprod(resolution))
43
+ {
44
+ // cum_prod_resolution_ = std::vector<std::size_t>(resolution.size());
45
+ // std::size_t last = 1;
46
+ // for (auto i = resolution.size() -1; i > 0; i--){
47
+ // last *=resolution[i];
48
+ // // std::cout << i << " " << cum_prod_resolution_.size() << std::endl;
49
+ // cum_prod_resolution_[resolution.size()-1 - i] = last;
50
+ // }
51
+ // cum_prod_resolution_.back() = 1;
52
+ cum_prod_resolution_ = std::move(compute_backward_cumprod(resolution));
53
+ };
54
+
55
+ // dtype[]& data_ref(){
56
+ // return *data_ptr;
57
+ // }
58
+ inline std::size_t size() const { return size_; }
59
+
60
+ inline bool empty() const { return size_ == 0; }
61
+
62
+ inline dtype &data_back() const { return *(data_ptr_ + size_ - 1); }
63
+
64
+ inline std::size_t ndim() const { return resolution_.size(); }
65
+
66
+ template <class oned_array_like = std::initializer_list<indices_type>>
67
+ inline dtype &operator[](const oned_array_like &coordinates) const {
68
+ const bool check = false;
69
+ dtype *data_index = data_ptr_;
70
+ /* 0; // max is 4*10^9, should be fine. just put an assert in python. */
71
+
72
+ if constexpr (check) {
73
+ if (coordinates.size() != resolution_.size()) {
74
+ auto it = coordinates.begin();
75
+ for (size_t i = 0u; i < coordinates.size(); i++) std::cerr << *(it++) << "/" << resolution_[i] << ", ";
76
+ std::cerr << ")" << std::endl;
77
+ throw std::invalid_argument("Invalid coordinate dimension.");
78
+ }
79
+ // for (auto [ci, cum_res, res] : std::views::zip(coordinates,
80
+ // cum_prod_resolution_, resolution_)){ // NIK Apple clang for
81
+ // (indices_type i : std::views::iota(0,coordinates.size())){
82
+ auto it = coordinates.begin();
83
+ for (size_t i = 0u; i < coordinates.size(); i++) {
84
+ auto &ci = *(it++);
85
+ auto cum_res = cum_prod_resolution_[i];
86
+ auto res = resolution_[i];
87
+ if (ci >= res) [[unlikely]] {
88
+ std::cerr << "Crash log. Coordinates : (";
89
+ auto it = coordinates.begin();
90
+ for (auto i = 0u; i < coordinates.size(); i++) std::cerr << *(it++) << "/" << resolution_[i] << ", ";
91
+ // for (auto [c, r] : std::views::zip(coordinates, resolution_))
92
+ // std::cerr << c << "/" << r << ", "; // NIK APPLE CLANG
93
+ std::cerr << ")" << std::endl;
94
+ throw std::invalid_argument("Illegal coordinates.");
95
+ }
96
+ data_index += ci * cum_res;
97
+ }
98
+ if (data_index >= this->size()) [[unlikely]] {
99
+ std::cerr << "Crash log. Coordinates : (";
100
+ auto it = coordinates.begin();
101
+ for (size_t i = 0u; i < coordinates.size(); i++) std::cerr << *(it++) << "/" << resolution_[i] << ", ";
102
+ std::cerr << ")" << std::endl;
103
+ throw std::invalid_argument("Internal error : asked data " + std::to_string(data_index) + "/" +
104
+ std::to_string(this->size()));
105
+ }
106
+ // std::cout << data_index << " " << this->size() << std::endl;
107
+ // std::cout << data_index << "/" << this->size() << std::endl;
108
+ } else {
109
+ // for (auto [ci, cum_res] : std::views::zip(coordinates,
110
+ // cum_prod_resolution_)){ // small so i'm not sure reduce can be
111
+ // efficient here // NIK Apple clang data_index += ci*cum_res;
112
+ // }
113
+
114
+ auto coord_ptr = coordinates.begin();
115
+ auto cum_res_ptr = cum_prod_resolution_.begin();
116
+ for (; coord_ptr != coordinates.end(); ++coord_ptr, ++cum_res_ptr) {
117
+ data_index += (*coord_ptr) * (*cum_res_ptr);
118
+ }
119
+ }
120
+ /* return *(data_ptr_ + data_index); */
121
+ return *data_index;
122
+ }
123
+
124
+ template <typename idx_type>
125
+ inline dtype &data_at_index(idx_type i) {
126
+ return *(data_ptr_ + i);
127
+ }
128
+
129
+ template <typename indice_type_like>
130
+ inline std::vector<indices_type> data_index_inverse(indice_type_like data_index,
131
+ const std::vector<bool> &flip_axes = {}) const {
132
+ std::vector<indices_type> coordinates(resolution_.size());
133
+ int data_index_ = data_index;
134
+ for (int parameter = static_cast<int>(coordinates.size()) - 1; parameter >= 0; parameter--) {
135
+ auto [q, r] = std::div(data_index_, static_cast<int>(resolution_[parameter]));
136
+ if (static_cast<int>(flip_axes.size()) > parameter && flip_axes[parameter])
137
+ coordinates[parameter] = resolution_[parameter] - r;
138
+ else
139
+ coordinates[parameter] = r;
140
+ data_index_ = q;
141
+ }
142
+ return coordinates;
143
+ }
144
+
145
+ // friend std::ostream& operator<<(std::ostream& stream, const
146
+ // static_tensor_view<dtype,indices_type>& truc){
147
+ // stream << "[";
148
+ // for(indices_type i = 0; i < truc.size()-1; i++){
149
+ // stream << *(truc.data_ptr_ + i) << ", ";
150
+ // }
151
+ // if(!truc.empty()) stream << truc.data_back();
152
+ // stream << "]";
153
+ // stream << "\n resolution : ";
154
+ // for(indices_type i = 0; i < truc.resolution_.size(); i++){
155
+ // stream << truc.resolution_[i] << ", ";
156
+ // }
157
+ // stream << "\n cum resolution : ";
158
+ // for(indices_type i = 0; i < truc.cum_prod_resolution_.size(); i++){
159
+ // stream << truc.cum_prod_resolution_[i] << ", ";
160
+ // }
161
+ // return stream;
162
+ // }
163
+
164
+ friend std::ostream &operator<<(std::ostream &stream, const static_tensor_view<dtype, indices_type> &truc) {
165
+ // constexpr bool verbose = false;
166
+ for (auto parameter = 0u; parameter < truc.ndim(); parameter++) stream << "[";
167
+ // iterate over data, update coordinates in a vector, and print if in free
168
+ // coords i.e. add one to last coord, modulo if greater, and propagate to
169
+ // the next
170
+ std::vector<indices_type> coordinates(truc.ndim()); /// 0,...,0
171
+ for (auto i = 0u; i < truc.size() - 1; i++) {
172
+ stream << truc.data_at(i);
173
+
174
+ // for (indices_type parameter =0; parameter < coordinates.size();
175
+ // parameter++){ stream << coordinates[parameter];
176
+ // }
177
+ // stream << "\n";
178
+ coordinates[0]++;
179
+ indices_type parameter = 0;
180
+ for (; parameter < static_cast<int>(coordinates.size()) - 1; ++parameter) {
181
+ if (coordinates[parameter] < truc.get_resolution()[parameter]) {
182
+ // stream << ", ";
183
+ // if (parameter == 1)
184
+ // stream << "\n";
185
+ break;
186
+ }
187
+ // for (indices_type i =0; i < parameter; i++)
188
+ // stream << ";";
189
+ // for (indices_type i =0; i < parameter+1; i++)
190
+ // stream << "]";
191
+ // stream << ", ";
192
+ // for (indices_type i =0; i < parameter; i++)
193
+ // stream << "[";
194
+ coordinates[parameter] = 0; // 1 by 1 so should be fine not doing mods
195
+ coordinates[parameter + 1]++;
196
+ }
197
+ if (parameter == 1)
198
+ stream << "],\n [";
199
+ else {
200
+ for (indices_type i = 0; i < parameter; i++) stream << "]";
201
+ stream << ", ";
202
+ for (indices_type i = 0; i < parameter; i++) stream << "[";
203
+ }
204
+ }
205
+
206
+ stream << truc.data_back();
207
+ for (auto parameter = 0u; parameter < truc.ndim(); parameter++) stream << "]";
208
+ return stream;
209
+ }
210
+
211
+ // template<class
212
+ // twod_array_like=std::initializer_list<std::initializer_list<indices_type>>>
213
+ // static_tensor_view_view<dtype,indices_type> view(twod_array_like
214
+ // coordinates){ auto out = static_tensor_view_view(data_ptr_,
215
+ // resolution_); out.free_coordinates = coordinates; return out;
216
+ // }
217
+ inline const std::vector<indices_type> &get_resolution() const { return resolution_; }
218
+
219
+ inline const std::vector<indices_type> &get_cum_resolution() const { return cum_prod_resolution_; }
220
+
221
+ template <typename indice_type_like>
222
+ inline dtype &data_at(indice_type_like i) const {
223
+ return *(data_ptr_ + i);
224
+ }
225
+
226
+ void differentiate(indices_type axis);
227
+
228
+ inline sparse_type sparsify(const std::vector<bool> &flip_axes = {}, bool verbose = false) const {
229
+ std::vector<std::vector<indices_type>> coordinates;
230
+ std::vector<dtype> values;
231
+ // for (indices_type i = 0; i < static_cast<indices_type>(this->size());
232
+ // i++){
233
+ for (auto i = 0u; i < this->size(); i++) {
234
+ auto stuff = this->data_at(i);
235
+ if (stuff == 0) [[likely]] // as this is sparse
236
+ continue;
237
+ coordinates.push_back(this->data_index_inverse(i, flip_axes));
238
+ values.push_back(stuff);
239
+ }
240
+ if (verbose) [[unlikely]] {
241
+ // for (auto [pt,w] : std::views::zip(coordinates, values)){ NIK apple
242
+ // clang
243
+ for (auto i = 0u; i < coordinates.size(); i++) {
244
+ for (const auto &v : coordinates[i]) std::cout << v << " ";
245
+ std::cout << "| " << values[i] << std::endl;
246
+ }
247
+ }
248
+ return {coordinates, values};
249
+ }
250
+
251
+ // template<class oned_array_like=std::initializer_list<indices_type>>
252
+ void _rec_add_cone(const std::vector<indices_type> &basepoint,
253
+ dtype value,
254
+ std::vector<indices_type> &coordinates,
255
+ int _rec_parameter) const {
256
+ if (_rec_parameter < 0) {
257
+ (*this)[coordinates] += value;
258
+ return;
259
+ }
260
+ for (indices_type c = basepoint[_rec_parameter]; c < this->get_resolution()[_rec_parameter]; c++) {
261
+ coordinates[_rec_parameter] = c;
262
+ this->_rec_add_cone(basepoint, value, coordinates, _rec_parameter - 1);
263
+ }
264
+ }
265
+
266
+ inline void add_cone(const std::vector<indices_type> &basepoint, dtype value) const {
267
+ constexpr const bool check = false;
268
+ constexpr const bool verbose = false;
269
+ if constexpr (check) {
270
+ if (basepoint.size() != this->ndim()) throw std::logic_error("Invalid coordinate for cone");
271
+ }
272
+ if constexpr (verbose) {
273
+ std::cout << "Adding cone ";
274
+ for (auto b : basepoint) std::cout << b << " ,";
275
+ std::cout << std::endl;
276
+ }
277
+ std::vector<indices_type> temp_container(this->ndim());
278
+ this->_rec_add_cone(basepoint, value, temp_container, static_cast<int>(this->ndim()) - 1);
279
+ }
280
+
281
+ // template<class oned_array_like=std::initializer_list<indices_type>>
282
+ void _rec_add_cone_boundary(const std::vector<indices_type> &basepoint,
283
+ dtype value,
284
+ std::vector<indices_type> &coordinates,
285
+ int _rec_parameter) const {
286
+ if (_rec_parameter < 0) {
287
+ (*this)[coordinates] += value;
288
+ return;
289
+ }
290
+
291
+ // for (auto c=basepoint[_rec_parameter];
292
+ // c<this->get_resolution()[_rec_parameter]; c++){
293
+ // coordinates[_rec_parameter] = c;
294
+ // this->_rec_add_cone(basepoint, value, coordinates, _rec_parameter-1);
295
+ // }
296
+
297
+ coordinates[_rec_parameter] = basepoint[_rec_parameter];
298
+ this->_rec_add_cone_boundary(std::vector<indices_type>(basepoint), value, coordinates, _rec_parameter - 1);
299
+
300
+ coordinates[_rec_parameter] = this->get_resolution()[_rec_parameter] - 1;
301
+ this->_rec_add_cone_boundary(basepoint, -value, coordinates, _rec_parameter - 1);
302
+ }
303
+
304
+ inline void add_cone_boundary(const std::vector<indices_type> &basepoint, dtype value) const {
305
+ const bool check = false;
306
+ if constexpr (check) {
307
+ if (basepoint.size() != this->ndim()) throw std::logic_error("Invalid coordinate for cone boundary");
308
+ }
309
+ std::vector<indices_type> temp_container(this->ndim());
310
+ this->_rec_add_cone_boundary(basepoint, value, temp_container, static_cast<int>(this->ndim()) - 1);
311
+ }
312
+
313
+ public:
314
+ private:
315
+ dtype *data_ptr_;
316
+ std::size_t size_;
317
+ std::vector<indices_type> resolution_;
318
+ std::vector<indices_type> cum_prod_resolution_;
319
+ // std::vector<std::vector<indices_types>> fixed_coordinates; // in child
320
+ };
321
+
322
+ template <typename dtype, typename indices_type>
323
+ class static_tensor_view_view
324
+ : public static_tensor_view<dtype, indices_type> { // i'm not sure this class is very efficient.
325
+ public:
326
+ using base = static_tensor_view<dtype, indices_type>;
327
+
328
+ static_tensor_view_view(dtype *data_ptr,
329
+ const std::vector<indices_type> &resolution,
330
+ const std::vector<std::vector<indices_type>> &free_coordinates,
331
+ bool use_sparse = true)
332
+ : base(data_ptr, resolution),
333
+ resolution_view(this->compute_resolution(free_coordinates))
334
+ // free_coordinates(free_coordinates)
335
+ {
336
+ this->compute_ptrs(free_coordinates, use_sparse);
337
+ };
338
+
339
+ static_tensor_view_view(const static_tensor_view<dtype, indices_type> &parent,
340
+ const std::vector<std::vector<indices_type>> &free_coordinates,
341
+ bool use_sparse = true)
342
+ : base(parent),
343
+ resolution_view(this->compute_resolution(free_coordinates))
344
+ // free_coordinates(free_coordinates)
345
+ {
346
+ this->compute_ptrs(free_coordinates, use_sparse);
347
+ };
348
+
349
+ inline bool is_float(const std::vector<indices_type> &resolution) const {
350
+ indices_type dim = this->dimension();
351
+ for (indices_type i = 0; i < dim; i++)
352
+ if (resolution[i] > 1) return false;
353
+ return true;
354
+ }
355
+
356
+ inline bool is_float() const { return this->is_float(this->resolution_view); }
357
+
358
+ template <class oned_array_like = std::initializer_list<indices_type>>
359
+ inline bool is_in_view(const oned_array_like &coordinates,
360
+ const std::vector<std::vector<indices_type>> &free_coordinates) {
361
+ assert(coordinates.size() == this->ndim());
362
+ auto it = coordinates.begin();
363
+ for (indices_type parameter = 0; parameter < static_cast<indices_type>(this->ndim()); ++parameter) {
364
+ const auto &x = *it;
365
+ it++;
366
+ for (auto stuff : free_coordinates[parameter]) {
367
+ if (stuff < x)
368
+ continue;
369
+ else if (stuff == x)
370
+ break;
371
+ else
372
+ return false;
373
+ }
374
+ if (x > free_coordinates[parameter].back()) return false;
375
+ }
376
+ return true;
377
+ }
378
+
379
+ std::size_t _size() const { // for construction
380
+ std::size_t out = 1;
381
+ for (const auto &r : resolution_view) out *= r;
382
+ return out;
383
+ }
384
+
385
+ std::size_t size() const { return ptrs.size(); }
386
+
387
+ std::vector<indices_type> compute_resolution(const std::vector<std::vector<indices_type>> &free_coordinates) {
388
+ std::vector<indices_type> out(free_coordinates.size());
389
+ // for (auto [s, stuff] : std::views::zip(out, free_coordinates)) s =
390
+ // stuff.size(); // NIK apple clang
391
+ for (auto i = 0u; i < free_coordinates.size(); i++) out[i] = free_coordinates[i].size();
392
+ return out;
393
+ }
394
+
395
+ void compute_ptrs_dense(const std::vector<std::vector<indices_type>> &free_coordinates) { // todo redo from
396
+ // DO NOT USE
397
+ constexpr bool verbose = false;
398
+ std::vector<dtype *> out(this->_size());
399
+ std::vector<indices_type> coordinates(this->ndim()); /// 0,...,0
400
+ std::size_t count = 0;
401
+
402
+ for (int i = 0; i < static_cast<int>(static_tensor_view<dtype, indices_type>::size()) - 1; i++) {
403
+ if constexpr (verbose) {
404
+ std::cout << "Coordinate : ";
405
+ for (auto x : coordinates) std::cout << x << " ";
406
+ if (this->is_in_view(coordinates, free_coordinates))
407
+ std::cout << " in view";
408
+ else
409
+ std::cout << "not in view";
410
+ std::cout << std::endl;
411
+ }
412
+
413
+ if (this->is_in_view(coordinates, free_coordinates)) {
414
+ out[count] = &this->data_at(i);
415
+ count++;
416
+ }
417
+ coordinates.back()++;
418
+ for (indices_type parameter = coordinates.size() - 1; parameter > 0; parameter--) {
419
+ if (coordinates[parameter] < this->get_resolution()[parameter]) {
420
+ break;
421
+ }
422
+ for (indices_type i = parameter; i < static_cast<indices_type>(coordinates.size()); i++)
423
+ coordinates[i] = 0; // 1 by 1 so should be fine not doing mods
424
+ coordinates[parameter - 1]++;
425
+ }
426
+ }
427
+ if (this->is_in_view(coordinates, free_coordinates)) {
428
+ out[count] = &this->data_back();
429
+ count++;
430
+ }
431
+ // assert(count == this->size());
432
+ ptrs.swap(out);
433
+ }
434
+
435
+ inline void compute_ptrs_sparse(const std::vector<std::vector<indices_type>> &free_coordinates,
436
+ std::vector<indices_type> _rec_coordinates_begin = {}) { // todo redo from
437
+ constexpr bool verbose = false;
438
+ if (_rec_coordinates_begin.size() == 0) ptrs.reserve(this->_size());
439
+ indices_type parameter = _rec_coordinates_begin.size();
440
+ if (parameter == static_cast<indices_type>(this->ndim())) {
441
+ auto &value = tensor::static_tensor_view<dtype, indices_type>::operator[](
442
+ _rec_coordinates_begin); // calling [] is not efficient, but not
443
+ // bottleneck
444
+ if constexpr (verbose) {
445
+ std::cout << "Adding coordinates ";
446
+ for (auto c : _rec_coordinates_begin) std::cout << c << " ";
447
+ std::cout << " of value " << value;
448
+ std::cout << std::endl;
449
+ }
450
+ ptrs.push_back(&value);
451
+ return;
452
+ }
453
+ _rec_coordinates_begin.reserve(this->ndim());
454
+ _rec_coordinates_begin.resize(parameter + 1);
455
+ for (indices_type coord : free_coordinates[parameter]) {
456
+ _rec_coordinates_begin.back() = coord;
457
+ compute_ptrs_sparse(free_coordinates, _rec_coordinates_begin);
458
+ }
459
+ return;
460
+ }
461
+
462
+ inline void compute_ptrs(const std::vector<std::vector<indices_type>> &free_coordinates, bool use_sparse = true) {
+ if (use_sparse)
+ compute_ptrs_sparse(free_coordinates);
+ else
+ compute_ptrs_dense(free_coordinates);
+ }
+
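+ // Translate the view by shift_value entries along axis idx: since the underlying storage is
+ // contiguous, this only offsets every pointer by the axis stride (get_cum_resolution()[idx])
+ // times the shift.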
+ inline void shift_coordinate(indices_type idx, indices_type shift_value) {
+ // resolution stays the same,
+ auto to_add = this->get_cum_resolution()[idx] * shift_value;
+ for (auto &ptr : this->ptrs) ptr += to_add;
+ }
+
+ // element-wise updates with a scalar constant
+ inline void operator+=(dtype x) {
+ // if (ptrs.empty()) this->compute_ptrs_dense();
+ for (auto stuff : ptrs) *stuff += x;
+ return;
+ }
+
+ inline void operator-=(dtype x) {
+ // if (ptrs.empty()) this->compute_ptrs_dense();
+ for (auto stuff : ptrs) *stuff -= x;
+ return;
+ }
+
+ inline void operator*=(dtype x) {
+ // if (ptrs.empty()) this->compute_ptrs_dense();
+ for (auto stuff : ptrs) *stuff *= x;
+ return;
+ }
+
+ inline void operator/=(dtype x) {
+ // if (ptrs.empty()) this->compute_ptrs_dense();
+ for (auto stuff : ptrs) *stuff /= x;
+ return;
+ }
+
+ inline void operator=(dtype x) {
+ for (auto stuff : ptrs) *stuff = x;
+ return;
+ }
+
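+ // Rebinding assignment: copies the other view's pointer set (both views must reference the
+ // same number of entries); the pointed-to values themselves are not copied.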
+ inline void operator=(const static_tensor_view_view<dtype, indices_type> &x) {
+ assert(this->size() == x.size());
+ this->ptrs = x.ptrs;
+ return;
+ }
+
+ inline void swap(static_tensor_view_view<dtype, indices_type> &x) {
+ this->ptrs.swap(x.ptrs);
+ return;
+ }
+
+ // element-wise updates with another view; values are read through the other view's ptrs
+ inline void operator+=(const static_tensor_view_view<dtype, indices_type> &x) {
+ std::size_t num_data = this->size();
+ assert(num_data == x.size());
+ for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] += *x.ptrs[idx];
+ return;
+ }
+
+ inline void operator-=(const static_tensor_view_view<dtype, indices_type> &x) {
+ std::size_t num_data = this->size();
+ assert(num_data == x.size());
+ for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] -= *x.ptrs[idx];
+ return;
+ }
+
+ inline void operator*=(const static_tensor_view_view<dtype, indices_type> &x) {
+ std::size_t num_data = this->size();
+ assert(num_data == x.size());
+ for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] *= *x.ptrs[idx];
+ return;
+ }
+
+ inline void operator/=(const static_tensor_view_view<dtype, indices_type> &x) {
+ std::size_t num_data = this->size();
+ assert(num_data == x.size());
+ for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] /= *x.ptrs[idx];
+ return;
+ }
+
+ // same updates with any array-like container of values (the default template argument allows brace-init lists)
+ template <typename array_like = std::initializer_list<dtype>>
+ inline void operator+=(const array_like &x) {
+ std::size_t num_data = this->size();
+ assert(num_data == x.size());
+ for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] += *(x.begin() + idx);
+ return;
+ }
+
+ template <typename array_like = std::initializer_list<dtype>>
+ inline void operator-=(const array_like &x) {
+ std::size_t num_data = this->size();
+ assert(num_data == x.size());
+ for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] -= *(x.begin() + idx);
+ return;
+ }
+
+ template <typename array_like = std::initializer_list<dtype>>
+ inline void operator*=(const array_like &x) {
+ std::size_t num_data = this->size();
+ assert(num_data == x.size());
+ for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] *= *(x.begin() + idx);
+ return;
+ }
+
+ template <typename array_like = std::initializer_list<dtype>>
+ inline void operator/=(const array_like &x) {
+ std::size_t num_data = this->size();
+ assert(num_data == x.size());
+ for (auto idx = 0u; idx < num_data; idx++) *ptrs[idx] /= *(x.begin() + idx);
+ return;
+ }
+
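+ // Illustrative usage (assuming a float tensor t and a view v selecting three of its entries):
+ //   v += 2.0f;                 // shift the three selected entries of t
+ //   v *= {1.0f, 0.5f, 0.25f};  // scale them entry by entry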
+ // void compute_cum_res(){
+ // if (cum_resolution_view.size() == 0){
+
+ // cum_resolution_view =
+ // compute_backward_cumprod(this->resolution_view);
+ // }
+ // }
+ template <typename T = std::initializer_list<indices_type>>
+ inline dtype &operator[](T coords) {
+ throw std::logic_error("Not yet implemented");
+ // this->compute_cum_res();
+ // assert(this->cum_resolution_view.size() == coords.size());
+ // std::size_t data_index = 0;
+ // // for (indices_type i = 0, auto coords_it = coords.begin(); coords_it !=
+ // coords.end(); coords_it++, i++)
+ // // {data_index += (*(coords_it))*cum_resolution_view[i];};
+ // for (auto [c, cr] : std::views::zip(coords, cum_resolution_view))
+ // data_index += c*cr;
+ // std::cout << ptrs.size() << " vs " << data_index << std::endl;
+ // return *ptrs[data_index];
+ }
+
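+ // Debug helper: prints the values currently referenced by the view.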
+ void print_data() const {
+ std::cout << "[";
+ for (auto stuff : ptrs) std::cout << *stuff << " ";
+ std::cout << "]\n";
+ }
+
+ inline std::vector<dtype> copy_data() {
+ std::vector<dtype> out(ptrs.size());
+ for (auto i = 0u; i < ptrs.size(); i++) out[i] = *ptrs[i];
+ return out;
+ }
+
+ public:
+ // only the resolution is needed; together with the ptrs this is enough for display
+ // const std::vector<std::vector<indices_type>> free_coordinates; // for each
+ // parameter, the fixed indices, TODO:REMOVE
+ const std::vector<indices_type> resolution_view;
+
+ private:
+ std::vector<dtype *> ptrs;
+ // std::vector<std::size_t> cum_resolution_view; // not really useful.
+ };
+
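+ // In-place finite differences of a static_tensor_view along one axis: for h >= 1, slice h is
+ // replaced by slice h minus slice h-1, sweeping the axis with a single shifted view.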
+ template <typename dtype, typename indices_type>
+ void inline static_tensor_view<dtype, indices_type>::differentiate(indices_type axis) {
+ std::vector<std::vector<indices_type>> free_coordinates(this->ndim());
+
+ // initialize the free_coordinates of the view: full coordinate range on every axis
+ // except the axis on which we iterate
+ for (auto i = 0u; i < free_coordinates.size(); i++) {
+ if (static_cast<indices_type>(i) == axis) continue;
+ free_coordinates[i] = std::vector<indices_type>(this->get_resolution()[i]);
+ for (auto j = 0u; j < free_coordinates[i].size(); j++) { // TODO optimize
+ free_coordinates[i][j] = j;
+ }
+ }
+ // sweep the axis keeping the previous slice a and the current slice b, and replace the
+ // current slice in place by b - a
+ free_coordinates[axis] = {{0}};
+ static_tensor_view_view<dtype, indices_type> x_i(*this, free_coordinates);
+ std::vector<dtype> a, b;
+ a = x_i.copy_data();
+ for (indices_type h = 1; h < this->get_resolution()[axis]; h++) {
+ free_coordinates[axis] = {{h}};
+ // x_i = static_tensor_view_view<dtype,
+ // indices_type>(*this,free_coordinates);
+ x_i.shift_coordinate(axis, 1);
+ b = x_i.copy_data();
+ x_i -= a;
+ a.swap(b);
+ }
+ }
+
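+ // Cartesian product of a family of index sets, e.g. cart_product({{0, 1}, {2, 3}}) yields
+ // {0,2}, {0,3}, {1,2}, {1,3}. Note that it currently also prints every tuple to stdout.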
+ template <typename T>
+ std::vector<std::vector<T>> cart_product(const std::vector<std::vector<T>> &v) {
+ std::vector<std::vector<T>> s = {{}};
+ for (const auto &u : v) {
+ std::vector<std::vector<T>> r;
+ for (const auto &x : s) {
+ for (const auto y : u) {
+ r.push_back(x);
+ r.back().push_back(y);
+ }
+ }
+ s = std::move(r);
+ }
+ for (const auto &truc : s) {
+ for (const auto &machin : truc) std::cout << machin << ", ";
+ std::cout << "\n";
+ }
+ return s;
+ }
+
+ } // namespace tensor