quantmllibrary-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. quantml/__init__.py +74 -0
  2. quantml/autograd.py +154 -0
  3. quantml/cli/__init__.py +10 -0
  4. quantml/cli/run_experiment.py +385 -0
  5. quantml/config/__init__.py +28 -0
  6. quantml/config/config.py +259 -0
  7. quantml/data/__init__.py +33 -0
  8. quantml/data/cache.py +149 -0
  9. quantml/data/feature_store.py +234 -0
  10. quantml/data/futures.py +254 -0
  11. quantml/data/loaders.py +236 -0
  12. quantml/data/memory_optimizer.py +234 -0
  13. quantml/data/validators.py +390 -0
  14. quantml/experiments/__init__.py +23 -0
  15. quantml/experiments/logger.py +208 -0
  16. quantml/experiments/results.py +158 -0
  17. quantml/experiments/tracker.py +223 -0
  18. quantml/features/__init__.py +25 -0
  19. quantml/features/base.py +104 -0
  20. quantml/features/gap_features.py +124 -0
  21. quantml/features/registry.py +138 -0
  22. quantml/features/volatility_features.py +140 -0
  23. quantml/features/volume_features.py +142 -0
  24. quantml/functional.py +37 -0
  25. quantml/models/__init__.py +27 -0
  26. quantml/models/attention.py +258 -0
  27. quantml/models/dropout.py +130 -0
  28. quantml/models/gru.py +319 -0
  29. quantml/models/linear.py +112 -0
  30. quantml/models/lstm.py +353 -0
  31. quantml/models/mlp.py +286 -0
  32. quantml/models/normalization.py +289 -0
  33. quantml/models/rnn.py +154 -0
  34. quantml/models/tcn.py +238 -0
  35. quantml/online.py +209 -0
  36. quantml/ops.py +1707 -0
  37. quantml/optim/__init__.py +42 -0
  38. quantml/optim/adafactor.py +206 -0
  39. quantml/optim/adagrad.py +157 -0
  40. quantml/optim/adam.py +267 -0
  41. quantml/optim/lookahead.py +97 -0
  42. quantml/optim/quant_optimizer.py +228 -0
  43. quantml/optim/radam.py +192 -0
  44. quantml/optim/rmsprop.py +203 -0
  45. quantml/optim/schedulers.py +286 -0
  46. quantml/optim/sgd.py +181 -0
  47. quantml/py.typed +0 -0
  48. quantml/streaming.py +175 -0
  49. quantml/tensor.py +462 -0
  50. quantml/time_series.py +447 -0
  51. quantml/training/__init__.py +135 -0
  52. quantml/training/alpha_eval.py +203 -0
  53. quantml/training/backtest.py +280 -0
  54. quantml/training/backtest_analysis.py +168 -0
  55. quantml/training/cv.py +106 -0
  56. quantml/training/data_loader.py +177 -0
  57. quantml/training/ensemble.py +84 -0
  58. quantml/training/feature_importance.py +135 -0
  59. quantml/training/features.py +364 -0
  60. quantml/training/futures_backtest.py +266 -0
  61. quantml/training/gradient_clipping.py +206 -0
  62. quantml/training/losses.py +248 -0
  63. quantml/training/lr_finder.py +127 -0
  64. quantml/training/metrics.py +376 -0
  65. quantml/training/regularization.py +89 -0
  66. quantml/training/trainer.py +239 -0
  67. quantml/training/walk_forward.py +190 -0
  68. quantml/utils/__init__.py +51 -0
  69. quantml/utils/gradient_check.py +274 -0
  70. quantml/utils/logging.py +181 -0
  71. quantml/utils/ops_cpu.py +231 -0
  72. quantml/utils/profiling.py +364 -0
  73. quantml/utils/reproducibility.py +220 -0
  74. quantml/utils/serialization.py +335 -0
  75. quantmllibrary-0.1.0.dist-info/METADATA +536 -0
  76. quantmllibrary-0.1.0.dist-info/RECORD +79 -0
  77. quantmllibrary-0.1.0.dist-info/WHEEL +5 -0
  78. quantmllibrary-0.1.0.dist-info/licenses/LICENSE +22 -0
  79. quantmllibrary-0.1.0.dist-info/top_level.txt +1 -0
quantml/streaming.py ADDED
@@ -0,0 +1,175 @@
+ """
+ Streaming tensor support for tick-level data.
+
+ This module provides StreamingTensor, a ring buffer implementation optimized
+ for high-frequency trading data where new ticks arrive continuously.
+ """
+
+ from typing import List, Optional, Union, Any
+ from collections import deque
+ from quantml.tensor import Tensor
+
+ # Try to import NumPy
+ try:
+     import numpy as np
+     HAS_NUMPY = True
+ except ImportError:
+     HAS_NUMPY = False
+     np = None
+
+
+ class StreamingTensor:
+     """
+     A tensor that maintains a fixed-size ring buffer for streaming data.
+
+     This is optimized for tick-level market data where new values arrive
+     continuously and only the most recent N values need to be kept in memory.
+
+     Attributes:
+         max_size: Maximum number of elements to store
+         buffer: Ring buffer storing the data
+         _tensor: Current Tensor representation of the buffer
+
+     Examples:
+         >>> st = StreamingTensor(max_size=100)
+         >>> st.append(100.5)
+         >>> st.append(101.0)
+         >>> window = st.get_window(10)  # Get last 10 values
+     """
+
+     def __init__(self, max_size: int = 1000, initial_data: Optional[List] = None):
+         """
+         Initialize a StreamingTensor.
+
+         Args:
+             max_size: Maximum number of elements to store (ring buffer size)
+             initial_data: Optional initial data to populate
+         """
+         if max_size <= 0:
+             raise ValueError("max_size must be positive")
+
+         self.max_size = max_size
+         self.buffer = deque(maxlen=max_size)
+
+         if initial_data is not None:
+             for item in initial_data:
+                 self.buffer.append(float(item))
+
+         self._tensor = None
+         self._np_array = None  # Cache NumPy array
+         self._update_tensor()
+
+     def append(self, value: Union[float, int]):
+         """
+         Append a new value to the streaming tensor.
+
+         If the buffer is full, the oldest value is automatically removed
+         (ring buffer behavior).
+
+         Args:
+             value: New value to append
+         """
+         self.buffer.append(float(value))
+         self._np_array = None  # Invalidate NumPy cache
+         self._update_tensor()
+
+     def extend(self, values: List[Union[float, int]]):
+         """
+         Append multiple values at once.
+
+         Args:
+             values: List of values to append
+         """
+         for value in values:
+             self.buffer.append(float(value))
+         self._np_array = None  # Invalidate NumPy cache
+         self._update_tensor()
+
+     def get_window(self, size: Optional[int] = None) -> Tensor:
+         """
+         Get a window of the most recent values as a Tensor.
+
+         Uses NumPy views when available for better performance.
+
+         Args:
+             size: Number of recent values to return (default: all available)
+
+         Returns:
+             Tensor containing the window of values
+         """
+         if size is None:
+             size = len(self.buffer)
+
+         if size > len(self.buffer):
+             size = len(self.buffer)
+
+         if size == 0:
+             return Tensor([])
+
+         # Use NumPy if available for efficient slicing
+         if HAS_NUMPY and self._np_array is not None and len(self._np_array) >= size:
+             try:
+                 window_arr = self._np_array[-size:]
+                 return Tensor([window_arr.tolist()])
+             except (ValueError, TypeError):
+                 pass
+
+         # Fallback: Get last 'size' elements
+         window_data = list(self.buffer)[-size:]
+         return Tensor([window_data])
+
+     def get_all(self) -> Tensor:
+         """
+         Get all current values as a Tensor.
+
+         Returns:
+             Tensor containing all values in the buffer
+         """
+         return self.get_window()
+
+     def _update_tensor(self):
+         """Update the internal tensor representation."""
+         if len(self.buffer) == 0:
+             self._tensor = Tensor([])
+             self._np_array = None
+         else:
+             buffer_list = list(self.buffer)
+             self._tensor = Tensor([buffer_list])
+
+             # Update NumPy array cache
+             if HAS_NUMPY:
+                 try:
+                     self._np_array = np.array(buffer_list, dtype=np.float64)
+                 except (ValueError, TypeError):
+                     self._np_array = None
+             else:
+                 self._np_array = None
+
+     def clear(self):
+         """Clear all data from the buffer."""
+         self.buffer.clear()
+         self._tensor = Tensor([])
+         self._np_array = None
+
+     def __len__(self) -> int:
+         """Return the number of elements currently stored."""
+         return len(self.buffer)
+
+     def __repr__(self) -> str:
+         """String representation."""
+         return f"StreamingTensor(max_size={self.max_size}, current_size={len(self.buffer)})"
+
+     @property
+     def is_full(self) -> bool:
+         """Check if the buffer is at maximum capacity."""
+         return len(self.buffer) >= self.max_size
+
+     @property
+     def tensor(self) -> Tensor:
+         """Get the current tensor representation."""
+         return self._tensor
+
+     def to_list(self) -> List[float]:
+         """Convert to a Python list."""
+         return list(self.buffer)
+
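To make the ring-buffer API above concrete, here is a minimal usage sketch based only on the docstrings in this diff; the rolling-mean calculation is illustrative and not part of the package:

    from quantml.streaming import StreamingTensor

    st = StreamingTensor(max_size=1000)            # keep at most the last 1000 ticks
    for tick in [100.5, 101.0, 100.75, 101.25]:    # simulated tick prices
        st.append(tick)                            # oldest tick is dropped once full

    window = st.get_window(3)                      # Tensor wrapping the last 3 ticks
    last_three = st.to_list()[-3:]                 # same values as a plain Python list
    rolling_mean = sum(last_three) / len(last_three)
    print(window, rolling_mean, st.is_full, len(st))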
quantml/tensor.py ADDED
@@ -0,0 +1,462 @@
+ """
+ Core Tensor class for QuantML library.
+
+ This module provides the Tensor class which is the fundamental building block
+ for all operations in the library. It supports automatic differentiation,
+ gradient tracking, and integration with the autograd engine.
+ """
+
+ from typing import Optional, List, Union, Any, Callable
+ import math
+
+ # Try to import NumPy
+ try:
+     import numpy as np
+     HAS_NUMPY = True
+ except ImportError:
+     HAS_NUMPY = False
+     np = None
+
+
+ class Tensor:
+     """
+     A multi-dimensional array with automatic differentiation support.
+
+     The Tensor class stores data and tracks computation graphs for automatic
+     gradient computation. All operations return new tensors (no inplace operations).
+
+     Attributes:
+         data: The underlying data (list, nested lists, or numpy array)
+         requires_grad: Whether to track gradients for this tensor
+         grad: The gradient of this tensor (computed during backward pass)
+         _prev: Set of parent tensors in the computation graph
+         _op: String identifier for the operation that created this tensor
+         _backward_fn: Function to call during backward pass
+
+     Examples:
+         >>> x = Tensor([1.0, 2.0, 3.0], requires_grad=True)
+         >>> y = Tensor([4.0, 5.0, 6.0], requires_grad=True)
+         >>> z = x + y
+         >>> z.backward()
+         >>> x.grad  # [1.0, 1.0, 1.0]
+     """
+
+     def __init__(
+         self,
+         data: Union[List, Any],
+         requires_grad: bool = False,
+         _prev: Optional[set] = None,
+         _op: Optional[str] = None,
+         _backward_fn: Optional[Callable] = None,
+         _np_array: Optional[Any] = None
+     ):
+         """
+         Initialize a Tensor.
+
+         Args:
+             data: The data to store (list, nested list, or numpy array)
+             requires_grad: Whether to track gradients
+             _prev: Parent tensors in computation graph (internal use)
+             _op: Operation identifier (internal use)
+             _backward_fn: Backward function (internal use)
+             _np_array: Direct NumPy array (internal use, avoids conversion)
+         """
+         # Store as NumPy array if available, otherwise as list
+         if _np_array is not None and HAS_NUMPY:
+             # Direct NumPy array provided - skip conversion
+             self._np_array = _np_array.astype(np.float64, copy=False)
+             self._data_list = None  # Lazy conversion
+         elif HAS_NUMPY:
+             try:
+                 if isinstance(data, np.ndarray):
+                     self._np_array = data.astype(np.float64, copy=False)
+                     self._data_list = None  # Lazy conversion
+                 elif isinstance(data, (int, float)):
+                     self._np_array = np.array([[float(data)]], dtype=np.float64)
+                     self._data_list = None
+                 elif isinstance(data, list):
+                     # Convert to NumPy array
+                     self._np_array = np.array(data, dtype=np.float64)
+                     self._data_list = None
+                 else:
+                     self._np_array = np.array([[float(data)]], dtype=np.float64)
+                     self._data_list = None
+             except (ValueError, TypeError):
+                 # Fallback to list if NumPy conversion fails
+                 if isinstance(data, (int, float)):
+                     self._data_list = [[float(data)]]
+                 elif isinstance(data, list):
+                     self._data_list = self._ensure_nested_list(data)
+                 else:
+                     self._data_list = [[float(data)]]
+                 self._np_array = None
+         else:
+             # No NumPy - use list storage
+             if isinstance(data, (int, float)):
+                 self._data_list = [[float(data)]]
+             elif isinstance(data, list):
+                 self._data_list = self._ensure_nested_list(data)
+             else:
+                 self._data_list = [[float(data)]]
+             self._np_array = None
+
+         self.requires_grad = requires_grad
+         self._grad_np = None  # NumPy array for gradient storage
+         self._grad_list = None  # List gradient storage (fallback)
+         self._prev = _prev if _prev is not None else set()
+         self._op = _op
+         self._backward_fn = _backward_fn
+
+     @property
+     def data(self) -> List:
+         """Get data as nested list. Converts from NumPy if needed."""
+         if self._np_array is not None:
+             if self._data_list is None:
+                 self._data_list = self._np_array.tolist()
+             return self._data_list
+         return self._data_list
+
+     @data.setter
+     def data(self, value: Union[List, Any]):
+         """Set data, converting to NumPy if available."""
+         if HAS_NUMPY:
+             try:
+                 if isinstance(value, np.ndarray):
+                     self._np_array = value.astype(np.float64, copy=False)
+                 else:
+                     self._np_array = np.array(value, dtype=np.float64)
+                 self._data_list = None  # Invalidate cache
+             except (ValueError, TypeError):
+                 self._data_list = self._ensure_nested_list(value) if isinstance(value, list) else [[float(value)]]
+                 self._np_array = None
+         else:
+             self._data_list = self._ensure_nested_list(value) if isinstance(value, list) else [[float(value)]]
+             self._np_array = None
+
+     @property
+     def numpy(self):
+         """Get data as NumPy array. Returns None if NumPy not available."""
+         if HAS_NUMPY and self._np_array is not None:
+             return self._np_array
+         elif HAS_NUMPY:
+             try:
+                 self._np_array = np.array(self._data_list, dtype=np.float64)
+                 return self._np_array
+             except (ValueError, TypeError):
+                 return None
+         return None
+
+     def to_numpy(self):
+         """Convert to NumPy array."""
+         return self.numpy
+
+     @classmethod
+     def from_numpy(cls, arr, requires_grad: bool = False):
+         """Create Tensor from NumPy array."""
+         if not HAS_NUMPY:
+             raise ImportError("NumPy is required for from_numpy()")
+         return cls(arr, requires_grad=requires_grad)
+
+     def _ensure_nested_list(self, data: Any) -> List:
+         """Ensure data is a nested list structure."""
+         if not isinstance(data, list):
+             return [[float(data)]]
+         if len(data) == 0:
+             return [[]]
+         # Check if first element is a list (2D+)
+         if isinstance(data[0], list):
+             return data
+         # 1D list - wrap it
+         return [data]
+
+     @property
+     def shape(self) -> tuple:
+         """Get the shape of the tensor. Uses NumPy if available."""
+         if HAS_NUMPY and self._np_array is not None:
+             return tuple(self._np_array.shape)
+         # Fallback to list-based shape
+         data = self.data
+         if not isinstance(data, list):
+             return (1,)
+         if len(data) == 0:
+             return (0,)
+         if isinstance(data[0], list):
+             return (len(data), len(data[0]))
+         return (len(data),)
+
+     def backward(self, grad: Optional[Union[List, Any, Any]] = None):
+         """
+         Compute gradients by backpropagation.
+
+         Args:
+             grad: Initial gradient (defaults to ones if not provided)
+         """
+         if not self.requires_grad:
+             return
+
+         # Initialize gradient if not set
+         if self._grad_np is None and self._grad_list is None:
+             if grad is None:
+                 # Default to ones with same shape
+                 if HAS_NUMPY and self._np_array is not None:
+                     self._grad_np = np.ones_like(self._np_array, dtype=np.float64)
+                     self._grad_list = None  # Lazy conversion
+                 else:
+                     self._grad_list = self._ones_like(self.data)
+                     self._grad_np = None
+             else:
+                 # Convert grad to NumPy if possible
+                 if HAS_NUMPY:
+                     try:
+                         if isinstance(grad, np.ndarray):
+                             self._grad_np = grad.astype(np.float64, copy=False)
+                             self._grad_list = None
+                         else:
+                             grad_arr = np.array(grad, dtype=np.float64)
+                             self._grad_np = grad_arr
+                             self._grad_list = None
+                     except (ValueError, TypeError):
+                         self._grad_list = self._ensure_nested_list(grad)
+                         self._grad_np = None
+                 else:
+                     self._grad_list = self._ensure_nested_list(grad)
+                     self._grad_np = None
+         else:
+             # Accumulate gradients
+             if grad is not None:
+                 if HAS_NUMPY:
+                     try:
+                         grad_arr = np.array(grad, dtype=np.float64) if not isinstance(grad, np.ndarray) else grad
+                         if self._grad_np is not None:
+                             self._grad_np = self._grad_np + grad_arr
+                             self._grad_list = None  # Invalidate list cache
+                         else:
+                             # Convert existing grad to NumPy and add
+                             if self._grad_list is not None:
+                                 self._grad_np = np.array(self._grad_list, dtype=np.float64) + grad_arr
+                                 self._grad_list = None
+                             else:
+                                 self._grad_np = grad_arr
+                     except (ValueError, TypeError):
+                         # Fallback to list operations
+                         if self._grad_list is None and self._grad_np is not None:
+                             self._grad_list = self._grad_np.tolist()
+                             self._grad_np = None
+                         if self._grad_list is None:
+                             self._grad_list = self._ensure_nested_list(grad)
+                         else:
+                             self._grad_list = self._add(self._grad_list, self._ensure_nested_list(grad))
+                 else:
+                     if self._grad_list is None:
+                         self._grad_list = self._ensure_nested_list(grad)
+                     else:
+                         self._grad_list = self._add(self._grad_list, self._ensure_nested_list(grad))
+
+         # Call backward function if it exists
+         if self._backward_fn is not None:
+             # Pass gradient in appropriate format
+             if self._grad_np is not None:
+                 self._backward_fn(self._grad_np)
+             else:
+                 self._backward_fn(self._grad_list)
+
+     @property
+     def grad(self):
+         """Get gradient. Returns NumPy array if available, otherwise list."""
+         if self._grad_np is not None:
+             return self._grad_np
+         return self._grad_list
+
+     @grad.setter
+     def grad(self, value):
+         """Set gradient. Accepts NumPy array or list."""
+         if value is None:
+             self._grad_np = None
+             self._grad_list = None
+             return
+
+         if HAS_NUMPY:
+             try:
+                 if isinstance(value, np.ndarray):
+                     self._grad_np = value.astype(np.float64, copy=False)
+                     self._grad_list = None
+                 else:
+                     self._grad_np = np.array(value, dtype=np.float64)
+                     self._grad_list = None
+             except (ValueError, TypeError):
+                 self._grad_list = self._ensure_nested_list(value) if isinstance(value, list) else [[float(value)]]
+                 self._grad_np = None
+         else:
+             self._grad_list = self._ensure_nested_list(value) if isinstance(value, list) else [[float(value)]]
+             self._grad_np = None
+
+     def zero_grad(self):
+         """Clear the gradient."""
+         self._grad_np = None
+         self._grad_list = None
+
+     def detach(self) -> 'Tensor':
+         """Create a new tensor without gradient tracking."""
+         return Tensor(self.data, requires_grad=False)
+
+     def update_data(self, new_data: Union[List, Any]):
+         """
+         Update data in-place. Only works if requires_grad=False to prevent graph corruption.
+
+         Args:
+             new_data: New data to set
+         """
+         if self.requires_grad:
+             raise RuntimeError("Cannot update data in-place for tensors with requires_grad=True")
+         self.data = new_data
+
+     def add_(self, other: Union['Tensor', float, int]) -> 'Tensor':
+         """
+         In-place addition: self += other
+
+         Args:
+             other: Tensor or scalar to add
+
+         Returns:
+             self (modified in-place)
+         """
+         if self.requires_grad:
+             raise RuntimeError("Cannot use in-place operations on tensors with requires_grad=True")
+
+         from quantml.ops import add
+         result = add(self, other)
+         self.data = result.data
+         return self
+
+     def mul_(self, other: Union['Tensor', float, int]) -> 'Tensor':
+         """
+         In-place multiplication: self *= other
+
+         Args:
+             other: Tensor or scalar to multiply
+
+         Returns:
+             self (modified in-place)
+         """
+         if self.requires_grad:
+             raise RuntimeError("Cannot use in-place operations on tensors with requires_grad=True")
+
+         from quantml.ops import mul
+         result = mul(self, other)
+         self.data = result.data
+         return self
+
+     def sub_(self, other: Union['Tensor', float, int]) -> 'Tensor':
+         """
+         In-place subtraction: self -= other
+
+         Args:
+             other: Tensor or scalar to subtract
+
+         Returns:
+             self (modified in-place)
+         """
+         if self.requires_grad:
+             raise RuntimeError("Cannot use in-place operations on tensors with requires_grad=True")
+
+         from quantml.ops import sub
+         result = sub(self, other)
+         self.data = result.data
+         return self
+
+     def div_(self, other: Union['Tensor', float, int]) -> 'Tensor':
+         """
+         In-place division: self /= other
+
+         Args:
+             other: Tensor or scalar to divide by
+
+         Returns:
+             self (modified in-place)
+         """
+         if self.requires_grad:
+             raise RuntimeError("Cannot use in-place operations on tensors with requires_grad=True")
+
+         from quantml.ops import div
+         result = div(self, other)
+         self.data = result.data
+         return self
+
+     def _ones_like(self, data: Any) -> List:
+         """Create a tensor of ones with the same shape."""
+         if isinstance(data, list):
+             if len(data) == 0:
+                 return []
+             if isinstance(data[0], list):
+                 return [[1.0] * len(row) for row in data]
+             return [1.0] * len(data)
+         return [[1.0]]
+
+     def _add(self, a: List, b: List) -> List:
+         """Element-wise addition of nested lists."""
+         if isinstance(a[0], list) and isinstance(b[0], list):
+             return [[a[i][j] + b[i][j] for j in range(len(a[i]))]
+                     for i in range(len(a))]
+         elif isinstance(a[0], list):
+             return [[a[i][j] + b[0] for j in range(len(a[i]))]
+                     for i in range(len(a))]
+         elif isinstance(b[0], list):
+             return [[a[0] + b[i][j] for j in range(len(b[i]))]
+                     for i in range(len(b))]
+         else:
+             return [a[i] + b[i] for i in range(len(a))]
+
+     def __repr__(self) -> str:
+         """String representation of the tensor."""
+         return f"Tensor(data={self.data}, requires_grad={self.requires_grad})"
+
+     def __add__(self, other: Union['Tensor', float, int]) -> 'Tensor':
+         """Add two tensors or tensor and scalar."""
+         from quantml.ops import add
+         return add(self, other)
+
+     def __radd__(self, other: Union[float, int]) -> 'Tensor':
+         """Right addition (scalar + tensor)."""
+         from quantml.ops import add
+         return add(other, self)
+
+     def __sub__(self, other: Union['Tensor', float, int]) -> 'Tensor':
+         """Subtract two tensors or tensor and scalar."""
+         from quantml.ops import sub
+         return sub(self, other)
+
+     def __rsub__(self, other: Union[float, int]) -> 'Tensor':
+         """Right subtraction (scalar - tensor)."""
+         from quantml.ops import sub
+         return sub(other, self)
+
+     def __mul__(self, other: Union['Tensor', float, int]) -> 'Tensor':
+         """Multiply two tensors or tensor and scalar."""
+         from quantml.ops import mul
+         return mul(self, other)
+
+     def __rmul__(self, other: Union[float, int]) -> 'Tensor':
+         """Right multiplication (scalar * tensor)."""
+         from quantml.ops import mul
+         return mul(other, self)
+
+     def __truediv__(self, other: Union['Tensor', float, int]) -> 'Tensor':
+         """Divide two tensors or tensor and scalar."""
+         from quantml.ops import div
+         return div(self, other)
+
+     def __rtruediv__(self, other: Union[float, int]) -> 'Tensor':
+         """Right division (scalar / tensor)."""
+         from quantml.ops import div
+         return div(other, self)
+
+     def __neg__(self) -> 'Tensor':
+         """Negate the tensor."""
+         from quantml.ops import mul
+         return mul(self, -1.0)
+
+     def __pow__(self, power: Union[float, int]) -> 'Tensor':
+         """Raise tensor to a power."""
+         from quantml.ops import pow
+         return pow(self, power)
+
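Similarly, a short sketch of how the Tensor class above is meant to be used, mirroring its docstring example; the arithmetic dispatches to quantml.ops (not shown in this section), so the exact shape and formatting of the results depend on that module:

    from quantml.tensor import Tensor

    x = Tensor([1.0, 2.0, 3.0], requires_grad=True)
    y = Tensor([4.0, 5.0, 6.0], requires_grad=True)

    z = x + y          # __add__ delegates to quantml.ops.add, which wires up _backward_fn
    z.backward()       # seeds the gradient with ones and propagates it through the graph

    print(x.grad)      # per the docstring, ones with the same shape as x
    print(x.detach())  # a copy of the data with gradient tracking disabled

    w = Tensor([1.0, 2.0, 3.0])   # requires_grad=False, so in-place updates are allowed
    w.add_(10.0)                  # in-place ops raise RuntimeError on tracked tensors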