numpy2 2.0.1__tar.gz → 2.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. {numpy2-2.0.1/numpy2.egg-info → numpy2-2.1.0}/PKG-INFO +3 -3
  2. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/__init__.py +26 -2
  3. numpy2-2.1.0/numpy2/advanced.py +572 -0
  4. {numpy2-2.0.1 → numpy2-2.1.0/numpy2.egg-info}/PKG-INFO +3 -3
  5. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2.egg-info/SOURCES.txt +1 -0
  6. {numpy2-2.0.1 → numpy2-2.1.0}/pyproject.toml +16 -3
  7. {numpy2-2.0.1 → numpy2-2.1.0}/setup.py +13 -2
  8. {numpy2-2.0.1 → numpy2-2.1.0}/CONTRIBUTING.md +0 -0
  9. {numpy2-2.0.1 → numpy2-2.1.0}/LICENSE +0 -0
  10. {numpy2-2.0.1 → numpy2-2.1.0}/MANIFEST.in +0 -0
  11. {numpy2-2.0.1 → numpy2-2.1.0}/README.md +0 -0
  12. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/array.py +0 -0
  13. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/converters.py +0 -0
  14. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/core.py +0 -0
  15. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/dtypes.py +0 -0
  16. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/fft.py +0 -0
  17. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/integrations.py +0 -0
  18. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/linalg.py +0 -0
  19. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/math_ops.py +0 -0
  20. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2/random.py +0 -0
  21. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2.egg-info/dependency_links.txt +0 -0
  22. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2.egg-info/not-zip-safe +0 -0
  23. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2.egg-info/requires.txt +0 -0
  24. {numpy2-2.0.1 → numpy2-2.1.0}/numpy2.egg-info/top_level.txt +0 -0
  25. {numpy2-2.0.1 → numpy2-2.1.0}/setup.cfg +0 -0
  26. {numpy2-2.0.1 → numpy2-2.1.0}/tests/test_core.py +0 -0
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: numpy2
3
- Version: 2.0.1
4
- Summary: Pure-Python NumPy drop-in: full NumPy API + JSON serialization, FastAPI/Flask/Django integration, zero dependencies
3
+ Version: 2.1.0
4
+ Summary: Pure-Python NumPy drop-in: full NumPy API + JSON serialization, array compression, pipeline transforms, schema validation, zero dependencies
5
5
  Home-page: https://github.com/maheshmakvana/numpy2
6
6
  Author: Mahesh Makvana
7
7
  Author-email: Mahesh Makvana <mahesh.makvana@example.com>
@@ -10,7 +10,7 @@ Project-URL: Homepage, https://github.com/maheshmakvana/numpy2
10
10
  Project-URL: Bug Tracker, https://github.com/maheshmakvana/numpy2/issues
11
11
  Project-URL: Documentation, https://github.com/maheshmakvana/numpy2/wiki
12
12
  Project-URL: Source Code, https://github.com/maheshmakvana/numpy2
13
- Keywords: numpy,numpy2,numpy drop-in,pure python numpy,json serialization,numpy json,fastapi numpy,flask numpy,django numpy,pandas,data-science,ndarray,linear algebra,fft,random,numerical computing,scientific computing,int64 json,numpy serializable,rest-api
13
+ Keywords: numpy,numpy2,numpy drop-in,numpy replacement,pure python numpy,json serialization,numpy json,fastapi numpy,flask numpy,django numpy,web api numpy,array compression,array cache,array pipeline,array validation,sliding window,batch processing numpy,streaming array,pandas,data-science,type-conversion,rest-api,ndarray,linear algebra,fft,random,numerical computing,scientific computing,int64 json,numpy serializable,object of type int64 is not json serializable,numpy web framework,numpy fastapi
14
14
  Classifier: Development Status :: 5 - Production/Stable
15
15
  Classifier: Environment :: Web Environment
16
16
  Classifier: Intended Audience :: Developers
@@ -15,7 +15,7 @@ NumPy is used as an optional accelerator when installed; if it is absent
15
15
  every operation runs in pure Python.
16
16
  """
17
17
 
18
- __version__ = "2.0.1"
18
+ __version__ = "2.1.0"
19
19
  __author__ = "Mahesh Makvana"
20
20
  __email__ = "mahesh.makvana@example.com"
21
21
  __license__ = "MIT"
@@ -192,7 +192,25 @@ except ImportError:
192
192
  char = _Stub()
193
193
  testing = _Stub()
194
194
 
195
- # ── 5. numpy2 web extras ──────────────────────────────────────────────────────
195
+ # ── 5. numpy2 advanced features ──────────────────────────────────────────────
196
+ from .advanced import (
197
+ ArrayCache,
198
+ ArrayPipeline,
199
+ ArrayValidator,
200
+ ArrayValidationError,
201
+ ProfiledArray,
202
+ compress_array,
203
+ decompress_array,
204
+ compress_to_b64,
205
+ decompress_from_b64,
206
+ sliding_window_view,
207
+ batch_apply,
208
+ to_structured,
209
+ array_chunks,
210
+ describe,
211
+ )
212
+
213
+ # ── 6. numpy2 web extras ──────────────────────────────────────────────────────
196
214
  from .core import (
197
215
  to_json, from_json,
198
216
  serialize, deserialize,
@@ -457,6 +475,12 @@ __all__ = [
457
475
  'infer_dtype', 'safe_cast', 'batch_convert',
458
476
  'FastAPIResponse', 'FlaskResponse', 'DjangoResponse',
459
477
  'setup_json_encoder', 'create_response_handler',
478
+ # advanced
479
+ 'ArrayCache', 'ArrayPipeline', 'ArrayValidator', 'ArrayValidationError',
480
+ 'ProfiledArray',
481
+ 'compress_array', 'decompress_array', 'compress_to_b64', 'decompress_from_b64',
482
+ 'sliding_window_view', 'batch_apply', 'to_structured',
483
+ 'array_chunks', 'describe',
460
484
  ]
461
485
 
462
486
  # matrix alias (2-D array subclass stub)
@@ -0,0 +1,572 @@
1
+ """
2
+ numpy2.advanced — Advanced array operations and utilities.
3
+
4
+ New in 2.1.0:
5
+ - ArrayCache: LRU cache for expensive array computations
6
+ - ArrayPipeline: Chainable transformation pipeline
7
+ - compress_array / decompress_array: zlib-based array compression
8
+ - ArrayValidator: Schema-based array validation
9
+ - sliding_window_view: Efficient sliding window without copies
10
+ - batch_apply: Apply a function across batches of rows
11
+ - to_structured: Convert ndarray + field names to structured-dict list
12
+ - ProfiledArray: Transparent profiling wrapper
13
+ """
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import math
18
+ import time
19
+ import zlib
20
+ import base64
21
+ import hashlib
22
+ import threading
23
+ from collections import OrderedDict
24
+ from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
25
+
26
+ from .array import ndarray, asarray, zeros
27
+
28
+
29
+ # ---------------------------------------------------------------------------
30
+ # LRU Array Cache
31
+ # ---------------------------------------------------------------------------
32
+
33
class _CacheEntry:
    """Internal per-key record: the cached value plus bookkeeping fields."""

    __slots__ = ("value", "hits", "created_at", "size_bytes")

    def __init__(self, value, size_bytes: int = 0):
        # Bookkeeping first, payload last — assignment order is irrelevant.
        self.hits = 0                  # number of times this entry was served
        self.created_at = time.time()  # wall-clock creation time (for TTL checks)
        self.size_bytes = size_bytes   # best-effort payload size estimate
        self.value = value             # the memoised result itself
41
+
42
+
43
class ArrayCache:
    """
    Thread-safe LRU cache for memoising expensive array computations.

    Parameters
    ----------
    maxsize : int
        Maximum number of entries to keep (default 128).
    ttl : float | None
        Time-to-live in seconds; None means no expiry.

    Example
    -------
    >>> cache = ArrayCache(maxsize=64)
    >>> @cache.memoize
    ... def expensive(x):
    ...     return x * 2
    >>> expensive(np2.array([1, 2, 3]))
    """

    # Sentinel distinguishing "key absent" from a legitimately cached ``None``.
    _MISS = object()

    def __init__(self, maxsize: int = 128, ttl: Optional[float] = None) -> None:
        self._maxsize = maxsize
        self._ttl = ttl
        self._store: OrderedDict[str, _CacheEntry] = OrderedDict()
        self._lock = threading.Lock()
        self._hits = 0
        self._misses = 0

    # ------------------------------------------------------------------
    def _make_key(self, *args, **kwargs) -> str:
        """Build a deterministic digest of the call arguments.

        ndarray arguments are keyed by their full contents and dtype, so
        two arrays holding equal data share one cache slot.
        """
        parts = []
        for a in args:
            if isinstance(a, ndarray):
                parts.append(f"arr:{a.tolist()!r}:{a.dtype}")
            else:
                parts.append(repr(a))
        for k, v in sorted(kwargs.items()):
            parts.append(f"{k}={v!r}")
        raw = "|".join(parts)
        return hashlib.md5(raw.encode()).hexdigest()

    def get(self, key: str, default: Any = None) -> Optional[Any]:
        """Return the cached value for *key*, or *default* on miss/expiry.

        The *default* parameter is new (backward-compatible: it defaults to
        None, the previous miss value). Passing a private sentinel lets
        callers distinguish a cached ``None`` from an absent key; memoize
        relies on this.
        """
        with self._lock:
            if key not in self._store:
                self._misses += 1
                return default
            entry = self._store[key]
            # Lazy TTL expiry: stale entries are dropped on access.
            if self._ttl is not None and (time.time() - entry.created_at) > self._ttl:
                del self._store[key]
                self._misses += 1
                return default
            self._store.move_to_end(key)  # mark as most-recently used
            entry.hits += 1
            self._hits += 1
            return entry.value

    def set(self, key: str, value: Any) -> None:
        """Insert or overwrite *key*, evicting the LRU entry when full."""
        size = 0
        if isinstance(value, ndarray):
            size = len(value._data) * 8  # rough estimate: 8 bytes per element
        with self._lock:
            if key in self._store:
                self._store.move_to_end(key)
                self._store[key] = _CacheEntry(value, size)
                return
            if len(self._store) >= self._maxsize:
                self._store.popitem(last=False)  # evict least-recently used
            self._store[key] = _CacheEntry(value, size)

    def clear(self) -> None:
        """Drop every entry and reset hit/miss counters."""
        with self._lock:
            self._store.clear()
            self._hits = 0
            self._misses = 0

    @property
    def stats(self) -> Dict[str, Any]:
        """Snapshot of occupancy and hit-rate statistics."""
        with self._lock:
            total = self._hits + self._misses
            return {
                "size": len(self._store),
                "maxsize": self._maxsize,
                "hits": self._hits,
                "misses": self._misses,
                "hit_rate": self._hits / total if total else 0.0,
            }

    def memoize(self, func: Callable) -> Callable:
        """Decorator: cache results of *func* based on its arguments.

        Uses the ``_MISS`` sentinel instead of ``None`` for miss detection,
        so functions that legitimately return ``None`` are cached too
        (previously they were recomputed on every call).
        """
        def wrapper(*args, **kwargs):
            key = self._make_key(*args, **kwargs)
            cached = self.get(key, ArrayCache._MISS)
            if cached is not ArrayCache._MISS:
                return cached
            result = func(*args, **kwargs)
            self.set(key, result)
            return result
        wrapper.__name__ = func.__name__
        wrapper.__doc__ = func.__doc__
        return wrapper
143
+
144
+
145
+ # ---------------------------------------------------------------------------
146
+ # Array Pipeline
147
+ # ---------------------------------------------------------------------------
148
+
149
class ArrayPipeline:
    """
    Chainable, lazy transformation pipeline for ndarrays.

    Each fluent method only records a step; nothing runs until :meth:`run`.

    Example
    -------
    >>> pipe = ArrayPipeline(np2.array([1, 2, 3, 4, 5]))
    >>> result = pipe.filter(lambda x: x > 2).map(lambda x: x ** 2).run()
    """

    def __init__(self, arr: ndarray) -> None:
        self._source = asarray(arr)
        self._steps: List[Tuple[str, Callable]] = []

    def _queue(self, kind: str, fn: Callable) -> "ArrayPipeline":
        # Record one step and return self to keep the chain going.
        self._steps.append((kind, fn))
        return self

    def map(self, func: Callable) -> "ArrayPipeline":
        """Apply element-wise function."""
        return self._queue("map", func)

    def filter(self, predicate: Callable) -> "ArrayPipeline":
        """Keep elements where predicate returns True."""
        return self._queue("filter", predicate)

    def normalize(self) -> "ArrayPipeline":
        """Min-max normalize to [0, 1]."""
        def _minmax(a):
            lo = min(a._data)
            hi = max(a._data)
            span = hi - lo
            if span == 0:
                # Constant input: every value maps to 0.0.
                return zeros(len(a._data), dtype='float64')
            return ndarray([(x - lo) / span for x in a._data], dtype='float64')
        return self._queue("transform", _minmax)

    def clip(self, lo: float, hi: float) -> "ArrayPipeline":
        """Clip values to [lo, hi]."""
        return self._queue("map", lambda x: max(lo, min(hi, x)))

    def round(self, decimals: int = 0) -> "ArrayPipeline":
        """Round values to *decimals* decimal places."""
        return self._queue("map", lambda x: round(x, decimals))

    def transform(self, func: Callable) -> "ArrayPipeline":
        """Apply a function to the entire array."""
        return self._queue("transform", func)

    def run(self) -> ndarray:
        """Execute all recorded steps in order and return the final ndarray."""
        current = self._source
        for kind, fn in self._steps:
            if kind == "map":
                current = ndarray([fn(x) for x in current._data], dtype=current.dtype)
            elif kind == "filter":
                current = ndarray([x for x in current._data if fn(x)], dtype=current.dtype)
            elif kind == "transform":
                current = fn(current)
        return current
211
+
212
+
213
+ # ---------------------------------------------------------------------------
214
+ # Compression
215
+ # ---------------------------------------------------------------------------
216
+
217
def compress_array(arr: ndarray, level: int = 6) -> bytes:
    """
    Compress an ndarray to bytes using zlib.

    Returns a self-describing byte payload that includes shape, dtype, and data.

    Parameters
    ----------
    arr : ndarray
        Array to compress (coerced via asarray).
    level : int
        zlib compression level (default 6).

    Example
    -------
    >>> blob = np2.compress_array(np2.array([1.0, 2.0, 3.0]))
    >>> arr = np2.decompress_array(blob)
    """
    arr = asarray(arr)
    # Self-describing envelope: shape and dtype travel alongside the data.
    envelope = {
        "shape": list(arr.shape),
        "dtype": arr.dtype.name,
        "data": arr.tolist(),
        "version": 1,
    }
    serialized = json.dumps(envelope, separators=(",", ":")).encode()
    return zlib.compress(serialized, level=level)
238
+
239
+
240
def decompress_array(blob: bytes) -> ndarray:
    """
    Decompress bytes produced by compress_array back to an ndarray.

    The nested "data" lists are handed straight to the ndarray constructor;
    the stored "shape" field is not consulted here.
    """
    decoded = json.loads(zlib.decompress(blob).decode())
    return ndarray(decoded["data"], dtype=decoded.get("dtype"))
247
+
248
+
249
def compress_to_b64(arr: ndarray, level: int = 6) -> str:
    """Compress array and encode as a base64 string (safe for JSON transport)."""
    compressed = compress_array(arr, level)
    return base64.b64encode(compressed).decode()
252
+
253
+
254
def decompress_from_b64(b64_str: str) -> ndarray:
    """Decompress a base64-encoded compressed array (inverse of compress_to_b64)."""
    blob = base64.b64decode(b64_str.encode())
    return decompress_array(blob)
257
+
258
+
259
+ # ---------------------------------------------------------------------------
260
+ # Array Validator
261
+ # ---------------------------------------------------------------------------
262
+
263
class ArrayValidationError(Exception):
    """Raised by ArrayValidator.validate when an array violates a constraint."""
265
+
266
+
267
class ArrayValidator:
    """
    Validate ndarrays against a set of declarative constraints.

    Parameters
    ----------
    dtype : str | None
        Required dtype name (e.g. 'float64'); None disables the check.
    min_val, max_val : float | None
        Inclusive bounds every numeric value must satisfy.
    shape : tuple[int, ...] | None
        Exact required shape.
    ndim : int | None
        Exact required number of dimensions.
    min_size, max_size : int | None
        Bounds on the total element count.
    allow_nan, allow_inf : bool
        Whether NaN / infinite floats are permitted (default False).

    Example
    -------
    >>> v = ArrayValidator(dtype='float64', min_val=0.0, max_val=1.0, ndim=1)
    >>> v.validate(np2.array([0.2, 0.5, 0.8]))  # passes silently
    >>> v.validate(np2.array([-1.0]))           # raises ArrayValidationError
    """

    def __init__(
        self,
        dtype: Optional[str] = None,
        min_val: Optional[float] = None,
        max_val: Optional[float] = None,
        shape: Optional[Tuple[int, ...]] = None,
        ndim: Optional[int] = None,
        min_size: Optional[int] = None,
        max_size: Optional[int] = None,
        allow_nan: bool = False,
        allow_inf: bool = False,
    ) -> None:
        self._dtype = dtype
        self._min_val = min_val
        self._max_val = max_val
        self._shape = shape
        self._ndim = ndim
        self._min_size = min_size
        self._max_size = max_size
        self._allow_nan = allow_nan
        self._allow_inf = allow_inf

    def validate(self, arr: ndarray) -> None:
        """Raise ArrayValidationError if any constraint is violated."""
        errors = self.check(arr)
        if errors:
            raise ArrayValidationError("; ".join(errors))

    def check(self, arr: ndarray) -> List[str]:
        """Return list of violated constraint descriptions (empty = valid)."""
        arr = asarray(arr)
        errors: List[str] = []

        if self._dtype and arr.dtype.name != self._dtype:
            errors.append(f"dtype must be '{self._dtype}', got '{arr.dtype.name}'")

        if self._ndim is not None and arr.ndim != self._ndim:
            errors.append(f"ndim must be {self._ndim}, got {arr.ndim}")

        if self._shape is not None and tuple(arr.shape) != tuple(self._shape):
            errors.append(f"shape must be {self._shape}, got {tuple(arr.shape)}")

        if self._min_size is not None and arr.size < self._min_size:
            errors.append(f"size must be >= {self._min_size}, got {arr.size}")

        if self._max_size is not None and arr.size > self._max_size:
            errors.append(f"size must be <= {self._max_size}, got {arr.size}")

        # Single scan for NaN/Inf.  The previous implementation broke out of
        # the loop after the first offending value, so an array containing
        # both a disallowed NaN and a disallowed Inf reported only one error.
        # Track each condition independently; True means "nothing left to
        # report" (already reported, or allowed by configuration).
        nan_done = self._allow_nan
        inf_done = self._allow_inf
        for v in arr._data:
            if isinstance(v, float):
                if not nan_done and math.isnan(v):
                    errors.append("array contains NaN (allow_nan=False)")
                    nan_done = True
                elif not inf_done and math.isinf(v):
                    errors.append("array contains Inf (allow_inf=False)")
                    inf_done = True
            if nan_done and inf_done:
                break  # both conditions settled; no reason to keep scanning

        # NaN values are skipped in the bound checks (NaN comparisons are
        # always False anyway, but math.isnan keeps the intent explicit).
        if self._min_val is not None:
            for v in arr._data:
                if isinstance(v, (int, float)) and not math.isnan(v) and v < self._min_val:
                    errors.append(f"all values must be >= {self._min_val}")
                    break

        if self._max_val is not None:
            for v in arr._data:
                if isinstance(v, (int, float)) and not math.isnan(v) and v > self._max_val:
                    errors.append(f"all values must be <= {self._max_val}")
                    break

        return errors
348
+
349
+
350
+ # ---------------------------------------------------------------------------
351
+ # Sliding window view
352
+ # ---------------------------------------------------------------------------
353
+
354
def sliding_window_view(arr: ndarray, window_size: int, step: int = 1) -> ndarray:
    """
    Return a 2-D array of sliding windows over a 1-D input array.

    Parameters
    ----------
    arr : ndarray (1-D)
        Input array.
    window_size : int
        Number of elements in each window (must be >= 1).
    step : int
        Number of elements to advance per step (default 1, must be >= 1).

    Raises
    ------
    ValueError
        If the input is not 1-D, if window_size or step is < 1, or if
        window_size exceeds the array size.  (Previously window_size < 1
        produced nonsense windows and step <= 0 raised an opaque range
        error or silently returned an empty result.)

    Example
    -------
    >>> np2.sliding_window_view(np2.array([1,2,3,4,5]), window_size=3)
    [[1,2,3],[2,3,4],[3,4,5]]
    """
    arr = asarray(arr)
    if arr.ndim != 1:
        raise ValueError("sliding_window_view requires a 1-D array")
    if window_size < 1:
        raise ValueError(f"window_size must be >= 1, got {window_size}")
    if step < 1:
        raise ValueError(f"step must be >= 1, got {step}")
    n = arr.size
    if window_size > n:
        raise ValueError(f"window_size ({window_size}) exceeds array size ({n})")
    data = arr._data
    windows = [
        list(data[start: start + window_size])
        for start in range(0, n - window_size + 1, step)
    ]
    return ndarray(windows, dtype=arr.dtype)
383
+
384
+
385
+ # ---------------------------------------------------------------------------
386
+ # Batch apply
387
+ # ---------------------------------------------------------------------------
388
+
389
def batch_apply(
    arr: ndarray,
    func: Callable,
    batch_size: int = 256,
    axis: int = 0,
) -> ndarray:
    """
    Apply *func* to consecutive batches of rows along the first axis.

    Useful for processing large arrays without loading everything into memory.

    Parameters
    ----------
    arr : ndarray
        Input array; 1-D inputs are processed as flat chunks.
    func : Callable
        Function applied to each batch; must return something asarray accepts.
    batch_size : int
        Rows (or elements, for 1-D input) per batch (default 256).
    axis : int
        Accepted for interface compatibility but currently unused — batching
        always runs along axis 0.

    Example
    -------
    >>> arr = np2.arange(1000).reshape(100, 10)
    >>> result = np2.batch_apply(arr, lambda b: b * 2, batch_size=25)
    """
    arr = asarray(arr)
    if arr.ndim < 2:
        # 1-D (or 0-D): split the flat data into chunks of batch_size elements.
        n = arr.size
        results = []
        for start in range(0, n, batch_size):
            chunk = ndarray(arr._data[start: start + batch_size], dtype=arr.dtype)
            results.extend(asarray(func(chunk))._data)
        return ndarray(results, dtype=arr.dtype)

    rows = arr.shape[0]
    # Elements per row = product of ALL trailing dimensions.  The previous
    # code used arr.shape[1] alone, which sliced the wrong flat elements for
    # any input with ndim > 2.
    row_len = math.prod(arr.shape[1:])
    result_rows = []
    for start in range(0, rows, batch_size):
        n_rows = min(batch_size, rows - start)
        # Build the batch directly from the flat buffer, bypassing __init__.
        batch = ndarray.__new__(ndarray)
        batch._data = list(arr._data[start * row_len: (start + n_rows) * row_len])
        batch._shape = [n_rows] + list(arr.shape[1:])
        batch._dtype = arr._dtype
        result_rows.extend(asarray(func(batch))._data)
    result = ndarray.__new__(ndarray)
    result._data = result_rows
    result._shape = [rows] + list(arr.shape[1:])
    result._dtype = arr._dtype
    return result
431
+
432
+
433
+ # ---------------------------------------------------------------------------
434
+ # to_structured
435
+ # ---------------------------------------------------------------------------
436
+
437
def to_structured(arr: ndarray, field_names: List[str]) -> List[Dict[str, Any]]:
    """
    Convert a 2-D ndarray to a list of dicts with named fields.

    Example
    -------
    >>> arr = np2.array([[1, 2], [3, 4]])
    >>> np2.to_structured(arr, ['x', 'y'])
    [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]
    """
    arr = asarray(arr)
    if arr.ndim != 2:
        raise ValueError("to_structured requires a 2-D array")
    n_cols = arr.shape[1]
    if len(field_names) != n_cols:
        raise ValueError(
            f"field_names length ({len(field_names)}) must match number of columns ({n_cols})"
        )
    flat = arr._data
    records: List[Dict[str, Any]] = []
    for r in range(arr.shape[0]):
        # Each row occupies n_cols consecutive slots in the flat buffer.
        offset = r * n_cols
        records.append(dict(zip(field_names, flat[offset: offset + n_cols])))
    return records
461
+
462
+
463
+ # ---------------------------------------------------------------------------
464
+ # ProfiledArray
465
+ # ---------------------------------------------------------------------------
466
+
467
class ProfiledArray:
    """
    Transparent profiling wrapper around ndarray.

    Attribute access is proxied to the wrapped array; every callable
    attribute is wrapped so each invocation is timed and logged.

    Example
    -------
    >>> pa = ProfiledArray(np2.array([1, 2, 3]))
    >>> _ = pa.mean()
    >>> pa.report()
    """

    def __init__(self, arr: ndarray) -> None:
        self._arr = asarray(arr)
        self._log: List[Dict[str, Any]] = []

    def __getattr__(self, name: str):
        # Only invoked for names NOT found on ProfiledArray itself, so
        # accessing self._arr / self._log below does not recurse.
        target = getattr(self._arr, name)
        if not callable(target):
            return target

        def _timed(*args, **kwargs):
            t0 = time.perf_counter()
            result = target(*args, **kwargs)
            elapsed = (time.perf_counter() - t0) * 1000
            self._log.append({"op": name, "ms": round(elapsed, 4)})
            return result

        return _timed

    def report(self) -> List[Dict[str, Any]]:
        """Print a summary and return the list of {op, ms} dicts recorded so far."""
        total = sum(entry["ms"] for entry in self._log)
        print(f"ProfiledArray — {len(self._log)} ops, total {total:.3f} ms")
        for entry in self._log:
            print(f" {entry['op']:30s} {entry['ms']:.4f} ms")
        return self._log

    def clear_profile(self) -> None:
        """Clear recorded profiling data."""
        self._log.clear()
507
+
508
+
509
+ # ---------------------------------------------------------------------------
510
+ # Chunk generator (streaming)
511
+ # ---------------------------------------------------------------------------
512
+
513
def array_chunks(arr: ndarray, chunk_size: int) -> Generator[ndarray, None, None]:
    """
    Yield successive chunks of a 1-D ndarray as separate ndarrays.

    Useful for streaming large arrays without loading all into memory.

    Raises
    ------
    ValueError
        If chunk_size < 1.  (Previously a negative chunk_size silently
        yielded nothing and zero raised an opaque range() error.)  Note this
        is a generator, so the error surfaces on first iteration.

    Example
    -------
    >>> for chunk in np2.array_chunks(big_array, 100):
    ...     process(chunk)
    """
    if chunk_size < 1:
        raise ValueError(f"chunk_size must be >= 1, got {chunk_size}")
    arr = asarray(arr)
    data = arr._data
    for start in range(0, arr.size, chunk_size):
        yield ndarray(data[start: start + chunk_size], dtype=arr.dtype)
529
+
530
+
531
+ # ---------------------------------------------------------------------------
532
+ # describe() (summary statistics)
533
+ # ---------------------------------------------------------------------------
534
+
535
def describe(arr: ndarray) -> Dict[str, float]:
    """
    Return a dict of descriptive statistics for a 1-D ndarray.

    Keys: count, mean, std, min, p25, p50, p75, max.  NaN values are
    ignored; std is the population standard deviation (divisor n); the
    percentiles use linear interpolation between order statistics.

    Example
    -------
    >>> np2.describe(np2.array([1.0, 2.0, 3.0, 4.0, 5.0]))
    {'count': 5, 'mean': 3.0, 'std': 1.414..., 'min': 1.0, ...}
    """
    arr = asarray(arr)
    values = sorted(
        v for v in arr._data if isinstance(v, (int, float)) and not math.isnan(v)
    )
    count = len(values)
    if count == 0:
        # No usable values: every statistic is NaN.
        nan = float("nan")
        return {"count": 0, "mean": nan, "std": nan, "min": nan,
                "p25": nan, "p50": nan, "p75": nan, "max": nan}

    mean = sum(values) / count
    std = math.sqrt(sum((x - mean) ** 2 for x in values) / count)

    def _quantile(p):
        # Linear interpolation between the two nearest order statistics.
        pos = (count - 1) * p / 100
        lo = int(pos)
        hi = min(lo + 1, count - 1)
        return values[lo] + (values[hi] - values[lo]) * (pos - lo)

    return {
        "count": count,
        "mean": mean,
        "std": std,
        "min": values[0],
        "p25": _quantile(25),
        "p50": _quantile(50),
        "p75": _quantile(75),
        "max": values[-1],
    }
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: numpy2
3
- Version: 2.0.1
4
- Summary: Pure-Python NumPy drop-in: full NumPy API + JSON serialization, FastAPI/Flask/Django integration, zero dependencies
3
+ Version: 2.1.0
4
+ Summary: Pure-Python NumPy drop-in: full NumPy API + JSON serialization, array compression, pipeline transforms, schema validation, zero dependencies
5
5
  Home-page: https://github.com/maheshmakvana/numpy2
6
6
  Author: Mahesh Makvana
7
7
  Author-email: Mahesh Makvana <mahesh.makvana@example.com>
@@ -10,7 +10,7 @@ Project-URL: Homepage, https://github.com/maheshmakvana/numpy2
10
10
  Project-URL: Bug Tracker, https://github.com/maheshmakvana/numpy2/issues
11
11
  Project-URL: Documentation, https://github.com/maheshmakvana/numpy2/wiki
12
12
  Project-URL: Source Code, https://github.com/maheshmakvana/numpy2
13
- Keywords: numpy,numpy2,numpy drop-in,pure python numpy,json serialization,numpy json,fastapi numpy,flask numpy,django numpy,pandas,data-science,ndarray,linear algebra,fft,random,numerical computing,scientific computing,int64 json,numpy serializable,rest-api
13
+ Keywords: numpy,numpy2,numpy drop-in,numpy replacement,pure python numpy,json serialization,numpy json,fastapi numpy,flask numpy,django numpy,web api numpy,array compression,array cache,array pipeline,array validation,sliding window,batch processing numpy,streaming array,pandas,data-science,type-conversion,rest-api,ndarray,linear algebra,fft,random,numerical computing,scientific computing,int64 json,numpy serializable,object of type int64 is not json serializable,numpy web framework,numpy fastapi
14
14
  Classifier: Development Status :: 5 - Production/Stable
15
15
  Classifier: Environment :: Web Environment
16
16
  Classifier: Intended Audience :: Developers
@@ -5,6 +5,7 @@ README.md
5
5
  pyproject.toml
6
6
  setup.py
7
7
  numpy2/__init__.py
8
+ numpy2/advanced.py
8
9
  numpy2/array.py
9
10
  numpy2/converters.py
10
11
  numpy2/core.py
@@ -4,8 +4,8 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "numpy2"
7
- version = "2.0.1"
8
- description = "Pure-Python NumPy drop-in: full NumPy API + JSON serialization, FastAPI/Flask/Django integration, zero dependencies"
7
+ version = "2.1.0"
8
+ description = "Pure-Python NumPy drop-in: full NumPy API + JSON serialization, array compression, pipeline transforms, schema validation, zero dependencies"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.8"
11
11
  license = {text = "MIT"}
@@ -17,14 +17,25 @@ keywords = [
17
17
  "numpy",
18
18
  "numpy2",
19
19
  "numpy drop-in",
20
+ "numpy replacement",
20
21
  "pure python numpy",
21
22
  "json serialization",
22
23
  "numpy json",
23
24
  "fastapi numpy",
24
25
  "flask numpy",
25
26
  "django numpy",
27
+ "web api numpy",
28
+ "array compression",
29
+ "array cache",
30
+ "array pipeline",
31
+ "array validation",
32
+ "sliding window",
33
+ "batch processing numpy",
34
+ "streaming array",
26
35
  "pandas",
27
36
  "data-science",
37
+ "type-conversion",
38
+ "rest-api",
28
39
  "ndarray",
29
40
  "linear algebra",
30
41
  "fft",
@@ -33,7 +44,9 @@ keywords = [
33
44
  "scientific computing",
34
45
  "int64 json",
35
46
  "numpy serializable",
36
- "rest-api",
47
+ "object of type int64 is not json serializable",
48
+ "numpy web framework",
49
+ "numpy fastapi",
37
50
  ]
38
51
 
39
52
  classifiers = [
@@ -15,10 +15,10 @@ long_description = readme_file.read_text(encoding="utf-8") if readme_file.exists
15
15
 
16
16
  setup(
17
17
  name="numpy2",
18
- version="2.0.1",
18
+ version="2.1.0",
19
19
  author="Mahesh Makvana",
20
20
  author_email="mahesh.makvana@example.com",
21
- description="Pure-Python NumPy drop-in: full NumPy API + JSON serialization, FastAPI/Flask/Django integration, zero dependencies",
21
+ description="Pure-Python NumPy drop-in: full NumPy API + JSON serialization, array compression, pipeline transforms, schema validation, zero dependencies",
22
22
  long_description=long_description,
23
23
  long_description_content_type="text/markdown",
24
24
  url="https://github.com/maheshmakvana/numpy2",
@@ -68,6 +68,7 @@ setup(
68
68
  "numpy",
69
69
  "numpy2",
70
70
  "numpy drop-in",
71
+ "numpy replacement",
71
72
  "pure python numpy",
72
73
  "json serialization",
73
74
  "numpy json",
@@ -75,6 +76,13 @@ setup(
75
76
  "flask numpy",
76
77
  "django numpy",
77
78
  "web api numpy",
79
+ "array compression",
80
+ "array cache",
81
+ "array pipeline",
82
+ "array validation",
83
+ "sliding window",
84
+ "batch processing numpy",
85
+ "streaming array",
78
86
  "pandas",
79
87
  "data-science",
80
88
  "type-conversion",
@@ -87,6 +95,9 @@ setup(
87
95
  "scientific computing",
88
96
  "int64 json",
89
97
  "numpy serializable",
98
+ "object of type int64 is not json serializable",
99
+ "numpy web framework",
100
+ "numpy fastapi",
90
101
  ],
91
102
  zip_safe=False,
92
103
  )
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes