torchquery 1.0.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- torchquery-1.0.2/PKG-INFO +16 -0
- torchquery-1.0.2/README.md +2 -0
- torchquery-1.0.2/pyproject.toml +23 -0
- torchquery-1.0.2/setup.cfg +4 -0
- torchquery-1.0.2/setup.py +7 -0
- torchquery-1.0.2/torchquery.egg-info/PKG-INFO +16 -0
- torchquery-1.0.2/torchquery.egg-info/SOURCES.txt +9 -0
- torchquery-1.0.2/torchquery.egg-info/dependency_links.txt +1 -0
- torchquery-1.0.2/torchquery.egg-info/requires.txt +1 -0
- torchquery-1.0.2/torchquery.egg-info/top_level.txt +1 -0
- torchquery-1.0.2/torchquery.py +74 -0
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: torchquery
|
|
3
|
+
Version: 1.0.2
|
|
4
|
+
Summary: Billion-scale neural healing engine for high-speed tensor manipulation.
|
|
5
|
+
Author: Sundaram Gupta
|
|
6
|
+
License: MIT
|
|
7
|
+
Keywords: torch,pytorch,tensor,deep-learning
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
11
|
+
Requires-Python: >=3.8
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
Requires-Dist: torch>=2.0.0
|
|
14
|
+
|
|
15
|
+
## 🔌 Compatibility
|
|
16
|
+
**TorchQuery** is built specifically for the **PyTorch** ecosystem. It supports all `torch.Tensor` types and is optimized for both CPU and NVIDIA CUDA (GPU) workflows. It serves as a high-speed alternative to standard **torch** data cleaning methods.
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "torchquery"
|
|
7
|
+
version = "1.0.2" # Bump to 1.0.2 for a fresh start
|
|
8
|
+
authors = [
|
|
9
|
+
{ name="Sundaram Gupta" },
|
|
10
|
+
]
|
|
11
|
+
description = "Billion-scale neural healing engine for high-speed tensor manipulation."
|
|
12
|
+
readme = "README.md"
|
|
13
|
+
requires-python = ">=3.8"
|
|
14
|
+
license = {text = "MIT"}
|
|
15
|
+
dependencies = [
|
|
16
|
+
"torch>=2.0.0",
|
|
17
|
+
]
|
|
18
|
+
keywords = ["torch", "pytorch", "tensor", "deep-learning"]
|
|
19
|
+
classifiers = [
|
|
20
|
+
"Programming Language :: Python :: 3",
|
|
21
|
+
"Operating System :: OS Independent",
|
|
22
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
23
|
+
]
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: torchquery
|
|
3
|
+
Version: 1.0.2
|
|
4
|
+
Summary: Billion-scale neural healing engine for high-speed tensor manipulation.
|
|
5
|
+
Author: Sundaram Gupta
|
|
6
|
+
License: MIT
|
|
7
|
+
Keywords: torch,pytorch,tensor,deep-learning
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
11
|
+
Requires-Python: >=3.8
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
Requires-Dist: torch>=2.0.0
|
|
14
|
+
|
|
15
|
+
## 🔌 Compatibility
|
|
16
|
+
**TorchQuery** is built specifically for the **PyTorch** ecosystem. It supports all `torch.Tensor` types and is optimized for both CPU and NVIDIA CUDA (GPU) workflows. It serves as a high-speed alternative to standard **torch** data cleaning methods.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
torch>=2.0.0
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
torchquery
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
class QueryEngine:
    """Wraps a tensor and provides in-place cleaning ("healing") utilities.

    All mutating methods operate in place on ``self.data`` and return it,
    so calls can be chained or the result used directly.
    """

    def __init__(self, data):
        """Store *data* as a tensor.

        Tensors are kept as-is (dtype preserved, no copy); any other
        sequence/array is converted to a float32 tensor.
        """
        if torch.is_tensor(data):
            self.data = data
        else:
            self.data = torch.as_tensor(data, dtype=torch.float32)

    @classmethod
    def pre_load_billions(cls, file_path, count):
        """Load *count* float32 values from a binary file via memory mapping.

        ``shared=True`` mmaps the file, so the data is not copied into RAM
        up front — suitable for very large files.
        """
        # torch.from_file replaces the deprecated FloatStorage/FloatTensor
        # pair (deprecated as of torch 2.0, which this package requires).
        return cls(torch.from_file(file_path, shared=True,
                                   size=count, dtype=torch.float32))

    # --- THE HEALERS ---
    def find_Nan_FillwithModernNumbers(self, modern_value=0.0):
        """Replace every NaN with *modern_value*, in place."""
        self.data.masked_fill_(torch.isnan(self.data), modern_value)
        return self.data

    def fill_with_Nan_into_your_suggestions(self):
        """Replace NaNs with the mean of the non-NaN values, in place.

        Falls back to 0.0 when every element is NaN (no mean exists).
        """
        clean_data = self.data[~torch.isnan(self.data)]
        suggestion = torch.mean(clean_data) if clean_data.numel() > 0 else torch.tensor(0.0)
        return self.find_Nan_FillwithModernNumbers(modern_value=suggestion.item())

    def find_inf_rename(self, replace_with=0.0):
        """Replace +inf with *replace_with* and -inf with ``-replace_with``.

        Fix: the previous ``torch.nan_to_num(..., out=self.data)`` call also
        zeroed every NaN as a hidden side effect. Infinities are now replaced
        with targeted masks so NaN handling stays the job of the NaN healers.
        """
        self.data.masked_fill_(torch.isposinf(self.data), replace_with)
        self.data.masked_fill_(torch.isneginf(self.data), -replace_with)
        return self.data

    def find_inf_intoleastNum(self):
        """Replace infinities with the minimum *finite* value in the data.

        Fix: the minimum must be taken over finite entries only — otherwise
        a tensor containing -inf would "replace" infinities with -inf.
        Falls back to 0.0 when no finite value exists.
        """
        finite = self.data[torch.isfinite(self.data)]
        least = torch.min(finite) if finite.numel() > 0 else torch.tensor(0.0)
        return self.find_inf_rename(replace_with=least.item())

    def find_inf_intoBignumbers(self):
        """Replace infinities with the maximum *finite* value in the data.

        Fix: as with the minimum, +inf itself must be excluded from the
        search for the replacement value. Falls back to 0.0 when no finite
        value exists.
        """
        finite = self.data[torch.isfinite(self.data)]
        large = torch.max(finite) if finite.numel() > 0 else torch.tensor(0.0)
        return self.find_inf_rename(replace_with=large.item())

    # --- THE ANALYSTS ---
    def find_least_rename_largerNumber(self):
        """Overwrite every occurrence of the smallest value with the largest."""
        least, large = torch.min(self.data), torch.max(self.data)
        self.data[self.data == least] = large
        return self.data

    def find_largennum_leastnumbers(self):
        """Overwrite every occurrence of the largest value with the smallest."""
        least, large = torch.min(self.data), torch.max(self.data)
        self.data[self.data == large] = least
        return self.data
|
|
59
|
+
|
|
60
|
+
class TensorTricafig:
    """Shape and scaling helpers around a single wrapped tensor."""

    def __init__(self, tensor_data):
        # Kept by reference: in-place operations below mutate the
        # caller's tensor directly.
        self.data = tensor_data

    def action_normalize(self):
        """Scales tensor values to a range of 0.0 to 1.0."""
        low = torch.min(self.data)
        high = torch.max(self.data)
        span = high - low
        # A constant tensor has zero span; leave it untouched rather
        # than divide by zero.
        if span != 0:
            self.data.sub_(low)
            self.data.div_(span)
        return self.data

    def tensor_grid(self, rows, cols):
        """Shapes data into a matrix view without copying memory."""
        return self.data.view(rows, cols)
|