quarterbit-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
quarterbit/__init__.py ADDED
@@ -0,0 +1,94 @@
1
"""
QuarterBit
==========

Precision computing library.

Usage:
    from quarterbit.torch import Adam

Copyright (c) 2026 Clouthier Simulation Labs. All rights reserved.
"""

# NOTE(review): the wheel filename says 0.1.0 but __version__ says 1.0.0 —
# confirm which is authoritative before release.
__version__ = "1.0.0"
__author__ = "Kyle Clouthier"
__license__ = "Proprietary"

# Underscore aliases keep these stdlib helpers out of the public namespace.
# _hashlib and _platform are not used in this module's visible code —
# presumably reserved for the full license validation; verify before removing.
import os as _os
import hashlib as _hashlib
import platform as _platform
20
+
21
# ============================================================================
# License Validation (stub - implement full validation later)
# ============================================================================

# License key is read once, at import time, from the environment.
_LICENSE_KEY = _os.environ.get("QUARTERBIT_LICENSE_KEY", "")
# Guard set by _validate_license() so validation runs only once per process.
_VALIDATED = False
27
+
28
def _validate_license():
    """Validate the license key and return the tier name.

    Returns:
        str: ``"free"`` when no key is configured, ``"pro"`` otherwise.

    Bug fix: the original cached only a boolean, so any call after the
    first returned ``True`` instead of the tier string promised by the
    docstring. The tier itself is now cached in ``_VALIDATED`` — both
    tier strings are truthy, so any external ``if _VALIDATED`` check
    keeps its old meaning.
    """
    global _VALIDATED

    # Already validated: return the cached tier, not a bare boolean.
    if _VALIDATED:
        return _VALIDATED

    # Free tier: limited functionality when no key is configured.
    if not _LICENSE_KEY:
        _VALIDATED = "free"
        return "free"

    # TODO: Implement full license validation against server.
    # For now, any non-empty key grants the pro tier.
    _VALIDATED = "pro"
    return "pro"
44
+
45
def _check_environment():
    """Check for debugging/reverse engineering attempts.

    Returns:
        list[str]: Names of the suspicious conditions detected; empty
        when nothing stands out.
    """
    import sys

    findings = []

    # PYTHONDEBUG in the environment suggests the interpreter runs in debug mode.
    if _os.environ.get("PYTHONDEBUG"):
        findings.append("debug_mode")

    # An installed trace function usually means a debugger or profiler is attached.
    if sys.gettrace() is not None:
        findings.append("trace_active")

    return findings
60
+
61
# Run checks on import: resolve the license tier and scan the environment.
_tier = _validate_license()
_suspicious = _check_environment()

# Free-tier users get a warning that debug/trace environments restrict features.
if _suspicious and _tier == "free":
    import warnings
    warnings.warn("QuarterBit: Debug mode detected. Some features disabled.")
68
+
69
+ # ============================================================================
70
+ # Public API
71
+ # ============================================================================
72
+
73
def get_version():
    """Return the QuarterBit version string (``__version__``)."""
    version = __version__
    return version
76
+
77
def get_license_tier():
    """Return the license tier resolved at import time ("free" or "pro")."""
    tier = _tier
    return tier
80
+
81
def is_available():
    """Report whether the QuarterBit GPU backend can be loaded.

    Returns:
        bool: The backend's own availability probe, or ``False`` when the
        ``torch`` submodule cannot be imported at all.
    """
    try:
        from .torch.utils import is_available as _backend_probe
        return _backend_probe()
    except ImportError:
        # No torch submodule (or missing native extension) means no GPU backend.
        return False
88
+
89
+ # Lazy imports for submodules
90
def __getattr__(name):
    """Lazily import the ``torch`` submodule on first attribute access (PEP 562)."""
    if name != "torch":
        raise AttributeError(f"module 'quarterbit' has no attribute '{name}'")
    from . import torch
    return torch
@@ -0,0 +1,22 @@
1
+ """
2
+ QuarterBit PyTorch Backend
3
+ ==========================
4
+
5
+ Precision optimizers for PyTorch.
6
+
7
+ Usage:
8
+ from quarterbit.torch import Adam
9
+ optimizer = Adam(model.parameters(), lr=1e-3)
10
+
11
+ Copyright (c) 2026 Clouthier Simulation Labs. All rights reserved.
12
+ """
13
+
14
+ from .optim import SGD, Adam, AdamW, CompactAdam, CompactEFTAdam
15
+ from .functional import eft_matmul, eft_sum, eft_accumulate
16
+ from .utils import get_backend_info, is_available
17
+
18
+ __all__ = [
19
+ 'SGD', 'Adam', 'AdamW', 'CompactAdam', 'CompactEFTAdam',
20
+ 'eft_matmul', 'eft_sum', 'eft_accumulate',
21
+ 'get_backend_info', 'is_available'
22
+ ]
@@ -0,0 +1,229 @@
1
+ """
2
+ QuarterBit Functional Operations
3
+ ================================
4
+
5
+ Copyright (c) 2026 Clouthier Simulation Labs. All rights reserved.
6
+ """
7
+
8
+ import torch
9
+ import ctypes
10
+ from .utils import get_lib, is_available
11
+
12
def _ptr(tensor):
    """Return a ``POINTER(c_float)`` into *tensor*'s storage, or ``None`` for ``None``."""
    if tensor is None:
        return None
    raw_address = tensor.data_ptr()
    return ctypes.cast(raw_address, ctypes.POINTER(ctypes.c_float))
17
+
18
+
19
def eft_accumulate(dst: torch.Tensor, src: torch.Tensor, comp: torch.Tensor = None):
    """
    Accumulate src into dst in-place with EFT precision: dst += src

    The compensation buffer carries rounding error between calls, so
    repeated accumulation stays accurate; pass the returned buffer back
    on the next call.

    Args:
        dst: Destination tensor (modified in-place)
        src: Source tensor to add
        comp: Compensation buffer (same shape as dst); allocated on first
            use when omitted.

    Returns:
        comp: Compensation buffer

    Example:
        >>> dst = torch.zeros(1000).cuda()
        >>> comp = None
        >>> for batch in data:
        ...     comp = eft_accumulate(dst, batch, comp)
    """
    # Without the native backend, fall back to plain in-place addition.
    if not is_available():
        dst.add_(src)
        return comp if comp is not None else torch.zeros_like(dst)

    if comp is None:
        comp = torch.zeros_like(dst)

    # The kernel only handles CUDA float32 tensors; otherwise add normally.
    kernel_ok = dst.is_cuda and src.is_cuda and dst.dtype == torch.float32
    if not kernel_ok:
        dst.add_(src)
        return comp

    backend = get_lib()
    count = dst.numel()
    backend.eft_accumulate(
        _ptr(dst), _ptr(comp), _ptr(src),
        ctypes.c_int(count),
    )
    return comp
62
+
63
+
64
def eft_accumulate_scaled(dst: torch.Tensor, src: torch.Tensor,
                          scale: float, comp: torch.Tensor = None):
    """
    Scaled in-place accumulation with EFT: dst += scale * src

    Args:
        dst: Destination tensor (modified in-place)
        src: Source tensor
        scale: Multiplier applied to src before accumulation
        comp: Compensation buffer (same shape as dst)

    Returns:
        comp: Compensation buffer
    """
    # No native backend: defer to torch's fused scaled add.
    if not is_available():
        dst.add_(src, alpha=scale)
        return comp if comp is not None else torch.zeros_like(dst)

    if comp is None:
        comp = torch.zeros_like(dst)

    # Kernel path requires CUDA float32 on both operands.
    kernel_ok = dst.is_cuda and src.is_cuda and dst.dtype == torch.float32
    if not kernel_ok:
        dst.add_(src, alpha=scale)
        return comp

    backend = get_lib()
    backend.eft_accumulate_scaled(
        _ptr(dst), _ptr(comp), _ptr(src),
        ctypes.c_float(scale),
        ctypes.c_int(dst.numel()),
    )
    return comp
99
+
100
+
101
def eft_sum(tensor: torch.Tensor) -> torch.Tensor:
    """
    Sum all elements with EFT precision.

    More accurate than tensor.sum() for large tensors because the
    rounding error is tracked in a separate compensation accumulator.

    Args:
        tensor: Input tensor

    Returns:
        Scalar tensor with sum

    Example:
        >>> x = torch.randn(1000000).cuda()
        >>> standard_sum = x.sum()
        >>> eft_precise_sum = eft_sum(x)
    """
    # Kernel only supports CUDA float32; otherwise use torch's reduction.
    use_kernel = (is_available() and tensor.is_cuda
                  and tensor.dtype == torch.float32)
    if not use_kernel:
        return tensor.sum()

    backend = get_lib()
    total = torch.zeros(1, device=tensor.device, dtype=torch.float32)
    residual = torch.zeros(1, device=tensor.device, dtype=torch.float32)

    # The kernel expects contiguous 1-D storage.
    flat = tensor.contiguous().view(-1)

    backend.eft_reduce_sum(
        _ptr(flat), _ptr(total), _ptr(residual),
        ctypes.c_int(tensor.numel()),
    )

    # Fold the tracked rounding error back into the result.
    return total + residual
137
+
138
+
139
def eft_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Matrix multiplication with EFT precision: C = A @ B

    More accurate than torch.mm for ill-conditioned matrices.

    Args:
        a: First matrix [M, K]
        b: Second matrix [K, N]

    Returns:
        Result matrix [M, N]

    Raises:
        ValueError: If either input is not 2D, or the inner dimensions
            do not match.

    Example:
        >>> A = torch.randn(1000, 500).cuda()
        >>> B = torch.randn(500, 1000).cuda()
        >>> C = eft_matmul(A, B)  # More precise than A @ B
    """
    # Fix: validate BEFORE choosing a backend, so callers see the same
    # ValueError with or without the native library (previously the
    # fallback path surfaced torch.mm's RuntimeError instead).
    if a.dim() != 2 or b.dim() != 2:
        raise ValueError("eft_matmul requires 2D tensors")

    M, K = a.shape
    K2, N = b.shape
    if K != K2:
        raise ValueError(f"Matrix dimensions don't match: {a.shape} @ {b.shape}")

    # Fallbacks: no native backend, or inputs the kernel cannot handle.
    if not is_available():
        return torch.mm(a, b)
    if not (a.is_cuda and b.is_cuda and a.dtype == torch.float32 and b.dtype == torch.float32):
        return torch.mm(a, b)

    lib = get_lib()
    c = torch.empty(M, N, device=a.device, dtype=torch.float32)

    # Kernel assumes row-major contiguous storage.
    a_contig = a.contiguous()
    b_contig = b.contiguous()

    lib.eft_matmul(
        _ptr(a_contig), _ptr(b_contig), _ptr(c),
        ctypes.c_int(M), ctypes.c_int(K), ctypes.c_int(N)
    )

    return c
185
+
186
+
187
def eft_dot(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Dot product with EFT precision.

    Computed as a compensated sum over the elementwise product.

    Args:
        a: First vector
        b: Second vector

    Returns:
        Scalar tensor with dot product
    """
    products = a * b
    return eft_sum(products)
199
+
200
+
201
def eft_mean(tensor: torch.Tensor) -> torch.Tensor:
    """
    Mean with EFT precision.

    Args:
        tensor: Input tensor

    Returns:
        Scalar tensor with mean
    """
    count = tensor.numel()
    return eft_sum(tensor) / count
212
+
213
+
214
def eft_var(tensor: torch.Tensor, unbiased: bool = True) -> torch.Tensor:
    """
    Variance with EFT precision.

    Args:
        tensor: Input tensor
        unbiased: Use Bessel's correction (N-1 denominator)

    Returns:
        Scalar tensor with variance
    """
    centered = tensor - eft_mean(tensor)
    sum_sq = eft_sum(centered * centered)
    count = tensor.numel()
    denominator = count - 1 if unbiased else count
    return sum_sq / denominator