valuebridge-tdfloat 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Tushar Dadlani
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,192 @@
1
+ Metadata-Version: 2.4
2
+ Name: valuebridge-tdfloat
3
+ Version: 1.0.0
4
+ Summary: Triadic Dot Float — exact rational arithmetic as a single Python integer
5
+ Author-email: Tushar Dadlani <tushar@valuebridge.ai>
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/valuebridge-ai/tdfloat
8
+ Project-URL: Source, https://github.com/valuebridge-ai/tdfloat
9
+ Project-URL: Bug Tracker, https://github.com/valuebridge-ai/tdfloat/issues
10
+ Keywords: arithmetic,rational,exact,floating-point,mathematics,encoding,number-theory
11
+ Classifier: Development Status :: 5 - Production/Stable
12
+ Classifier: Intended Audience :: Science/Research
13
+ Classifier: Intended Audience :: Education
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.8
18
+ Classifier: Programming Language :: Python :: 3.9
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Classifier: Programming Language :: Python :: 3 :: Only
23
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
24
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
25
+ Requires-Python: >=3.8
26
+ Description-Content-Type: text/markdown
27
+ License-File: LICENSE
28
+ Dynamic: license-file
29
+
30
+ # valuebridge-tdfloat
31
+
32
+ **Triadic Dot Float (TDFloat)** — exact rational arithmetic as a single Python integer.
33
+
34
+ ```python
35
+ from valuebridge.tdfloat import td, frac
36
+
37
+ td('0.1') + td('0.2') == td('0.3') # True — always
38
+ (a + b) + c == a + (b + c) # True — for any a, b, c
39
+ ```
40
+
41
+ ---
42
+
43
+ ## Why This Matters for Explainable AI
44
+
45
+ Modern AI systems — language models, recommendation engines, fairness audits, financial models — all perform millions of arithmetic operations. Every one of those operations runs on IEEE 754 floating-point, a format designed in 1985 for numerical simulation, not for systems that need to be explained, audited, or trusted.
46
+
47
+ **TDFloat replaces floating-point approximation with provably exact rational arithmetic.** The consequences for explainable AI are direct.
48
+
49
+ ---
50
+
51
+ ### 1. Computations Are Reproducible — Exactly
52
+
53
+ Individual IEEE 754 operations are exactly specified, but floating-point computations are not reproducible in practice: compilers reorder and fuse operations (e.g. FMA contraction), parallel reductions sum in nondeterministic order, and math libraries differ across platforms. The same model can therefore produce different outputs on different hardware, after recompilation, or across NumPy versions.
54
+
55
+ TDFloat arithmetic is deterministic by construction. There is no rounding, no platform-specific behaviour, no "it worked on my machine." The same inputs always produce the same output, encoded as the same integer. This makes AI outputs **auditable**: you can replay any computation and verify it step by step.
56
+
57
+ ---
58
+
59
+ ### 2. Associativity is a Theorem, Not a Hope
60
+
61
+ A fundamental requirement for any system that needs to explain its reasoning is that the order of operations should not change the result. In IEEE 754, it does:
62
+
63
+ ```
64
+ (a + b) + c ≠ a + (b + c) -- in IEEE 754, for many values of a, b, c
65
+ ```
66
+
67
+ This means that simply reordering additions in a neural network — something optimising compilers do routinely — can change the model's output.
68
+
69
+ TDFloat's associativity is **formally proved in Coq** (`proofs/tdfloat_ieee_resolution.v`). It is not a property that holds approximately or usually — it is a theorem. Any system built on TDFloat inherits this guarantee.
70
+
71
+ ---
72
+
73
+ ### 3. Every Number Has an Interpretable Identity
74
+
75
+ In IEEE 754, numbers are opaque 64-bit patterns. `3.141592653589793` is just a bit string — you cannot read off that it approximates 22/7, or that it is an integer, or anything structural about it.
76
+
77
+ In TDFloat, every number's encoding carries explicit structural information:
78
+
79
+ - **The info-bit** (the `"."`) declares whether the number is an integer or a fractional. This is formally proved (`proofs/tdfloat_dot_encoding.v`): the dot IS the axis-selector bit. `td(3)` and `td(3.0)` are provably different kinds of numbers.
80
+ - **Constants are fixed rationals**: PI = 22/7, E = 19/7, PHI = 3/2. These are rational *approximations* of the irrational constants (22/7 differs from π in the third decimal place), but each is an explicit fraction with a visible numerator and denominator — so a model using TDFloat constants can be inspected and its reasoning followed exactly, with the approximation error known up front.
81
+ - **Every result is a fraction**: the output of any computation is always `p/q` for some integers `p` and `q`. You can always ask "what fraction did this computation produce?" and get a precise, human-readable answer.
82
+
83
+ ---
84
+
85
+ ### 4. Fairness Audits Can Be Verified
86
+
87
+ A core problem in algorithmic fairness is verifying that a decision-making system treats groups consistently. With IEEE 754, floating-point drift means that two computations that should be identical (same formula, different data ordering) can diverge. This makes it impossible to prove that a model's disparity between groups is real versus artefact.
88
+
89
+ With TDFloat, if two computations produce the same rational value, their encodings are equal. If they produce different rationals, their encodings are provably different. There is no grey zone of "close enough." Fairness claims can be verified arithmetically, not just statistically.
90
+
91
+ ---
92
+
93
+ ### 5. Intermediate Results Are Traceable
94
+
95
+ When debugging why a model made a decision, the ability to inspect intermediate values matters. A TDFloat value always exposes:
96
+
97
+ ```python
98
+ x = td('3.14')
99
+ x.as_fraction() # (157, 50) — exact numerator and denominator
100
+ x.info_bit # 1 — it's on the half-step axis (has a dot)
101
+ x.dot_pos # 2 — two decimal places
102
+ x.fields() # full encoding breakdown
103
+ ```
104
+
105
+ There is no hidden rounding. Every intermediate result is fully inspectable as an exact rational.
106
+
107
+ ---
108
+
109
+ ## Formal Verification
110
+
111
+ The mathematical foundations of TDFloat are proved in Coq:
112
+
113
+ | Proof file | What it proves |
114
+ |---|---|
115
+ | `proofs/encoding_any_symbol.v` | The abstract half-step encoding is injective and perfectly reversible |
116
+ | `proofs/tdfloat_dot_encoding.v` | The `"."` is the info-bit; integer and fractional encodings never collide |
117
+ | `proofs/tdfloat_ieee_resolution.v` | TDFloat addition is associative; IEEE 754 addition provably is not |
118
+
119
+ These are not documentation claims. They are machine-checked mathematical proofs.
120
+
121
+ ---
122
+
123
+ ## Quick Start
124
+
125
+ ```python
126
+ from valuebridge.tdfloat import td, frac, TDFloat, PI, E, PHI
127
+ from valuebridge.tdfloat.math import sqrt, circle_area, cosine_similarity
128
+
129
+ # Exact arithmetic
130
+ assert td('0.1') + td('0.2') == td('0.3')
131
+
132
+ # Exact constants
133
+ print(PI) # 22/7
134
+ print(E) # 19/7
135
+ print(PHI) # 3/2
136
+
137
+ # Exact geometry
138
+ print(circle_area(td(7))) # 154 (exact integer)
139
+ print(circle_area(td(1))) # 22/7 (exact π)
140
+
141
+ # Exact vector similarity
142
+ u = [td(3), td(4)]
143
+ v = [td(4), td(3)]
144
+ print(cosine_similarity(u, v)) # 24/25 (exact)
145
+ ```
146
+
147
+ ---
148
+
149
+ ## Installation
150
+
151
+ ```bash
152
+ # With uv (recommended)
153
+ uv add valuebridge-tdfloat
154
+
155
+ # With pip
156
+ pip install valuebridge-tdfloat
157
+ ```
158
+
159
+ ---
160
+
161
+ ## Development
162
+
163
+ ```bash
164
+ git clone https://github.com/valuebridge-ai/tdfloat
165
+ cd tdfloat
166
+ uv sync
167
+ uv run pytest tests/
168
+ ```
169
+
170
+ ---
171
+
172
+ ## Architecture
173
+
174
+ ```
175
+ valuebridge/tdfloat/
176
+ ├── tdfloat.py — TDFloat class: encoding, arithmetic operators, constructors
177
+ ├── _encoding.py — Half-step line: pack/unpack, from_rational, to_rational
178
+ ├── _arithmetic.py — Exact rational ops: add, sub, mul, div, sqrt, divmod
179
+ ├── constants.py — Fixed rational constants: π≈22/7, e≈19/7, φ≈3/2, √2≈7/5, ...
180
+ └── math.py — Functions: sqrt, exp, log, sin, cos, vector ops
181
+
182
+ proofs/
183
+ ├── encoding_any_symbol.v — Abstract half-step encoding theory
184
+ ├── tdfloat_dot_encoding.v — The dot is the info-bit (Coq)
185
+ └── tdfloat_ieee_resolution.v — TDFloat vs IEEE 754 associativity (Coq)
186
+ ```
187
+
188
+ ---
189
+
190
+ ## License
191
+
192
+ MIT — Copyright 2026 Tushar Dadlani / Valuebridge AI
@@ -0,0 +1,163 @@
1
+ # valuebridge-tdfloat
2
+
3
+ **Triadic Dot Float (TDFloat)** — exact rational arithmetic as a single Python integer.
4
+
5
+ ```python
6
+ from valuebridge.tdfloat import td, frac
7
+
8
+ td('0.1') + td('0.2') == td('0.3') # True — always
9
+ (a + b) + c == a + (b + c) # True — for any a, b, c
10
+ ```
11
+
12
+ ---
13
+
14
+ ## Why This Matters for Explainable AI
15
+
16
+ Modern AI systems — language models, recommendation engines, fairness audits, financial models — all perform millions of arithmetic operations. Every one of those operations runs on IEEE 754 floating-point, a format designed in 1985 for numerical simulation, not for systems that need to be explained, audited, or trusted.
17
+
18
+ **TDFloat replaces floating-point approximation with provably exact rational arithmetic.** The consequences for explainable AI are direct.
19
+
20
+ ---
21
+
22
+ ### 1. Computations Are Reproducible — Exactly
23
+
24
+ Individual IEEE 754 operations are exactly specified, but floating-point computations are not reproducible in practice: compilers reorder and fuse operations (e.g. FMA contraction), parallel reductions sum in nondeterministic order, and math libraries differ across platforms. The same model can therefore produce different outputs on different hardware, after recompilation, or across NumPy versions.
25
+
26
+ TDFloat arithmetic is deterministic by construction. There is no rounding, no platform-specific behaviour, no "it worked on my machine." The same inputs always produce the same output, encoded as the same integer. This makes AI outputs **auditable**: you can replay any computation and verify it step by step.
27
+
28
+ ---
29
+
30
+ ### 2. Associativity is a Theorem, Not a Hope
31
+
32
+ A fundamental requirement for any system that needs to explain its reasoning is that the order of operations should not change the result. In IEEE 754, it does:
33
+
34
+ ```
35
+ (a + b) + c ≠ a + (b + c) -- in IEEE 754, for many values of a, b, c
36
+ ```
37
+
38
+ This means that simply reordering additions in a neural network — something optimising compilers do routinely — can change the model's output.
39
+
40
+ TDFloat's associativity is **formally proved in Coq** (`proofs/tdfloat_ieee_resolution.v`). It is not a property that holds approximately or usually — it is a theorem. Any system built on TDFloat inherits this guarantee.
41
+
42
+ ---
43
+
44
+ ### 3. Every Number Has an Interpretable Identity
45
+
46
+ In IEEE 754, numbers are opaque 64-bit patterns. `3.141592653589793` is just a bit string — you cannot read off that it approximates 22/7, or that it is an integer, or anything structural about it.
47
+
48
+ In TDFloat, every number's encoding carries explicit structural information:
49
+
50
+ - **The info-bit** (the `"."`) declares whether the number is an integer or a fractional. This is formally proved (`proofs/tdfloat_dot_encoding.v`): the dot IS the axis-selector bit. `td(3)` and `td(3.0)` are provably different kinds of numbers.
51
+ - **Constants are fixed rationals**: PI = 22/7, E = 19/7, PHI = 3/2. These are rational *approximations* of the irrational constants (22/7 differs from π in the third decimal place), but each is an explicit fraction with a visible numerator and denominator — so a model using TDFloat constants can be inspected and its reasoning followed exactly, with the approximation error known up front.
52
+ - **Every result is a fraction**: the output of any computation is always `p/q` for some integers `p` and `q`. You can always ask "what fraction did this computation produce?" and get a precise, human-readable answer.
53
+
54
+ ---
55
+
56
+ ### 4. Fairness Audits Can Be Verified
57
+
58
+ A core problem in algorithmic fairness is verifying that a decision-making system treats groups consistently. With IEEE 754, floating-point drift means that two computations that should be identical (same formula, different data ordering) can diverge. This makes it impossible to prove that a model's disparity between groups is real versus artefact.
59
+
60
+ With TDFloat, if two computations produce the same rational value, their encodings are equal. If they produce different rationals, their encodings are provably different. There is no grey zone of "close enough." Fairness claims can be verified arithmetically, not just statistically.
61
+
62
+ ---
63
+
64
+ ### 5. Intermediate Results Are Traceable
65
+
66
+ When debugging why a model made a decision, the ability to inspect intermediate values matters. A TDFloat value always exposes:
67
+
68
+ ```python
69
+ x = td('3.14')
70
+ x.as_fraction() # (157, 50) — exact numerator and denominator
71
+ x.info_bit # 1 — it's on the half-step axis (has a dot)
72
+ x.dot_pos # 2 — two decimal places
73
+ x.fields() # full encoding breakdown
74
+ ```
75
+
76
+ There is no hidden rounding. Every intermediate result is fully inspectable as an exact rational.
77
+
78
+ ---
79
+
80
+ ## Formal Verification
81
+
82
+ The mathematical foundations of TDFloat are proved in Coq:
83
+
84
+ | Proof file | What it proves |
85
+ |---|---|
86
+ | `proofs/encoding_any_symbol.v` | The abstract half-step encoding is injective and perfectly reversible |
87
+ | `proofs/tdfloat_dot_encoding.v` | The `"."` is the info-bit; integer and fractional encodings never collide |
88
+ | `proofs/tdfloat_ieee_resolution.v` | TDFloat addition is associative; IEEE 754 addition provably is not |
89
+
90
+ These are not documentation claims. They are machine-checked mathematical proofs.
91
+
92
+ ---
93
+
94
+ ## Quick Start
95
+
96
+ ```python
97
+ from valuebridge.tdfloat import td, frac, TDFloat, PI, E, PHI
98
+ from valuebridge.tdfloat.math import sqrt, circle_area, cosine_similarity
99
+
100
+ # Exact arithmetic
101
+ assert td('0.1') + td('0.2') == td('0.3')
102
+
103
+ # Exact constants
104
+ print(PI) # 22/7
105
+ print(E) # 19/7
106
+ print(PHI) # 3/2
107
+
108
+ # Exact geometry
109
+ print(circle_area(td(7))) # 154 (exact integer)
110
+ print(circle_area(td(1))) # 22/7 (exact π)
111
+
112
+ # Exact vector similarity
113
+ u = [td(3), td(4)]
114
+ v = [td(4), td(3)]
115
+ print(cosine_similarity(u, v)) # 24/25 (exact)
116
+ ```
117
+
118
+ ---
119
+
120
+ ## Installation
121
+
122
+ ```bash
123
+ # With uv (recommended)
124
+ uv add valuebridge-tdfloat
125
+
126
+ # With pip
127
+ pip install valuebridge-tdfloat
128
+ ```
129
+
130
+ ---
131
+
132
+ ## Development
133
+
134
+ ```bash
135
+ git clone https://github.com/valuebridge-ai/tdfloat
136
+ cd tdfloat
137
+ uv sync
138
+ uv run pytest tests/
139
+ ```
140
+
141
+ ---
142
+
143
+ ## Architecture
144
+
145
+ ```
146
+ valuebridge/tdfloat/
147
+ ├── tdfloat.py — TDFloat class: encoding, arithmetic operators, constructors
148
+ ├── _encoding.py — Half-step line: pack/unpack, from_rational, to_rational
149
+ ├── _arithmetic.py — Exact rational ops: add, sub, mul, div, sqrt, divmod
150
+ ├── constants.py — Fixed rational constants: π≈22/7, e≈19/7, φ≈3/2, √2≈7/5, ...
151
+ └── math.py — Functions: sqrt, exp, log, sin, cos, vector ops
152
+
153
+ proofs/
154
+ ├── encoding_any_symbol.v — Abstract half-step encoding theory
155
+ ├── tdfloat_dot_encoding.v — The dot is the info-bit (Coq)
156
+ └── tdfloat_ieee_resolution.v — TDFloat vs IEEE 754 associativity (Coq)
157
+ ```
158
+
159
+ ---
160
+
161
+ ## License
162
+
163
+ MIT — Copyright 2026 Tushar Dadlani / Valuebridge AI
@@ -0,0 +1,47 @@
1
+ [build-system]
2
+ requires = ["setuptools>=77"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "valuebridge-tdfloat"
7
+ version = "1.0.0"
8
+ description = "Triadic Dot Float — exact rational arithmetic as a single Python integer"
9
+ readme = "README.md"
10
+ license = "MIT"
11
+ license-files = ["LICENSE"]
12
+ requires-python = ">=3.8"
13
+ authors = [
14
+ { name = "Tushar Dadlani", email = "tushar@valuebridge.ai" },
15
+ ]
16
+ keywords = [
17
+ "arithmetic", "rational", "exact", "floating-point",
18
+ "mathematics", "encoding", "number-theory",
19
+ ]
20
+ classifiers = [
21
+ "Development Status :: 5 - Production/Stable",
22
+ "Intended Audience :: Science/Research",
23
+ "Intended Audience :: Education",
24
+ "Intended Audience :: Developers",
25
+ "Operating System :: OS Independent",
26
+ "Programming Language :: Python :: 3",
27
+ "Programming Language :: Python :: 3.8",
28
+ "Programming Language :: Python :: 3.9",
29
+ "Programming Language :: Python :: 3.10",
30
+ "Programming Language :: Python :: 3.11",
31
+ "Programming Language :: Python :: 3.12",
32
+ "Programming Language :: Python :: 3 :: Only",
33
+ "Topic :: Scientific/Engineering :: Mathematics",
34
+ "Topic :: Software Development :: Libraries :: Python Modules",
35
+ ]
36
+
37
+ [project.urls]
38
+ Homepage = "https://github.com/valuebridge-ai/tdfloat"
39
+ Source = "https://github.com/valuebridge-ai/tdfloat"
40
+ "Bug Tracker" = "https://github.com/valuebridge-ai/tdfloat/issues"
41
+
42
+ [tool.setuptools.packages.find]
43
+ where = ["."]
44
+ include = ["valuebridge*"]
45
+
46
+ [dependency-groups]
47
+ dev = ["pytest>=8"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+