thirdanswer 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,40 @@
1
+ """
2
+ thirdanswer — The Third Answer framework for evaluating AI uncertainty.
3
+
4
+ Why AI doesn't know what it doesn't know — and how to find out.
5
+
6
+ Basic usage (no LLM needed):
7
+ from thirdanswer import Compass
8
+ c = Compass(T=0.7, I=0.4, F=0.5)
9
+ print(c.zone) # "contradiction"
10
+
11
+ With LLM (Groq, free):
12
+ from thirdanswer import analyze
13
+ r = analyze("AI text here", provider="groq", api_key="gsk_...")
14
+ print(r.zone, r.T, r.I, r.F)
15
+
16
+ Book: "The Third Answer" by Leyva-Vazquez & Smarandache (2026)
17
+ """
18
+
19
+ __version__ = "0.1.0"
20
+ __author__ = "Maikel Yelandi Leyva-Vazquez"
21
+
22
+ from .compass import Compass
23
+ from .analyzer import analyze, decompose, AnalysisResult, Claim
24
+ from .honest import ask, HonestResponse
25
+ from .compare import compare, ComparisonResult
26
+ from .errors import AIError, ERROR_TYPES
27
+
28
+ __all__ = [
29
+ "Compass",
30
+ "analyze",
31
+ "decompose",
32
+ "ask",
33
+ "compare",
34
+ "AnalysisResult",
35
+ "HonestResponse",
36
+ "ComparisonResult",
37
+ "Claim",
38
+ "AIError",
39
+ "ERROR_TYPES",
40
+ ]
@@ -0,0 +1,229 @@
1
+ """
2
+ analyze() — Evaluate any AI-generated text with T,I,F.
3
+
4
+ Usage:
5
+ from thirdanswer import analyze
6
+ r = analyze("Coffee is good for health", provider="groq", api_key="gsk_...")
7
+ print(r.T, r.I, r.F, r.zone)
8
+ """
9
+
10
+ import json
11
+ import re
12
+ from dataclasses import dataclass, field
13
+ from typing import List, Optional
14
+
15
+ from .compass import Compass, _classify_zone
16
+ from .providers.base import Provider
17
+
18
+
19
# System prompt sent verbatim to the LLM by analyze(). It instructs the model to
# score the input on three independent axes (T, I, F), pick one of the four
# zones, flag error types, and reply strictly as JSON matching the schema below.
# NOTE(review): the zone thresholds stated here differ slightly from the tiered
# checks in compass._classify_zone (e.g. "contradiction": T > 0.3 AND F > 0.3
# here vs. T > 0.5 and F > 0.4 first in code) — confirm which is authoritative;
# analyze() falls back to _classify_zone only when the model omits "zone".
SYSTEM_PROMPT = """You are an epistemic analysis engine implementing the Third Answer framework.

For ANY text given to you, analyze its epistemic quality using three INDEPENDENT dimensions:
- T (Truth): degree of evidence supporting the claim [0.0 to 1.0]
- I (Indeterminacy): degree of genuine uncertainty [0.0 to 1.0]
- F (Falsity): degree of evidence contradicting the claim [0.0 to 1.0]

T + I + F do NOT need to equal 1. They are independent.

Classify into zones:
- "consensus": T > 0.5, I < 0.35, F < 0.3
- "ambiguity": I > 0.5 or (I > 0.35 and T < 0.6)
- "contradiction": T > 0.3 AND F > 0.3
- "ignorance": T < 0.3, F < 0.3, or I overwhelming

Identify error types: fabrication, distortion, conflation, confident_ignorance

Respond ONLY in valid JSON:
{
  "T": 0.0, "I": 0.0, "F": 0.0,
  "zone": "consensus|ambiguity|contradiction|ignorance",
  "zone_reason": "why this zone",
  "error_types": [],
  "error_explanation": "",
  "honest_version": "text rewritten with uncertainty markers",
  "claims": [
    {"claim": "text", "T": 0.0, "I": 0.0, "F": 0.0, "note": "reason"}
  ],
  "recommendation": "trust|investigate|consult_expert|do_not_use"
}"""
49
+
50
+
51
@dataclass
class Claim:
    """One claim pulled out of a larger text, scored with T, I, F."""

    text: str  # the claim itself, verbatim
    T: float   # evidence FOR the claim, in [0, 1]
    I: float   # genuine uncertainty, in [0, 1]
    F: float   # evidence AGAINST the claim, in [0, 1]
    note: str = ""  # model's one-line justification

    @property
    def compass(self) -> Compass:
        """View this claim's scores as a Compass (label = first 50 chars of text)."""
        return Compass(T=self.T, I=self.I, F=self.F, label=self.text[:50])

    @property
    def zone(self) -> str:
        """Epistemic zone for this claim, delegated to the Compass."""
        return self.compass.zone

    @property
    def zone_emoji(self) -> str:
        """Colored marker for this claim's zone, delegated to the Compass."""
        return self.compass.zone_emoji
71
+
72
+
73
@dataclass
class AnalysisResult:
    """Full result of running a text through the Third Answer framework."""

    T: float                    # overall truth score, [0, 1]
    I: float                    # overall indeterminacy, [0, 1]
    F: float                    # overall falsity, [0, 1]
    zone: str                   # zone name reported by the model
    zone_reason: str            # model's explanation for the zone
    error_types: List[str]      # detected error categories, possibly empty
    error_explanation: str      # model's explanation of the errors
    honest_version: str         # rewrite of the input with uncertainty markers
    claims: List[Claim]         # per-claim breakdown
    recommendation: str         # e.g. "trust", "investigate"
    provider_name: str = ""     # which backend produced this result

    @property
    def compass(self) -> Compass:
        """The overall T,I,F triple as a Compass."""
        return Compass(T=self.T, I=self.I, F=self.F)

    @property
    def zone_emoji(self) -> str:
        """Colored marker for the recomputed zone."""
        return self.compass.zone_emoji

    @property
    def zone_name(self) -> str:
        """Human-readable zone name."""
        return self.compass.zone_name

    @property
    def zone_action(self) -> str:
        """Recommended action for the zone."""
        return self.compass.zone_action

    @property
    def confidence(self) -> float:
        """max(0, T - I - F), per the Compass definition."""
        return self.compass.confidence

    @property
    def is_paraconsistent(self) -> bool:
        """True when T + F > 1 (evidence both for and against)."""
        return self.compass.is_paraconsistent

    @property
    def has_errors(self) -> bool:
        """True when the model flagged at least one error type."""
        return bool(self.error_types)

    def __repr__(self) -> str:
        suffix = f" ERRORS: {self.error_types}" if self.has_errors else ""
        return (
            f"Analysis(T={self.T}, I={self.I}, F={self.F}) "
            f"→ {self.zone_emoji} {self.zone_name} | {self.recommendation}{suffix}"
        )
122
+
123
+
124
+ def _parse_json(raw: str) -> dict:
125
+ """Extract JSON from LLM response."""
126
+ if raw.startswith("```"):
127
+ raw = raw.split("```")[1]
128
+ if raw.startswith("json"):
129
+ raw = raw[4:]
130
+ raw = raw.strip()
131
+ match = re.search(r'\{[\s\S]*\}', raw)
132
+ if match:
133
+ raw = match.group()
134
+ return json.loads(raw)
135
+
136
+
137
+ def _get_provider(provider: str, api_key: Optional[str] = None,
138
+ model: Optional[str] = None) -> Provider:
139
+ """Instantiate a provider by name."""
140
+ if provider == "groq":
141
+ from .providers.groq import GroqProvider
142
+ return GroqProvider(api_key=api_key, model=model or "llama-3.3-70b-versatile")
143
+ elif provider == "ollama":
144
+ from .providers.ollama import OllamaProvider
145
+ return OllamaProvider(model=model or "llama3.1")
146
+ else:
147
+ raise ValueError(f"Unknown provider: {provider}. Use 'groq' or 'ollama'.")
148
+
149
+
150
def analyze(
    text: str,
    provider: str = "groq",
    api_key: Optional[str] = None,
    model: Optional[str] = None,
    context: str = "",
) -> AnalysisResult:
    """
    Score any text with the Third Answer T,I,F framework.

    Args:
        text: The AI-generated (or any other) text to evaluate.
        provider: "groq" (free, needs key) or "ollama" (local, free).
        api_key: API key for the provider (not needed for ollama).
        model: Model override (default depends on provider).
        context: Optional domain context (e.g. "medicine", "legal").

    Returns:
        AnalysisResult carrying T, I, F, the zone, per-claim scores,
        detected error types, and an "honest" rewrite of the text.
    """
    llm = _get_provider(provider, api_key, model)

    # Assemble the user message, injecting the optional domain context.
    parts = ["Analyze this text using the Third Answer framework:\n\n"]
    if context:
        parts.append(f"Context/domain: {context}\n\n")
    parts.append(f'TEXT:\n"""\n{text}\n"""\n\nRespond ONLY in valid JSON.')

    data = _parse_json(llm.complete(SYSTEM_PROMPT, "".join(parts)))

    def clamp01(value) -> float:
        # The model occasionally returns scores outside [0, 1].
        return max(0.0, min(1.0, float(value)))

    for axis in ("T", "I", "F"):
        data[axis] = clamp01(data.get(axis, 0.5))

    # Some models emit "key_claims" instead of "claims"; accept either.
    claims = [
        Claim(
            text=c.get("claim", ""),
            T=clamp01(c.get("T", 0.5)),
            I=clamp01(c.get("I", 0.5)),
            F=clamp01(c.get("F", 0.1)),
            note=c.get("note", ""),
        )
        for c in data.get("claims", data.get("key_claims", []))
    ]

    return AnalysisResult(
        T=data["T"],
        I=data["I"],
        F=data["F"],
        # Fall back to local classification when the model omits the zone.
        zone=data.get("zone", _classify_zone(data["T"], data["I"], data["F"])),
        zone_reason=data.get("zone_reason", ""),
        error_types=data.get("error_types", []),
        error_explanation=data.get("error_explanation", ""),
        honest_version=data.get("honest_version", text),
        claims=claims,
        recommendation=data.get("recommendation", "investigate"),
        provider_name=llm.name,
    )
209
+
210
+
211
def decompose(
    text: str,
    provider: str = "groq",
    api_key: Optional[str] = None,
    model: Optional[str] = None,
) -> List[Claim]:
    """
    Split text into individual claims, each scored with T,I,F.

    Thin wrapper around analyze() that returns only the claim list.

    Args:
        text: Text containing multiple claims.
        provider: "groq" or "ollama".
        api_key: API key (not needed for ollama).
        model: Model override (default depends on provider).

    Returns:
        List of Claim objects.
    """
    return analyze(text, provider=provider, api_key=api_key, model=model).claims
thirdanswer/compare.py ADDED
@@ -0,0 +1,117 @@
1
+ """
2
+ compare() — Compare two AI responses using T,I,F.
3
+
4
+ Usage:
5
+ from thirdanswer import compare
6
+ diff = compare(response_a, response_b, provider="groq", api_key="gsk_...")
7
+ """
8
+
9
+ import json
10
+ import re
11
+ from dataclasses import dataclass, field
12
+ from typing import List, Optional
13
+
14
+ from .compass import Compass, _classify_zone
15
+ from .analyzer import _parse_json, _get_provider
16
+
17
+
18
# System prompt sent verbatim to the LLM by compare(). It asks the model to
# score each response independently on T/I/F, list agreements and conflicts,
# judge which response is more epistemically honest, and reply as JSON in the
# schema below — compare() reads exactly these keys out of the parsed reply.
SYSTEM_PROMPT = """Compare two AI responses using the Third Answer framework.

For each response, assess T (Truth), I (Indeterminacy), F (Falsity) independently.

Then analyze:
1. Where they AGREE (both T high on same claim)
2. Where they CONTRADICT (one T high, other F high on same topic)
3. Where BOTH are uncertain (both I high)
4. Which is more epistemically honest (acknowledges limitations)

Respond in JSON:
{
  "response_a": {"T": 0.0, "I": 0.0, "F": 0.0, "zone": "..."},
  "response_b": {"T": 0.0, "I": 0.0, "F": 0.0, "zone": "..."},
  "agreement": 0.0,
  "agreements": ["point 1", "point 2"],
  "conflicts": ["conflict 1 with analysis"],
  "combined_zone": "consensus|ambiguity|contradiction|ignorance",
  "more_honest": "a|b|neither",
  "recommendation": "trust_a|trust_b|investigate|neither"
}"""
39
+
40
+
41
@dataclass
class ComparisonResult:
    """Outcome of comparing two AI responses under the Third Answer framework."""

    response_a_compass: Compass  # T,I,F reading for the first response
    response_b_compass: Compass  # T,I,F reading for the second response
    agreement: float             # fraction of overlap, 0.0–1.0
    agreements: List[str]        # points both responses support
    conflicts: List[str]         # points where they contradict each other
    combined_zone: str           # zone of the pair taken together
    more_honest: str             # "a", "b", or "neither"
    recommendation: str          # e.g. "trust_a", "investigate"

    def __repr__(self) -> str:
        header = (
            f"Comparison:\n"
            f" A: {self.response_a_compass}\n"
            f" B: {self.response_b_compass}\n"
        )
        summary = (
            f" Agreement: {self.agreement:.0%} | "
            f"Conflicts: {len(self.conflicts)} | "
            f"More honest: {self.more_honest} | "
            f"Rec: {self.recommendation}"
        )
        return header + summary
63
+
64
+
65
def compare(
    response_a: str,
    response_b: str,
    provider: str = "groq",
    api_key: Optional[str] = None,
    model: Optional[str] = None,
) -> ComparisonResult:
    """
    Compare two AI responses: where they agree, where they conflict, and
    which is more epistemically honest.

    Args:
        response_a: First AI response
        response_b: Second AI response
        provider: "groq" or "ollama"
        api_key: API key (not needed for ollama)
        model: Model override (default depends on provider)

    Returns:
        ComparisonResult with agreement score, conflicts, and recommendation
    """
    p = _get_provider(provider, api_key, model)

    user_msg = (
        f'RESPONSE A:\n"""\n{response_a}\n"""\n\n'
        f'RESPONSE B:\n"""\n{response_b}\n"""\n\n'
        f"Respond ONLY in valid JSON."
    )

    raw = p.complete(SYSTEM_PROMPT, user_msg)
    data = _parse_json(raw)

    def _clamp01(value) -> float:
        # Model-reported scores occasionally fall outside [0, 1].
        return max(0.0, min(1.0, float(value)))

    def _score_compass(block: dict, label: str) -> Compass:
        # One Compass per response, with every score clamped.
        return Compass(
            T=_clamp01(block.get("T", 0.5)),
            I=_clamp01(block.get("I", 0.5)),
            F=_clamp01(block.get("F", 0.1)),
            label=label,
        )

    return ComparisonResult(
        response_a_compass=_score_compass(data.get("response_a", {}), "Response A"),
        response_b_compass=_score_compass(data.get("response_b", {}), "Response B"),
        # FIX: agreement was previously the only model-reported number NOT
        # clamped, so an out-of-range value (e.g. 85 meaning 85%) leaked into
        # the {:.0%}-formatted repr. Clamp it like every other score.
        agreement=_clamp01(data.get("agreement", 0.5)),
        agreements=data.get("agreements", []),
        conflicts=data.get("conflicts", []),
        combined_zone=data.get("combined_zone", "ambiguity"),
        more_honest=data.get("more_honest", "neither"),
        recommendation=data.get("recommendation", "investigate"),
    )
thirdanswer/compass.py ADDED
@@ -0,0 +1,156 @@
1
+ """
2
+ Compass — Core T,I,F logic. No LLM needed, pure neutrosophic math.
3
+
4
+ Usage:
5
+ from thirdanswer import Compass
6
+ c = Compass(T=0.7, I=0.4, F=0.5)
7
+ c.zone # "contradiction"
8
+ c.confidence # 0.0
9
+ c.is_paraconsistent # True
10
+ """
11
+
12
+ from dataclasses import dataclass, field
13
+ from typing import Optional
14
+
15
+
16
# Registry of the epistemic zones, keyed by the strings _classify_zone returns.
# Each entry holds a display name, a colored emoji marker, the recommended
# action, and a one-line description. Besides the book's Four Zones there is a
# fifth key, "consensus_against": consensus in the NEGATIVE direction (strong
# evidence AGAINST the claim — high F, low T).
ZONES = {
    "consensus": {
        "name": "Consensus",
        "emoji": "\U0001f7e2",
        "action": "Trust — but verify sources for critical decisions",
        "description": "Evidence supports the claim. Little uncertainty. Negligible counter-evidence.",
    },
    "consensus_against": {
        "name": "Consensus (Against)",
        "emoji": "\U0001f534",
        "action": "Reject — the evidence contradicts this claim",
        "description": "Strong evidence against the claim. Likely false or misleading.",
    },
    "ambiguity": {
        "name": "Ambiguity",
        "emoji": "\U0001f7e1",
        "action": "Investigate — the evidence is insufficient",
        "description": "High indeterminacy. The data doesn't exist yet or is too sparse.",
    },
    "contradiction": {
        "name": "Contradiction",
        "emoji": "\U0001f7e0",
        "action": "Investigate both sides — the evidence conflicts",
        "description": "Both T and F are significant. Evidence supports AND contradicts.",
    },
    "ignorance": {
        "name": "Ignorance",
        "emoji": "\u26ab",
        "action": "Stop — the AI is operating in the dark",
        "description": "No meaningful information. The output is not grounded in anything.",
    },
}
48
+
49
+
50
+ def _classify_zone(t: float, i: float, f: float) -> str:
51
+ """Classify a T,I,F triple into one of the Four Zones."""
52
+ if i > 0.5:
53
+ if t < 0.3 and f < 0.3:
54
+ return "ignorance"
55
+ return "ambiguity"
56
+ if t > 0.5 and f > 0.4:
57
+ return "contradiction"
58
+ if t > 0.5 and i < 0.35 and f < 0.3:
59
+ return "consensus"
60
+ if f > 0.5 and t < 0.3:
61
+ return "consensus_against"
62
+ if t < 0.3 and f < 0.3 and i < 0.3:
63
+ return "ignorance"
64
+ if i > 0.35:
65
+ return "ambiguity"
66
+ if t > 0.3 and f > 0.3:
67
+ return "contradiction"
68
+ return "ambiguity"
69
+
70
+
71
@dataclass
class Compass:
    """
    The epistemic compass: three independent needles.

    Args:
        T: Truth — degree of evidence supporting the claim [0, 1]
        I: Indeterminacy — degree of genuine uncertainty [0, 1]
        F: Falsity — degree of evidence contradicting the claim [0, 1]

    The three values are independent; T + I + F need not sum to 1.
    """

    T: float
    I: float
    F: float
    label: Optional[str] = None  # optional short tag shown in repr

    def __post_init__(self):
        # Validate eagerly so every property may assume values in [0, 1].
        for needle in ("T", "I", "F"):
            val = getattr(self, needle)
            if not 0.0 <= val <= 1.0:
                raise ValueError(f"{needle} must be between 0 and 1, got {val}")

    @property
    def zone(self) -> str:
        """The epistemic zone: consensus, ambiguity, contradiction, ignorance."""
        return _classify_zone(self.T, self.I, self.F)

    @property
    def zone_name(self) -> str:
        """Display name for the current zone."""
        return ZONES[self.zone]["name"]

    @property
    def zone_emoji(self) -> str:
        """Colored marker for the current zone."""
        return ZONES[self.zone]["emoji"]

    @property
    def zone_action(self) -> str:
        """Recommended action for the current zone."""
        return ZONES[self.zone]["action"]

    @property
    def zone_description(self) -> str:
        """One-line explanation of the current zone."""
        return ZONES[self.zone]["description"]

    @property
    def confidence(self) -> float:
        """C(sigma) = max(0, T - I - F)"""
        return max(0.0, self.T - self.I - self.F)

    @property
    def is_paraconsistent(self) -> bool:
        """True when T + F > 1 — contradictory evidence coexists."""
        return (self.T + self.F) > 1.0

    @property
    def tf_sum(self) -> float:
        """T + F — values above 1 mark a paraconsistent state."""
        return self.T + self.F

    @property
    def should_abstain(self) -> bool:
        """True when the responsible action is to not decide."""
        return self.zone in ("ignorance", "ambiguity") or self.I > 0.6

    def to_dict(self) -> dict:
        """Serialize the reading plus all derived fields into a plain dict."""
        return {
            "T": self.T,
            "I": self.I,
            "F": self.F,
            "zone": self.zone,
            "zone_name": self.zone_name,
            "zone_emoji": self.zone_emoji,
            "action": self.zone_action,
            "confidence": round(self.confidence, 4),
            "is_paraconsistent": self.is_paraconsistent,
            "should_abstain": self.should_abstain,
            "label": self.label,
        }

    def __repr__(self) -> str:
        flags = " PARACONSISTENT" if self.is_paraconsistent else ""
        tag = f' "{self.label}"' if self.label else ""
        return (
            f"Compass(T={self.T}, I={self.I}, F={self.F}) "
            f"→ {self.zone_emoji} {self.zone_name}{flags}{tag}"
        )
thirdanswer/errors.py ADDED
@@ -0,0 +1,66 @@
1
+ """
2
+ Error taxonomy from Chapter 1 of The Third Answer.
3
+ Four types of AI error, ordered by detectability and danger.
4
+ """
5
+
6
+ from dataclasses import dataclass
7
+ from typing import Optional
8
+
9
+
10
@dataclass
class AIError:
    """A detected AI error with type, severity, and explanation."""

    # Machine-readable category, e.g. "fabrication".
    type: str
    # How damaging this class of error tends to be.
    severity: str
    # How easy the error is to catch in practice.
    detectability: str
    # Generic description of the category.
    description: str
    # Case-specific explanation, when available.
    explanation: Optional[str] = None

    @property
    def emoji(self) -> str:
        """Colored marker for the error type ('?' for unrecognized types)."""
        markers = {
            "fabrication": "\U0001f534",
            "distortion": "\U0001f7e0",
            "conflation": "\U0001f7e1",
            "confident_ignorance": "\u26ab",
        }
        return markers.get(self.type, "\u2753")

    def __repr__(self) -> str:
        detail = self.explanation or self.description
        return f"{self.emoji} {self.type.upper()}: {detail}"
31
+
32
+
33
# Reference table for the four error categories (Chapter 1 taxonomy), keyed by
# the same strings AIError.type and the analyzer's "error_types" use. Each
# entry carries a display name, severity, detectability, a description, a
# concrete example, and a hint for how to verify the suspect claim.
ERROR_TYPES = {
    "fabrication": {
        "name": "Fabrication",
        "severity": "high",
        "detectability": "medium",
        "description": "The AI invents something that doesn't exist and presents it as fact.",
        "example": "Citing a legal case that was never filed.",
        "detection_hint": "Search for the specific claim in authoritative databases.",
    },
    "distortion": {
        "name": "Distortion",
        "severity": "high",
        "detectability": "low",
        "description": "The AI takes a real fact and warps it.",
        "example": "'Moderate evidence' becomes 'strong evidence'.",
        "detection_hint": "Read the ORIGINAL source, not just the AI summary.",
    },
    "conflation": {
        "name": "Conflation",
        "severity": "medium",
        "detectability": "medium",
        "description": "The AI merges two true things into one false thing.",
        "example": "A real author paired with a book they didn't write.",
        "detection_hint": "Verify each individual claim separately.",
    },
    "confident_ignorance": {
        "name": "Confident Ignorance",
        "severity": "very_high",
        "detectability": "very_low",
        "description": "The AI has no reliable information but produces a confident response anyway.",
        "example": "Inventing prevalence statistics for a rare disease in a remote province.",
        "detection_hint": "Ask: Does reliable data on this topic even EXIST?",
    },
}
thirdanswer/honest.py ADDED
@@ -0,0 +1,143 @@
1
+ """
2
+ ask() — Generate honest responses WITH T,I,F built in.
3
+
4
+ Usage:
5
+ from thirdanswer import ask
6
+ r = ask("Is intermittent fasting healthy?", provider="groq", api_key="gsk_...")
7
+ print(r.answer)
8
+ print(r.what_i_dont_know)
9
+ """
10
+
11
+ import json
12
+ import re
13
+ from dataclasses import dataclass, field
14
+ from typing import List, Optional
15
+
16
+ from .compass import Compass, _classify_zone
17
+ from .analyzer import Claim, _parse_json, _get_provider
18
+
19
+
20
# System prompt sent verbatim to the LLM by ask(). Unlike the analyzer prompt,
# this one asks the model to ANSWER the question while self-assessing: a
# certainty bucket per claim, an overall T/I/F triple and zone, and an explicit
# "what_i_dont_know" statement — all as JSON in the schema below, which ask()
# reads key-for-key.
SYSTEM_PROMPT = """You answer questions honestly using the Third Answer framework.

For every response:
1. Answer the question accurately
2. For each key claim, indicate certainty:
   - "well-established" (T > 0.8, I < 0.15)
   - "debated" (T > 0.5, F > 0.2)
   - "uncertain" (I > 0.5)
   - "contradicted" (T > 0.3 AND F > 0.3)
   - "unreliable" (I > 0.6, T < 0.3)
3. State explicitly what you DON'T know

Respond in JSON:
{
  "answer": "detailed answer with inline uncertainty markers",
  "T": 0.0, "I": 0.0, "F": 0.0,
  "zone": "consensus|ambiguity|contradiction|ignorance",
  "zone_reason": "why",
  "claims": [
    {"claim": "...", "certainty": "well-established|debated|uncertain|contradicted|unreliable", "T": 0.0, "I": 0.0, "F": 0.0}
  ],
  "recommendation": "trust|investigate|consult_expert|do_not_use",
  "what_i_dont_know": "explicit statement of unknowns"
}"""
44
+
45
+
46
@dataclass
class HonestResponse:
    """An AI answer bundled with its own epistemic self-assessment."""

    answer: str                 # the answer text, with uncertainty markers
    T: float                    # overall truth score, [0, 1]
    I: float                    # overall indeterminacy, [0, 1]
    F: float                    # overall falsity, [0, 1]
    zone: str                   # zone reported by the model
    zone_reason: str            # model's explanation for the zone
    claims: List[Claim]         # per-claim breakdown
    recommendation: str         # e.g. "trust", "consult_expert"
    what_i_dont_know: str       # explicit statement of unknowns
    provider_name: str = ""     # which backend produced this response

    @property
    def compass(self) -> Compass:
        """The overall T,I,F triple as a Compass."""
        return Compass(T=self.T, I=self.I, F=self.F)

    @property
    def zone_emoji(self) -> str:
        """Colored marker for the recomputed zone."""
        return self.compass.zone_emoji

    @property
    def zone_name(self) -> str:
        """Human-readable zone name."""
        return self.compass.zone_name

    @property
    def confidence(self) -> float:
        """max(0, T - I - F), per the Compass definition."""
        return self.compass.confidence

    @property
    def is_paraconsistent(self) -> bool:
        """True when T + F > 1 (evidence both for and against)."""
        return self.compass.is_paraconsistent

    def __repr__(self) -> str:
        if len(self.answer) > 80:
            preview = self.answer[:80] + "..."
        else:
            preview = self.answer
        return (
            f"HonestResponse({self.zone_emoji} {self.zone_name} | "
            f"T={self.T} I={self.I} F={self.F})\n"
            f" {preview}"
        )
87
+
88
+
89
def ask(
    question: str,
    provider: str = "groq",
    api_key: Optional[str] = None,
    model: Optional[str] = None,
    domain: str = "general",
) -> HonestResponse:
    """
    Ask a question and receive an answer WITH its own T,I,F assessment.

    The model answers AND states explicitly what it does not know.

    Args:
        question: Your question.
        provider: "groq" (free) or "ollama" (local).
        api_key: API key (not needed for ollama).
        model: Model override.
        domain: Context domain (general, medicine, legal, etc.).

    Returns:
        HonestResponse with answer, T, I, F, zone, and what_i_dont_know.
    """
    llm = _get_provider(provider, api_key, model)

    prompt = f"Domain: {domain}\nQuestion: {question}\n\nRespond in JSON."
    data = _parse_json(llm.complete(SYSTEM_PROMPT, prompt))

    def clamp01(value) -> float:
        # The model occasionally returns scores outside [0, 1].
        return max(0.0, min(1.0, float(value)))

    for axis in ("T", "I", "F"):
        data[axis] = clamp01(data.get(axis, 0.5))

    claims = [
        Claim(
            text=c.get("claim", ""),
            T=clamp01(c.get("T", 0.5)),
            I=clamp01(c.get("I", 0.5)),
            F=clamp01(c.get("F", 0.1)),
            note=c.get("certainty", ""),
        )
        for c in data.get("claims", [])
    ]

    return HonestResponse(
        answer=data.get("answer", ""),
        T=data["T"],
        I=data["I"],
        F=data["F"],
        # Fall back to local classification when the model omits the zone.
        zone=data.get("zone", _classify_zone(data["T"], data["I"], data["F"])),
        zone_reason=data.get("zone_reason", ""),
        claims=claims,
        recommendation=data.get("recommendation", "investigate"),
        what_i_dont_know=data.get("what_i_dont_know", ""),
        provider_name=llm.name,
    )
@@ -0,0 +1 @@
1
+ """LLM providers for thirdanswer."""
@@ -0,0 +1,19 @@
1
+ """Base provider interface."""
2
+
3
+ from abc import ABC, abstractmethod
4
+ from typing import Optional
5
+
6
+
7
class Provider(ABC):
    """Abstract interface every chat-completion LLM backend must implement."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Short identifier for logging, e.g. 'groq/<model>'."""
        ...

    @abstractmethod
    def complete(self, system: str, user: str, temperature: float = 0.3,
                 max_tokens: int = 2000) -> str:
        """Send one completion request and return the model's raw text reply."""
        ...
@@ -0,0 +1,35 @@
1
+ """Groq provider — free, fast LLM inference."""
2
+
3
+ from .base import Provider
4
+
5
+
6
class GroqProvider(Provider):
    """
    Groq cloud inference. Free tier: ~30 req/min.
    Get API key at console.groq.com
    """

    def __init__(self, api_key: str, model: str = "llama-3.3-70b-versatile"):
        # Import lazily so the package installs without the optional dependency.
        try:
            from groq import Groq
        except ImportError:
            raise ImportError("Install groq: pip install groq")
        self._model = model
        self._client = Groq(api_key=api_key)

    @property
    def name(self) -> str:
        """Identifier in the form 'groq/<model>'."""
        return f"groq/{self._model}"

    def complete(self, system: str, user: str, temperature: float = 0.3,
                 max_tokens: int = 2000) -> str:
        """Run one chat completion and return the stripped assistant message."""
        messages = [
            {"role": "system", "content": system},
            {"role": "user", "content": user},
        ]
        reply = self._client.chat.completions.create(
            model=self._model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        return reply.choices[0].message.content.strip()
@@ -0,0 +1,41 @@
1
+ """Ollama provider — local, free, private."""
2
+
3
+ import json
4
+ import urllib.request
5
+ from .base import Provider
6
+
7
+
8
class OllamaProvider(Provider):
    """
    Ollama local inference. No API key needed.
    Install: https://ollama.com/download
    """

    def __init__(self, model: str = "llama3.1", host: str = "http://localhost:11434"):
        self._model = model
        self._host = host

    @property
    def name(self) -> str:
        """Identifier in the form 'ollama/<model>'."""
        return f"ollama/{self._model}"

    def complete(self, system: str, user: str, temperature: float = 0.3,
                 max_tokens: int = 2000) -> str:
        """POST one chat request to the local Ollama server (120 s timeout)."""
        body = {
            "model": self._model,
            "messages": [
                {"role": "system", "content": system},
                {"role": "user", "content": user},
            ],
            # Non-streaming: one JSON document instead of chunked events.
            "stream": False,
            "options": {"temperature": temperature, "num_predict": max_tokens},
        }
        request = urllib.request.Request(
            f"{self._host}/api/chat",
            data=json.dumps(body).encode(),
            headers={"Content-Type": "application/json"},
        )
        with urllib.request.urlopen(request, timeout=120) as response:
            reply = json.loads(response.read())
        return reply["message"]["content"].strip()
@@ -0,0 +1,141 @@
1
+ Metadata-Version: 2.4
2
+ Name: thirdanswer
3
+ Version: 0.1.0
4
+ Summary: The Third Answer — evaluate AI uncertainty with T,I,F (Truth, Indeterminacy, Falsity)
5
+ Author: Florentin Smarandache
6
+ Author-email: Maikel Yelandi Leyva-Vazquez <mleyvaz@gmail.com>
7
+ License: MIT
8
+ Project-URL: Homepage, https://github.com/mleyvaz/thirdanswer
9
+ Project-URL: Documentation, https://the-third-answer.streamlit.app
10
+ Project-URL: Repository, https://github.com/mleyvaz/thirdanswer
11
+ Project-URL: Issues, https://github.com/mleyvaz/thirdanswer/issues
12
+ Keywords: AI,uncertainty,neutrosophic,hallucination,LLM,truth,indeterminacy,falsity,paraconsistency,epistemic,confidence,evaluation
13
+ Classifier: Development Status :: 3 - Alpha
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Intended Audience :: Science/Research
16
+ Classifier: License :: OSI Approved :: MIT License
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
19
+ Requires-Python: >=3.9
20
+ Description-Content-Type: text/markdown
21
+ License-File: LICENSE
22
+ Provides-Extra: groq
23
+ Requires-Dist: groq>=0.4.0; extra == "groq"
24
+ Provides-Extra: all
25
+ Requires-Dist: groq>=0.4.0; extra == "all"
26
+ Dynamic: license-file
27
+
28
+ # thirdanswer
29
+
30
+ **The Third Answer** — evaluate AI uncertainty with T,I,F (Truth, Indeterminacy, Falsity).
31
+
32
+ Based on the book *"The Third Answer: Why AI Doesn't Know What It Doesn't Know — And How Ancient Logic Can Fix It"* by Leyva-Vazquez & Smarandache (2026).
33
+
34
+ ## Install
35
+
36
+ ```bash
37
+ pip install thirdanswer
38
+ ```
39
+
40
+ For LLM analysis (free via Groq):
41
+ ```bash
42
+ pip install thirdanswer[groq]
43
+ ```
44
+
45
+ ## Quick Start
46
+
47
+ ### Without LLM (pure logic)
48
+
49
+ ```python
50
+ from thirdanswer import Compass
51
+
52
+ # Create a reading
53
+ c = Compass(T=0.7, I=0.4, F=0.5)
54
+ print(c.zone) # "contradiction"
55
+ print(c.zone_emoji) # "🟠"
56
+ print(c.confidence) # 0.0
57
+ print(c.is_paraconsistent) # True (T+F=1.2 > 1)
58
+ print(c.should_abstain) # False
59
+ print(c.zone_action) # "Investigate both sides..."
60
+ ```
61
+
62
+ ### Analyze any text (with Groq, free)
63
+
64
+ ```python
65
+ from thirdanswer import analyze
66
+
67
+ r = analyze(
68
+ "Coffee is definitively good for your health",
69
+ provider="groq",
70
+ api_key="gsk_..." # Free at console.groq.com
71
+ )
72
+ print(r) # Analysis(T=0.55, I=0.30, F=0.45) → 🟠 Contradiction
73
+ print(r.zone) # "contradiction"
74
+ print(r.errors) # ["confident_ignorance"] if detected
75
+ print(r.honest) # Rewritten version with uncertainty
76
+
77
+ # Claim-by-claim
78
+ for claim in r.claims:
79
+ print(f"{claim.zone_emoji} {claim.text[:50]} T={claim.T}")
80
+ ```
81
+
82
+ ### Ask honest questions
83
+
84
+ ```python
85
+ from thirdanswer import ask
86
+
87
+ r = ask("Is intermittent fasting healthy?", provider="groq", api_key="gsk_...")
88
+ print(r.answer)
89
+ print(r.what_i_dont_know) # "Long-term effects beyond 2 years..."
90
+ print(r.zone) # "contradiction"
91
+ ```
92
+
93
+ ### Compare two AI responses
94
+
95
+ ```python
96
+ from thirdanswer import compare
97
+
98
+ diff = compare(chatgpt_response, claude_response, provider="groq", api_key="gsk_...")
99
+ print(f"Agreement: {diff.agreement:.0%}")
100
+ print(f"Conflicts: {diff.conflicts}")
101
+ print(f"More honest: {diff.more_honest}")
102
+ ```
103
+
104
+ ### Decompose text into claims
105
+
106
+ ```python
107
+ from thirdanswer import decompose
108
+
109
+ claims = decompose("Long AI response...", provider="groq", api_key="gsk_...")
110
+ for c in claims:
111
+ print(f"{c.zone_emoji} T={c.T} I={c.I} F={c.F} | {c.text}")
112
+ ```
113
+
114
+ ## The Four Zones
115
+
116
+ | Zone | Condition | Action |
117
+ |------|-----------|--------|
118
+ | 🟢 Consensus | T high, I low, F low | **Trust** |
119
+ | 🟡 Ambiguity | I high | **Investigate** |
120
+ | 🟠 Contradiction | T high AND F high | **Explore both sides** |
121
+ | ⚫ Ignorance | All low or I overwhelming | **Stop** |
122
+
123
+ ## Providers
124
+
125
+ | Provider | Cost | API Key | Install |
126
+ |----------|------|---------|---------|
127
+ | Groq | Free (~30 req/min) | [console.groq.com](https://console.groq.com) | `pip install thirdanswer[groq]` |
128
+ | Ollama | Free (local) | None | [ollama.com](https://ollama.com) |
129
+
130
+ `Compass` works without any provider — pure logic, no API calls.
131
+
132
+ ## Links
133
+
134
+ - [The Third Answer App](https://the-third-answer.streamlit.app)
135
+ - [Prompt Templates](https://github.com/mleyvaz/thirdanswer-prompts)
136
+ - [Book](https://thethirdanswer.com) *(coming soon)*
137
+ - [ORCID: M. Leyva-Vazquez](https://orcid.org/0000-0001-5401-0018)
138
+
139
+ ## License
140
+
141
+ MIT
@@ -0,0 +1,15 @@
1
+ thirdanswer/__init__.py,sha256=PrwGwYqgifqFPK7MWsNnRxJE7U6H49FZ4UXu9j1bV2o,1006
2
+ thirdanswer/analyzer.py,sha256=A-NCmRw2-lhJU9p0PckxMLqkmuddpDtwjCkwGjJ-pAE,6718
3
+ thirdanswer/compare.py,sha256=ACPu-hMmiZYjzGBNJj_0NLoJkh3Xkoo7fyjV4lWE3yQ,3622
4
+ thirdanswer/compass.py,sha256=FWw68Hv5dMH_DPPW8_V5JsnnNwZ8R1MLwnm2Wyhj_hw,4840
5
+ thirdanswer/errors.py,sha256=kAbDMOLk_UD3idpxI2-051xSpxnUGZRuG2wHW9qO7KI,2263
6
+ thirdanswer/honest.py,sha256=31gFFLjRopeRzcKlKpyCwOejMMsqVlDRAZ3DNFaW40E,4123
7
+ thirdanswer/providers/__init__.py,sha256=WbATz8g14l-s1bDA1k6SQxGGOuAj9aQnQS-ay5vWjek,37
8
+ thirdanswer/providers/base.py,sha256=anoVsym66hg9uh99rh5t9lRb1rsO8UDtY-7yWAdX3ik,458
9
+ thirdanswer/providers/groq.py,sha256=pf2Bdkcgnds7-hLNDra01lL9aBZcrG4BAhp1k8tAXSo,1078
10
+ thirdanswer/providers/ollama.py,sha256=tERWztPfu9JsMZ3l9nTcF65oKG5kqCP_zIeOzmgccj0,1262
11
+ thirdanswer-0.1.0.dist-info/licenses/LICENSE,sha256=vlYtwpqDkkP7UnXBpzfm9WTqMnJ5-nBn1FrGyZfyKLY,1108
12
+ thirdanswer-0.1.0.dist-info/METADATA,sha256=8WFOrE6gXmZV-8F9Qx17yFHMBte3F_UGCah2hssob14,4418
13
+ thirdanswer-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
14
+ thirdanswer-0.1.0.dist-info/top_level.txt,sha256=Xnju8Nx_M4eGdhTk-xMFc1R4AdQJfvrWRXWMRAJ8CII,12
15
+ thirdanswer-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (82.0.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Maikel Yelandi Leyva-Vazquez, Florentin Smarandache
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1 @@
1
+ thirdanswer