meta-reasoning 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,41 @@
1
+ """
2
+ Meta-Reasoning SDK
3
+
4
+ Reasoning is not a property of the model —
5
+ it is an emergent dynamic of external control.
6
+ """
7
+
8
+ from .controller import CognitiveController
9
+ from .engine import CognitiveEngine, CycleResult, EngineResult
10
+ from .ledger import EpistemicLedger
11
+ from .metrics import compute_metrics
12
+ from .mutations import generate_mutations
13
+ from .substrate import LLMBackend, Substrate
14
+ from .types import (
15
+ CognitiveMetrics,
16
+ CognitiveMove,
17
+ LedgerEntry,
18
+ Mutation,
19
+ MutationType,
20
+ ReasoningTrace,
21
+ StructuredOutput,
22
+ )
23
+
24
# Explicit public API of the package: classes and helpers re-exported
# from the submodules above (sorted case-insensitively, functions last).
__all__ = [
    "CognitiveController",
    "CognitiveEngine",
    "CognitiveMetrics",
    "CognitiveMove",
    "CycleResult",
    "EngineResult",
    "EpistemicLedger",
    "LedgerEntry",
    "LLMBackend",
    "Mutation",
    "MutationType",
    "ReasoningTrace",
    "StructuredOutput",
    "Substrate",
    "compute_metrics",
    "generate_mutations",
]
@@ -0,0 +1,97 @@
1
+ """
2
+ Cognitive Controller (Level 2) — the heart of the SDK.
3
+
4
+ The controller is semantically blind. It does NOT evaluate whether
5
+ a response is "correct". It observes:
6
+ - form
7
+ - trajectory
8
+ - redundancy
9
+ - stall
10
+ - premature convergence
11
+
12
+ It is a conductor, a coach, a theatre director.
13
+ It does not play the instrument.
14
+ """
15
+
16
+ from __future__ import annotations
17
+
18
+ from .ledger import EpistemicLedger
19
+ from .metrics import compute_metrics
20
+ from .mutations import generate_mutations
21
+ from .types import (
22
+ CognitiveMetrics,
23
+ LedgerEntry,
24
+ Mutation,
25
+ ReasoningTrace,
26
+ StructuredOutput,
27
+ )
28
+
29
+
30
class CognitiveController:
    """Semantically-blind governor of reasoning dynamics.

    Watches only the *form* of reasoning (violations, repetition) and
    steers the next cycle through mutations; it never judges whether an
    answer is correct.
    """

    def __init__(
        self,
        ledger: EpistemicLedger | None = None,
        max_violations: int = 2,
    ) -> None:
        # A fresh ledger is created when the caller does not supply one.
        self._ledger = ledger or EpistemicLedger()
        self._max_violations = max_violations
        self._cycle = 0
        self._active_mutations: list[Mutation] = []

    @property
    def ledger(self) -> EpistemicLedger:
        """The ledger this controller records cycles into."""
        return self._ledger

    @property
    def cycle(self) -> int:
        """Number of cycles decided so far (1-based after the first)."""
        return self._cycle

    @property
    def active_mutations(self) -> list[Mutation]:
        """Copy of the mutations currently imposed on the substrate."""
        return list(self._active_mutations)

    def observe(self, output: StructuredOutput) -> CognitiveMetrics:
        """Observe an LLM output and compute cognitive metrics."""
        past_traces = [entry.trace for entry in self._ledger.entries]
        return compute_metrics(output.reasoning_trace, past_traces, self._active_mutations)

    def decide(self, output: StructuredOutput, metrics: CognitiveMetrics) -> str:
        """Advance the cycle, record it in the ledger, and return the outcome.

        Returns "failed" when the trace breaks a governance rule,
        otherwise "continued". Also refreshes the mutation set for the
        next generation as a side effect.
        """
        self._cycle += 1
        outcome, failure_reason = self._judge(metrics)

        self._ledger.record(LedgerEntry(
            cycle=self._cycle,
            trace=output.reasoning_trace,
            metrics=metrics,
            mutations_applied=list(self._active_mutations),
            outcome=outcome,
            failure_reason=failure_reason,
        ))

        # Prepare the cognitive pressure for the next generation.
        self._active_mutations = generate_mutations(metrics, self._cycle)

        return outcome

    def _judge(self, metrics: CognitiveMetrics) -> tuple[str, str | None]:
        """Map metrics to an (outcome, failure_reason) pair; touches no state."""
        if metrics.constraint_violations > self._max_violations:
            return "failed", f"Too many constraint violations ({metrics.constraint_violations})"
        # A fully repeated strategy past the warm-up cycles is a stall.
        if metrics.strategy_repetition >= 1.0 and self._cycle > 2:
            return "failed", "Complete strategy repetition — cognitive stall"
        return "continued", None

    def get_mutations(self) -> list[Mutation]:
        """Return the current set of mutations to impose on the next generation."""
        return list(self._active_mutations)

    def reset(self) -> None:
        """Reset cycle counter and mutations (ledger is preserved)."""
        self._active_mutations = []
        self._cycle = 0
@@ -0,0 +1,106 @@
1
+ """
2
+ Cognitive Engine — the governed reasoning loop.
3
+
4
+ This is NOT an agent. There is no autonomous decision-making.
5
+ There is a governed cognitive loop where linguistic generation is
6
+ continuously deformed by external control that does not participate
7
+ in semantic production.
8
+
9
+ Loop:
10
+ 1. Substrate generates structured output under constraints
11
+ 2. Controller observes the cognitive form (not content)
12
+ 3. Controller decides: continue, fail, or terminate
13
+ 4. Controller generates mutations for the next cycle
14
+ 5. Repeat until termination or max cycles
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ from dataclasses import dataclass, field
20
+
21
+ from .controller import CognitiveController
22
+ from .ledger import EpistemicLedger
23
+ from .substrate import LLMBackend, Substrate
24
+ from .types import CognitiveMetrics, StructuredOutput
25
+
26
+
27
@dataclass
class CycleResult:
    """Result of a single cognitive cycle."""
    # 1-based cycle index as reported by the controller.
    cycle: int
    # The substrate's structured answer for this cycle.
    output: StructuredOutput
    # Form-level observations computed from `output`.
    metrics: CognitiveMetrics
    # Controller verdict the engine branches on: "continued", "failed", or "terminated".
    outcome: str
34
+
35
+
36
@dataclass
class EngineResult:
    """Result of a complete reasoning session."""
    # Per-cycle records, in execution order.
    cycles: list[CycleResult] = field(default_factory=list)
    # Session verdict; stays "not_started" until run() updates it.
    final_outcome: str = "not_started"

    @property
    def final_output(self) -> StructuredOutput | None:
        """Output of the last completed cycle, or None before any cycle ran."""
        if not self.cycles:
            return None
        return self.cycles[-1].output
45
+
46
+
47
class CognitiveEngine:
    """Orchestrates the governed cognitive loop (see module docstring)."""

    def __init__(
        self,
        backend: LLMBackend,
        max_cycles: int = 6,
        max_violations: int = 2,
    ) -> None:
        # The substrate generates; the controller governs; one shared ledger.
        self._max_cycles = max_cycles
        self._substrate = Substrate(backend)
        self._ledger = EpistemicLedger()
        self._controller = CognitiveController(
            ledger=self._ledger,
            max_violations=max_violations,
        )

    @property
    def ledger(self) -> EpistemicLedger:
        """Shared ledger of all recorded cycles."""
        return self._ledger

    @property
    def controller(self) -> CognitiveController:
        """The governing controller instance."""
        return self._controller

    def run(self, task: str) -> EngineResult:
        """Execute the full governed reasoning loop on a task."""
        session = EngineResult()

        for _ in range(self._max_cycles):
            summary = self._ledger.summary_for_prompt()

            # Generate under the constraints imposed after the previous cycle.
            output = self._substrate.generate(
                task=task,
                mutations=self._controller.get_mutations(),
                history_summary=summary if summary else None,
            )

            metrics = self._controller.observe(output)
            verdict = self._controller.decide(output, metrics)

            session.cycles.append(CycleResult(
                cycle=self._controller.cycle,
                output=output,
                metrics=metrics,
                outcome=verdict,
            ))

            # A failed or terminated cycle ends the session immediately.
            if verdict in ("failed", "terminated"):
                session.final_outcome = verdict
                return session

        session.final_outcome = "max_cycles_reached"
        return session
@@ -0,0 +1,67 @@
1
+ """
2
+ Epistemic Ledger (Level 3) — structural memory.
3
+
4
+ Not "memory" in the RAG sense. This is:
5
+ - a trace of cognitive transformations
6
+ - a history of strategies used
7
+ - a map of failures
8
+
9
+ Its purpose: never return to the same mental state.
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import json
15
+ from pathlib import Path
16
+
17
+ from .types import CognitiveMove, LedgerEntry
18
+
19
+
20
class EpistemicLedger:
    """Append-only store of cognitive trajectories."""

    def __init__(self) -> None:
        self._entries: list[LedgerEntry] = []

    @property
    def entries(self) -> list[LedgerEntry]:
        # Defensive copy: callers must not mutate the trajectory.
        return list(self._entries)

    @property
    def size(self) -> int:
        """Number of recorded cycles."""
        return len(self._entries)

    def record(self, entry: LedgerEntry) -> None:
        """Append one cycle's entry; entries are never removed."""
        self._entries.append(entry)

    def failures(self) -> list[LedgerEntry]:
        """All entries whose outcome was 'failed'."""
        return [entry for entry in self._entries if entry.outcome == "failed"]

    def strategies_tried(self) -> list[tuple[CognitiveMove, ...]]:
        """Move sequence of every recorded cycle, as hashable tuples."""
        return [tuple(entry.trace.moves) for entry in self._entries]

    def summary_for_prompt(self, max_entries: int = 5) -> str:
        """Generate a concise summary for the substrate to avoid repetition."""
        if not self._entries:
            return ""
        lines = []
        for entry in self._entries[-max_entries:]:
            moves_str = ", ".join(move.value for move in entry.trace.moves)
            status = entry.outcome
            if entry.failure_reason:
                status += f" ({entry.failure_reason})"
            lines.append(f" Cycle {entry.cycle}: [{moves_str}] → {status}")
        return "\n".join(lines)

    def save(self, path: str | Path) -> None:
        """Serialize all entries as a JSON array to *path*."""
        serialized = [entry.model_dump(mode="json") for entry in self._entries]
        Path(path).write_text(json.dumps(serialized, indent=2))

    def load(self, path: str | Path) -> None:
        """Replace entries with those stored at *path*; no-op if the file is missing."""
        target = Path(path)
        if not target.exists():
            return
        raw = json.loads(target.read_text())
        self._entries = [LedgerEntry.model_validate(item) for item in raw]
@@ -0,0 +1,99 @@
1
+ """
2
+ Cognitive Metrics — the semantically-blind observation layer.
3
+
4
+ These metrics do NOT measure accuracy, correctness, or truth.
5
+ They measure form, trajectory, redundancy, stall, and premature convergence.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import math
11
+ from collections import Counter
12
+
13
+ from .types import CognitiveMetrics, CognitiveMove, Mutation, MutationType, ReasoningTrace
14
+
15
+
16
def compute_metrics(
    trace: ReasoningTrace,
    history: list[ReasoningTrace],
    active_mutations: list[Mutation],
) -> CognitiveMetrics:
    """Assemble the full metric bundle for one reasoning trace.

    Every value describes form (diversity, repetition, stall,
    constraint obedience) — never correctness.
    """
    moves = trace.moves
    return CognitiveMetrics(
        entropy=_move_entropy(moves),
        strategy_repetition=_strategy_repetition(trace, history),
        depth_without_novelty=_depth_without_novelty(trace, history),
        constraint_violations=_count_violations(trace, active_mutations),
        premature_closure=_premature_closure(trace),
        dominant_move=_dominant_move(moves),
    )
30
+
31
+
32
+ def _move_entropy(moves: list[CognitiveMove]) -> float:
33
+ if not moves:
34
+ return 0.0
35
+ counts = Counter(moves)
36
+ total = len(moves)
37
+ return -sum((c / total) * math.log2(c / total) for c in counts.values())
38
+
39
+
40
+ def _strategy_repetition(trace: ReasoningTrace, history: list[ReasoningTrace]) -> float:
41
+ if not history:
42
+ return 0.0
43
+ current = tuple(trace.moves)
44
+ matches = sum(1 for h in history if tuple(h.moves) == current)
45
+ return matches / len(history)
46
+
47
+
48
def _depth_without_novelty(trace: ReasoningTrace, history: list[ReasoningTrace]) -> int:
    """Length of the stall streak: how long reasoning has continued without
    the current trace introducing any move type unseen in history.

    Returns 0 when there is no history, or when the current trace contains
    at least one move type never used before.

    NOTE(review): the backward loop below checks the *current* trace's
    moves against a growing union of recent history — it does not check
    whether those historical entries were themselves novel at the time,
    despite the inline comment. Confirm this matches the intended
    "depth without novelty" semantics.
    """
    if not history:
        return 0
    # Union of every move type used across all prior cycles.
    seen_types = set()
    for h in history:
        seen_types.update(h.moves)
    novel = set(trace.moves) - seen_types
    if novel:
        # The current trace broke new ground — no stall.
        return 0
    # count consecutive history entries also without novelty (backwards)
    streak = 1
    all_seen = set()
    for h in reversed(history):
        all_seen.update(h.moves)
        remaining = set(trace.moves) - all_seen
        if not remaining:
            # This recent slice already covers the current trace's moves.
            streak += 1
        else:
            break
    return streak
68
+
69
+
70
def _count_violations(trace: ReasoningTrace, mutations: list[Mutation]) -> int:
    """Number of active mutations the trace disobeys."""
    used = set(trace.moves)
    violations = 0
    # A mutation has exactly one type, so the branches are exclusive.
    for mutation in mutations:
        kind = mutation.type
        if kind == MutationType.BAN and mutation.target in used:
            violations += 1
        elif kind == MutationType.REQUIRE and mutation.target not in used:
            violations += 1
        elif (
            kind == MutationType.LIMIT_DEPTH
            and isinstance(mutation.parameter, int)
            and trace.depth > mutation.parameter
        ):
            violations += 1
    return violations
81
+
82
+
83
def _premature_closure(trace: ReasoningTrace) -> float:
    """Score in [0, 1]: high when the trace is both uniform and shallow."""
    if not trace.moves:
        return 0.0
    # Diversity relative to the full move vocabulary; depth saturates at 5.
    diversity = len(set(trace.moves)) / len(CognitiveMove)
    depth_factor = min(trace.depth / 5, 1.0)
    score = 1.0 - (diversity * 0.6 + depth_factor * 0.4)
    return max(0.0, score)
90
+
91
+
92
+ def _dominant_move(moves: list[CognitiveMove]) -> CognitiveMove | None:
93
+ if not moves:
94
+ return None
95
+ counts = Counter(moves)
96
+ most_common, freq = counts.most_common(1)[0]
97
+ if freq / len(moves) > 0.5:
98
+ return most_common
99
+ return None
@@ -0,0 +1,98 @@
1
+ """
2
+ Mutation Engine — generates cognitive mutations from observed metrics.
3
+
4
+ The controller does not say "reason better".
5
+ It says: "deduction is banned", "analogy is required", "compress to 2 concepts".
6
+ Every mutation is a formal operator, not a suggestion.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ from .types import CognitiveMetrics, CognitiveMove, Mutation, MutationType
12
+
13
+
14
+ def generate_mutations(metrics: CognitiveMetrics, cycle: int) -> list[Mutation]:
15
+ """Produce a list of mutations based on current cognitive metrics.
16
+ Pressure increases with cycle count — improvisation through constraint."""
17
+ mutations: list[Mutation] = []
18
+
19
+ # Ban dominant strategy
20
+ if metrics.dominant_move:
21
+ mutations.append(Mutation(
22
+ type=MutationType.BAN,
23
+ target=metrics.dominant_move,
24
+ reason=f"Move '{metrics.dominant_move.value}' dominates ({cycle=})",
25
+ ))
26
+
27
+ # Low entropy → require a move not yet seen
28
+ if metrics.entropy < 1.5:
29
+ forced = _pick_absent_move(metrics)
30
+ if forced:
31
+ mutations.append(Mutation(
32
+ type=MutationType.REQUIRE,
33
+ target=forced,
34
+ reason=f"Low entropy ({metrics.entropy:.2f}), forcing diversity",
35
+ ))
36
+
37
+ # Strategy repetition → force inversion + ban most frequent move
38
+ if metrics.strategy_repetition > 0.0:
39
+ mutations.append(Mutation(
40
+ type=MutationType.INVERT_CAUSALITY,
41
+ reason=f"Strategy repetition ({metrics.strategy_repetition:.2f}), forcing inversion",
42
+ ))
43
+ mutations.append(Mutation(
44
+ type=MutationType.FORCE_COMPRESSION,
45
+ parameter=2,
46
+ reason=f"Strategy repetition — compress to break pattern",
47
+ ))
48
+
49
+ # Premature closure → require contradiction search
50
+ if metrics.premature_closure > 0.6:
51
+ mutations.append(Mutation(
52
+ type=MutationType.REQUIRE_CONTRADICTION,
53
+ reason=f"Premature closure score {metrics.premature_closure:.2f}",
54
+ ))
55
+
56
+ # Depth without novelty → compress
57
+ if metrics.depth_without_novelty >= 2:
58
+ mutations.append(Mutation(
59
+ type=MutationType.FORCE_COMPRESSION,
60
+ parameter=2,
61
+ reason=f"Depth without novelty = {metrics.depth_without_novelty}",
62
+ ))
63
+
64
+ # Progressive pressure: tighten depth limit over cycles
65
+ if cycle >= 2:
66
+ max_depth = max(2, 6 - cycle)
67
+ mutations.append(Mutation(
68
+ type=MutationType.LIMIT_DEPTH,
69
+ parameter=max_depth,
70
+ reason=f"Progressive pressure at {cycle=}",
71
+ ))
72
+
73
+ # Every cycle after the first: always perturb to prevent stasis
74
+ if cycle >= 1 and not mutations:
75
+ mutations.append(Mutation(
76
+ type=MutationType.REQUIRE,
77
+ target=_pick_absent_move(metrics) or CognitiveMove.NARRATIVE_SIMULATION,
78
+ reason="Baseline perturbation — no cycle without mutation",
79
+ ))
80
+
81
+ return mutations
82
+
83
+
84
+ def _pick_absent_move(metrics: CognitiveMetrics) -> CognitiveMove | None:
85
+ """Pick a cognitive move that hasn't been dominant."""
86
+ candidates = [m for m in CognitiveMove if m != metrics.dominant_move]
87
+ # Prefer structurally disruptive moves
88
+ priority = [
89
+ CognitiveMove.CONTRADICTION,
90
+ CognitiveMove.ANALOGY,
91
+ CognitiveMove.ABDUCTION,
92
+ CognitiveMove.COMPRESSION,
93
+ CognitiveMove.INDUCTION,
94
+ ]
95
+ for p in priority:
96
+ if p in candidates:
97
+ return p
98
+ return candidates[0] if candidates else None
@@ -0,0 +1,119 @@
1
+ """
2
+ Generative Substrate (Level 1) — the LLM interface.
3
+
4
+ The model is stateless by design. It does not decide objectives, strategies,
5
+ or validity. It is a linguistic machine, not a mind.
6
+ This module handles prompt construction and structured output parsing.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import json
12
+ from typing import Any, Protocol
13
+
14
+ from .types import (
15
+ CognitiveMove,
16
+ Mutation,
17
+ ReasoningTrace,
18
+ StructuredOutput,
19
+ )
20
+
21
+ # ---------------------------------------------------------------------------
22
+ # System prompt that enforces the structured output protocol
23
+ # ---------------------------------------------------------------------------
24
+
25
# Contract prompt: forces the backend to emit JSON matching the
# StructuredOutput schema, with each reasoning step classified as one of
# the CognitiveMove values parsed by Substrate._parse.
SYSTEM_PROMPT = """You are a generative substrate. You produce structured reasoning output.

EVERY response MUST be valid JSON with this exact schema:
{
"content": "<your answer>",
"reasoning_trace": {
"moves": ["<move1>", "<move2>", ...],
"depth": <integer>,
"confidence_markers": <integer>,
"abstraction_level": "<low|medium|high>"
}
}

Valid moves: assumption, deduction, induction, abduction, analogy, contradiction, enumeration, compression, narrative_simulation.

You MUST classify every reasoning step you take using these moves.
Do NOT add any text outside the JSON object."""
42
+
43
+
44
class LLMBackend(Protocol):
    """Protocol for any LLM backend (OpenAI, Anthropic, local, mock).

    Takes chat-style messages ({"role": ..., "content": ...} dicts) and
    returns a raw response dict; Substrate._parse reads the reply text
    from its "content" (or "text") key.
    """
    def generate(self, messages: list[dict[str, str]]) -> dict[str, Any]: ...
47
+
48
+
49
class Substrate:
    """Wraps an LLM backend, enforcing structured output and cognitive constraints.

    The substrate is stateless: each call to :meth:`generate` builds the
    full message list from scratch (protocol prompt + optional history +
    constrained task).
    """

    def __init__(self, backend: LLMBackend) -> None:
        self._backend = backend

    def generate(
        self,
        task: str,
        mutations: list[Mutation] | None = None,
        history_summary: str | None = None,
    ) -> StructuredOutput:
        """Ask the backend to solve *task* under the given constraints.

        Args:
            task: The problem statement.
            mutations: Formal constraints to inject into the prompt.
            history_summary: Optional recap of earlier cycles to avoid repeats.

        Returns:
            Parsed structured output; falls back to an unstructured wrapper
            when the backend's reply is not the expected JSON object.
        """
        messages = [{"role": "system", "content": SYSTEM_PROMPT}]

        if history_summary:
            messages.append({
                "role": "system",
                "content": f"Cognitive history (avoid repeating these patterns):\n{history_summary}",
            })

        constraint_block = self._build_constraints(mutations or [])
        user_content = f"{constraint_block}\n\nTask: {task}" if constraint_block else f"Task: {task}"
        messages.append({"role": "user", "content": user_content})

        raw = self._backend.generate(messages)
        return self._parse(raw)

    @staticmethod
    def _build_constraints(mutations: list[Mutation]) -> str:
        """Render mutations as a numbered constraint block (empty string if none)."""
        if not mutations:
            return ""
        lines = ["COGNITIVE CONSTRAINTS (you MUST obey these):"]
        for i, m in enumerate(mutations, 1):
            lines.append(f" {i}. {m.to_prompt_constraint()}")
        return "\n".join(lines)

    @staticmethod
    def _parse(raw: dict[str, Any]) -> StructuredOutput:
        """Parse the backend's raw reply into a StructuredOutput.

        Defensive on two fronts: the reply may not be JSON at all, and even
        valid JSON may not be an object (bare number/string/list/null).
        """
        text = raw.get("content", "") or raw.get("text", "") or ""
        try:
            data = json.loads(text)
        except json.JSONDecodeError:
            data = None
        # BUG FIX: json.loads can succeed yet return a non-dict, which the
        # old code then crashed on with AttributeError at data.get(...).
        # Any non-object reply now takes the unstructured fallback path.
        if not isinstance(data, dict):
            return StructuredOutput(
                content=text,
                reasoning_trace=ReasoningTrace(
                    moves=[CognitiveMove.ASSUMPTION],
                    depth=1,
                    confidence_markers=0,
                    abstraction_level="low",
                ),
                raw=raw,
            )

        # Hoisted: look the trace object up once; guard against a non-dict value.
        trace_data = data.get("reasoning_trace", {})
        if not isinstance(trace_data, dict):
            trace_data = {}

        valid_moves = []
        for m in trace_data.get("moves", []):
            try:
                valid_moves.append(CognitiveMove(m))
            except ValueError:
                # Unknown move names are dropped rather than failing the cycle.
                continue

        trace = ReasoningTrace(
            moves=valid_moves or [CognitiveMove.ASSUMPTION],
            depth=trace_data.get("depth", 1),
            confidence_markers=trace_data.get("confidence_markers", 0),
            abstraction_level=trace_data.get("abstraction_level", "low"),
        )

        return StructuredOutput(content=data.get("content", text), reasoning_trace=trace, raw=raw)