hegelion 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. hegelion/__init__.py +45 -0
  2. hegelion/core/__init__.py +29 -0
  3. hegelion/core/agent.py +166 -0
  4. hegelion/core/autocoding_state.py +293 -0
  5. hegelion/core/backends.py +442 -0
  6. hegelion/core/cache.py +92 -0
  7. hegelion/core/config.py +276 -0
  8. hegelion/core/core.py +649 -0
  9. hegelion/core/engine.py +865 -0
  10. hegelion/core/logging_utils.py +67 -0
  11. hegelion/core/models.py +293 -0
  12. hegelion/core/parsing.py +271 -0
  13. hegelion/core/personas.py +81 -0
  14. hegelion/core/prompt_autocoding.py +353 -0
  15. hegelion/core/prompt_dialectic.py +414 -0
  16. hegelion/core/prompts.py +127 -0
  17. hegelion/core/schema.py +67 -0
  18. hegelion/core/validation.py +68 -0
  19. hegelion/council.py +254 -0
  20. hegelion/examples_data/__init__.py +6 -0
  21. hegelion/examples_data/glm4_6_examples.jsonl +2 -0
  22. hegelion/judge.py +230 -0
  23. hegelion/mcp/__init__.py +3 -0
  24. hegelion/mcp/server.py +918 -0
  25. hegelion/scripts/hegelion_agent_cli.py +90 -0
  26. hegelion/scripts/hegelion_bench.py +117 -0
  27. hegelion/scripts/hegelion_cli.py +497 -0
  28. hegelion/scripts/hegelion_dataset.py +99 -0
  29. hegelion/scripts/hegelion_eval.py +137 -0
  30. hegelion/scripts/mcp_setup.py +150 -0
  31. hegelion/search_providers.py +151 -0
  32. hegelion/training/__init__.py +7 -0
  33. hegelion/training/datasets.py +123 -0
  34. hegelion/training/generator.py +232 -0
  35. hegelion/training/mlx_scu_trainer.py +379 -0
  36. hegelion/training/mlx_trainer.py +181 -0
  37. hegelion/training/unsloth_trainer.py +136 -0
  38. hegelion-0.4.0.dist-info/METADATA +295 -0
  39. hegelion-0.4.0.dist-info/RECORD +43 -0
  40. hegelion-0.4.0.dist-info/WHEEL +5 -0
  41. hegelion-0.4.0.dist-info/entry_points.txt +8 -0
  42. hegelion-0.4.0.dist-info/licenses/LICENSE +21 -0
  43. hegelion-0.4.0.dist-info/top_level.txt +1 -0
hegelion/training/mlx_trainer.py
@@ -0,0 +1,181 @@
+ """
+ Hegelion MLX Trainer for Apple Silicon
+
+ Fine-tunes an OLMo/Llama/Qwen model on Hegelion reasoning traces using LoRA.
+ Optimized for M-series chips (M1/M2/M3/M4).
+
+ Requirements:
+     pip install mlx-lm
+ """
+
+ import argparse
+ import importlib.util
+ import json
+ import random
+ import subprocess
+ import sys
+ from pathlib import Path
+
+ if importlib.util.find_spec("mlx_lm") is None:
+     print("Error: mlx-lm not installed. Run: pip install mlx-lm", file=sys.stderr)
+     sys.exit(1)
+
+
+ def train_hegelion_adapter(
+     model_path: str,
+     data_path: str,
+     adapter_path: str = "artifacts/adapters",
+     iters: int = 600,
+     batch_size: int = 1,
+     lora_layers: int = 8,
+     learning_rate: float = 1e-5,
+     **kwargs,
+ ):
+     print("--- Hegelion MLX Trainer ---")
+     print(f"Model: {model_path}")
+     print(f"Data: {data_path}")
+
+     # Shell out to the mlx_lm CLI for robustness: it handles the training
+     # loop, validation, and checkpoint saving. This is the standard
+     # recommended way to run LoRA fine-tuning with MLX.
+     cmd = [
+         sys.executable,
+         "-m",
+         "mlx_lm.lora",
+         "--model",
+         model_path,
+         "--train",
+         "--data",
+         data_path,  # Expects a directory with train.jsonl / valid.jsonl
+         "--iters",
+         str(iters),
+         "--batch-size",
+         str(batch_size),
+         "--num-layers",
+         str(lora_layers),
+         "--learning-rate",
+         str(learning_rate),
+         "--adapter-path",
+         adapter_path,
+         "--save-every",
+         "100",
+         "--steps-per-eval",
+         "50",
+     ]
+
+     if kwargs.get("max_seq_length"):
+         cmd.extend(["--max-seq-length", str(kwargs["max_seq_length"])])
+
+     if kwargs.get("grad_checkpoint"):
+         cmd.append("--grad-checkpoint")
+
+     print(f"Running: {' '.join(cmd)}")
+
+     try:
+         subprocess.run(cmd, check=True)
+         print(f"\nSuccess! Adapters saved to {adapter_path}")
+         print("To fuse and upload:")
+         print(
+             f"  python -m mlx_lm.fuse --model {model_path} --adapter-path {adapter_path} --upload-name my-hegelion-model"
+         )
+     except subprocess.CalledProcessError as e:
+         print(f"Training failed with error: {e}", file=sys.stderr)
+         sys.exit(1)
+
+
+ def prepare_data_for_mlx(jsonl_path: str, output_dir: str = "artifacts/data/mlx"):
+     """
+     MLX expects a directory containing 'train.jsonl' and 'valid.jsonl'.
+     This helper converts the single-file generator output to that format.
+     """
+     path = Path(jsonl_path)
+     if not path.exists():
+         print(f"Data file not found: {jsonl_path}", file=sys.stderr)
+         return None
+
+     out_path = Path(output_dir)
+     out_path.mkdir(parents=True, exist_ok=True)  # parents=True: output_dir is nested
+
+     print(f"Preparing data from {jsonl_path}...")
+
+     data = []
+     with open(path, "r") as f:
+         for line in f:
+             if not line.strip():
+                 continue
+             try:
+                 obj = json.loads(line)
+             except json.JSONDecodeError:
+                 continue  # Skip malformed lines rather than aborting the run
+
+             # Standardize reasoning tags for DeepSeek R1 models:
+             # R1 expects <think>...</think> but our data might have <thought>.
+             output_text = obj.get("output", "")
+             output_text = output_text.replace("<thought>", "<think>").replace(
+                 "</thought>", "</think>"
+             )
+
+             # MLX format: {"text": "..."}. Construct the full prompt text,
+             # including system/user/assistant tokens, in a simple ChatML-like format.
+             text = (
+                 "<|im_start|>system\n"
+                 f"{obj.get('system', '')}<|im_end|>\n"
+                 "<|im_start|>user\n"
+                 f"{obj.get('instruction', '')}<|im_end|>\n"
+                 "<|im_start|>assistant\n"
+                 f"{output_text}<|im_end|>\n"
+             )
+             data.append({"text": text})
+
+     random.shuffle(data)
+     split_idx = int(len(data) * 0.9)
+     train_data = data[:split_idx]
+     valid_data = data[split_idx:]
+
+     with open(out_path / "train.jsonl", "w") as f:
+         for entry in train_data:
+             json.dump(entry, f, ensure_ascii=False)
+             f.write("\n")
+
+     with open(out_path / "valid.jsonl", "w") as f:
+         for entry in valid_data:
+             json.dump(entry, f, ensure_ascii=False)
+             f.write("\n")
+
+     print(
+         f"Saved {len(train_data)} training and {len(valid_data)} validation examples to {output_dir}/"
+     )
+     return str(out_path)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--model", default="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")
+     parser.add_argument("--data", required=True, help="Path to generated hegelion_kimi_data.jsonl")
+     parser.add_argument("--iters", type=int, default=600)
+     parser.add_argument("--batch-size", type=int, default=1, help="Batch size per step")
+     parser.add_argument("--lora-layers", type=int, default=8, help="Number of LoRA layers")
+     parser.add_argument("--max-seq-length", type=int, default=2048, help="Maximum sequence length")
+     parser.add_argument(
+         "--grad-checkpoint",
+         action="store_true",
+         help="Enable gradient checkpointing to save memory",
+     )
+     args = parser.parse_args()
+
+     data_dir = prepare_data_for_mlx(args.data)
+     if data_dir:
+         train_hegelion_adapter(
+             args.model,
+             data_dir,
+             iters=args.iters,
+             batch_size=args.batch_size,
+             lora_layers=args.lora_layers,
+             max_seq_length=args.max_seq_length,
+             grad_checkpoint=args.grad_checkpoint,
+         )
hegelion/training/unsloth_trainer.py
@@ -0,0 +1,136 @@
+ """
+ Hegelion Unsloth Trainer
+
+ This script fine-tunes a Llama-3 model to internalize the dialectical process.
+ It uses Unsloth for memory-efficient 4-bit training.
+
+ Requirements:
+     pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
+     pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes
+
+ Usage:
+     python -m hegelion.training.unsloth_trainer --dataset hegelion_data.jsonl
+ """
+
+ import sys
+
+ import torch
+
+
+ def train(
+     dataset_path: str,
+     model_name: str = "unsloth/llama-3-8b-Instruct-bnb-4bit",
+     output_dir: str = "hegelion_lora_model",
+     max_seq_length: int = 2048,
+ ):
+     # Check for CUDA
+     if not torch.cuda.is_available():
+         print("WARNING: CUDA not detected. Unsloth requires an NVIDIA GPU.", file=sys.stderr)
+         print(
+             "This script is intended to run on Linux with GPUs (e.g. Lambda Labs, RunPod).",
+             file=sys.stderr,
+         )
+         return
+
+     # Imported lazily so the module can load (and warn) on machines without CUDA.
+     from unsloth import FastLanguageModel
+     from trl import SFTTrainer
+     from transformers import TrainingArguments
+     from datasets import load_dataset
+
+     print(f"Loading model: {model_name}")
+     model, tokenizer = FastLanguageModel.from_pretrained(
+         model_name=model_name,
+         max_seq_length=max_seq_length,
+         dtype=None,
+         load_in_4bit=True,
+     )
+
+     model = FastLanguageModel.get_peft_model(
+         model,
+         r=16,
+         target_modules=[
+             "q_proj",
+             "k_proj",
+             "v_proj",
+             "o_proj",
+             "gate_proj",
+             "up_proj",
+             "down_proj",
+         ],
+         lora_alpha=16,
+         lora_dropout=0,
+         bias="none",
+         use_gradient_checkpointing="unsloth",
+         random_state=3407,
+         use_rslora=False,
+         loftq_config=None,
+     )
+
+     # Load dataset. Format: {"instruction": "...", "output": "..."}
+     dataset = load_dataset("json", data_files=dataset_path, split="train")
+
+     # Formatting template (Alpaca-style)
+     prompt_style = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+ ### Instruction:
+ {}
+
+ ### Response:
+ {}"""
+
+     def formatting_prompts_func(examples):
+         instructions = examples["instruction"]
+         outputs = examples["output"]
+         texts = []
+         for instruction, output in zip(instructions, outputs):
+             text = prompt_style.format(instruction, output) + tokenizer.eos_token
+             texts.append(text)
+         return {"text": texts}
+
+     dataset = dataset.map(formatting_prompts_func, batched=True)
+
+     trainer = SFTTrainer(
+         model=model,
+         tokenizer=tokenizer,
+         train_dataset=dataset,
+         dataset_text_field="text",
+         max_seq_length=max_seq_length,
+         dataset_num_proc=2,
+         packing=False,
+         args=TrainingArguments(
+             per_device_train_batch_size=2,
+             gradient_accumulation_steps=4,
+             warmup_steps=5,
+             max_steps=60,  # Increase for real training
+             learning_rate=2e-4,
+             fp16=not torch.cuda.is_bf16_supported(),
+             bf16=torch.cuda.is_bf16_supported(),
+             logging_steps=1,
+             optim="adamw_8bit",
+             weight_decay=0.01,
+             lr_scheduler_type="linear",
+             seed=3407,
+             output_dir=output_dir,
+         ),
+     )
+
+     print("Starting training...")
+     trainer.train()
+
+     print(f"Saving model to {output_dir}...")
+     model.save_pretrained(output_dir)
+     tokenizer.save_pretrained(output_dir)
+
+     # Optionally save GGUF for local use:
+     # model.save_pretrained_gguf(output_dir, tokenizer, quantization_method="q4_k_m")
+
+
+ if __name__ == "__main__":
+     import argparse
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--dataset", required=True, help="Path to JSONL dataset")
+     parser.add_argument("--output", default="hegelion_adapter")
+     args = parser.parse_args()
+
+     train(args.dataset, output_dir=args.output)
hegelion-0.4.0.dist-info/METADATA
@@ -0,0 +1,295 @@
+ Metadata-Version: 2.4
+ Name: hegelion
+ Version: 0.4.0
+ Summary: Dialectical reasoning harness for LLMs (thesis → antithesis → synthesis)
+ Author-email: Hunter Bown <hunter@shannonlabs.dev>
+ License-Expression: MIT
+ Project-URL: Homepage, https://github.com/Hmbown/Hegelion
+ Project-URL: Repository, https://github.com/Hmbown/Hegelion
+ Project-URL: Documentation, https://github.com/Hmbown/Hegelion/blob/main/docs/HEGELION_SPEC.md
+ Project-URL: Bug Tracker, https://github.com/Hmbown/Hegelion/issues
+ Keywords: ai,llm,reasoning,dialectical,hegelian,mcp,anthropic,openai,claude,gpt,ollama,gemini
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: mcp>=1.21.1
+ Requires-Dist: openai>=1.0.0
+ Requires-Dist: anthropic>=0.40.0
+ Requires-Dist: google-generativeai>=0.8.3
+ Requires-Dist: httpx>=0.25.0
+ Requires-Dist: jsonschema>=4.22.0
+ Requires-Dist: sentence-transformers>=2.2.0
+ Requires-Dist: numpy>=1.24.0
+ Requires-Dist: scikit-learn>=1.2.0
+ Requires-Dist: datasets>=4.4.1
+ Requires-Dist: psutil>=5.9.0
+ Requires-Dist: rich>=13.0.0
+ Requires-Dist: duckduckgo-search>=6.0.0
+ Requires-Dist: instructor>=1.0.0
+ Requires-Dist: pydantic>=2.0.0
+ Provides-Extra: search
+ Requires-Dist: tavily-python>=0.3.0; extra == "search"
+ Provides-Extra: dev
+ Requires-Dist: pytest>=7.0; extra == "dev"
+ Requires-Dist: pytest-asyncio>=0.21; extra == "dev"
+ Requires-Dist: pytest-cov>=4.0; extra == "dev"
+ Requires-Dist: black>=23.0; extra == "dev"
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
+ Requires-Dist: pre-commit>=3.0; extra == "dev"
+ Requires-Dist: build>=1.0.0; extra == "dev"
+ Requires-Dist: twine>=5.0.0; extra == "dev"
+ Provides-Extra: monitoring
+ Requires-Dist: psutil>=5.9.0; extra == "monitoring"
+ Dynamic: license-file
+
+ # Hegelion
+
+ > "The True is the whole." — G.W.F. Hegel
+
+ Hegelion applies dialectical reasoning to LLMs, forcing models to argue with themselves before reaching conclusions. This produces better reasoning on questions and better code on implementation tasks.
+
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) ![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg) [![PyPI version](https://badge.fury.io/py/hegelion.svg)](https://badge.fury.io/py/hegelion)
+
+ ---
+
+ ## Two Modes
+
+ | Mode | Pattern | Use Case |
+ |------|---------|----------|
+ | **Dialectical Reasoning** | Thesis → Antithesis → Synthesis | Deep analysis of questions, philosophy, strategy |
+ | **Autocoding** | Player → Coach → Iterate | Verified code implementations with independent review |
+
+ Both modes use the same principle: **force the model to oppose itself** before concluding. This catches blind spots that single-pass approaches miss.
+
+ ---
+
+ ## Autocoding: Player-Coach Loop
+
+ **New in v0.4.0** — Based on [Block AI's g3 agent research](https://block.xyz/documents/adversarial-cooperation-in-code-synthesis.pdf).
+
+ ### The Problem
+
+ Single-agent coding tools often:
+ - Declare success prematurely ("I have successfully implemented all requirements!")
+ - Accumulate context pollution over long sessions
+ - Miss edge cases because they verify their own work
+
+ ### The Solution
+
+ Two roles iterate until requirements are verified:
+
+ ```
+ REQUIREMENTS (Source of Truth)
+
+
+ ┌───────────────┐     ┌───────────────┐     ┌───────────────┐
+ │    PLAYER     │────▶│     COACH     │────▶│    ADVANCE    │
+ │  Implements   │     │   Verifies    │     │     State     │
+ │ code & tests  │     │ independently │     │               │
+ └───────────────┘     └───────────────┘     └───────┬───────┘
+         ▲                                           │
+         │              ┌───────────┐                │
+         └──────────────│ APPROVED? │◀───────────────┘
+                        └───────────┘
+                          │     │
+                         No     Yes
+                          │     │
+                          ▼     ▼
+                     Continue   Done
+ ```
+
+ **Player**: Implements requirements, writes tests, and responds to feedback. Does NOT declare success.
+
+ **Coach**: Independently verifies each requirement, ignores the player's self-assessment, and outputs a structured checklist.
+
+ ### Key Insight
+
+ > "Discard the player's self-report of success. Have the coach perform independent evaluation."
+
+ The coach catches issues by re-reading the requirements and actually running the tests, not by trusting what the player says it did.
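+
+ A minimal sketch of the control flow (hypothetical `player` / `coach` callables stand in for the LLM turns; the real protocol is driven by the MCP tools below):
+
+ ```python
+ from typing import Callable
+
+ Verdict = dict[str, bool]  # requirement -> independently verified?
+
+ def autocode(
+     requirements: list[str],
+     player: Callable[[list[str], list[str]], None],  # implements; never self-grades
+     coach: Callable[[list[str]], Verdict],           # re-verifies from scratch
+     max_turns: int = 5,
+ ) -> bool:
+     feedback: list[str] = []
+     for _ in range(max_turns):
+         player(requirements, feedback)    # write code and tests
+         verdict = coach(requirements)     # the player's self-report is discarded
+         if all(verdict.get(r, False) for r in requirements):
+             return True                   # COACH APPROVED
+         feedback = [r for r in requirements if not verdict.get(r, False)]
+     return False                          # turn budget exhausted, not approved
+ ```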
+
+ ### Quick Start
+
+ In Claude Code, Cursor, or any MCP-enabled editor:
+
+ ```
+ You: Use autocoding_init with these requirements:
+ - Add user authentication to src/api.py
+ - Add tests in tests/test_auth.py
+ - All tests must pass
+
+ [Session initializes]
+
+ You: Generate player_prompt and implement
+
+ [Player writes code and tests]
+
+ You: Generate coach_prompt and verify
+
+ [Coach: ✓ auth endpoint exists, ✗ missing password validation test]
+
+ You: Call autocoding_advance and continue
+
+ [Loop until COACH APPROVED]
+ ```
+
+ ### MCP Tools
+
+ | Tool | Purpose |
+ |------|---------|
+ | `autocoding_init` | Start session with requirements checklist |
+ | `player_prompt` | Generate implementation prompt |
+ | `coach_prompt` | Generate verification prompt |
+ | `autocoding_advance` | Update state after coach review |
+ | `autocoding_single_shot` | Combined prompt for simpler tasks |
+ | `autocoding_save` / `autocoding_load` | Persist and resume sessions |
+
+ ### Why It Works
+
+ | Problem | Single Agent | Coach-Player |
+ |---------|--------------|--------------|
+ | **Anchoring** | Drifts from requirements | Requirements anchor every turn |
+ | **Verification** | Self-assessment (unreliable) | Independent verification |
+ | **Context** | Accumulates pollution | Fresh context each turn |
+ | **Completion** | Open-ended | Explicit approval gates |
+
+ ---
+
+ ## Dialectical Reasoning: Thesis → Antithesis → Synthesis
+
+ For questions requiring deep analysis, Hegelion runs three separate LLM calls:
+
+ ```
+ [Call 1] Thesis     → LLM commits to a position
+ [Call 2] Antithesis → LLM attacks that position (separate call, no hedging)
+ [Call 3] Synthesis  → LLM reconciles the opposition
+ ```
+
+ ### Why Separate Calls Matter
+
+ | Method | Calls | Result |
+ |--------|:-----:|--------|
+ | **Raw** | 1 | "It depends on definitions..." |
+ | **Enhanced** | 1 | "Hold both views in tension..." |
+ | **Hegelion** | 3 | Novel framework with testable predictions |
+
+ When the model must commit to a thesis and then genuinely attack it in a separate call, the synthesis surfaces insights that single-call approaches miss.
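+
+ As a minimal sketch (assuming the result object exposes `thesis` and `antithesis` fields alongside the documented `synthesis`; only `synthesis` appears in the Quick Start below):
+
+ ```python
+ import asyncio
+ from hegelion import run_dialectic
+
+ async def main():
+     # One LLM call per phase happens inside run_dialectic.
+     result = await run_dialectic("Is free will compatible with determinism?")
+     print(result.synthesis)      # documented field
+     # Assumed fields, shown for illustration only:
+     # print(result.thesis)
+     # print(result.antithesis)
+
+ asyncio.run(main())
+ ```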
+
+ <details>
+ <summary><b>Example: "Is free will compatible with determinism?"</b></summary>
+
+ **Hegelion synthesis** (after thesis and antithesis):
+
+ > The deadlock dissolves when we recognize that free will exists on a **spectrum of self-authorship**:
+ >
+ > 1. **Minimal freedom**: Acting on desires without external coercion
+ > 2. **Reflective freedom**: Second-order endorsement—I want to want this
+ > 3. **Narrative freedom**: Acting consistently with a coherent life narrative
+ > 4. **Constitutive freedom**: Recursive self-modification through deliberate habituation
+ >
+ > **Research proposal**: Use fMRI to scan participants under (1) snap judgments, (2) brief reflection, (3) extended deliberation. Hypothesis: Condition (3) shows the strongest correlation with self-reported decision "ownership."
+
+ This four-level framework emerged from the model actually arguing with itself, not from asking for "thesis/antithesis/synthesis" in one prompt.
+ </details>
+
+ ### Quick Start
+
+ ```bash
+ pip install hegelion
+
+ # MCP setup for Claude Desktop (macOS)
+ hegelion-setup-mcp --write "$HOME/Library/Application Support/Claude/claude_desktop_config.json"
+ ```
+
+ Or use the Python API:
+
+ ```python
+ import asyncio
+ from hegelion import run_dialectic
+
+ async def main():
+     result = await run_dialectic("Is AI conscious?")
+     print(result.synthesis)
+
+ asyncio.run(main())
+ ```
+
+ Or the CLI:
+
+ ```bash
+ hegelion --stream "Is consciousness fundamental or emergent?"
+ ```
+
+ ### Feature Toggles
+
+ | Option | Description |
+ |--------|-------------|
+ | `use_council` | Three critics: Logician, Empiricist, Ethicist |
+ | `use_judge` | Final quality evaluation |
+ | `use_search` | Grounds arguments with web search |
+ | `response_style` | `sections`, `json`, or `synthesis_only` |
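+
+ A sketch of the toggles in use, assuming `run_dialectic` accepts them as keyword arguments (see the Python API guide for the actual signature):
+
+ ```python
+ import asyncio
+ from hegelion import run_dialectic
+
+ async def main():
+     # Assumed kwargs mirroring the option names above.
+     result = await run_dialectic(
+         "Should cities ban cars from downtown cores?",
+         use_council=True,                 # Logician, Empiricist, Ethicist critics
+         use_judge=True,                   # final quality evaluation
+         use_search=False,                 # skip web grounding
+         response_style="synthesis_only",
+     )
+     print(result.synthesis)
+
+ asyncio.run(main())
+ ```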
+
+ ---
+
+ ## Installation
+
+ ```bash
+ pip install hegelion
+ ```
+
+ For MCP integration (Claude Desktop, Cursor, VS Code):
+
+ ```bash
+ # Claude Desktop (macOS)
+ hegelion-setup-mcp --write "$HOME/Library/Application Support/Claude/claude_desktop_config.json"
+
+ # Then restart Claude Desktop
+ ```
+
+ See the [MCP Integration Guide](docs/guides/mcp-integration.md) for other editors.
+
+ ---
+
+ ## Documentation
+
+ - **[MCP Integration](docs/guides/mcp-integration.md)** — Setup for Claude Desktop, Cursor, VS Code, Gemini CLI
+ - **[Python API](docs/guides/python-api.md)** — Full API reference
+ - **[CLI Reference](docs/guides/cli-reference.md)** — Command-line usage
+ - **[Configuration](docs/getting-started/configuration.md)** — Backends and feature toggles
+ - **[Technical Specification](docs/HEGELION_SPEC.md)** — Output schemas, phase specs
+
+ ---
+
+ ## Contributing
+
+ Issues and PRs welcome. For significant changes, open a discussion first.
+
+ ---
+
+ ## Recent Changes
+
+ ### v0.4.0 (December 2025)
+
+ - **Autocoding system**: Player-coach dialectical loop based on Block AI's g3 agent
+ - MCP tools: `autocoding_init`, `player_prompt`, `coach_prompt`, `autocoding_advance`
+ - Session persistence with `autocoding_save` / `autocoding_load`
+ - Single-shot mode for simpler use cases
+
+ ### v0.3.x
+
+ - CLI streaming with `--stream` flag
+ - MCP progress notifications
+ - 470+ tests passing
+
+ ---
+
+ **License:** MIT
hegelion-0.4.0.dist-info/RECORD
@@ -0,0 +1,43 @@
+ hegelion/__init__.py,sha256=lG58sxyEbRLBJT4BVJeMcJkcNDrqibyG_KXOXm_ruYc,1216
+ hegelion/council.py,sha256=iJvGxgHs6Mp3c2481QLIkmcnBeHTBQqjZQW0u0gerfs,8627
+ hegelion/judge.py,sha256=mU9k8nWZ-u5z2-mOczD9TKVW819vMMQoBt1_kHHqip0,8310
+ hegelion/search_providers.py,sha256=hkSfqhe1MfGucSL7dMnQgcfhDm59Q4DhS3glyDo5zUU,4654
+ hegelion/core/__init__.py,sha256=2hgRsatEUk0ELXyrAtAjiPj8xcl7KV78cYHod0fn6Fk,617
+ hegelion/core/agent.py,sha256=5UyOzalNO3Z0zjnbhke4i2h_hTtV9Zf6JEPL6fdeXEw,5771
+ hegelion/core/autocoding_state.py,sha256=ezAonYljkEruWEMgEugor9yrf_6mPC7XsDiLgj6anok,9975
+ hegelion/core/backends.py,sha256=4-b03sz_Yv_fKJ1Fk32CcbObAKzKNEe7-MDcoYVSfpw,14897
+ hegelion/core/cache.py,sha256=B1CprAuaotCMMwaHjMOD1uBY1K7Ev-3cfTrT9C9pK38,2603
+ hegelion/core/config.py,sha256=jINgrD0PryUs2miOdgYKa0tofWxBb22yAm9qDBSJRkQ,8905
+ hegelion/core/core.py,sha256=k7TTQYqXZ2KlzZdW-ayiP3C4BBoz8bMKboAi6qPkvUw,23431
+ hegelion/core/engine.py,sha256=og7ooHQLJvI1z7dCwodgZsCxDVbD4ozA-mAZrNahjHw,33192
+ hegelion/core/logging_utils.py,sha256=rx7N1u6u5nn3N65rcFHIckGYWhiFl3we2iG549CBZbo,2074
+ hegelion/core/models.py,sha256=w5wI9lnU3vGdDiQARU_5IkdiEKQkVYNXYtvX1VEvF-k,9722
+ hegelion/core/parsing.py,sha256=mhWvSemyQNvEDg7OGRZJl22AzoRyxBooRhs-7plj0wY,9385
+ hegelion/core/personas.py,sha256=2bjhIb8RWNQEfXgM8uU4ynPY76zt7OmrNjh6fhKKD0o,2955
+ hegelion/core/prompt_autocoding.py,sha256=3N59Gv_7Li3qXYZXze_jPjbdnNltchTuL_35bY73rIg,11971
+ hegelion/core/prompt_dialectic.py,sha256=wUHwHTBQjd8OdjXkg04FzEiVSxyChy4PCX6cv_qrgv4,14979
+ hegelion/core/prompts.py,sha256=Y2Exw_DKgp_5uUs1oQxfQElIu6sCkkJ_qNnmtObJQu0,3534
+ hegelion/core/schema.py,sha256=PknWdeMuv_pRclBF1AwJ0xS6qB14HyLXoiwielMSMjk,2247
+ hegelion/core/validation.py,sha256=ZGirtkPzsJaSuojXAGR7pKfSUz6kbJV4kxc1Sy_VQwg,2427
+ hegelion/examples_data/__init__.py,sha256=EK4nTRY5gHtvn6CTD_w5FOxwQmYdDyMlMzQ3S7QaslE,219
+ hegelion/examples_data/glm4_6_examples.jsonl,sha256=XAFfg1ZAloM4czEI2PPUk5K_m2eeCymZ9qFrmV952ek,16986
+ hegelion/mcp/__init__.py,sha256=so0b-cG2q0sZWH8Cx50qEIlldFVCXrd8YZeZyicyifA,67
+ hegelion/mcp/server.py,sha256=NCLIH_FcPD_f50EtvYpxXgK4WSUT-ceSU6B1LH6qQi0,34056
+ hegelion/scripts/hegelion_agent_cli.py,sha256=0hNeX6grKv5mrypYCeJZ1Fb_7LKbsBrVdtX7gLSxbr0,3224
+ hegelion/scripts/hegelion_bench.py,sha256=ihyPqMe_9zjryiiqWEqiw_IZTXNw1hY7n9sc-t-5xMg,3828
+ hegelion/scripts/hegelion_cli.py,sha256=HISVUgg65U5NFpFPuG6q5HnBUl-8RztqjxsR9fNkubM,18445
+ hegelion/scripts/hegelion_dataset.py,sha256=UPDXzLRFXwC4j6O_1tzKr3YqSwDcpxrQjjiHnnnr4Po,3129
+ hegelion/scripts/hegelion_eval.py,sha256=ZKjxlB0O1Haq2IgieQ0UtQRVwkcy7flGTw-_x44tqeU,4354
+ hegelion/scripts/mcp_setup.py,sha256=D5a0EPp4u_IHxxokrvNj5FdCn425-pnS7Ppu69Z17MQ,4740
+ hegelion/training/__init__.py,sha256=gqjIQk1WRBMbys280HPp3ioC7DPriva0uS7gBAhwDYk,192
+ hegelion/training/datasets.py,sha256=qJ3n3hxJgHd5_B3j5uOPYHQy1ylwpvPv-zwnNjD8hsA,4169
+ hegelion/training/generator.py,sha256=3wjtC20aaNrfV8hWcUZ5otBmXeyVcth1nsoHt69XBX4,9029
+ hegelion/training/mlx_scu_trainer.py,sha256=ME_LqyjyZ_DGxEotF4BrYfTXp9lpM_rCHgq2M-exhWA,12782
+ hegelion/training/mlx_trainer.py,sha256=i9ikEYEsKUI9c8lTu-ox5IGW8RlWUHjBNNWSGrX65L4,5613
+ hegelion/training/unsloth_trainer.py,sha256=NUEkZv1lwYITFK7JJ0LeR8VCNrXUm_2kJRnhdk5ZxB0,3968
+ hegelion-0.4.0.dist-info/licenses/LICENSE,sha256=VYjQbIKaDFQapkzZI3KjymsIQvh-Cj-UzP3BRQBotao,1077
+ hegelion-0.4.0.dist-info/METADATA,sha256=qe-0I4plfBc1dsJkWH9P6-xuVTgz4TUocbTpN9VoFJg,10448
+ hegelion-0.4.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ hegelion-0.4.0.dist-info/entry_points.txt,sha256=DxG1r3fF9-qQZ0zUmjI_M5fMQx2eKUhlon5kNm9KdpM,382
+ hegelion-0.4.0.dist-info/top_level.txt,sha256=f7kLwRsjAK5wh2bJGdPmbc4A8SYKkr_GRNtYvHaYxzI,9
+ hegelion-0.4.0.dist-info/RECORD,,
hegelion-0.4.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
hegelion-0.4.0.dist-info/entry_points.txt
@@ -0,0 +1,8 @@
+ [console_scripts]
+ hegelion = hegelion.scripts.hegelion_cli:main
+ hegelion-agent = hegelion.scripts.hegelion_agent_cli:main
+ hegelion-bench = hegelion.scripts.hegelion_bench:main
+ hegelion-dataset = hegelion.scripts.hegelion_dataset:main
+ hegelion-eval = hegelion.scripts.hegelion_eval:main
+ hegelion-server = hegelion.mcp.server:main
+ hegelion-setup-mcp = hegelion.scripts.mcp_setup:main
hegelion-0.4.0.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Hegelion Contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.