nouse 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nouse-0.2.0/PKG-INFO +230 -0
- nouse-0.2.0/README.md +184 -0
- nouse-0.2.0/pyproject.toml +76 -0
- nouse-0.2.0/setup.cfg +4 -0
- nouse-0.2.0/src/nouse/__init__.py +56 -0
- nouse-0.2.0/src/nouse/brain_sync/transporter.py +340 -0
- nouse-0.2.0/src/nouse/brian2_bridge.py +347 -0
- nouse-0.2.0/src/nouse/cli/__init__.py +0 -0
- nouse-0.2.0/src/nouse/cli/aliases.py +1 -0
- nouse-0.2.0/src/nouse/cli/ask.py +406 -0
- nouse-0.2.0/src/nouse/cli/chat.py +663 -0
- nouse-0.2.0/src/nouse/cli/commands/__init__.py +0 -0
- nouse-0.2.0/src/nouse/cli/commands/agent.py +1 -0
- nouse-0.2.0/src/nouse/cli/commands/config_cmd.py +1 -0
- nouse-0.2.0/src/nouse/cli/commands/daemon_cmd.py +1 -0
- nouse-0.2.0/src/nouse/cli/commands/field.py +1 -0
- nouse-0.2.0/src/nouse/cli/commands/self_cmd.py +1 -0
- nouse-0.2.0/src/nouse/cli/commands/status.py +1 -0
- nouse-0.2.0/src/nouse/cli/commands/wake.py +1 -0
- nouse-0.2.0/src/nouse/cli/companion.py +411 -0
- nouse-0.2.0/src/nouse/cli/console.py +1 -0
- nouse-0.2.0/src/nouse/cli/main.py +4334 -0
- nouse-0.2.0/src/nouse/cli/run.py +241 -0
- nouse-0.2.0/src/nouse/cli/run_repl.py +335 -0
- nouse-0.2.0/src/nouse/cli/viz.py +530 -0
- nouse-0.2.0/src/nouse/client.py +631 -0
- nouse-0.2.0/src/nouse/config/__init__.py +0 -0
- nouse-0.2.0/src/nouse/config/defaults.py +1 -0
- nouse-0.2.0/src/nouse/config/env.py +80 -0
- nouse-0.2.0/src/nouse/config/loader.py +1 -0
- nouse-0.2.0/src/nouse/config/schema.py +1 -0
- nouse-0.2.0/src/nouse/daemon/__init__.py +0 -0
- nouse-0.2.0/src/nouse/daemon/auto_skill.py +118 -0
- nouse-0.2.0/src/nouse/daemon/backup.py +1 -0
- nouse-0.2.0/src/nouse/daemon/disk_mapper.py +407 -0
- nouse-0.2.0/src/nouse/daemon/evidence.py +97 -0
- nouse-0.2.0/src/nouse/daemon/extractor.py +586 -0
- nouse-0.2.0/src/nouse/daemon/file_text.py +39 -0
- nouse-0.2.0/src/nouse/daemon/ghost_q.py +301 -0
- nouse-0.2.0/src/nouse/daemon/hitl.py +216 -0
- nouse-0.2.0/src/nouse/daemon/initiative.py +273 -0
- nouse-0.2.0/src/nouse/daemon/journal.py +317 -0
- nouse-0.2.0/src/nouse/daemon/kickstart.py +232 -0
- nouse-0.2.0/src/nouse/daemon/lock.py +46 -0
- nouse-0.2.0/src/nouse/daemon/main.py +1580 -0
- nouse-0.2.0/src/nouse/daemon/mission.py +286 -0
- nouse-0.2.0/src/nouse/daemon/morning_report.py +84 -0
- nouse-0.2.0/src/nouse/daemon/nightrun.py +426 -0
- nouse-0.2.0/src/nouse/daemon/node_context.py +312 -0
- nouse-0.2.0/src/nouse/daemon/node_deepdive.py +975 -0
- nouse-0.2.0/src/nouse/daemon/node_inbox.py +241 -0
- nouse-0.2.0/src/nouse/daemon/research_queue.py +489 -0
- nouse-0.2.0/src/nouse/daemon/sources.py +367 -0
- nouse-0.2.0/src/nouse/daemon/storage_tier.py +198 -0
- nouse-0.2.0/src/nouse/daemon/system_events.py +172 -0
- nouse-0.2.0/src/nouse/daemon/web_text.py +207 -0
- nouse-0.2.0/src/nouse/daemon/write_queue.py +120 -0
- nouse-0.2.0/src/nouse/embeddings/__init__.py +12 -0
- nouse-0.2.0/src/nouse/embeddings/chunking.py +35 -0
- nouse-0.2.0/src/nouse/embeddings/index.py +117 -0
- nouse-0.2.0/src/nouse/embeddings/ollama_embed.py +44 -0
- nouse-0.2.0/src/nouse/field/__init__.py +0 -0
- nouse-0.2.0/src/nouse/field/audit.py +1 -0
- nouse-0.2.0/src/nouse/field/errors.py +1 -0
- nouse-0.2.0/src/nouse/field/migration.py +1 -0
- nouse-0.2.0/src/nouse/field/node.py +1 -0
- nouse-0.2.0/src/nouse/field/reader.py +1 -0
- nouse-0.2.0/src/nouse/field/surface.py +1457 -0
- nouse-0.2.0/src/nouse/field/writer.py +1 -0
- nouse-0.2.0/src/nouse/ingress/__init__.py +32 -0
- nouse-0.2.0/src/nouse/ingress/allowlist.py +207 -0
- nouse-0.2.0/src/nouse/ingress/clawbot.py +147 -0
- nouse-0.2.0/src/nouse/ingress/telegram.py +151 -0
- nouse-0.2.0/src/nouse/inject.py +377 -0
- nouse-0.2.0/src/nouse/kernel/__init__.py +27 -0
- nouse-0.2.0/src/nouse/kernel/brain.py +492 -0
- nouse-0.2.0/src/nouse/kernel/db.py +335 -0
- nouse-0.2.0/src/nouse/kernel/mission_runner.py +250 -0
- nouse-0.2.0/src/nouse/kernel/models.py +21 -0
- nouse-0.2.0/src/nouse/kernel/runtime.py +269 -0
- nouse-0.2.0/src/nouse/kernel/schema.py +18 -0
- nouse-0.2.0/src/nouse/learning_coordinator.py +161 -0
- nouse-0.2.0/src/nouse/limbic/__init__.py +0 -0
- nouse-0.2.0/src/nouse/limbic/signals.py +192 -0
- nouse-0.2.0/src/nouse/llm/__init__.py +27 -0
- nouse-0.2.0/src/nouse/llm/autodiscover.py +464 -0
- nouse-0.2.0/src/nouse/llm/model_capabilities.py +139 -0
- nouse-0.2.0/src/nouse/llm/model_router.py +287 -0
- nouse-0.2.0/src/nouse/llm/policy.py +207 -0
- nouse-0.2.0/src/nouse/llm/usage.py +180 -0
- nouse-0.2.0/src/nouse/mcp_gateway/__init__.py +0 -0
- nouse-0.2.0/src/nouse/mcp_gateway/gateway.py +1483 -0
- nouse-0.2.0/src/nouse/mcp_gateway/server.py +194 -0
- nouse-0.2.0/src/nouse/memory/__init__.py +5 -0
- nouse-0.2.0/src/nouse/memory/store.py +569 -0
- nouse-0.2.0/src/nouse/metacognition/__init__.py +0 -0
- nouse-0.2.0/src/nouse/metacognition/genesis.py +104 -0
- nouse-0.2.0/src/nouse/metacognition/snapshot.py +83 -0
- nouse-0.2.0/src/nouse/ollama_client/__init__.py +0 -0
- nouse-0.2.0/src/nouse/ollama_client/client.py +549 -0
- nouse-0.2.0/src/nouse/ollama_client/errors.py +1 -0
- nouse-0.2.0/src/nouse/orchestrator/__init__.py +21 -0
- nouse-0.2.0/src/nouse/orchestrator/compaction.py +110 -0
- nouse-0.2.0/src/nouse/orchestrator/conductor.py +603 -0
- nouse-0.2.0/src/nouse/orchestrator/global_workspace.py +179 -0
- nouse-0.2.0/src/nouse/orchestrator/status.py +1 -0
- nouse-0.2.0/src/nouse/plugins/__init__.py +25 -0
- nouse-0.2.0/src/nouse/plugins/loader.py +248 -0
- nouse-0.2.0/src/nouse/review_queue/__init__.py +0 -0
- nouse-0.2.0/src/nouse/review_queue/errors.py +1 -0
- nouse-0.2.0/src/nouse/review_queue/queue.py +1 -0
- nouse-0.2.0/src/nouse/search/__init__.py +4 -0
- nouse-0.2.0/src/nouse/search/escalator.py +247 -0
- nouse-0.2.0/src/nouse/self_layer/__init__.py +23 -0
- nouse-0.2.0/src/nouse/self_layer/errors.py +13 -0
- nouse-0.2.0/src/nouse/self_layer/living_core.py +630 -0
- nouse-0.2.0/src/nouse/self_layer/reader.py +14 -0
- nouse-0.2.0/src/nouse/self_layer/versioning.py +30 -0
- nouse-0.2.0/src/nouse/self_layer/writer.py +87 -0
- nouse-0.2.0/src/nouse/session/__init__.py +33 -0
- nouse-0.2.0/src/nouse/session/cancellation.py +41 -0
- nouse-0.2.0/src/nouse/session/energy.py +39 -0
- nouse-0.2.0/src/nouse/session/state.py +388 -0
- nouse-0.2.0/src/nouse/session/writer.py +32 -0
- nouse-0.2.0/src/nouse/tda/__init__.py +0 -0
- nouse-0.2.0/src/nouse/tda/bridge.py +142 -0
- nouse-0.2.0/src/nouse/testing/__init__.py +0 -0
- nouse-0.2.0/src/nouse/testing/config_factory.py +1 -0
- nouse-0.2.0/src/nouse/testing/fake_ollama.py +1 -0
- nouse-0.2.0/src/nouse/trace/__init__.py +2 -0
- nouse-0.2.0/src/nouse/trace/output_trace.py +169 -0
- nouse-0.2.0/src/nouse/web/__init__.py +0 -0
- nouse-0.2.0/src/nouse/web/server.py +3944 -0
- nouse-0.2.0/src/nouse.egg-info/PKG-INFO +230 -0
- nouse-0.2.0/src/nouse.egg-info/SOURCES.txt +139 -0
- nouse-0.2.0/src/nouse.egg-info/dependency_links.txt +1 -0
- nouse-0.2.0/src/nouse.egg-info/entry_points.txt +4 -0
- nouse-0.2.0/src/nouse.egg-info/requires.txt +28 -0
- nouse-0.2.0/src/nouse.egg-info/top_level.txt +1 -0
- nouse-0.2.0/tests/test_nouse_kernel.py +152 -0
- nouse-0.2.0/tests/test_plasticity_live.py +169 -0
nouse-0.2.0/PKG-INFO
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: nouse
|
|
3
|
+
Version: 0.2.0
|
|
4
|
+
Summary: The Cognitive Substrate Framework for Model-Agnostic AI — the missing link to AGI
|
|
5
|
+
Author-email: Björn Wikström <bjorn@base76.se>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/base76-research-lab/NoUse
|
|
8
|
+
Project-URL: Repository, https://github.com/base76-research-lab/NoUse
|
|
9
|
+
Keywords: ai,cognition,memory,brain,agi,mcp,plasticity,graph,knowledge-graph
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: Intended Audience :: Science/Research
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
18
|
+
Requires-Python: >=3.11
|
|
19
|
+
Description-Content-Type: text/markdown
|
|
20
|
+
Requires-Dist: ollama>=0.6.1
|
|
21
|
+
Requires-Dist: rich>=14.3.3
|
|
22
|
+
Requires-Dist: ruamel-yaml>=0.19.1
|
|
23
|
+
Requires-Dist: typer>=0.24.1
|
|
24
|
+
Requires-Dist: kuzu>=0.11.0
|
|
25
|
+
Requires-Dist: watchdog>=4.0.0
|
|
26
|
+
Requires-Dist: numpy>=1.26.0
|
|
27
|
+
Requires-Dist: scipy>=1.12.0
|
|
28
|
+
Requires-Dist: pyvis>=0.3.2
|
|
29
|
+
Requires-Dist: duckduckgo-search>=8.1.1
|
|
30
|
+
Requires-Dist: httpx>=0.28.1
|
|
31
|
+
Requires-Dist: beautifulsoup4>=4.14.3
|
|
32
|
+
Requires-Dist: lxml>=6.0.2
|
|
33
|
+
Requires-Dist: pypdf>=4.2.0
|
|
34
|
+
Requires-Dist: fastapi>=0.135.2
|
|
35
|
+
Requires-Dist: uvicorn>=0.42.0
|
|
36
|
+
Requires-Dist: pydantic>=2.12.5
|
|
37
|
+
Requires-Dist: pandas>=3.0.1
|
|
38
|
+
Requires-Dist: flask>=3.0.0
|
|
39
|
+
Requires-Dist: mcp>=0.1.0
|
|
40
|
+
Provides-Extra: dev
|
|
41
|
+
Requires-Dist: pytest>=9.0.2; extra == "dev"
|
|
42
|
+
Requires-Dist: pytest-asyncio>=1.3.0; extra == "dev"
|
|
43
|
+
Requires-Dist: maturin>=1.12.6; extra == "dev"
|
|
44
|
+
Provides-Extra: brian2
|
|
45
|
+
Requires-Dist: brian2>=2.5.0; extra == "brian2"
|
|
46
|
+
|
|
47
|
+
<p align="center">
|
|
48
|
+
<img src="IMG/Nouse.png" alt="Nouse" width="220"/>
|
|
49
|
+
</p>
|
|
50
|
+
|
|
51
|
+
<h1 align="center">Nouse</h1>
|
|
52
|
+
|
|
53
|
+
<p align="center">
|
|
54
|
+
<strong>Persistent domain memory for LLMs. Works with any model.</strong>
|
|
55
|
+
</p>
|
|
56
|
+
|
|
57
|
+
<p align="center">
|
|
58
|
+
<a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.11+-blue.svg" alt="Python 3.11+"></a>
|
|
59
|
+
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="MIT"></a>
|
|
60
|
+
<a href="eval/RESULTS.md"><img src="https://img.shields.io/badge/benchmark-96%25_vs_46%25-brightgreen" alt="Benchmark"></a>
|
|
61
|
+
</p>
|
|
62
|
+
|
|
63
|
+
---
|
|
64
|
+
|
|
65
|
+
## The result that motivated this
|
|
66
|
+
|
|
67
|
+
```
|
|
68
|
+
Model Score Questions
|
|
69
|
+
─────────────────────────────────────────────────────
|
|
70
|
+
llama3.1-8b (no memory) 46% 60
|
|
71
|
+
llama-3.3-70b (no memory) 47% 60
|
|
72
|
+
llama3.1-8b + Nouse memory → 96% 60
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
**An 8B model with Nouse outperforms a 70B model without it.**
|
|
76
|
+
|
|
77
|
+
The effect is not about retrieval. It is about *disambiguation* — a small, precise knowledge signal
|
|
78
|
+
redirects the model's existing priors onto the correct frame. We call this the
|
|
79
|
+
**Intent Disambiguation Effect**.
|
|
80
|
+
|
|
81
|
+
→ Full benchmark: [eval/RESULTS.md](eval/RESULTS.md)
|
|
82
|
+
|
|
83
|
+
---
|
|
84
|
+
|
|
85
|
+
## What Nouse is
|
|
86
|
+
|
|
87
|
+
Nouse (νοῦς, Gk. *mind*) is a **persistent, self-growing knowledge graph** that attaches to any LLM
|
|
88
|
+
as a memory substrate.
|
|
89
|
+
|
|
90
|
+
```
|
|
91
|
+
Your documents, conversations, research
|
|
92
|
+
↓
|
|
93
|
+
Nouse knowledge graph
|
|
94
|
+
(KuzuDB + Hebbian learning)
|
|
95
|
+
↓
|
|
96
|
+
brain.query("your question")
|
|
97
|
+
↓
|
|
98
|
+
Structured context injected into any LLM prompt
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
It is **not** a RAG system. RAG retrieves chunks. Nouse extracts *relations* — typed, weighted,
|
|
102
|
+
evidence-scored connections between concepts — and injects a compact, structured context block.
|
|
103
|
+
|
|
104
|
+
It **learns continuously**. Every interaction strengthens or weakens connections (Hebbian plasticity).
|
|
105
|
+
There is no retraining. No gradient descent. The graph grows.
|
|
106
|
+
|
|
107
|
+
---
|
|
108
|
+
|
|
109
|
+
## Quick start
|
|
110
|
+
|
|
111
|
+
```bash
|
|
112
|
+
pip install nouse
|
|
113
|
+
|
|
114
|
+
# Attach to your knowledge graph
|
|
115
|
+
import nouse
|
|
116
|
+
brain = nouse.attach()
|
|
117
|
+
|
|
118
|
+
# Query and inject context
|
|
119
|
+
result = brain.query("transformer attention mechanism")
|
|
120
|
+
print(result.context_block()) # inject this into your LLM prompt
|
|
121
|
+
print(result.confidence) # 0.0 – 1.0
|
|
122
|
+
print(result.strong_axioms()) # verified high-evidence relations
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
Works with any provider — OpenAI, Anthropic, Groq, Cerebras, Ollama:
|
|
126
|
+
|
|
127
|
+
```python
|
|
128
|
+
# You handle the LLM call. Nouse handles the memory.
|
|
129
|
+
context = brain.query(user_question).context_block()
|
|
130
|
+
response = openai.chat(messages=[
|
|
131
|
+
{"role": "system", "content": context},
|
|
132
|
+
{"role": "user", "content": user_question},
|
|
133
|
+
])
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
## Run the benchmark yourself
|
|
139
|
+
|
|
140
|
+
```bash
|
|
141
|
+
git clone https://github.com/base76-research-lab/NoUse
|
|
142
|
+
cd NoUse
|
|
143
|
+
pip install -e .
|
|
144
|
+
|
|
145
|
+
# Generate questions from your own graph
|
|
146
|
+
python eval/generate_questions.py --n 60
|
|
147
|
+
|
|
148
|
+
# Run benchmark (requires Cerebras or Groq API key, or use Ollama)
|
|
149
|
+
python eval/run_eval.py \
|
|
150
|
+
--small cerebras/llama3.1-8b \
|
|
151
|
+
--large groq/llama-3.3-70b-versatile \
|
|
152
|
+
--n 60 --no-judge
|
|
153
|
+
```
|
|
154
|
+
|
|
155
|
+
---
|
|
156
|
+
|
|
157
|
+
## How the graph grows
|
|
158
|
+
|
|
159
|
+
```
|
|
160
|
+
Read a document / have a conversation
|
|
161
|
+
↓
|
|
162
|
+
nouse daemon (background)
|
|
163
|
+
↓
|
|
164
|
+
DeepDive: extract concepts + relations
|
|
165
|
+
↓
|
|
166
|
+
Hebbian update: strengthen confirmed paths
|
|
167
|
+
↓
|
|
168
|
+
NightRun: consolidate, prune weak edges
|
|
169
|
+
↓
|
|
170
|
+
Ghost Q (nightly): ask LLM about weak nodes → enrich graph
|
|
171
|
+
```
|
|
172
|
+
|
|
173
|
+
The daemon runs as a systemd service. It watches your files, chat history,
|
|
174
|
+
browser bookmarks — anything you configure. You never manually curate the graph.
|
|
175
|
+
|
|
176
|
+
---
|
|
177
|
+
|
|
178
|
+
## Architecture
|
|
179
|
+
|
|
180
|
+
```
|
|
181
|
+
nouse/
|
|
182
|
+
├── inject.py # Public API: attach(), NouseBrain, Axiom, QueryResult
|
|
183
|
+
├── field/
|
|
184
|
+
│ └── surface.py # KuzuDB graph interface
|
|
185
|
+
├── daemon/
|
|
186
|
+
│ ├── main.py # Autonomous learning loop
|
|
187
|
+
│ ├── nightrun.py # Nightly consolidation (9 phases)
|
|
188
|
+
│ ├── node_deepdive.py # 5-step concept extraction
|
|
189
|
+
│ └── ghost_q.py # LLM-driven graph enrichment
|
|
190
|
+
└── search/
|
|
191
|
+
└── escalator.py # 3-level knowledge escalation
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
---
|
|
195
|
+
|
|
196
|
+
## The hypothesis (work in progress)
|
|
197
|
+
|
|
198
|
+
```
|
|
199
|
+
small model + Nouse[domain] > large model without Nouse
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
We have evidence for this in our benchmark. The next step is to test across
|
|
203
|
+
more domains, more models, and with an LLM judge instead of keyword scoring.
|
|
204
|
+
|
|
205
|
+
Contributions welcome — especially domain-specific question banks.
|
|
206
|
+
|
|
207
|
+
---
|
|
208
|
+
|
|
209
|
+
## Install & run daemon
|
|
210
|
+
|
|
211
|
+
```bash
|
|
212
|
+
pip install -e ".[dev]"
|
|
213
|
+
|
|
214
|
+
# Start the learning daemon
|
|
215
|
+
nouse daemon start
|
|
216
|
+
|
|
217
|
+
# Interactive REPL with memory
|
|
218
|
+
nouse run
|
|
219
|
+
|
|
220
|
+
# Check graph stats
|
|
221
|
+
nouse status
|
|
222
|
+
```
|
|
223
|
+
|
|
224
|
+
Requires Python 3.11+. Graph stored in `~/.local/share/nouse/field.kuzu`.
|
|
225
|
+
|
|
226
|
+
---
|
|
227
|
+
|
|
228
|
+
## License
|
|
229
|
+
|
|
230
|
+
MIT — Björn Wikström / Base76 Research Lab
|
nouse-0.2.0/README.md
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
<p align="center">
|
|
2
|
+
<img src="IMG/Nouse.png" alt="Nouse" width="220"/>
|
|
3
|
+
</p>
|
|
4
|
+
|
|
5
|
+
<h1 align="center">Nouse</h1>
|
|
6
|
+
|
|
7
|
+
<p align="center">
|
|
8
|
+
<strong>Persistent domain memory for LLMs. Works with any model.</strong>
|
|
9
|
+
</p>
|
|
10
|
+
|
|
11
|
+
<p align="center">
|
|
12
|
+
<a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.11+-blue.svg" alt="Python 3.11+"></a>
|
|
13
|
+
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="MIT"></a>
|
|
14
|
+
<a href="eval/RESULTS.md"><img src="https://img.shields.io/badge/benchmark-96%25_vs_46%25-brightgreen" alt="Benchmark"></a>
|
|
15
|
+
</p>
|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
## The result that motivated this
|
|
20
|
+
|
|
21
|
+
```
|
|
22
|
+
Model Score Questions
|
|
23
|
+
─────────────────────────────────────────────────────
|
|
24
|
+
llama3.1-8b (no memory) 46% 60
|
|
25
|
+
llama-3.3-70b (no memory) 47% 60
|
|
26
|
+
llama3.1-8b + Nouse memory → 96% 60
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
**An 8B model with Nouse outperforms a 70B model without it.**
|
|
30
|
+
|
|
31
|
+
The effect is not about retrieval. It is about *disambiguation* — a small, precise knowledge signal
|
|
32
|
+
redirects the model's existing priors onto the correct frame. We call this the
|
|
33
|
+
**Intent Disambiguation Effect**.
|
|
34
|
+
|
|
35
|
+
→ Full benchmark: [eval/RESULTS.md](eval/RESULTS.md)
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## What Nouse is
|
|
40
|
+
|
|
41
|
+
Nouse (νοῦς, Gk. *mind*) is a **persistent, self-growing knowledge graph** that attaches to any LLM
|
|
42
|
+
as a memory substrate.
|
|
43
|
+
|
|
44
|
+
```
|
|
45
|
+
Your documents, conversations, research
|
|
46
|
+
↓
|
|
47
|
+
Nouse knowledge graph
|
|
48
|
+
(KuzuDB + Hebbian learning)
|
|
49
|
+
↓
|
|
50
|
+
brain.query("your question")
|
|
51
|
+
↓
|
|
52
|
+
Structured context injected into any LLM prompt
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
It is **not** a RAG system. RAG retrieves chunks. Nouse extracts *relations* — typed, weighted,
|
|
56
|
+
evidence-scored connections between concepts — and injects a compact, structured context block.
|
|
57
|
+
|
|
58
|
+
It **learns continuously**. Every interaction strengthens or weakens connections (Hebbian plasticity).
|
|
59
|
+
There is no retraining. No gradient descent. The graph grows.
|
|
60
|
+
|
|
61
|
+
---
|
|
62
|
+
|
|
63
|
+
## Quick start
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
pip install nouse
|
|
67
|
+
|
|
68
|
+
# Attach to your knowledge graph
|
|
69
|
+
import nouse
|
|
70
|
+
brain = nouse.attach()
|
|
71
|
+
|
|
72
|
+
# Query and inject context
|
|
73
|
+
result = brain.query("transformer attention mechanism")
|
|
74
|
+
print(result.context_block()) # inject this into your LLM prompt
|
|
75
|
+
print(result.confidence) # 0.0 – 1.0
|
|
76
|
+
print(result.strong_axioms()) # verified high-evidence relations
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
Works with any provider — OpenAI, Anthropic, Groq, Cerebras, Ollama:
|
|
80
|
+
|
|
81
|
+
```python
|
|
82
|
+
# You handle the LLM call. Nouse handles the memory.
|
|
83
|
+
context = brain.query(user_question).context_block()
|
|
84
|
+
response = openai.chat(messages=[
|
|
85
|
+
{"role": "system", "content": context},
|
|
86
|
+
{"role": "user", "content": user_question},
|
|
87
|
+
])
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
---
|
|
91
|
+
|
|
92
|
+
## Run the benchmark yourself
|
|
93
|
+
|
|
94
|
+
```bash
|
|
95
|
+
git clone https://github.com/base76-research-lab/NoUse
|
|
96
|
+
cd NoUse
|
|
97
|
+
pip install -e .
|
|
98
|
+
|
|
99
|
+
# Generate questions from your own graph
|
|
100
|
+
python eval/generate_questions.py --n 60
|
|
101
|
+
|
|
102
|
+
# Run benchmark (requires Cerebras or Groq API key, or use Ollama)
|
|
103
|
+
python eval/run_eval.py \
|
|
104
|
+
--small cerebras/llama3.1-8b \
|
|
105
|
+
--large groq/llama-3.3-70b-versatile \
|
|
106
|
+
--n 60 --no-judge
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
---
|
|
110
|
+
|
|
111
|
+
## How the graph grows
|
|
112
|
+
|
|
113
|
+
```
|
|
114
|
+
Read a document / have a conversation
|
|
115
|
+
↓
|
|
116
|
+
nouse daemon (background)
|
|
117
|
+
↓
|
|
118
|
+
DeepDive: extract concepts + relations
|
|
119
|
+
↓
|
|
120
|
+
Hebbian update: strengthen confirmed paths
|
|
121
|
+
↓
|
|
122
|
+
NightRun: consolidate, prune weak edges
|
|
123
|
+
↓
|
|
124
|
+
Ghost Q (nightly): ask LLM about weak nodes → enrich graph
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
The daemon runs as a systemd service. It watches your files, chat history,
|
|
128
|
+
browser bookmarks — anything you configure. You never manually curate the graph.
|
|
129
|
+
|
|
130
|
+
---
|
|
131
|
+
|
|
132
|
+
## Architecture
|
|
133
|
+
|
|
134
|
+
```
|
|
135
|
+
nouse/
|
|
136
|
+
├── inject.py # Public API: attach(), NouseBrain, Axiom, QueryResult
|
|
137
|
+
├── field/
|
|
138
|
+
│ └── surface.py # KuzuDB graph interface
|
|
139
|
+
├── daemon/
|
|
140
|
+
│ ├── main.py # Autonomous learning loop
|
|
141
|
+
│ ├── nightrun.py # Nightly consolidation (9 phases)
|
|
142
|
+
│ ├── node_deepdive.py # 5-step concept extraction
|
|
143
|
+
│ └── ghost_q.py # LLM-driven graph enrichment
|
|
144
|
+
└── search/
|
|
145
|
+
└── escalator.py # 3-level knowledge escalation
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
---
|
|
149
|
+
|
|
150
|
+
## The hypothesis (work in progress)
|
|
151
|
+
|
|
152
|
+
```
|
|
153
|
+
small model + Nouse[domain] > large model without Nouse
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
We have evidence for this in our benchmark. The next step is to test across
|
|
157
|
+
more domains, more models, and with an LLM judge instead of keyword scoring.
|
|
158
|
+
|
|
159
|
+
Contributions welcome — especially domain-specific question banks.
|
|
160
|
+
|
|
161
|
+
---
|
|
162
|
+
|
|
163
|
+
## Install & run daemon
|
|
164
|
+
|
|
165
|
+
```bash
|
|
166
|
+
pip install -e ".[dev]"
|
|
167
|
+
|
|
168
|
+
# Start the learning daemon
|
|
169
|
+
nouse daemon start
|
|
170
|
+
|
|
171
|
+
# Interactive REPL with memory
|
|
172
|
+
nouse run
|
|
173
|
+
|
|
174
|
+
# Check graph stats
|
|
175
|
+
nouse status
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
Requires Python 3.11+. Graph stored in `~/.local/share/nouse/field.kuzu`.
|
|
179
|
+
|
|
180
|
+
---
|
|
181
|
+
|
|
182
|
+
## License
|
|
183
|
+
|
|
184
|
+
MIT — Björn Wikström / Base76 Research Lab
|
|
nouse-0.2.0/pyproject.toml
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "nouse"
|
|
3
|
+
version = "0.2.0"
|
|
4
|
+
description = "The Cognitive Substrate Framework for Model-Agnostic AI — the missing link to AGI"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
requires-python = ">=3.11"
|
|
7
|
+
license = { text = "MIT" }
|
|
8
|
+
authors = [
|
|
9
|
+
{ name = "Björn Wikström", email = "bjorn@base76.se" }
|
|
10
|
+
]
|
|
11
|
+
keywords = ["ai", "cognition", "memory", "brain", "agi", "mcp", "plasticity", "graph", "knowledge-graph"]
|
|
12
|
+
classifiers = [
|
|
13
|
+
"Development Status :: 3 - Alpha",
|
|
14
|
+
"Intended Audience :: Developers",
|
|
15
|
+
"Intended Audience :: Science/Research",
|
|
16
|
+
"License :: OSI Approved :: MIT License",
|
|
17
|
+
"Programming Language :: Python :: 3",
|
|
18
|
+
"Programming Language :: Python :: 3.11",
|
|
19
|
+
"Programming Language :: Python :: 3.12",
|
|
20
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
21
|
+
]
|
|
22
|
+
dependencies = [
|
|
23
|
+
"ollama>=0.6.1",
|
|
24
|
+
"rich>=14.3.3",
|
|
25
|
+
"ruamel-yaml>=0.19.1",
|
|
26
|
+
"typer>=0.24.1",
|
|
27
|
+
"kuzu>=0.11.0",
|
|
28
|
+
"watchdog>=4.0.0",
|
|
29
|
+
"numpy>=1.26.0",
|
|
30
|
+
"scipy>=1.12.0",
|
|
31
|
+
"pyvis>=0.3.2",
|
|
32
|
+
"duckduckgo-search>=8.1.1",
|
|
33
|
+
"httpx>=0.28.1",
|
|
34
|
+
"beautifulsoup4>=4.14.3",
|
|
35
|
+
"lxml>=6.0.2",
|
|
36
|
+
"pypdf>=4.2.0",
|
|
37
|
+
"fastapi>=0.135.2",
|
|
38
|
+
"uvicorn>=0.42.0",
|
|
39
|
+
"pydantic>=2.12.5",
|
|
40
|
+
"pandas>=3.0.1",
|
|
41
|
+
"flask>=3.0.0",
|
|
42
|
+
"mcp>=0.1.0",
|
|
43
|
+
]
|
|
44
|
+
|
|
45
|
+
[project.optional-dependencies]
|
|
46
|
+
dev = [
|
|
47
|
+
"pytest>=9.0.2",
|
|
48
|
+
"pytest-asyncio>=1.3.0",
|
|
49
|
+
"maturin>=1.12.6",
|
|
50
|
+
]
|
|
51
|
+
brian2 = [
|
|
52
|
+
"brian2>=2.5.0",
|
|
53
|
+
]
|
|
54
|
+
|
|
55
|
+
[project.scripts]
|
|
56
|
+
nouse = "nouse.cli.main:app"
|
|
57
|
+
nouse-brain = "nouse.daemon.main:main"
|
|
58
|
+
nouse-server = "nouse.web.server:main"
|
|
59
|
+
|
|
60
|
+
[project.urls]
|
|
61
|
+
Homepage = "https://github.com/base76-research-lab/NoUse"
|
|
62
|
+
Repository = "https://github.com/base76-research-lab/NoUse"
|
|
63
|
+
|
|
64
|
+
[build-system]
|
|
65
|
+
requires = ["setuptools>=68", "wheel"]
|
|
66
|
+
build-backend = "setuptools.build_meta"
|
|
67
|
+
|
|
68
|
+
[tool.setuptools]
|
|
69
|
+
package-dir = {"" = "src"}
|
|
70
|
+
|
|
71
|
+
[tool.setuptools.packages.find]
|
|
72
|
+
where = ["src"]
|
|
73
|
+
|
|
74
|
+
[tool.pytest.ini_options]
|
|
75
|
+
asyncio_mode = "auto"
|
|
76
|
+
testpaths = ["tests"]
|
nouse-0.2.0/src/nouse/__init__.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""
|
|
2
|
+
nouse — The Cognitive Substrate Framework for Model-Agnostic AI.
|
|
3
|
+
|
|
4
|
+
The missing link to AGI: a persistent, plastic brain layer that gives any LLM
|
|
5
|
+
the cognitive architecture of the human mind.
|
|
6
|
+
|
|
7
|
+
Memory architecture: working → episodic → semantic → procedural
|
|
8
|
+
Core innovation: KuzuDB knowledge graph + Residual Streams (w, r, u) per edge.
|
|
9
|
+
Plasticity: STDP + Hebbian learning, NightRun consolidation, DeepDive axiom-discovery.
|
|
10
|
+
|
|
11
|
+
Quick start (high-level kernel API):
|
|
12
|
+
import nouse
|
|
13
|
+
k = nouse.Kernel()
|
|
14
|
+
k.upsert_edge("e1", src="a", rel_type="causes", tgt="b", w=0.3, r=0.0, u=0.6)
|
|
15
|
+
k.step()
|
|
16
|
+
|
|
17
|
+
Quick start (knowledge graph API):
|
|
18
|
+
from nouse.field.surface import FieldSurface
|
|
19
|
+
field = FieldSurface()
|
|
20
|
+
field.add_relation("ocean_current", "influences", "climate", why="heat transport")
|
|
21
|
+
"""
|
|
22
|
+
from nouse.kernel import (
|
|
23
|
+
Brain as Kernel,
|
|
24
|
+
FieldEvent,
|
|
25
|
+
NeuromodulatorState,
|
|
26
|
+
NodeStateSpace,
|
|
27
|
+
ResidualEdge,
|
|
28
|
+
MEMORY_TIERS,
|
|
29
|
+
NEUROMODULATORS,
|
|
30
|
+
SCHEMA_VERSION,
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
from nouse.inject import attach, NouseBrain, Axiom, ConceptProfile, QueryResult
|
|
34
|
+
from nouse.search.escalator import EscalationResult
|
|
35
|
+
|
|
36
|
+
__version__ = "0.2.0"
|
|
37
|
+
|
|
38
|
+
__all__ = [
|
|
39
|
+
# Inject API — one-line entry point
|
|
40
|
+
"attach",
|
|
41
|
+
"NouseBrain",
|
|
42
|
+
"Axiom",
|
|
43
|
+
"ConceptProfile",
|
|
44
|
+
"QueryResult",
|
|
45
|
+
# Escalation
|
|
46
|
+
"EscalationResult",
|
|
47
|
+
# Residual Stream kernel
|
|
48
|
+
"Kernel",
|
|
49
|
+
"FieldEvent",
|
|
50
|
+
"NeuromodulatorState",
|
|
51
|
+
"NodeStateSpace",
|
|
52
|
+
"ResidualEdge",
|
|
53
|
+
"MEMORY_TIERS",
|
|
54
|
+
"NEUROMODULATORS",
|
|
55
|
+
"SCHEMA_VERSION",
|
|
56
|
+
]
|