brain-system 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. brain_system-0.1.0/.env.example +10 -0
  2. brain_system-0.1.0/.gitignore +28 -0
  3. brain_system-0.1.0/LICENSE +21 -0
  4. brain_system-0.1.0/MANIFEST.in +2 -0
  5. brain_system-0.1.0/PKG-INFO +278 -0
  6. brain_system-0.1.0/README.md +242 -0
  7. brain_system-0.1.0/brain_system/__init__.py +29 -0
  8. brain_system-0.1.0/brain_system/__main__.py +5 -0
  9. brain_system-0.1.0/brain_system/agents/__init__.py +0 -0
  10. brain_system-0.1.0/brain_system/agents/base_agent.py +37 -0
  11. brain_system-0.1.0/brain_system/agents/emotional_agent.py +58 -0
  12. brain_system-0.1.0/brain_system/agents/executive_agent.py +71 -0
  13. brain_system-0.1.0/brain_system/agents/logic_agent.py +56 -0
  14. brain_system-0.1.0/brain_system/agents/memory_agent.py +65 -0
  15. brain_system-0.1.0/brain_system/agents/sensory_agent.py +47 -0
  16. brain_system-0.1.0/brain_system/app.py +161 -0
  17. brain_system-0.1.0/brain_system/core/__init__.py +0 -0
  18. brain_system-0.1.0/brain_system/core/document_loader.py +74 -0
  19. brain_system-0.1.0/brain_system/core/llm_interface.py +47 -0
  20. brain_system-0.1.0/brain_system/core/memory_store.py +76 -0
  21. brain_system-0.1.0/brain_system/core/orchestrator.py +155 -0
  22. brain_system-0.1.0/brain_system/core/persona.py +131 -0
  23. brain_system-0.1.0/brain_system/main.py +100 -0
  24. brain_system-0.1.0/brain_system/py.typed +0 -0
  25. brain_system-0.1.0/brain_system/web/static/css/style.css +987 -0
  26. brain_system-0.1.0/brain_system/web/static/js/app.js +416 -0
  27. brain_system-0.1.0/brain_system/web/templates/index.html +151 -0
  28. brain_system-0.1.0/brain_system/wrapper.py +204 -0
  29. brain_system-0.1.0/demo/1_setup.png +0 -0
  30. brain_system-0.1.0/demo/2_processing.png +0 -0
  31. brain_system-0.1.0/demo/3_response.png +0 -0
  32. brain_system-0.1.0/demo/4_real_screenshot.png +0 -0
  33. brain_system-0.1.0/examples/basic_usage.py +29 -0
  34. brain_system-0.1.0/examples/custom_provider.py +27 -0
  35. brain_system-0.1.0/examples/persona_mode.py +24 -0
  36. brain_system-0.1.0/pyproject.toml +52 -0
  37. brain_system-0.1.0/run.sh +26 -0
@@ -0,0 +1,10 @@
1
+ # Example environment configuration
2
+ # Copy this file to .env and fill in your API keys
3
+
4
+ # Required for Gemini provider
5
+ # GOOGLE_API_KEY=your_google_api_key_here
6
+
7
+ # Required for OpenAI provider
8
+ # OPENAI_API_KEY=your_openai_api_key_here
9
+
10
+ # Ollama requires no API key — just install Ollama and pull a model
@@ -0,0 +1,28 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ *.egg-info/
7
+ dist/
8
+ build/
9
+ *.egg
10
+
11
+ # Environment
12
+ .env
13
+ .venv/
14
+ venv/
15
+ env/
16
+
17
+ # IDE
18
+ .vscode/
19
+ .idea/
20
+ *.swp
21
+ *.swo
22
+ *~
23
+ .DS_Store
24
+
25
+ # Project-specific
26
+ brain_memory.json
27
+ brain_system/uploads/
28
+ *.pdf
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Brain System Contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,2 @@
1
+ recursive-include brain_system/web *
2
+ include brain_system/py.typed
@@ -0,0 +1,278 @@
1
+ Metadata-Version: 2.4
2
+ Name: brain-system
3
+ Version: 0.1.0
4
+ Summary: A multi-agent cognitive architecture powered by LangGraph — five specialized AI agents modeled after the human brain.
5
+ Project-URL: Homepage, https://github.com/shivamtyagi18/BRAIN
6
+ Project-URL: Repository, https://github.com/shivamtyagi18/BRAIN
7
+ Project-URL: Issues, https://github.com/shivamtyagi18/BRAIN/issues
8
+ Author-email: Shivam Tyagi <shivamtyagi18@gmail.com>
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Keywords: agents,ai,brain,cognitive,langchain,langgraph,multi-agent
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Requires-Python: >=3.10
21
+ Requires-Dist: langchain
22
+ Requires-Dist: langchain-community
23
+ Requires-Dist: langchain-core
24
+ Requires-Dist: langchain-google-genai
25
+ Requires-Dist: langchain-ollama
26
+ Requires-Dist: langchain-openai
27
+ Requires-Dist: langgraph
28
+ Requires-Dist: pypdf2
29
+ Requires-Dist: python-dotenv
30
+ Provides-Extra: dev
31
+ Requires-Dist: build; extra == 'dev'
32
+ Requires-Dist: twine; extra == 'dev'
33
+ Provides-Extra: web
34
+ Requires-Dist: flask; extra == 'web'
35
+ Description-Content-Type: text/markdown
36
+
37
+ <div align="center">
38
+
39
+ # 🧠 Brain System
40
+
41
+ ### A Multi-Agent Cognitive Architecture Powered by LangGraph
42
+
43
+ *Five specialized AI agents — modeled after the human brain — collaborate to process your input and generate thoughtful, nuanced responses.*
44
+
45
+ [![Python 3.10+](https://img.shields.io/badge/Python-3.10+-blue.svg)](https://www.python.org/downloads/)
46
+ [![PyPI](https://img.shields.io/pypi/v/brain-system.svg)](https://pypi.org/project/brain-system/)
47
+ [![LangGraph](https://img.shields.io/badge/Built%20with-LangGraph-orange.svg)](https://github.com/langchain-ai/langgraph)
48
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE)
49
+
50
+ </div>
51
+
52
+ ---
53
+
54
+ ## 🧩 How It Works
55
+
56
+ Brain System maps biological brain functions to specialized AI agents that process every input in parallel — just like the human brain:
57
+
58
+ ```mermaid
59
+ graph LR
60
+ A[User Input] --> B[🔵 Sensory Agent<br>Thalamus]
61
+ B --> C[🟣 Memory Agent<br>Hippocampus]
62
+ B --> D[🟢 Logic Agent<br>Frontal Lobe]
63
+ B --> E[🔴 Emotional Agent<br>Amygdala]
64
+ C --> F[🟡 Executive Agent<br>Prefrontal Cortex]
65
+ D --> F
66
+ E --> F
67
+ F --> G[Final Response]
68
+ ```
69
+
70
+ | Agent | Brain Analog | What It Does |
71
+ |:------|:-------------|:-------------|
72
+ | **Sensory** | Thalamus & Sensory Cortex | Multi-layer signal classification, pattern recognition, salience detection |
73
+ | **Memory** | Hippocampus & DLPFC | LLM-driven contextual synthesis, associative linking, temporal weighting |
74
+ | **Logic** | Left Frontal Lobe & DLPFC | Deductive/inductive reasoning, fallacy detection, counter-arguments |
75
+ | **Emotional** | Amygdala, Insula & Cingulate | Emotional profiling, empathy reading, ethical safety checks |
76
+ | **Executive** | Full Prefrontal Cortex | Conflict resolution between agents, response calibration, integrated output |
77
+
78
+ ## 🎭 Persona Mode
79
+
80
+ Upload a biography or autobiography, and the entire Brain responds **as that person would**.
81
+
82
+ The system extracts personality traits, speech patterns, reasoning style, and emotional tendencies — then injects tailored context into each agent. The Logic Agent thinks in their reasoning style, the Emotional Agent mirrors their emotional tendencies, and the Executive Agent speaks in their voice.
83
+
84
+ > **Example:** Upload Nelson Mandela's autobiography → ask about dealing with conflict → get a response reflecting his values of reconciliation, strategic patience, and ubuntu philosophy.
85
+
86
+ ## 📦 Install
87
+
88
+ ```bash
89
+ pip install brain-system
90
+ ```
91
+
92
+ > For the web UI, install the optional extra: `pip install brain-system[web]`
93
+
94
+ ## 🚀 Quick Start — Library Usage
95
+
96
+ ```python
97
+ from brain_system import BrainWrapper
98
+
99
+ # Create a Brain (choose provider: "gemini", "openai", or "ollama")
100
+ brain = BrainWrapper(provider="ollama", model_name="mistral")
101
+
102
+ # Process input through all 5 agents
103
+ result = brain.think("What is the meaning of justice?")
104
+
105
+ # Get the final synthesized response
106
+ print(result.response)
107
+
108
+ # Inspect individual agent signals
109
+ print(result.sensory) # Thalamus — input classification
110
+ print(result.memory) # Hippocampus — memory context
111
+ print(result.logic) # Frontal Lobe — logical analysis
112
+ print(result.emotional) # Amygdala — emotional analysis
113
+ ```
114
+
115
+ ### Persona Mode
116
+
117
+ ```python
118
+ brain.load_persona("gandhi_autobiography.pdf")
119
+ result = brain.think("How should we deal with injustice?")
120
+ print(result.response) # Responds in Gandhi's voice
121
+
122
+ brain.clear_persona() # Revert to default
123
+ ```
124
+
125
+ ### Memory Management
126
+
127
+ ```python
128
+ # Custom memory file location
129
+ brain = BrainWrapper(provider="gemini", memory_path="./my_memory.json")
130
+
131
+ # Clear all stored memories
132
+ brain.clear_memory()
133
+ ```
134
+
135
+ ### API Reference
136
+
137
+ | Class / Method | Description |
138
+ |:---|:---|
139
+ | `BrainWrapper(provider, model_name, memory_path)` | Create a Brain instance |
140
+ | `.think(input) → BrainResult` | Process input through the 5-agent pipeline |
141
+ | `.load_persona(filepath)` | Load a persona from `.txt` or `.pdf` |
142
+ | `.clear_persona()` | Remove the active persona |
143
+ | `.clear_memory()` | Erase all long-term memories |
144
+ | `.persona_active` | `bool` — is a persona loaded? |
145
+ | `.persona_name` | Name of the active persona |
146
+ | `BrainResult.response` | Final synthesized response |
147
+ | `BrainResult.agent_signals` | `dict` of each agent's raw output |
148
+ | `BrainResult.sensory / .memory / .logic / .emotional` | Shortcut accessors |
149
+
150
+ See [`examples/`](examples/) for complete usage scripts.
151
+
152
+ ---
153
+
154
+ ## 🖥️ Development Setup
155
+
156
+ ### Clone & Install
157
+
158
+ ```bash
159
+ git clone https://github.com/shivamtyagi18/BRAIN.git
160
+ cd BRAIN
161
+ pip install -e ".[web,dev]"
162
+ ```
163
+
164
+ ### Configure (Optional)
165
+
166
+ Create a `.env` file in the project root for cloud providers:
167
+
168
+ ```env
169
+ # Only needed if using Gemini or OpenAI
170
+ GOOGLE_API_KEY=your_key_here
171
+ OPENAI_API_KEY=your_key_here
172
+ ```
173
+
174
+ > **No API key needed for Ollama** — runs entirely on your local machine.
175
+
176
+ ### Run
177
+
178
+ #### Web UI
179
+ ```bash
180
+ python -m brain_system.app
181
+ ```
182
+ Open **http://localhost:5001** in your browser.
183
+
184
+ #### Command Line
185
+ ```bash
186
+ brain-cli
187
+ ```
188
+
189
+ ## 🖥️ Web Interface
190
+
191
+ The web UI features:
192
+ - **Provider selection** — choose Gemini, OpenAI, or Ollama at startup
193
+ - **Persona upload** — drag & drop a `.txt` or `.pdf` biography
194
+ - **Live chat** — dark-mode interface with agent activity indicators
195
+ - **Agent transparency** — expand each agent's internal reasoning with "Show agent signals"
196
+ - **Mid-conversation persona switching** — change or clear persona without restarting
197
+ - **New Chat** — full reset button to start fresh
198
+ - **Clear Memory** — wipe stored memories without restarting
199
+
200
+ ## 🤖 Supported LLM Providers
201
+
202
+ | Provider | Requirements | Best For |
203
+ |:---------|:-------------|:---------|
204
+ | **Ollama** | [Ollama](https://ollama.ai) installed locally | Privacy, offline use, no cost |
205
+ | **Gemini** | `GOOGLE_API_KEY` in `.env` | High-quality responses |
206
+ | **OpenAI** | `OPENAI_API_KEY` in `.env` | GPT-4 class models |
207
+
208
+ ### Using Ollama (Local)
209
+
210
+ ```bash
211
+ # Install Ollama, then pull a model:
212
+ ollama pull mistral
213
+
214
+ # For uncensored output, try:
215
+ ollama pull dolphin-mistral
216
+ ```
217
+
218
+ ## 📁 Project Structure
219
+
220
+ ```
221
+ brain-system/
222
+ ├── pyproject.toml # Package config & dependencies
223
+ ├── run.sh # Single-command launcher
224
+ ├── examples/
225
+ │ ├── basic_usage.py # Minimal library usage
226
+ │ ├── persona_mode.py # Persona loading example
227
+ │ └── custom_provider.py # Provider switching example
228
+ └── brain_system/
229
+ ├── __init__.py # Public API exports
230
+ ├── wrapper.py # BrainWrapper — developer entry point
231
+ ├── app.py # Flask web server (optional)
232
+ ├── main.py # CLI entry point
233
+ ├── agents/
234
+ │ ├── base_agent.py # Abstract base with persona injection
235
+ │ ├── sensory_agent.py # Input parsing (Thalamus)
236
+ │ ├── memory_agent.py # Context retrieval (Hippocampus)
237
+ │ ├── emotional_agent.py # Sentiment analysis (Amygdala)
238
+ │ ├── logic_agent.py # Reasoning (Frontal Lobe)
239
+ │ └── executive_agent.py # Decision synthesis (PFC)
240
+ ├── core/
241
+ │ ├── orchestrator.py # LangGraph workflow engine
242
+ │ ├── llm_interface.py # Multi-provider LLM factory
243
+ │ ├── memory_store.py # Persistent memory (JSON)
244
+ │ ├── document_loader.py # TXT/PDF document ingestion
245
+ │ └── persona.py # Persona extraction & injection
246
+ └── web/
247
+ ├── templates/index.html # Chat interface
248
+ └── static/
249
+ ├── css/style.css # Dark-mode theme
250
+ └── js/app.js # Frontend logic
251
+ ```
252
+
253
+ ## 🔧 Architecture Highlights
254
+
255
+ - **LangGraph Orchestration** — Agents run as nodes in a compiled state graph with parallel execution for Memory, Logic, and Emotional processing
256
+ - **Modular LLM Factory** — Swap providers with a single parameter; no code changes needed
257
+ - **Dual Memory** — Short-term (conversation context) + Long-term (persistent JSON store with keyword retrieval)
258
+ - **Persona Injection** — Role-specific context: each agent gets *different* aspects of the persona profile tailored to its function
259
+
260
+ ## 🤝 Contributing
261
+
262
+ Contributions are welcome! Some ideas:
263
+
264
+ - **Vector memory** — Replace JSON keyword search with embedding-based retrieval
265
+ - **Additional agents** — Add a Creativity Agent, Social Agent, or Moral Reasoning Agent
266
+ - **Streaming responses** — Real-time token streaming in the web UI
267
+ - **Multi-turn persona** — Let the persona evolve based on the conversation
268
+ - **Voice interface** — Add speech-to-text input and text-to-speech output
269
+
270
+ ## 📝 License
271
+
272
+ MIT License — see [LICENSE](LICENSE) for details.
273
+
274
+ ---
275
+
276
+ <div align="center">
277
+ <i>Built with 🧠 by mapping neuroscience to multi-agent AI</i>
278
+ </div>
@@ -0,0 +1,242 @@
1
+ <div align="center">
2
+
3
+ # 🧠 Brain System
4
+
5
+ ### A Multi-Agent Cognitive Architecture Powered by LangGraph
6
+
7
+ *Five specialized AI agents — modeled after the human brain — collaborate to process your input and generate thoughtful, nuanced responses.*
8
+
9
+ [![Python 3.10+](https://img.shields.io/badge/Python-3.10+-blue.svg)](https://www.python.org/downloads/)
10
+ [![PyPI](https://img.shields.io/pypi/v/brain-system.svg)](https://pypi.org/project/brain-system/)
11
+ [![LangGraph](https://img.shields.io/badge/Built%20with-LangGraph-orange.svg)](https://github.com/langchain-ai/langgraph)
12
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE)
13
+
14
+ </div>
15
+
16
+ ---
17
+
18
+ ## 🧩 How It Works
19
+
20
+ Brain System maps biological brain functions to specialized AI agents that process every input in parallel — just like the human brain:
21
+
22
+ ```mermaid
23
+ graph LR
24
+ A[User Input] --> B[🔵 Sensory Agent<br>Thalamus]
25
+ B --> C[🟣 Memory Agent<br>Hippocampus]
26
+ B --> D[🟢 Logic Agent<br>Frontal Lobe]
27
+ B --> E[🔴 Emotional Agent<br>Amygdala]
28
+ C --> F[🟡 Executive Agent<br>Prefrontal Cortex]
29
+ D --> F
30
+ E --> F
31
+ F --> G[Final Response]
32
+ ```
33
+
34
+ | Agent | Brain Analog | What It Does |
35
+ |:------|:-------------|:-------------|
36
+ | **Sensory** | Thalamus & Sensory Cortex | Multi-layer signal classification, pattern recognition, salience detection |
37
+ | **Memory** | Hippocampus & DLPFC | LLM-driven contextual synthesis, associative linking, temporal weighting |
38
+ | **Logic** | Left Frontal Lobe & DLPFC | Deductive/inductive reasoning, fallacy detection, counter-arguments |
39
+ | **Emotional** | Amygdala, Insula & Cingulate | Emotional profiling, empathy reading, ethical safety checks |
40
+ | **Executive** | Full Prefrontal Cortex | Conflict resolution between agents, response calibration, integrated output |
41
+
42
+ ## 🎭 Persona Mode
43
+
44
+ Upload a biography or autobiography, and the entire Brain responds **as that person would**.
45
+
46
+ The system extracts personality traits, speech patterns, reasoning style, and emotional tendencies — then injects tailored context into each agent. The Logic Agent thinks in their reasoning style, the Emotional Agent mirrors their emotional tendencies, and the Executive Agent speaks in their voice.
47
+
48
+ > **Example:** Upload Nelson Mandela's autobiography → ask about dealing with conflict → get a response reflecting his values of reconciliation, strategic patience, and ubuntu philosophy.
49
+
50
+ ## 📦 Install
51
+
52
+ ```bash
53
+ pip install brain-system
54
+ ```
55
+
56
+ > For the web UI, install the optional extra: `pip install brain-system[web]`
57
+
58
+ ## 🚀 Quick Start — Library Usage
59
+
60
+ ```python
61
+ from brain_system import BrainWrapper
62
+
63
+ # Create a Brain (choose provider: "gemini", "openai", or "ollama")
64
+ brain = BrainWrapper(provider="ollama", model_name="mistral")
65
+
66
+ # Process input through all 5 agents
67
+ result = brain.think("What is the meaning of justice?")
68
+
69
+ # Get the final synthesized response
70
+ print(result.response)
71
+
72
+ # Inspect individual agent signals
73
+ print(result.sensory) # Thalamus — input classification
74
+ print(result.memory) # Hippocampus — memory context
75
+ print(result.logic) # Frontal Lobe — logical analysis
76
+ print(result.emotional) # Amygdala — emotional analysis
77
+ ```
78
+
79
+ ### Persona Mode
80
+
81
+ ```python
82
+ brain.load_persona("gandhi_autobiography.pdf")
83
+ result = brain.think("How should we deal with injustice?")
84
+ print(result.response) # Responds in Gandhi's voice
85
+
86
+ brain.clear_persona() # Revert to default
87
+ ```
88
+
89
+ ### Memory Management
90
+
91
+ ```python
92
+ # Custom memory file location
93
+ brain = BrainWrapper(provider="gemini", memory_path="./my_memory.json")
94
+
95
+ # Clear all stored memories
96
+ brain.clear_memory()
97
+ ```
98
+
99
+ ### API Reference
100
+
101
+ | Class / Method | Description |
102
+ |:---|:---|
103
+ | `BrainWrapper(provider, model_name, memory_path)` | Create a Brain instance |
104
+ | `.think(input) → BrainResult` | Process input through the 5-agent pipeline |
105
+ | `.load_persona(filepath)` | Load a persona from `.txt` or `.pdf` |
106
+ | `.clear_persona()` | Remove the active persona |
107
+ | `.clear_memory()` | Erase all long-term memories |
108
+ | `.persona_active` | `bool` — is a persona loaded? |
109
+ | `.persona_name` | Name of the active persona |
110
+ | `BrainResult.response` | Final synthesized response |
111
+ | `BrainResult.agent_signals` | `dict` of each agent's raw output |
112
+ | `BrainResult.sensory / .memory / .logic / .emotional` | Shortcut accessors |
113
+
114
+ See [`examples/`](examples/) for complete usage scripts.
115
+
116
+ ---
117
+
118
+ ## 🖥️ Development Setup
119
+
120
+ ### Clone & Install
121
+
122
+ ```bash
123
+ git clone https://github.com/shivamtyagi18/BRAIN.git
124
+ cd BRAIN
125
+ pip install -e ".[web,dev]"
126
+ ```
127
+
128
+ ### Configure (Optional)
129
+
130
+ Create a `.env` file in the project root for cloud providers:
131
+
132
+ ```env
133
+ # Only needed if using Gemini or OpenAI
134
+ GOOGLE_API_KEY=your_key_here
135
+ OPENAI_API_KEY=your_key_here
136
+ ```
137
+
138
+ > **No API key needed for Ollama** — runs entirely on your local machine.
139
+
140
+ ### Run
141
+
142
+ #### Web UI
143
+ ```bash
144
+ python -m brain_system.app
145
+ ```
146
+ Open **http://localhost:5001** in your browser.
147
+
148
+ #### Command Line
149
+ ```bash
150
+ brain-cli
151
+ ```
152
+
153
+ ## 🖥️ Web Interface
154
+
155
+ The web UI features:
156
+ - **Provider selection** — choose Gemini, OpenAI, or Ollama at startup
157
+ - **Persona upload** — drag & drop a `.txt` or `.pdf` biography
158
+ - **Live chat** — dark-mode interface with agent activity indicators
159
+ - **Agent transparency** — expand each agent's internal reasoning with "Show agent signals"
160
+ - **Mid-conversation persona switching** — change or clear persona without restarting
161
+ - **New Chat** — full reset button to start fresh
162
+ - **Clear Memory** — wipe stored memories without restarting
163
+
164
+ ## 🤖 Supported LLM Providers
165
+
166
+ | Provider | Requirements | Best For |
167
+ |:---------|:-------------|:---------|
168
+ | **Ollama** | [Ollama](https://ollama.ai) installed locally | Privacy, offline use, no cost |
169
+ | **Gemini** | `GOOGLE_API_KEY` in `.env` | High-quality responses |
170
+ | **OpenAI** | `OPENAI_API_KEY` in `.env` | GPT-4 class models |
171
+
172
+ ### Using Ollama (Local)
173
+
174
+ ```bash
175
+ # Install Ollama, then pull a model:
176
+ ollama pull mistral
177
+
178
+ # For uncensored output, try:
179
+ ollama pull dolphin-mistral
180
+ ```
181
+
182
+ ## 📁 Project Structure
183
+
184
+ ```
185
+ brain-system/
186
+ ├── pyproject.toml # Package config & dependencies
187
+ ├── run.sh # Single-command launcher
188
+ ├── examples/
189
+ │ ├── basic_usage.py # Minimal library usage
190
+ │ ├── persona_mode.py # Persona loading example
191
+ │ └── custom_provider.py # Provider switching example
192
+ └── brain_system/
193
+ ├── __init__.py # Public API exports
194
+ ├── wrapper.py # BrainWrapper — developer entry point
195
+ ├── app.py # Flask web server (optional)
196
+ ├── main.py # CLI entry point
197
+ ├── agents/
198
+ │ ├── base_agent.py # Abstract base with persona injection
199
+ │ ├── sensory_agent.py # Input parsing (Thalamus)
200
+ │ ├── memory_agent.py # Context retrieval (Hippocampus)
201
+ │ ├── emotional_agent.py # Sentiment analysis (Amygdala)
202
+ │ ├── logic_agent.py # Reasoning (Frontal Lobe)
203
+ │ └── executive_agent.py # Decision synthesis (PFC)
204
+ ├── core/
205
+ │ ├── orchestrator.py # LangGraph workflow engine
206
+ │ ├── llm_interface.py # Multi-provider LLM factory
207
+ │ ├── memory_store.py # Persistent memory (JSON)
208
+ │ ├── document_loader.py # TXT/PDF document ingestion
209
+ │ └── persona.py # Persona extraction & injection
210
+ └── web/
211
+ ├── templates/index.html # Chat interface
212
+ └── static/
213
+ ├── css/style.css # Dark-mode theme
214
+ └── js/app.js # Frontend logic
215
+ ```
216
+
217
+ ## 🔧 Architecture Highlights
218
+
219
+ - **LangGraph Orchestration** — Agents run as nodes in a compiled state graph with parallel execution for Memory, Logic, and Emotional processing
220
+ - **Modular LLM Factory** — Swap providers with a single parameter; no code changes needed
221
+ - **Dual Memory** — Short-term (conversation context) + Long-term (persistent JSON store with keyword retrieval)
222
+ - **Persona Injection** — Role-specific context: each agent gets *different* aspects of the persona profile tailored to its function
223
+
224
+ ## 🤝 Contributing
225
+
226
+ Contributions are welcome! Some ideas:
227
+
228
+ - **Vector memory** — Replace JSON keyword search with embedding-based retrieval
229
+ - **Additional agents** — Add a Creativity Agent, Social Agent, or Moral Reasoning Agent
230
+ - **Streaming responses** — Real-time token streaming in the web UI
231
+ - **Multi-turn persona** — Let the persona evolve based on the conversation
232
+ - **Voice interface** — Add speech-to-text input and text-to-speech output
233
+
234
+ ## 📝 License
235
+
236
+ MIT License — see [LICENSE](LICENSE) for details.
237
+
238
+ ---
239
+
240
+ <div align="center">
241
+ <i>Built with 🧠 by mapping neuroscience to multi-agent AI</i>
242
+ </div>
@@ -0,0 +1,29 @@
1
"""Brain System — a multi-agent cognitive architecture built on LangGraph.

Five specialised agents, each modelled on a region of the human brain,
cooperate to analyse input and produce a synthesised response.

Quick start::

    from brain_system import BrainWrapper

    brain = BrainWrapper(provider="openai")
    result = brain.think("What is justice?")
    print(result.response)
"""

__version__ = "0.1.0"

# Re-export the public API at package level so callers can simply write
# ``from brain_system import BrainWrapper``.  Import order mirrors the
# dependency chain (wrapper -> orchestrator -> factory -> base agent).
from brain_system.wrapper import BrainWrapper, BrainResult  # noqa: F401
from brain_system.core.orchestrator import BrainOrchestrator  # noqa: F401
from brain_system.core.llm_interface import LLMFactory  # noqa: F401
from brain_system.agents.base_agent import BaseAgent  # noqa: F401

__all__ = [
    "BrainWrapper",
    "BrainResult",
    "BrainOrchestrator",
    "LLMFactory",
    "BaseAgent",
    "__version__",
]
@@ -0,0 +1,5 @@
1
"""Entry point for ``python -m brain_system`` — starts the web server."""
from brain_system.app import run_server

# Standard script guard: only launch when executed as the main module.
if __name__ == "__main__":
    run_server()
File without changes
@@ -0,0 +1,37 @@
1
+
2
+ from abc import ABC, abstractmethod
3
+ from typing import Any, Dict, List
4
+ from langchain_core.messages import HumanMessage, SystemMessage
5
+ from ..core.llm_interface import LLMFactory
6
+
7
class BaseAgent(ABC):
    """Abstract base class for all brain agents.

    Concrete agents (sensory, memory, logic, emotional, executive)
    subclass this and implement :meth:`process`.  The base class owns
    the LLM handle and optionally carries persona context that is
    prepended to every system prompt.
    """

    def __init__(self, name: str, role: str, provider: str = "gemini",
                 model_name: str | None = None):
        """Create an agent.

        Args:
            name: Human-readable agent name.
            role: Short description of the agent's function.
            provider: LLM provider key understood by ``LLMFactory``
                (e.g. ``"gemini"``, ``"openai"``, ``"ollama"``).
            model_name: Optional provider-specific model identifier;
                ``None`` lets the factory choose its default.
        """
        self.name = name
        self.role = role
        # Injected by the orchestrator when a persona is active; an
        # empty string means "no persona".
        self.persona_context: str = ""
        self.llm = LLMFactory.create_llm(provider=provider, model_name=model_name)

    @abstractmethod
    def process(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Process the input dictionary and return a result dictionary.

        Must be implemented by subclasses.
        """

    def _query_llm(self, system_prompt: str, user_input: str) -> str:
        """Send a system + user message pair to the LLM and return its text.

        When persona context is set, it is prepended to the system prompt
        so the model answers in character.
        """
        full_prompt = system_prompt
        if self.persona_context:
            full_prompt = self.persona_context + "\n\n" + system_prompt

        messages = [
            SystemMessage(content=full_prompt),
            HumanMessage(content=user_input),
        ]
        response = self.llm.invoke(messages)
        return response.content
+