limbiq 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
limbiq-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Dimuthu
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
limbiq-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,128 @@
1
+ Metadata-Version: 2.4
2
+ Name: limbiq
3
+ Version: 0.1.0
4
+ Summary: Neurotransmitter-inspired adaptive learning layer for LLMs
5
+ Author-email: Dimuthu <dimuthu@example.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/deBilla/limbiq
8
+ Project-URL: Repository, https://github.com/deBilla/limbiq
9
+ Project-URL: Issues, https://github.com/deBilla/limbiq/issues
10
+ Keywords: llm,memory,adaptive-learning,neurotransmitter,context
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.10
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Programming Language :: Python :: 3.13
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Requires-Python: >=3.10
21
+ Description-Content-Type: text/markdown
22
+ License-File: LICENSE
23
+ Requires-Dist: sentence-transformers>=2.2.0
24
+ Requires-Dist: numpy>=1.24.0
25
+ Provides-Extra: dev
26
+ Requires-Dist: pytest>=7.0; extra == "dev"
27
+ Requires-Dist: pytest-cov>=4.0; extra == "dev"
28
+ Dynamic: license-file
29
+
30
+ # Limbiq
31
+
32
+ Neurotransmitter-inspired adaptive learning layer for LLMs.
33
+
34
+ Limbiq makes any LLM appear to learn and adapt across conversations — without touching a single weight. It sits between the user and the LLM, modifying what the model sees through discrete signal types inspired by human brain chemistry (v0.1 ships two: dopamine and GABA).
35
+
36
+ ```
37
+ User → Limbiq → Modified Context → Any LLM → Response → Limbiq observes → Loop
38
+ ```
39
+
40
+ ## Installation
41
+
42
+ ```bash
43
+ pip install limbiq
44
+ ```
45
+
46
+ ## Quick Start
47
+
48
+ ```python
49
+ from limbiq import Limbiq
50
+
51
+ # Initialize
52
+ lq = Limbiq(
53
+ store_path="./neuro_data",
54
+ user_id="dimuthu",
55
+ )
56
+
57
+ # Before sending to LLM — get enriched context
58
+ result = lq.process(
59
+ message="What's my wife's name?",
60
+ conversation_history=[
61
+ {"role": "user", "content": "Hi there"},
62
+ {"role": "assistant", "content": "Hello! How can I help?"},
63
+ ],
64
+ )
65
+
66
+ # Inject result.context into your system prompt
67
+ messages = [
68
+ {"role": "system", "content": f"You are a helpful assistant.\n\n{result.context}"},
69
+ {"role": "user", "content": "What's my wife's name?"},
70
+ ]
71
+ response = my_llm(messages) # Any LLM
72
+
73
+ # After getting response — let Limbiq observe and learn
74
+ lq.observe("What's my wife's name?", response)
75
+
76
+ # End session — triggers memory compression
77
+ lq.end_session()
78
+ ```
79
+
80
+ ## Signals (v0.1)
81
+
82
+ ### Dopamine — "This matters, remember it"
83
+
84
+ Fires when the user shares personal info, corrects the model, or gives positive feedback. Tagged memories are **always** included in context.
85
+
86
+ ```python
87
+ lq.dopamine("User's wife is named Prabhashi")
88
+ ```
89
+
90
+ ### GABA — "Suppress this, let it fade"
91
+
92
+ Fires when memories are denied, contradicted, or go stale. Suppression is soft — memories can be restored.
93
+
94
+ ```python
95
+ lq.gaba(memory_id="abc123")
96
+ lq.restore_memory("abc123") # Undo suppression
97
+ ```
98
+
99
+ ### Corrections
100
+
101
+ Combines both signals — stores new info as priority, suppresses the old.
102
+
103
+ ```python
104
+ lq.correct("User works at Bitsmedia, not Google")
105
+ ```
106
+
107
+ ## Inspection
108
+
109
+ ```python
110
+ lq.get_stats() # Memory counts per tier
111
+ lq.get_signal_log() # Full history of signals fired
112
+ lq.get_priority_memories() # All dopamine-tagged memories
113
+ lq.get_suppressed() # All GABA-suppressed memories
114
+ lq.export_state() # Full JSON export for debugging
115
+ ```
116
+
117
+ ## How It Works
118
+
119
+ - **LLM-agnostic** — works with any LLM (OpenAI, Anthropic, Ollama, llama.cpp, etc.)
120
+ - **Zero weight modification** — all adaptation through context manipulation
121
+ - **SQLite persistence** — memories survive across sessions
122
+ - **Semantic search** — uses sentence-transformers for embedding-based retrieval (falls back to TF-IDF if not installed)
123
+ - **Transparent** — every signal is logged with trigger, timestamp, and effect
124
+ - **Reversible** — suppressed memories can be restored, nothing is permanently destructive
125
+
126
+ ## License
127
+
128
+ MIT
limbiq-0.1.0/README.md ADDED
@@ -0,0 +1,99 @@
1
+ # Limbiq
2
+
3
+ Neurotransmitter-inspired adaptive learning layer for LLMs.
4
+
5
+ Limbiq makes any LLM appear to learn and adapt across conversations — without touching a single weight. It sits between the user and the LLM, modifying what the model sees through discrete signal types inspired by human brain chemistry (v0.1 ships two: dopamine and GABA).
6
+
7
+ ```
8
+ User → Limbiq → Modified Context → Any LLM → Response → Limbiq observes → Loop
9
+ ```
10
+
11
+ ## Installation
12
+
13
+ ```bash
14
+ pip install limbiq
15
+ ```
16
+
17
+ ## Quick Start
18
+
19
+ ```python
20
+ from limbiq import Limbiq
21
+
22
+ # Initialize
23
+ lq = Limbiq(
24
+ store_path="./neuro_data",
25
+ user_id="dimuthu",
26
+ )
27
+
28
+ # Before sending to LLM — get enriched context
29
+ result = lq.process(
30
+ message="What's my wife's name?",
31
+ conversation_history=[
32
+ {"role": "user", "content": "Hi there"},
33
+ {"role": "assistant", "content": "Hello! How can I help?"},
34
+ ],
35
+ )
36
+
37
+ # Inject result.context into your system prompt
38
+ messages = [
39
+ {"role": "system", "content": f"You are a helpful assistant.\n\n{result.context}"},
40
+ {"role": "user", "content": "What's my wife's name?"},
41
+ ]
42
+ response = my_llm(messages) # Any LLM
43
+
44
+ # After getting response — let Limbiq observe and learn
45
+ lq.observe("What's my wife's name?", response)
46
+
47
+ # End session — triggers memory compression
48
+ lq.end_session()
49
+ ```
50
+
51
+ ## Signals (v0.1)
52
+
53
+ ### Dopamine — "This matters, remember it"
54
+
55
+ Fires when the user shares personal info, corrects the model, or gives positive feedback. Tagged memories are **always** included in context.
56
+
57
+ ```python
58
+ lq.dopamine("User's wife is named Prabhashi")
59
+ ```
60
+
61
+ ### GABA — "Suppress this, let it fade"
62
+
63
+ Fires when memories are denied, contradicted, or go stale. Suppression is soft — memories can be restored.
64
+
65
+ ```python
66
+ lq.gaba(memory_id="abc123")
67
+ lq.restore_memory("abc123") # Undo suppression
68
+ ```
69
+
70
+ ### Corrections
71
+
72
+ Combines both signals — stores new info as priority, suppresses the old.
73
+
74
+ ```python
75
+ lq.correct("User works at Bitsmedia, not Google")
76
+ ```
77
+
78
+ ## Inspection
79
+
80
+ ```python
81
+ lq.get_stats() # Memory counts per tier
82
+ lq.get_signal_log() # Full history of signals fired
83
+ lq.get_priority_memories() # All dopamine-tagged memories
84
+ lq.get_suppressed() # All GABA-suppressed memories
85
+ lq.export_state() # Full JSON export for debugging
86
+ ```
87
+
88
+ ## How It Works
89
+
90
+ - **LLM-agnostic** — works with any LLM (OpenAI, Anthropic, Ollama, llama.cpp, etc.)
91
+ - **Zero weight modification** — all adaptation through context manipulation
92
+ - **SQLite persistence** — memories survive across sessions
93
+ - **Semantic search** — uses sentence-transformers for embedding-based retrieval (falls back to TF-IDF if not installed)
94
+ - **Transparent** — every signal is logged with trigger, timestamp, and effect
95
+ - **Reversible** — suppressed memories can be restored, nothing is permanently destructive
96
+
97
+ ## License
98
+
99
+ MIT
@@ -0,0 +1,120 @@
1
+ """
2
+ Limbiq -- Neurotransmitter-inspired adaptive learning for LLMs.
3
+
4
+ Usage:
5
+ from limbiq import Limbiq
6
+
7
+ lq = Limbiq(store_path="./data", user_id="user1")
8
+ result = lq.process("Hello, my name is Dimuthu")
9
+ # ... send to LLM with result.context injected ...
10
+ lq.observe("Hello, my name is Dimuthu", llm_response)
11
+ """
12
+
13
+ from limbiq.core import LimbiqCore
14
+ from limbiq.types import (
15
+ ProcessResult,
16
+ SignalEvent,
17
+ Memory,
18
+ MemoryTier,
19
+ SignalType,
20
+ SuppressionReason,
21
+ )
22
+
23
+
24
class Limbiq:
    """Main public interface for Limbiq.

    A thin facade over :class:`LimbiqCore` exposing three groups of
    operations: the process/observe conversation loop, explicit
    dopamine/GABA signals, and inspection helpers.
    """

    def __init__(
        self,
        store_path: str = "./neuro_data",
        user_id: str = "default",
        embedding_model: str = "all-MiniLM-L6-v2",
        llm_fn=None,
    ):
        """Create a Limbiq instance.

        Args:
            store_path: Directory where the memory store persists its data.
            user_id: Namespaces stored memories per user.
            embedding_model: Embedding model name passed to the embedding
                engine.
            llm_fn: Optional ``prompt -> str`` callable used for LLM-based
                memory compression; when omitted, an extractive fallback is
                used instead.
        """
        self._core = LimbiqCore(store_path, user_id, embedding_model, llm_fn)

    def process(
        self, message: str, conversation_history: list[dict] | None = None
    ) -> ProcessResult:
        """Process a user message and return enriched context for the LLM."""
        return self._core.process(message, conversation_history)

    def observe(
        self, message: str, response: str, feedback: str | None = None
    ) -> list[SignalEvent]:
        """Observe a completed exchange and fire appropriate signals."""
        return self._core.observe(message, response, feedback)

    def start_session(self):
        """Start a new conversation session."""
        self._core.start_session()

    def end_session(self) -> dict:
        """End session and run compression/cleanup; returns summary counts."""
        return self._core.end_session()

    # -- Explicit signals --

    def dopamine(self, content: str):
        """Manually tag *content* as a high-priority, always-included memory."""
        embedding = self._core.embeddings.embed(content)
        self._core.store.store(
            content=content,
            tier=MemoryTier.PRIORITY,
            confidence=1.0,
            is_priority=True,
            source="manual_dopamine",
            metadata={},
            embedding=embedding,
        )

    def gaba(self, memory_id: str):
        """Manually suppress a memory (soft; reversible via restore_memory)."""
        self._core.store.suppress(memory_id, SuppressionReason.MANUAL)

    def correct(self, correction: str):
        """Apply a correction -- dopamine for the new info, GABA for the old.

        Stores *correction* as a priority memory, then suppresses up to 5 of
        the most similar existing memories (excluding the one just stored).
        NOTE(review): how loosely-related a suppressed memory can be depends
        on whatever similarity cutoff ``store.search`` applies -- confirm.
        """
        self.dopamine(correction)

        embedding = self._core.embeddings.embed(correction)
        related = self._core.store.search(embedding, top_k=5, include_suppressed=False)
        for m in related:
            # Skip the priority memory we just stored for this correction.
            if m.content != correction:
                self._core.store.suppress(m.id, SuppressionReason.CONTRADICTED)

    # -- Inspection --

    def get_stats(self) -> dict:
        """Return memory statistics."""
        return self._core.store.get_stats()

    def get_signal_log(self, limit: int = 50) -> list[SignalEvent]:
        """Return up to *limit* most recent signal events."""
        return self._core.signal_log.get_recent(limit)

    def get_priority_memories(self) -> list[Memory]:
        """Return all dopamine-tagged priority memories."""
        return self._core.store.get_priority_memories()

    def get_suppressed(self) -> list[Memory]:
        """Return all GABA-suppressed memories."""
        return self._core.store.get_suppressed()

    def restore_memory(self, memory_id: str):
        """Undo a GABA suppression."""
        self._core.store.restore(memory_id)

    def export_state(self) -> dict:
        """Export full state as JSON for debugging."""
        return self._core.store.export_all()
110
+
111
+
112
+ __all__ = [
113
+ "Limbiq",
114
+ "ProcessResult",
115
+ "SignalEvent",
116
+ "Memory",
117
+ "MemoryTier",
118
+ "SignalType",
119
+ "SuppressionReason",
120
+ ]
File without changes
@@ -0,0 +1,76 @@
1
+ """
2
+ Memory Compressor -- the lossy compression pipeline.
3
+
4
+ Full conversations -> gists (mid-term) -> abstract facts (long-term)
5
+ """
6
+
7
+
8
class MemoryCompressor:
    """Lossy memory compression pipeline.

    Full conversations -> standalone fact strings. Uses ``llm_fn`` (a
    ``prompt -> str`` callable) for extraction when provided; otherwise
    falls back to a keyword-based extractive heuristic over user messages.
    """

    def __init__(self, llm_fn=None):
        self.llm_fn = llm_fn

    def compress_conversation(self, messages: list[dict]) -> list[str]:
        """Compress *messages* into a list of self-contained fact strings."""
        if self.llm_fn:
            return self._llm_compress(messages)
        return self._extractive_compress(messages)

    def _llm_compress(self, messages: list[dict]) -> list[str]:
        """Ask the LLM for one fact per line; return [] when it answers NONE."""
        conversation_text = "\n".join(
            f"{'User' if m['role'] == 'user' else 'Assistant'}: {m['content']}"
            for m in messages
            if m.get("role") != "system"
        )

        prompt = (
            "Extract individual, self-contained facts from this conversation.\n"
            "Each fact should be on its own line. Each fact should be independently searchable.\n\n"
            "Rules:\n"
            "- One fact per line, no bullets or numbering\n"
            "- Each fact must be understandable without the others\n"
            "- Include personal details the user shared (name, job, preferences, family)\n"
            "- Include key topics discussed and conclusions reached\n"
            "- Skip greetings, filler, and meta-conversation\n"
            "- If no meaningful facts exist, respond with NONE\n\n"
            f"Conversation:\n{conversation_text}"
        )

        result = self.llm_fn(prompt)

        # BUG FIX: the previous check was `"NONE" in result.upper()`, which
        # discarded EVERY fact whenever any fact merely contained the
        # substring "none" (e.g. "nonetheless", "phone: none listed").
        # Only treat the output as empty when the model actually answered
        # NONE and nothing else.
        if result.strip().upper() == "NONE":
            return []

        # Drop very short lines (noise, bullets, stray markers).
        facts = [f.strip() for f in result.strip().split("\n") if len(f.strip()) > 10]
        return facts

    def _extractive_compress(self, messages: list[dict]) -> list[str]:
        """Fallback: extract key sentences from user messages."""
        # Phrases signalling personal information worth keeping.
        personal_indicators = [
            "my name",
            "i work",
            "i'm a",
            "i live",
            "i prefer",
            "my wife",
            "my husband",
            "my partner",
            "i'm from",
            "i'm based",
            "i love",
            "i hate",
            "my kid",
            "my email",
            "my phone",
        ]

        facts = []
        for m in messages:
            if m.get("role") == "user":
                content = m["content"].strip()
                # Crude sentence split: normalize '!'/'?' to '.' then split.
                for sentence in content.replace("!", ".").replace("?", ".").split("."):
                    sentence = sentence.strip()
                    # Keep only sentences long enough to be meaningful that
                    # match at least one personal-information phrase.
                    if len(sentence) > 15 and any(
                        p in sentence.lower() for p in personal_indicators
                    ):
                        facts.append(sentence)
        return facts
File without changes
@@ -0,0 +1,58 @@
1
+ """
2
+ Context Builder -- assembles the enriched context string.
3
+
4
+ This is what gets injected into the system prompt.
5
+ """
6
+
7
+ from limbiq.types import Memory
8
+
9
+
10
class ContextBuilder:
    """Assembles the enriched context string injected into the system prompt.

    Renders priority memories plus relevant (non-suppressed, non-duplicate)
    memories into a single ``<memory_context>`` block.
    """

    def __init__(self, max_tokens: int = 1500):
        # NOTE(review): max_tokens is stored but never consulted in this
        # class -- presumably reserved for future truncation. Confirm.
        self.max_tokens = max_tokens

    def build(
        self,
        priority_memories: list[Memory],
        relevant_memories: list[Memory],
        suppressed_ids: set[str],
    ) -> str:
        """Render memories into a context string; "" when nothing to show.

        Relevant memories that are suppressed, or that duplicate a priority
        memory by id, are omitted.
        """
        parts: list[str] = []

        if priority_memories:
            bullet_lines = "\n".join(
                "  - " + m.content for m in priority_memories
            )
            parts.append(
                "[IMPORTANT -- known facts about this user]\n" + bullet_lines
            )

        skip_ids = {m.id for m in priority_memories} | set(suppressed_ids)
        usable = [m for m in relevant_memories if m.id not in skip_ids]
        if usable:
            memory_lines = "\n".join(
                f"  - [{m.confidence:.0%} confidence] {m.content}"
                for m in usable
            )
            parts.append(
                "[Relevant memories from previous conversations]\n" + memory_lines
            )

        if not parts:
            return ""

        context = "\n\n".join(parts)

        return (
            "<memory_context>\n"
            "The following is stored information from previous interactions with this user.\n"
            "Use it naturally when relevant. If the user asks about something listed here,\n"
            "reference it confidently. If something is NOT listed here, say you don't know\n"
            "rather than guessing.\n\n"
            f"{context}\n"
            "</memory_context>"
        )
@@ -0,0 +1,152 @@
1
+ """
2
+ Limbiq Core -- the main orchestrator.
3
+
4
+ Coordinates detection, signals, storage, retrieval, and context building.
5
+ """
6
+
7
+ import uuid
8
+
9
+ from limbiq.types import (
10
+ ProcessResult,
11
+ SignalEvent,
12
+ SignalType,
13
+ MemoryTier,
14
+ SuppressionReason,
15
+ )
16
+ from limbiq.store.memory_store import MemoryStore
17
+ from limbiq.store.embeddings import EmbeddingEngine
18
+ from limbiq.store.signal_log import SignalLog
19
+ from limbiq.context.builder import ContextBuilder
20
+ from limbiq.compression.compressor import MemoryCompressor
21
+ from limbiq.signals.dopamine import DopamineSignal
22
+ from limbiq.signals.gaba import GABASignal
23
+
24
+
25
class LimbiqCore:
    """Main orchestrator.

    Wires detection, signals, storage, retrieval and context building
    together behind the public ``Limbiq`` facade.
    """

    def __init__(
        self,
        store_path: str,
        user_id: str,
        embedding_model: str,
        llm_fn=None,
    ):
        self.store = MemoryStore(store_path, user_id)
        self.embeddings = EmbeddingEngine(embedding_model)
        self.compressor = MemoryCompressor(llm_fn)
        self.context_builder = ContextBuilder()
        self.signal_log = SignalLog(self.store.db)

        # Detectors run in this order on every observed exchange.
        self.signals = [
            DopamineSignal(),
            GABASignal(),
        ]

        self._current_session_id = None
        self._conversation_buffer: list[dict] = []

    def process(
        self, message: str, conversation_history: list[dict] | None = None
    ) -> ProcessResult:
        """Retrieve relevant memories for *message* and build enriched context.

        Args:
            message: Incoming user message; drives semantic retrieval.
            conversation_history: Accepted for API symmetry but currently
                unused -- retrieval is based on *message* alone.
                TODO(review): use it for retrieval or drop it from the docs.

        Returns:
            ProcessResult whose ``context`` should be injected into the
            system prompt.
        """
        query_embedding = self.embeddings.embed(message)

        relevant = self.store.search(
            query_embedding, top_k=10, include_suppressed=False
        )

        priority = self.store.get_priority_memories()

        suppressed_ids = {m.id for m in self.store.get_suppressed()}

        # Record retrieval so stale-memory detection in end_session() knows
        # which memories are actually being used.
        for m in relevant:
            self.store.increment_access(m.id)

        context = self.context_builder.build(priority, relevant, suppressed_ids)

        return ProcessResult(
            context=context,
            signals_fired=[],
            memories_retrieved=len(relevant),
            priority_count=len(priority),
            suppressed_count=len(suppressed_ids),
        )

    def observe(
        self, message: str, response: str, feedback: str | None = None
    ) -> list[SignalEvent]:
        """Observe a completed exchange: fire signals, store the message.

        Returns the list of SignalEvents that fired.
        """
        events = []

        self._conversation_buffer.append({"role": "user", "content": message})
        self._conversation_buffer.append({"role": "assistant", "content": response})

        # Embed once and reuse: previously the same message was embedded
        # twice (search + storage), doubling the embedding cost per turn.
        message_embedding = self.embeddings.embed(message)

        existing_memories = self.store.search(message_embedding, top_k=5)

        for signal in self.signals:
            detected = signal.detect(
                message=message,
                response=response,
                feedback=feedback,
                memories=existing_memories,
            )
            for event in detected:
                signal.apply(event, self.store, self.embeddings)
                self.signal_log.log(event)
                events.append(event)

        # Very short messages ("ok", "thanks") are not worth remembering.
        if len(message.strip()) > 20:
            self.store.store(
                content=f"User said: {message}",
                tier=MemoryTier.SHORT,
                confidence=0.8,
                is_priority=False,
                source="conversation",
                metadata={},
                embedding=message_embedding,
            )

        return events

    def end_session(self) -> dict:
        """End the session: compress, age, suppress stale, purge suppressed.

        Returns:
            Counts dict {"compressed", "aged", "suppressed", "deleted"}.
            NOTE(review): "aged" is never incremented here -- confirm whether
            ``age_all()`` should report a count.
        """
        results = {"compressed": 0, "aged": 0, "suppressed": 0, "deleted": 0}

        if self._conversation_buffer:
            facts = self.compressor.compress_conversation(self._conversation_buffer)
            for fact in facts:
                embedding = self.embeddings.embed(fact)
                self.store.store(
                    content=fact,
                    tier=MemoryTier.MID,
                    confidence=0.7,
                    is_priority=False,
                    source="compression",
                    metadata={"source_turns": len(self._conversation_buffer)},
                    embedding=embedding,
                )
                results["compressed"] += 1

            # _current_session_id may be None if start_session() was never
            # called -- presumably the store tolerates that; TODO confirm.
            self.store.store_conversation(
                self._conversation_buffer, self._current_session_id
            )
            self._conversation_buffer = []

        self.store.age_all()

        # Suppress memories never retrieved across many sessions.
        stale = self.store.get_stale(min_sessions=10)
        for m in stale:
            gaba_event = SignalEvent(
                signal_type=SignalType.GABA,
                trigger="never_accessed",
                details={"memory_id": m.id, "sessions_old": m.session_count},
                memory_ids_affected=[m.id],
            )
            self.store.suppress(m.id, SuppressionReason.NEVER_ACCESSED)
            self.signal_log.log(gaba_event)
            results["suppressed"] += 1

        results["deleted"] = self.store.delete_old_suppressed(min_sessions=30)

        return results

    def start_session(self):
        """Begin a fresh session: new short id, empty conversation buffer."""
        self._current_session_id = str(uuid.uuid4())[:8]
        self._conversation_buffer = []
File without changes
@@ -0,0 +1,28 @@
1
+ """Main signal detector -- analyzes input and determines which signals to fire."""
2
+
3
+ from limbiq.types import Memory, SignalEvent
4
+ from limbiq.signals.base import BaseSignal
5
+
6
+
7
class SignalDetector:
    """Runs every registered signal detector over a single exchange."""

    def __init__(self, signals: list[BaseSignal]):
        self.signals = signals

    def detect_all(
        self,
        message: str,
        response: str | None = None,
        feedback: str | None = None,
        memories: list[Memory] | None = None,
    ) -> list[SignalEvent]:
        """Run all signal detectors and return combined events.

        Args:
            message: The user's message.
            response: The assistant's reply, if available.
            feedback: Optional explicit user feedback on the reply.
            memories: Existing memories related to the message, if any.

        Returns:
            Events from every detector, in registration order.
        """
        events: list[SignalEvent] = []
        for signal in self.signals:
            detected = signal.detect(
                message=message,
                response=response,
                feedback=feedback,
                memories=memories,
            )
            events.extend(detected)
        return events
+ return events