neuroweave-python 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuroweave/__init__.py +14 -0
- neuroweave/api.py +494 -0
- neuroweave/config.py +87 -0
- neuroweave/events.py +203 -0
- neuroweave/extraction/__init__.py +0 -0
- neuroweave/extraction/llm_client.py +123 -0
- neuroweave/extraction/pipeline.py +346 -0
- neuroweave/graph/__init__.py +29 -0
- neuroweave/graph/ingest.py +125 -0
- neuroweave/graph/nl_query.py +305 -0
- neuroweave/graph/query.py +203 -0
- neuroweave/graph/store.py +314 -0
- neuroweave/logging.py +85 -0
- neuroweave/main.py +187 -0
- neuroweave/py.typed +0 -0
- neuroweave/server/__init__.py +0 -0
- neuroweave/server/app.py +185 -0
- neuroweave_python-0.1.0.dist-info/METADATA +336 -0
- neuroweave_python-0.1.0.dist-info/RECORD +22 -0
- neuroweave_python-0.1.0.dist-info/WHEEL +4 -0
- neuroweave_python-0.1.0.dist-info/entry_points.txt +2 -0
- neuroweave_python-0.1.0.dist-info/licenses/LICENSE +191 -0
neuroweave/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"""NeuroWeave — Real-time knowledge graph memory for agentic AI platforms."""

__version__ = "0.1.0"

# Re-export the primary entry points at package level so consumers can write
# `from neuroweave import NeuroWeave` without knowing the submodule layout.
from neuroweave.api import ContextResult, EventType, NeuroWeave, ProcessResult
from neuroweave.graph.query import QueryResult

# Explicit public API (controls `from neuroweave import *`).
__all__ = [
    "ContextResult",
    "EventType",
    "NeuroWeave",
    "ProcessResult",
    "QueryResult",
]
|
neuroweave/api.py
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
1
|
+
"""NeuroWeave public API — the main entry point for library consumers.
|
|
2
|
+
|
|
3
|
+
This module provides the `NeuroWeave` class that agents import and use.
|
|
4
|
+
It wires together the extraction pipeline, graph store, query engines,
|
|
5
|
+
event bus, and optional visualization server behind a clean async API.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
from neuroweave import NeuroWeave
|
|
9
|
+
|
|
10
|
+
async with NeuroWeave(llm_provider="mock") as nw:
|
|
11
|
+
result = await nw.process("My wife Lena loves Malbec")
|
|
12
|
+
context = await nw.get_context("what does my wife like?")
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
import asyncio
|
|
18
|
+
from dataclasses import dataclass, field
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import Any, Awaitable, Callable
|
|
21
|
+
|
|
22
|
+
import uvicorn
|
|
23
|
+
|
|
24
|
+
from neuroweave.config import LLMProvider, LogFormat, NeuroWeaveConfig
|
|
25
|
+
from neuroweave.events import EventBus
|
|
26
|
+
from neuroweave.extraction.llm_client import (
|
|
27
|
+
AnthropicLLMClient,
|
|
28
|
+
LLMClient,
|
|
29
|
+
MockLLMClient,
|
|
30
|
+
)
|
|
31
|
+
from neuroweave.extraction.pipeline import ExtractionPipeline, ExtractionResult
|
|
32
|
+
from neuroweave.graph.ingest import ingest_extraction
|
|
33
|
+
from neuroweave.graph.nl_query import NLQueryPlanner, QueryPlan
|
|
34
|
+
from neuroweave.graph.query import QueryResult, query_subgraph
|
|
35
|
+
from neuroweave.graph.store import GraphEvent, GraphEventType, GraphStore
|
|
36
|
+
from neuroweave.logging import configure_logging, get_logger
|
|
37
|
+
|
|
38
|
+
log = get_logger("api")
|
|
39
|
+
|
|
40
|
+
# Re-export EventType for convenience
|
|
41
|
+
EventType = GraphEventType
|
|
42
|
+
|
|
43
|
+
# Type alias for event handlers
|
|
44
|
+
EventHandler = Callable[[GraphEvent], Awaitable[None]]
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# ---------------------------------------------------------------------------
|
|
48
|
+
# Result types
|
|
49
|
+
# ---------------------------------------------------------------------------
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
@dataclass(frozen=True, slots=True)
|
|
53
|
+
class ProcessResult:
|
|
54
|
+
"""Result of processing a single message through the extraction pipeline.
|
|
55
|
+
|
|
56
|
+
Attributes:
|
|
57
|
+
extraction: Raw extraction result (entities, relations, timing).
|
|
58
|
+
nodes_added: Number of new nodes created in the graph.
|
|
59
|
+
edges_added: Number of new edges created in the graph.
|
|
60
|
+
edges_skipped: Number of edges skipped (unknown entities, etc.).
|
|
61
|
+
"""
|
|
62
|
+
|
|
63
|
+
extraction: ExtractionResult
|
|
64
|
+
nodes_added: int = 0
|
|
65
|
+
edges_added: int = 0
|
|
66
|
+
edges_skipped: int = 0
|
|
67
|
+
|
|
68
|
+
@property
|
|
69
|
+
def entity_count(self) -> int:
|
|
70
|
+
return len(self.extraction.entities)
|
|
71
|
+
|
|
72
|
+
@property
|
|
73
|
+
def relation_count(self) -> int:
|
|
74
|
+
return len(self.extraction.relations)
|
|
75
|
+
|
|
76
|
+
def to_dict(self) -> dict[str, Any]:
|
|
77
|
+
return {
|
|
78
|
+
"entities_extracted": self.entity_count,
|
|
79
|
+
"relations_extracted": self.relation_count,
|
|
80
|
+
"nodes_added": self.nodes_added,
|
|
81
|
+
"edges_added": self.edges_added,
|
|
82
|
+
"edges_skipped": self.edges_skipped,
|
|
83
|
+
"extraction_ms": round(self.extraction.duration_ms, 1),
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
@dataclass(frozen=True, slots=True)
|
|
88
|
+
class ContextResult:
|
|
89
|
+
"""Combined result of processing a message AND querying relevant context.
|
|
90
|
+
|
|
91
|
+
This is the main return type for `get_context()` — the most common
|
|
92
|
+
operation in agent integration.
|
|
93
|
+
|
|
94
|
+
Attributes:
|
|
95
|
+
process: What was extracted from this message.
|
|
96
|
+
relevant: Knowledge graph context relevant to this message.
|
|
97
|
+
plan: The NL query plan used (for debugging/transparency).
|
|
98
|
+
"""
|
|
99
|
+
|
|
100
|
+
process: ProcessResult
|
|
101
|
+
relevant: QueryResult
|
|
102
|
+
plan: QueryPlan | None = None
|
|
103
|
+
|
|
104
|
+
def to_dict(self) -> dict[str, Any]:
|
|
105
|
+
return {
|
|
106
|
+
"process": self.process.to_dict(),
|
|
107
|
+
"relevant": self.relevant.to_dict(),
|
|
108
|
+
"plan": self.plan.to_dict() if self.plan else None,
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
# ---------------------------------------------------------------------------
|
|
113
|
+
# NeuroWeave facade
|
|
114
|
+
# ---------------------------------------------------------------------------
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
class NeuroWeave:
    """The public API for NeuroWeave — knowledge graph memory for AI agents.

    NeuroWeave manages the full lifecycle: extraction pipeline, graph store,
    query engines, event bus, and optional visualization server. Agents
    interact through three main methods:

    - `process(message)` — Extract knowledge from a message, update the graph.
    - `query(...)` — Query the graph (structured or natural language).
    - `get_context(message)` — Process + query in one call (most common).

    Usage:
        # Programmatic construction
        nw = NeuroWeave(llm_provider="mock")
        await nw.start()
        context = await nw.get_context("My wife Lena loves sushi")
        await nw.stop()

        # Context manager (recommended)
        async with NeuroWeave(llm_provider="mock") as nw:
            context = await nw.get_context("My wife Lena loves sushi")

        # From config file
        async with NeuroWeave.from_config("config/default.yaml") as nw:
            ...
    """

    def __init__(
        self,
        *,
        llm_provider: str | None = None,
        llm_model: str | None = None,
        llm_api_key: str | None = None,
        enable_visualization: bool = False,
        server_host: str | None = None,
        server_port: int | None = None,
        log_level: str | None = None,
        log_format: str | None = None,
    ) -> None:
        # Build config from .env / env vars / YAML first, then overlay any
        # explicit kwargs. This ensures .env values are picked up for any
        # parameter not explicitly provided by the caller.
        base = NeuroWeaveConfig.load()
        overrides: dict[str, Any] = {}
        if llm_provider is not None:
            overrides["llm_provider"] = LLMProvider(llm_provider)
        if llm_model is not None:
            overrides["llm_model"] = llm_model
        if llm_api_key is not None:
            overrides["llm_api_key"] = llm_api_key
        if server_host is not None:
            overrides["server_host"] = server_host
        if server_port is not None:
            overrides["server_port"] = server_port
        if log_level is not None:
            overrides["log_level"] = log_level
        if log_format is not None:
            overrides["log_format"] = LogFormat(log_format)

        config = base.model_copy(update=overrides) if overrides else base
        self._init_runtime_state(config, enable_visualization)

    def _init_runtime_state(
        self, config: NeuroWeaveConfig, enable_visualization: bool
    ) -> None:
        """Set every instance attribute to its pre-start state.

        Single source of truth for instance state, shared by `__init__` and
        `from_config` so the two construction paths cannot drift apart.
        """
        self._config = config
        self._enable_visualization = enable_visualization

        # Core components (created in start())
        self._store: GraphStore | None = None
        self._pipeline: ExtractionPipeline | None = None
        self._event_bus: EventBus | None = None
        self._nl_planner: NLQueryPlanner | None = None

        # Visualization server background task
        self._server_task: asyncio.Task | None = None

        # Lifecycle state
        self._started = False

    @classmethod
    def from_config(
        cls, path: str | Path, *, enable_visualization: bool = False,
    ) -> NeuroWeave:
        """Create a NeuroWeave instance from a YAML config file.

        The YAML file is loaded first, then `.env` and `NEUROWEAVE_*`
        environment variables are overlaid (env vars win).

        Args:
            path: Path to the YAML configuration file.
            enable_visualization: Whether to start the graph visualizer.

        Returns:
            NeuroWeave instance (not yet started — call start() or use as context manager).
        """
        config = NeuroWeaveConfig.load(Path(path))
        # Bypass __init__ (which would load the *default* config) and install
        # the explicitly requested config instead; all attribute setup is
        # delegated to the shared helper.
        instance = cls.__new__(cls)
        instance._init_runtime_state(config, enable_visualization)
        return instance

    # -- Lifecycle ----------------------------------------------------------

    async def start(self) -> None:
        """Initialize all components and optionally start the visualization server.

        This must be called before using process/query/get_context.
        Prefer using the async context manager instead of calling start/stop manually.
        """
        if self._started:
            return  # idempotent: second start is a no-op

        configure_logging(self._config)

        # Core components
        llm_client = _create_llm_client(self._config)
        self._store = GraphStore()
        self._pipeline = ExtractionPipeline(llm_client)
        self._event_bus = EventBus()
        self._nl_planner = NLQueryPlanner(llm_client, self._store)

        # Wire event bus to graph store so mutations emit events
        self._store.set_event_bus(self._event_bus)

        # Optional visualization server
        if self._enable_visualization:
            await self._start_visualization_server()

        self._started = True
        log.info(
            "neuroweave.started",
            llm_provider=self._config.llm_provider.value,
            visualization=self._enable_visualization,
        )

    async def stop(self) -> None:
        """Gracefully shut down all components.

        Safe to call multiple times.
        """
        if not self._started:
            return

        # Cancel the background uvicorn task (if visualization was enabled)
        # and wait for its cancellation to settle.
        if self._server_task is not None:
            self._server_task.cancel()
            try:
                await self._server_task
            except asyncio.CancelledError:
                pass
            self._server_task = None

        self._started = False
        log.info("neuroweave.stopped")

    async def __aenter__(self) -> NeuroWeave:
        await self.start()
        return self

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        await self.stop()

    # -- Write path: process messages ---------------------------------------

    async def process(self, message: str) -> ProcessResult:
        """Extract knowledge from a message and update the graph.

        Args:
            message: A user's conversational message.

        Returns:
            ProcessResult with extraction details and graph delta.

        Raises:
            RuntimeError: If NeuroWeave hasn't been started.
        """
        self._ensure_started()

        extraction = await self._pipeline.extract(message)  # type: ignore[union-attr]
        stats = ingest_extraction(self._store, extraction)  # type: ignore[arg-type]

        return ProcessResult(
            extraction=extraction,
            nodes_added=stats["nodes_added"],
            edges_added=stats["edges_added"],
            edges_skipped=stats["edges_skipped"],
        )

    # -- Read path: query the graph -----------------------------------------

    async def query(
        self,
        text_or_entities: str | list[str] | None = None,
        *,
        relations: list[str] | None = None,
        min_confidence: float = 0.0,
        max_hops: int = 1,
    ) -> QueryResult:
        """Query the knowledge graph.

        Auto-detects the query mode:
        - **String input** → Natural language query (LLM translates to graph query).
        - **List input** → Structured query (entity names passed directly).
        - **None** → Whole-graph query.

        Args:
            text_or_entities: Natural language question (str), entity names (list),
                or None for whole-graph.
            relations: Relation types to filter on (structured mode only).
            min_confidence: Minimum edge confidence (structured mode only).
            max_hops: Hop traversal depth (structured mode only).

        Returns:
            QueryResult with matching nodes and edges.
        """
        self._ensure_started()

        if isinstance(text_or_entities, str):
            # NL query path: the planner translates free text to a graph query.
            return await self._nl_planner.query(text_or_entities)  # type: ignore[union-attr]
        else:
            # Structured query path; an empty list means "no entity filter".
            entities = text_or_entities if text_or_entities else None
            return query_subgraph(
                self._store,  # type: ignore[arg-type]
                entities=entities,
                relations=relations,
                min_confidence=min_confidence,
                max_hops=max_hops,
            )

    # -- Combined path: process + query ------------------------------------

    async def get_context(self, message: str) -> ContextResult:
        """Process a message AND query for relevant context — in one call.

        This is the most common operation for agent integration:
        1. Extract entities/relations from the message and update the graph.
        2. Use the NL query planner to find relevant existing knowledge.
        3. Return both results together.

        Args:
            message: A user's conversational message.

        Returns:
            ContextResult with extraction details and relevant graph context.
        """
        self._ensure_started()

        # Step 1: Extract and ingest
        process_result = await self.process(message)

        # Step 2: Query for relevant context using the message as an NL query
        plan = await self._nl_planner.plan(message)  # type: ignore[union-attr]
        relevant = self._nl_planner.execute(plan)  # type: ignore[union-attr]

        return ContextResult(
            process=process_result,
            relevant=relevant,
            plan=plan,
        )

    # -- Event subscription -------------------------------------------------

    def subscribe(
        self,
        handler: EventHandler,
        *,
        event_types: set[GraphEventType] | None = None,
    ) -> None:
        """Register an async callback to receive graph mutation events.

        Args:
            handler: Async function that takes a GraphEvent.
            event_types: Set of event types to filter on. None = all events.

        Raises:
            RuntimeError: If NeuroWeave hasn't been started.
        """
        self._ensure_started()
        self._event_bus.subscribe(handler, event_types=event_types)  # type: ignore[union-attr]

    def unsubscribe(self, handler: EventHandler) -> None:
        """Remove a previously registered event handler.

        Args:
            handler: The same function object passed to subscribe().
        """
        # Intentionally lenient: works even before start()/after stop().
        if self._event_bus is not None:
            self._event_bus.unsubscribe(handler)

    # -- Visualization ------------------------------------------------------

    def create_visualization_app(self) -> Any:
        """Create a FastAPI app for the graph visualizer.

        Use this if you want to mount the visualization alongside your own
        FastAPI routes instead of running it as a standalone server.

        Returns:
            FastAPI application instance.

        Raises:
            RuntimeError: If NeuroWeave hasn't been started.
        """
        self._ensure_started()
        # Imported lazily so the server stack is only required when used.
        from neuroweave.server.app import create_app

        return create_app(self._store, event_bus=self._event_bus)  # type: ignore[arg-type]

    # -- Properties ---------------------------------------------------------

    @property
    def graph(self) -> GraphStore:
        """Direct access to the graph store (for advanced use cases)."""
        self._ensure_started()
        return self._store  # type: ignore[return-value]

    @property
    def event_bus(self) -> EventBus:
        """Direct access to the event bus (for advanced use cases)."""
        self._ensure_started()
        return self._event_bus  # type: ignore[return-value]

    @property
    def is_started(self) -> bool:
        """True between a successful start() and the matching stop()."""
        return self._started

    # -- Internal -----------------------------------------------------------

    def _ensure_started(self) -> None:
        """Raise RuntimeError if start() has not been called yet."""
        if not self._started:
            raise RuntimeError(
                "NeuroWeave is not started. Call await nw.start() or use "
                "'async with NeuroWeave(...) as nw:'"
            )

    async def _start_visualization_server(self) -> None:
        """Start the Cytoscape.js visualization server as a background task."""
        from neuroweave.server.app import create_app

        app = create_app(self._store, event_bus=self._event_bus)  # type: ignore[arg-type]
        # Run uvicorn inside the current event loop rather than blocking on
        # uvicorn.run(); the task is cancelled in stop().
        config = uvicorn.Config(
            app,
            host=self._config.server_host,
            port=self._config.server_port,
            log_level="warning",
            access_log=False,
        )
        server = uvicorn.Server(config)
        self._server_task = asyncio.create_task(server.serve())
        log.info(
            "neuroweave.visualization_started",
            url=f"http://{self._config.server_host}:{self._config.server_port}",
        )
|
|
476
|
+
|
|
477
|
+
|
|
478
|
+
# ---------------------------------------------------------------------------
|
|
479
|
+
# Internal helpers
|
|
480
|
+
# ---------------------------------------------------------------------------
|
|
481
|
+
|
|
482
|
+
|
|
483
|
+
def _create_llm_client(config: NeuroWeaveConfig) -> LLMClient:
    """Create the LLM client matching ``config.llm_provider``.

    Args:
        config: Loaded NeuroWeave configuration.

    Returns:
        A concrete LLMClient implementation.

    Raises:
        ValueError: If the API key is missing for a hosted provider, or the
            configured provider has no client implementation.
    """
    if config.llm_provider == LLMProvider.MOCK:
        return MockLLMClient()
    elif config.llm_provider == LLMProvider.ANTHROPIC:
        if not config.llm_api_key:
            raise ValueError(
                "NEUROWEAVE_LLM_API_KEY must be set when using the anthropic provider."
            )
        return AnthropicLLMClient(api_key=config.llm_api_key, model=config.llm_model)
    else:
        # Reached for providers declared in LLMProvider but without a client
        # implementation (currently "openai"). Use .value so the message shows
        # the configured string rather than the enum repr, which varies across
        # Python versions for str-mixin enums.
        raise ValueError(
            f"Unsupported LLM provider: {config.llm_provider.value!r} "
            "(no client implementation available)"
        )
|
neuroweave/config.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"""NeuroWeave configuration — single source of truth for all settings."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from enum import Enum
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
import yaml
|
|
10
|
+
from pydantic import Field
|
|
11
|
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class LLMProvider(str, Enum):
    """LLM backends selectable via config (``llm_provider``)."""

    ANTHROPIC = "anthropic"  # hosted Anthropic API; requires llm_api_key
    OPENAI = "openai"
    MOCK = "mock"  # stub client; no API key required (see _create_llm_client)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class LogFormat(str, Enum):
    """Output format for application logging (``log_format`` setting)."""

    CONSOLE = "console"
    JSON = "json"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class GraphBackend(str, Enum):
    """Storage backend for the knowledge graph (only in-memory so far)."""

    MEMORY = "memory"
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
# Project root, taken as three parents above this file.
# NOTE(review): this assumes a src-style layout (src/neuroweave/config.py →
# repo root) — confirm. When the package is installed into site-packages this
# resolves outside the package, so the default YAML below will usually not
# exist there; _load_yaml_defaults tolerates a missing file.
_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent
# Default YAML config consulted by NeuroWeaveConfig.load().
_DEFAULT_CONFIG = _PROJECT_ROOT / "config" / "default.yaml"
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _load_yaml_defaults(path: Path) -> dict[str, Any]:
|
|
34
|
+
"""Load default values from YAML config file."""
|
|
35
|
+
if path.exists():
|
|
36
|
+
with open(path) as f:
|
|
37
|
+
return yaml.safe_load(f) or {}
|
|
38
|
+
return {}
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class NeuroWeaveConfig(BaseSettings):
    """All NeuroWeave configuration.

    Loading priority (highest wins):
    1. Environment variables (NEUROWEAVE_*)
    2. .env file
    3. config/default.yaml
    4. Field defaults below

    NOTE(review): ``load()`` passes YAML values as init kwargs, and in
    pydantic-settings init kwargs normally take priority over environment
    variables — which would invert items 1–3 above. Verify the intended
    precedence (e.g. via ``settings_customise_sources``).
    """

    model_config = SettingsConfigDict(
        env_prefix="NEUROWEAVE_",  # e.g. NEUROWEAVE_LLM_PROVIDER
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",  # silently drop unknown keys from env/.env
    )

    # --- LLM ---
    llm_provider: LLMProvider = LLMProvider.ANTHROPIC
    llm_model: str = "claude-haiku-4-5-20251001"
    llm_api_key: str = ""  # required for hosted providers; unused by mock

    # --- Extraction ---
    extraction_enabled: bool = True
    extraction_confidence_threshold: float = Field(default=0.3, ge=0.0, le=1.0)

    # --- Graph ---
    graph_backend: GraphBackend = GraphBackend.MEMORY

    # --- Server ---
    server_host: str = "127.0.0.1"
    server_port: int = Field(default=8787, ge=1024, le=65535)  # non-privileged range

    # --- Logging ---
    log_level: str = "INFO"
    log_format: LogFormat = LogFormat.CONSOLE

    @classmethod
    def load(cls, config_path: Path | None = None) -> NeuroWeaveConfig:
        """Load config with YAML defaults, then env var overrides.

        Args:
            config_path: Path to YAML config file. Defaults to config/default.yaml.
        """
        yaml_path = config_path or _DEFAULT_CONFIG
        yaml_values = _load_yaml_defaults(yaml_path)
        return cls(**yaml_values)
|