nowledge-mem-bub 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nowledge_mem_bub-0.1.0/.gitignore +1 -0
- nowledge_mem_bub-0.1.0/CHANGELOG.md +13 -0
- nowledge_mem_bub-0.1.0/PKG-INFO +112 -0
- nowledge_mem_bub-0.1.0/README.md +95 -0
- nowledge_mem_bub-0.1.0/pyproject.toml +30 -0
- nowledge_mem_bub-0.1.0/src/bub_skills/nowledge-mem/SKILL.md +78 -0
- nowledge_mem_bub-0.1.0/src/nowledge_mem_bub/__init__.py +6 -0
- nowledge_mem_bub-0.1.0/src/nowledge_mem_bub/client.py +304 -0
- nowledge_mem_bub-0.1.0/src/nowledge_mem_bub/plugin.py +190 -0
- nowledge_mem_bub-0.1.0/src/nowledge_mem_bub/tools.py +376 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__pycache__/
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
|
|
3
|
+
## 0.1.0 (2026-03-12)
|
|
4
|
+
|
|
5
|
+
Initial release — brings cross-tool knowledge into Bub.
|
|
6
|
+
|
|
7
|
+
- 9 tools (`mem.search`, `mem.save`, `mem.context`, `mem.connections`, `mem.timeline`, `mem.forget`, `mem.threads`, `mem.thread`, `mem.status`) for searching and saving knowledge across all your AI tools
|
|
8
|
+
- Hook-based integration: behavioural guidance via `system_prompt`, optional Working Memory injection via `load_state`, incremental thread capture via `save_state`
|
|
9
|
+
- Two modes: default (agent-driven, on-demand) and session context (auto-inject Working Memory + recalled knowledge each turn)
|
|
10
|
+
- Conversations in Bub flow into Nowledge Mem so other tools can find them
|
|
11
|
+
- Pre-save deduplication check
|
|
12
|
+
- Bundled `nowledge-mem` skill for agent self-guidance
|
|
13
|
+
- Access Anywhere support via `~/.nowledge-mem/config.json` or `NMEM_API_URL` / `NMEM_API_KEY`
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: nowledge-mem-bub
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Nowledge Mem plugin for Bub — cross-ai context for your agent.
|
|
5
|
+
Project-URL: Homepage, https://mem.nowledge.co
|
|
6
|
+
Project-URL: Documentation, https://mem.nowledge.co/docs/integrations/bub
|
|
7
|
+
Project-URL: Repository, https://github.com/nowledge-co/community/tree/main/nowledge-mem-bub-plugin
|
|
8
|
+
Author-email: Nowledge Labs <hello@nowledge-labs.ai>
|
|
9
|
+
License-Expression: Apache-2.0
|
|
10
|
+
Keywords: agent,bub,knowledge-graph,memory,nowledge-mem
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
14
|
+
Requires-Python: >=3.12
|
|
15
|
+
Requires-Dist: bub>=0.3.0a1
|
|
16
|
+
Description-Content-Type: text/markdown
|
|
17
|
+
|
|
18
|
+
# Nowledge Mem — Bub Plugin
|
|
19
|
+
|
|
20
|
+
> Bring your cross-tool knowledge into Bub, and share what you learn in Bub with every other tool.
|
|
21
|
+
|
|
22
|
+
Bub records every session through its tape system. This plugin connects Bub to your personal knowledge graph in Nowledge Mem — so decisions from Claude Code, preferences from Cursor, and insights from ChatGPT are all searchable inside Bub. And what you learn in Bub flows back to every other tool.
|
|
23
|
+
|
|
24
|
+
## Install
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
pip install nowledge-mem-bub
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
**Prerequisite:** `nmem` CLI must be in your PATH:
|
|
31
|
+
|
|
32
|
+
```bash
|
|
33
|
+
pip install nmem-cli # or: pipx install nmem-cli
|
|
34
|
+
nmem status # verify connection
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Verify
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
uv run bub hooks # should list nowledge_mem for system_prompt, load_state, save_state
|
|
41
|
+
uv run bub run "what was I working on this week?"
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
If you have existing knowledge in Nowledge Mem, the agent should find it through `mem.search`.
|
|
45
|
+
|
|
46
|
+
## Tools
|
|
47
|
+
|
|
48
|
+
| Tool | What it does |
|
|
49
|
+
|------|-------------|
|
|
50
|
+
| `mem.search` | Search knowledge from all your tools. Supports label and date filters. |
|
|
51
|
+
| `mem.save` | Save a decision, insight, or preference so any tool can find it. |
|
|
52
|
+
| `mem.context` | Read today's Working Memory — focus areas, priorities, recent activity. |
|
|
53
|
+
| `mem.connections` | Explore how a piece of knowledge relates to others across tools and time. |
|
|
54
|
+
| `mem.timeline` | Recent activity grouped by day. |
|
|
55
|
+
| `mem.forget` | Delete a memory by ID. |
|
|
56
|
+
| `mem.threads` | Search past conversations from any tool. |
|
|
57
|
+
| `mem.thread` | Fetch full messages from a conversation with pagination. |
|
|
58
|
+
| `mem.status` | Connection and configuration diagnostics. |
|
|
59
|
+
|
|
60
|
+
All tools work as Bub comma commands too: `,mem.search query=...`
|
|
61
|
+
|
|
62
|
+
**Bundled skill:** The `nowledge-mem` skill teaches the agent when and how to use these tools effectively.
|
|
63
|
+
|
|
64
|
+
## Configuration
|
|
65
|
+
|
|
66
|
+
No config needed for local use. The plugin reads `~/.nowledge-mem/config.json` and environment variables automatically.
|
|
67
|
+
|
|
68
|
+
| Variable | Default | What it does |
|
|
69
|
+
|----------|---------|-------------|
|
|
70
|
+
| `NMEM_SESSION_CONTEXT` | `false` | Inject Working Memory + recalled knowledge each turn |
|
|
71
|
+
| `NMEM_SESSION_DIGEST` | `true` | Feed Bub conversations into Mem for other tools to find |
|
|
72
|
+
| `NMEM_API_URL` | *(local)* | Remote Nowledge Mem server URL |
|
|
73
|
+
| `NMEM_API_KEY` | *(none)* | API key for remote access |
|
|
74
|
+
|
|
75
|
+
### Remote Access
|
|
76
|
+
|
|
77
|
+
```json
|
|
78
|
+
// ~/.nowledge-mem/config.json
|
|
79
|
+
{
|
|
80
|
+
"apiUrl": "https://your-server:14242",
|
|
81
|
+
"apiKey": "your-key"
|
|
82
|
+
}
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
Or use environment variables (`NMEM_API_URL`, `NMEM_API_KEY`), which override the config file.
|
|
86
|
+
|
|
87
|
+
## Two Modes
|
|
88
|
+
|
|
89
|
+
| Mode | Config | What happens |
|
|
90
|
+
|------|--------|-------------|
|
|
91
|
+
| **Default** | nothing | The agent searches and saves on demand. Conversations flow into Mem for other tools to find. |
|
|
92
|
+
| **Session context** | `NMEM_SESSION_CONTEXT=1` | Working Memory and relevant knowledge injected automatically each turn. |
|
|
93
|
+
|
|
94
|
+
Most users should start with the default.
|
|
95
|
+
|
|
96
|
+
## Troubleshooting
|
|
97
|
+
|
|
98
|
+
**nmem not found:** Install with `pip install nmem-cli` or `pipx install nmem-cli`.
|
|
99
|
+
|
|
100
|
+
**Plugin not loading:** Run `uv run bub hooks` and check that `nowledge_mem` appears in the hook list.
|
|
101
|
+
|
|
102
|
+
**Server not running:** Start the Nowledge Mem desktop app, or run `nmem status` for diagnostics.
|
|
103
|
+
|
|
104
|
+
## Links
|
|
105
|
+
|
|
106
|
+
- [Documentation](https://mem.nowledge.co/docs/integrations/bub)
|
|
107
|
+
- [Discord](https://nowled.ge/discord)
|
|
108
|
+
- [GitHub](https://github.com/nowledge-co/community/tree/main/nowledge-mem-bub-plugin)
|
|
109
|
+
|
|
110
|
+
---
|
|
111
|
+
|
|
112
|
+
Made with care by [Nowledge Labs](https://nowledge-labs.ai)
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
# Nowledge Mem — Bub Plugin
|
|
2
|
+
|
|
3
|
+
> Bring your cross-tool knowledge into Bub, and share what you learn in Bub with every other tool.
|
|
4
|
+
|
|
5
|
+
Bub records every session through its tape system. This plugin connects Bub to your personal knowledge graph in Nowledge Mem — so decisions from Claude Code, preferences from Cursor, and insights from ChatGPT are all searchable inside Bub. And what you learn in Bub flows back to every other tool.
|
|
6
|
+
|
|
7
|
+
## Install
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install nowledge-mem-bub
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
**Prerequisite:** `nmem` CLI must be in your PATH:
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
pip install nmem-cli # or: pipx install nmem-cli
|
|
17
|
+
nmem status # verify connection
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
## Verify
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
uv run bub hooks # should list nowledge_mem for system_prompt, load_state, save_state
|
|
24
|
+
uv run bub run "what was I working on this week?"
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
If you have existing knowledge in Nowledge Mem, the agent should find it through `mem.search`.
|
|
28
|
+
|
|
29
|
+
## Tools
|
|
30
|
+
|
|
31
|
+
| Tool | What it does |
|
|
32
|
+
|------|-------------|
|
|
33
|
+
| `mem.search` | Search knowledge from all your tools. Supports label and date filters. |
|
|
34
|
+
| `mem.save` | Save a decision, insight, or preference so any tool can find it. |
|
|
35
|
+
| `mem.context` | Read today's Working Memory — focus areas, priorities, recent activity. |
|
|
36
|
+
| `mem.connections` | Explore how a piece of knowledge relates to others across tools and time. |
|
|
37
|
+
| `mem.timeline` | Recent activity grouped by day. |
|
|
38
|
+
| `mem.forget` | Delete a memory by ID. |
|
|
39
|
+
| `mem.threads` | Search past conversations from any tool. |
|
|
40
|
+
| `mem.thread` | Fetch full messages from a conversation with pagination. |
|
|
41
|
+
| `mem.status` | Connection and configuration diagnostics. |
|
|
42
|
+
|
|
43
|
+
All tools work as Bub comma commands too: `,mem.search query=...`
|
|
44
|
+
|
|
45
|
+
**Bundled skill:** The `nowledge-mem` skill teaches the agent when and how to use these tools effectively.
|
|
46
|
+
|
|
47
|
+
## Configuration
|
|
48
|
+
|
|
49
|
+
No config needed for local use. The plugin reads `~/.nowledge-mem/config.json` and environment variables automatically.
|
|
50
|
+
|
|
51
|
+
| Variable | Default | What it does |
|
|
52
|
+
|----------|---------|-------------|
|
|
53
|
+
| `NMEM_SESSION_CONTEXT` | `false` | Inject Working Memory + recalled knowledge each turn |
|
|
54
|
+
| `NMEM_SESSION_DIGEST` | `true` | Feed Bub conversations into Mem for other tools to find |
|
|
55
|
+
| `NMEM_API_URL` | *(local)* | Remote Nowledge Mem server URL |
|
|
56
|
+
| `NMEM_API_KEY` | *(none)* | API key for remote access |
|
|
57
|
+
|
|
58
|
+
### Remote Access
|
|
59
|
+
|
|
60
|
+
```json
|
|
61
|
+
// ~/.nowledge-mem/config.json
|
|
62
|
+
{
|
|
63
|
+
"apiUrl": "https://your-server:14242",
|
|
64
|
+
"apiKey": "your-key"
|
|
65
|
+
}
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
Or use environment variables (`NMEM_API_URL`, `NMEM_API_KEY`), which override the config file.
|
|
69
|
+
|
|
70
|
+
## Two Modes
|
|
71
|
+
|
|
72
|
+
| Mode | Config | What happens |
|
|
73
|
+
|------|--------|-------------|
|
|
74
|
+
| **Default** | nothing | The agent searches and saves on demand. Conversations flow into Mem for other tools to find. |
|
|
75
|
+
| **Session context** | `NMEM_SESSION_CONTEXT=1` | Working Memory and relevant knowledge injected automatically each turn. |
|
|
76
|
+
|
|
77
|
+
Most users should start with the default.
|
|
78
|
+
|
|
79
|
+
## Troubleshooting
|
|
80
|
+
|
|
81
|
+
**nmem not found:** Install with `pip install nmem-cli` or `pipx install nmem-cli`.
|
|
82
|
+
|
|
83
|
+
**Plugin not loading:** Run `uv run bub hooks` and check that `nowledge_mem` appears in the hook list.
|
|
84
|
+
|
|
85
|
+
**Server not running:** Start the Nowledge Mem desktop app, or run `nmem status` for diagnostics.
|
|
86
|
+
|
|
87
|
+
## Links
|
|
88
|
+
|
|
89
|
+
- [Documentation](https://mem.nowledge.co/docs/integrations/bub)
|
|
90
|
+
- [Discord](https://nowled.ge/discord)
|
|
91
|
+
- [GitHub](https://github.com/nowledge-co/community/tree/main/nowledge-mem-bub-plugin)
|
|
92
|
+
|
|
93
|
+
---
|
|
94
|
+
|
|
95
|
+
Made with care by [Nowledge Labs](https://nowledge-labs.ai)
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "nowledge-mem-bub"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "Nowledge Mem plugin for Bub — cross-ai context for your agent."
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
license = "Apache-2.0"
|
|
7
|
+
requires-python = ">=3.12"
|
|
8
|
+
authors = [{ name = "Nowledge Labs", email = "hello@nowledge-labs.ai" }]
|
|
9
|
+
keywords = ["bub", "nowledge-mem", "memory", "agent", "knowledge-graph"]
|
|
10
|
+
classifiers = [
|
|
11
|
+
"Development Status :: 3 - Alpha",
|
|
12
|
+
"Intended Audience :: Developers",
|
|
13
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
14
|
+
]
|
|
15
|
+
dependencies = ["bub>=0.3.0a1"]
|
|
16
|
+
|
|
17
|
+
[project.entry-points."bub"]
|
|
18
|
+
nowledge_mem = "nowledge_mem_bub:plugin"
|
|
19
|
+
|
|
20
|
+
[project.urls]
|
|
21
|
+
Homepage = "https://mem.nowledge.co"
|
|
22
|
+
Documentation = "https://mem.nowledge.co/docs/integrations/bub"
|
|
23
|
+
Repository = "https://github.com/nowledge-co/community/tree/main/nowledge-mem-bub-plugin"
|
|
24
|
+
|
|
25
|
+
[build-system]
|
|
26
|
+
requires = ["hatchling"]
|
|
27
|
+
build-backend = "hatchling.build"
|
|
28
|
+
|
|
29
|
+
[tool.hatch.build.targets.wheel]
|
|
30
|
+
packages = ["src/nowledge_mem_bub", "src/bub_skills"]
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: nowledge-mem
|
|
3
|
+
description: Search, save, and manage knowledge across all your AI tools through Nowledge Mem.
|
|
4
|
+
license: Apache-2.0
|
|
5
|
+
compatibility: ">=3.12"
|
|
6
|
+
metadata: {}
|
|
7
|
+
allowed-tools:
|
|
8
|
+
- mem.search
|
|
9
|
+
- mem.save
|
|
10
|
+
- mem.context
|
|
11
|
+
- mem.connections
|
|
12
|
+
- mem.timeline
|
|
13
|
+
- mem.forget
|
|
14
|
+
- mem.threads
|
|
15
|
+
- mem.thread
|
|
16
|
+
- mem.status
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
# Nowledge Mem — Cross-Tool Knowledge for Bub
|
|
20
|
+
|
|
21
|
+
You have access to the user's personal knowledge graph through Nowledge Mem.
|
|
22
|
+
This graph contains knowledge from all their AI tools — decisions from Claude Code,
|
|
23
|
+
preferences from Cursor, insights from ChatGPT, and more — not just this Bub session.
|
|
24
|
+
Knowledge you save here will be available in their other tools too.
|
|
25
|
+
|
|
26
|
+
## When to search
|
|
27
|
+
|
|
28
|
+
Recognise these signals and call `mem.search` **before** answering:
|
|
29
|
+
|
|
30
|
+
- **Continuity** — the user references something from a previous session or another tool
|
|
31
|
+
- **Decision recall** — "what did we decide about…", "why did we choose…"
|
|
32
|
+
- **Pattern match** — the current topic overlaps with past work in any tool
|
|
33
|
+
- **Implicit recall** — the user assumes you know something you haven't seen this session
|
|
34
|
+
|
|
35
|
+
Search both memories and threads. When a memory has `source_thread_id`,
|
|
36
|
+
fetch the full conversation with `mem.thread` for deeper context.
|
|
37
|
+
|
|
38
|
+
## When to save
|
|
39
|
+
|
|
40
|
+
Call `mem.save` when durable knowledge appears:
|
|
41
|
+
|
|
42
|
+
- **Decisions** — compared options and chose one
|
|
43
|
+
- **Learnings** — debugging revealed something non-obvious
|
|
44
|
+
- **Preferences** — user stated how they want things done
|
|
45
|
+
- **Plans** — concrete next steps agreed on
|
|
46
|
+
- **Procedures** — repeatable workflow documented
|
|
47
|
+
|
|
48
|
+
**Skip**: routine fixes, work-in-progress, simple Q&A, generic info.
|
|
49
|
+
|
|
50
|
+
Guidelines:
|
|
51
|
+
- Atomic and actionable — one idea per memory
|
|
52
|
+
- Title is a short summary, content is the detail
|
|
53
|
+
- 0–3 labels per memory (project names, topics)
|
|
54
|
+
- Importance: 0.8–1.0 critical | 0.5–0.7 useful | 0.1–0.4 minor
|
|
55
|
+
- Ask before saving: "This seems worth remembering — save it?"
|
|
56
|
+
|
|
57
|
+
## Working Memory
|
|
58
|
+
|
|
59
|
+
`mem.context` returns today's Working Memory briefing: focus areas, priorities,
|
|
60
|
+
recent changes, and open questions. Read it at the start of a session or when
|
|
61
|
+
the user asks "what am I working on?"
|
|
62
|
+
|
|
63
|
+
## Thread retrieval
|
|
64
|
+
|
|
65
|
+
Two paths into past conversations:
|
|
66
|
+
|
|
67
|
+
1. **From a memory**: `mem.search` returns `source_thread_id` → `mem.thread`
|
|
68
|
+
2. **Direct search**: `mem.threads` finds conversations by keyword → `mem.thread`
|
|
69
|
+
|
|
70
|
+
Use `offset` for pagination on long threads.
|
|
71
|
+
|
|
72
|
+
## Graph exploration
|
|
73
|
+
|
|
74
|
+
`mem.connections` shows how a memory relates to other knowledge:
|
|
75
|
+
related topics, EVOLVES chains (how understanding changed over time),
|
|
76
|
+
and source document provenance.
|
|
77
|
+
|
|
78
|
+
`mem.timeline` shows recent activity grouped by day.
|
|
@@ -0,0 +1,304 @@
|
|
|
1
|
+
"""Async wrapper around the nmem CLI.
|
|
2
|
+
|
|
3
|
+
All memory operations route through ``nmem`` so that Access Anywhere
|
|
4
|
+
(remote config via ``~/.nowledge-mem/config.json`` or ``NMEM_API_URL``)
|
|
5
|
+
works transparently.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import asyncio
|
|
11
|
+
import json
|
|
12
|
+
import logging
|
|
13
|
+
import os
|
|
14
|
+
import shutil
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class NmemError(Exception):
    """Error from nmem CLI execution.

    Raised for a missing binary, a subprocess timeout, a non-zero exit
    code, or unparseable JSON output from the CLI.
    """
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class NmemClient:
    """Async subprocess wrapper for the ``nmem`` CLI.

    All memory operations shell out to ``nmem`` so that Access Anywhere
    (remote config via ``~/.nowledge-mem/config.json`` or env vars)
    works without this module speaking HTTP itself.
    """

    def __init__(self) -> None:
        # Resolved path to the nmem executable, cached by _resolve_cmd().
        self._cmd: str | None = None
        # Remote server URL / key; None means local default behaviour.
        self._api_url: str | None = None
        self._api_key: str | None = None
        self._load_config()
|
|
32
|
+
|
|
33
|
+
# ------------------------------------------------------------------
|
|
34
|
+
# Config
|
|
35
|
+
# ------------------------------------------------------------------
|
|
36
|
+
|
|
37
|
+
def _load_config(self) -> None:
    """Load config. Priority: env vars > config file > defaults.

    Reads ``~/.nowledge-mem/config.json`` when present. Both camelCase
    ("apiUrl"/"apiKey") and snake_case ("api_url"/"api_key") keys are
    accepted; environment variables NMEM_API_URL / NMEM_API_KEY win.
    """
    file_config: dict = {}
    config_path = Path.home() / ".nowledge-mem" / "config.json"
    if config_path.is_file():
        try:
            file_config = json.loads(config_path.read_text(encoding="utf-8"))
        except Exception:
            # Broad catch is deliberate: a malformed config file must not
            # prevent the plugin from loading in local-only mode.
            logger.warning("failed to parse %s", config_path)

    # `or` chains mean empty strings also fall through to the next source.
    self._api_url = (
        os.environ.get("NMEM_API_URL")
        or file_config.get("apiUrl")
        or file_config.get("api_url")
        or None
    )
    self._api_key = (
        os.environ.get("NMEM_API_KEY")
        or file_config.get("apiKey")
        or file_config.get("api_key")
        or None
    )
|
|
59
|
+
|
|
60
|
+
# ------------------------------------------------------------------
|
|
61
|
+
# Internal helpers
|
|
62
|
+
# ------------------------------------------------------------------
|
|
63
|
+
|
|
64
|
+
def _resolve_cmd(self) -> str:
    """Locate the ``nmem`` executable, caching the path after the first hit.

    Raises:
        NmemError: When the binary cannot be found on PATH.
    """
    if self._cmd is None:
        located = shutil.which("nmem")
        if not located:
            raise NmemError(
                "nmem not found in PATH. Install with: pip install nmem-cli"
            )
        self._cmd = located
    return self._cmd
|
|
74
|
+
|
|
75
|
+
def _build_env(self) -> dict[str, str]:
    """Copy the current environment, overlaying the API key when set.

    The key travels to the CLI via the environment rather than argv so
    it never appears in process listings.
    """
    environment = {**os.environ}
    if self._api_key:
        environment["NMEM_API_KEY"] = self._api_key
    return environment
|
|
80
|
+
|
|
81
|
+
def _base_args(self, *, json_output: bool = True) -> list[str]:
    """Assemble the leading argv shared by every nmem invocation.

    Order matters for the CLI: executable, then ``--json`` (when
    requested), then ``--api-url`` (when remote access is configured).
    """
    argv: list[str] = [self._resolve_cmd()]
    if json_output:
        argv += ["--json"]
    if self._api_url:
        argv += ["--api-url", self._api_url]
    return argv
|
|
88
|
+
|
|
89
|
+
def is_available(self) -> bool:
    """Report whether the ``nmem`` binary is on PATH (checked fresh, not cached)."""
    return bool(shutil.which("nmem"))
|
|
91
|
+
|
|
92
|
+
async def _exec(
    self,
    *args: str,
    json_output: bool = True,
    timeout: float = 15,
) -> str:
    """Run the nmem CLI and return its stripped stdout.

    Args:
        *args: Subcommand and flags appended after the base arguments.
        json_output: Pass ``--json`` for machine-readable output.
        timeout: Seconds to wait before killing the subprocess.

    Raises:
        NmemError: On a missing binary, timeout, or non-zero exit code.
    """
    cmd = [*self._base_args(json_output=json_output), *args]
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=self._build_env(),
        )
    except FileNotFoundError as e:
        # Split out of the shared try so `proc` is always bound below.
        raise NmemError("nmem not found in PATH") from e

    try:
        stdout, stderr = await asyncio.wait_for(
            proc.communicate(), timeout=timeout
        )
    except asyncio.TimeoutError as e:
        proc.kill()
        # Reap the killed process so it does not linger as a zombie.
        await proc.wait()
        raise NmemError(f"nmem timed out after {timeout}s") from e

    if proc.returncode != 0:
        err = stderr.decode(errors="replace").strip()
        raise NmemError(f"nmem exited {proc.returncode}: {err}")

    return stdout.decode(errors="replace").strip()
|
|
120
|
+
|
|
121
|
+
async def _exec_json(self, *args: str, timeout: float = 15) -> dict | list:
    """Run nmem with ``--json`` and parse its stdout.

    Returns an empty dict for empty output (some subcommands print nothing
    on success).

    Raises:
        NmemError: When the CLI fails or emits invalid JSON; the original
            ``JSONDecodeError`` is chained as ``__cause__`` for debugging.
    """
    raw = await self._exec(*args, json_output=True, timeout=timeout)
    if not raw:
        return {}
    try:
        return json.loads(raw)
    except json.JSONDecodeError as e:
        raise NmemError(f"invalid JSON from nmem: {e}") from e
|
|
129
|
+
|
|
130
|
+
# ------------------------------------------------------------------
|
|
131
|
+
# Memory operations
|
|
132
|
+
# ------------------------------------------------------------------
|
|
133
|
+
|
|
134
|
+
async def search(
    self,
    query: str,
    limit: int = 5,
    *,
    labels: list[str] | None = None,
    importance: float | None = None,
    event_from: str | None = None,
    event_to: str | None = None,
    mode: str | None = None,
) -> list:
    """Search memories via ``nmem m search``.

    Args:
        query: Free-text search query.
        limit: Maximum number of results (``-n``).
        labels: Label filters, emitted as one ``-l`` flag each.
        importance: Forwarded to ``--importance`` — presumably a minimum
            threshold; confirm against the nmem CLI help.
        event_from: Event-date range start, passed through verbatim.
        event_to: Event-date range end, passed through verbatim.
        mode: Forwarded to ``--mode``; valid values defined by the CLI.

    Returns:
        A list of memory dicts. Tolerates either a bare JSON list or an
        envelope object keyed by "results" or "memories".
    """
    args = ["m", "search", query, "-n", str(limit)]
    if importance is not None:
        args.extend(["--importance", str(importance)])
    for label in labels or []:
        args.extend(["-l", label])
    if event_from:
        args.extend(["--event-from", event_from])
    if event_to:
        args.extend(["--event-to", event_to])
    if mode:
        args.extend(["--mode", mode])
    result = await self._exec_json(*args)
    if isinstance(result, list):
        return result
    # NOTE: the "memories" fallback is evaluated eagerly even when
    # "results" is present — harmless, but worth knowing when reading this.
    return result.get("results", result.get("memories", []))
|
|
160
|
+
|
|
161
|
+
async def add_memory(
    self,
    content: str,
    *,
    title: str | None = None,
    importance: float | None = None,
    labels: list[str] | None = None,
    unit_type: str | None = None,
    event_start: str | None = None,
    event_end: str | None = None,
    temporal_context: str | None = None,
) -> dict:
    """Save a memory via ``nmem m add``, always tagged with source "bub".

    Args:
        content: Memory body text (positional argument to the CLI).
        title: Short summary, forwarded as ``-t``.
        importance: Forwarded as ``-i``; 0 is a valid value (``is not None``
            check, unlike the truthiness checks used for string flags).
        labels: One ``-l`` flag per label.
        unit_type: Forwarded to ``--unit-type``; values defined by the CLI.
        event_start: Forwarded to ``--event-start`` verbatim.
        event_end: Forwarded to ``--event-end`` verbatim.
        temporal_context: Forwarded to ``--temporal-context`` verbatim.

    Returns:
        The CLI's JSON response as a dict, or ``{}`` on an unexpected shape.
    """
    args = ["m", "add", content]
    if title:
        args.extend(["-t", title])
    if importance is not None:
        args.extend(["-i", str(importance)])
    for label in labels or []:
        args.extend(["-l", label])
    if unit_type:
        args.extend(["--unit-type", unit_type])
    if event_start:
        args.extend(["--event-start", event_start])
    if event_end:
        args.extend(["--event-end", event_end])
    if temporal_context:
        args.extend(["--temporal-context", temporal_context])
    # Source tag so other tools can attribute this memory to Bub.
    args.extend(["-s", "bub"])
    result = await self._exec_json(*args)
    return result if isinstance(result, dict) else {}
|
|
191
|
+
|
|
192
|
+
async def delete_memory(self, memory_id: str) -> dict:
    """Delete one memory by ID (``-f`` skips the interactive confirmation)."""
    response = await self._exec_json("m", "delete", memory_id, "-f")
    if isinstance(response, dict):
        return response
    return {}

async def get_memory(self, memory_id: str) -> dict:
    """Fetch one memory by ID via ``nmem m show``."""
    response = await self._exec_json("m", "show", memory_id)
    if isinstance(response, dict):
        return response
    return {}
|
|
199
|
+
|
|
200
|
+
# ------------------------------------------------------------------
|
|
201
|
+
# Working Memory
|
|
202
|
+
# ------------------------------------------------------------------
|
|
203
|
+
|
|
204
|
+
async def read_working_memory(self) -> dict:
    """Return today's Working Memory as a dict with a "content" key.

    Tries ``nmem wm read`` first; on any NmemError, falls back to reading
    ``~/ai-now/memory.md`` directly — presumably the local WM file written
    by the desktop app (TODO confirm against its layout). The fallback
    adds an "available" flag; the CLI path does not.
    """
    try:
        result = await self._exec_json("wm", "read")
        return result if isinstance(result, dict) else {"content": ""}
    except NmemError:
        wm_path = Path.home() / "ai-now" / "memory.md"
        if wm_path.is_file():
            return {
                "content": wm_path.read_text(encoding="utf-8"),
                "available": True,
            }
        return {"content": "", "available": False}
|
|
216
|
+
|
|
217
|
+
# ------------------------------------------------------------------
|
|
218
|
+
# Graph
|
|
219
|
+
# ------------------------------------------------------------------
|
|
220
|
+
|
|
221
|
+
async def graph_expand(
    self, memory_id: str, depth: int = 1, limit: int = 20
) -> dict:
    """Expand the knowledge graph around one memory (``nmem g expand``)."""
    response = await self._exec_json(
        "g", "expand", memory_id, "--depth", str(depth), "-n", str(limit)
    )
    if isinstance(response, dict):
        return response
    return {}

async def graph_evolves(self, memory_id: str, limit: int = 10) -> dict:
    """Follow a memory's EVOLVES chain (``nmem g evolves``)."""
    response = await self._exec_json("g", "evolves", memory_id, "-n", str(limit))
    if isinstance(response, dict):
        return response
    return {}
|
|
234
|
+
|
|
235
|
+
# ------------------------------------------------------------------
|
|
236
|
+
# Feed
|
|
237
|
+
# ------------------------------------------------------------------
|
|
238
|
+
|
|
239
|
+
async def feed_events(
    self,
    days: int = 7,
    *,
    event_type: str | None = None,
    date_from: str | None = None,
    date_to: str | None = None,
) -> list:
    """Fetch the activity feed (``nmem f``) for the last *days* days.

    Args:
        days: Lookback window forwarded to ``--last-n-days``.
        event_type: Optional filter forwarded to ``--type``.
        date_from: Explicit range start, forwarded verbatim.
        date_to: Explicit range end, forwarded verbatim.

    Returns:
        A list of event dicts; tolerates a bare JSON list or an envelope
        object with an "events" key. Anything else yields ``[]``.
    """
    args = ["f", "--last-n-days", str(days)]
    if event_type:
        args.extend(["--type", event_type])
    if date_from:
        args.extend(["--date-from", date_from])
    if date_to:
        args.extend(["--date-to", date_to])
    result = await self._exec_json(*args)
    if isinstance(result, list):
        return result
    return result.get("events", []) if isinstance(result, dict) else []
|
|
258
|
+
|
|
259
|
+
# ------------------------------------------------------------------
|
|
260
|
+
# Threads
|
|
261
|
+
# ------------------------------------------------------------------
|
|
262
|
+
|
|
263
|
+
async def search_threads(
    self, query: str, limit: int = 5, source: str | None = None
) -> list:
    """Search stored conversations via ``nmem t search``."""
    argv = ["t", "search", query, "--limit", str(limit)]
    if source:
        argv += ["--source", source]
    found = await self._exec_json(*argv)
    if isinstance(found, list):
        return found
    if isinstance(found, dict):
        return found.get("threads", [])
    return []

async def fetch_thread(
    self, thread_id: str, limit: int = 20, offset: int = 0
) -> dict:
    """Fetch messages from one thread with pagination.

    Message bodies are capped at 1200 characters via ``--content-limit``
    to keep agent context small.
    """
    argv = ["t", "show", thread_id, "-m", str(limit)]
    if offset > 0:
        argv += ["--offset", str(offset)]
    argv += ["--content-limit", "1200"]
    response = await self._exec_json(*argv)
    if isinstance(response, dict):
        return response
    return {}
|
|
283
|
+
|
|
284
|
+
async def create_thread(
    self, thread_id: str, title: str, messages_json: str
) -> dict:
    """Create a thread (source tagged "bub") from a JSON-encoded message list."""
    response = await self._exec_json(
        "t", "create", "--id", thread_id, "-t", title,
        "-m", messages_json, "-s", "bub",
    )
    if isinstance(response, dict):
        return response
    return {}

async def append_thread(self, thread_id: str, messages_json: str) -> dict:
    """Append JSON-encoded messages to an existing thread."""
    response = await self._exec_json(
        "t", "append", thread_id, "-m", messages_json,
    )
    if isinstance(response, dict):
        return response
    return {}
|
|
298
|
+
|
|
299
|
+
# ------------------------------------------------------------------
|
|
300
|
+
# Status
|
|
301
|
+
# ------------------------------------------------------------------
|
|
302
|
+
|
|
303
|
+
async def status(self) -> str:
    """Return the human-readable output of ``nmem status`` (non-JSON)."""
    diagnostics = await self._exec("status", json_output=False)
    return diagnostics
|
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
"""Bub hook implementations for Nowledge Mem.
|
|
2
|
+
|
|
3
|
+
Hooks:
|
|
4
|
+
system_prompt — behavioural guidance (~50 tokens) + optional WM / recall
|
|
5
|
+
load_state — fetch Working Memory and (if session_context) recalled memories
|
|
6
|
+
save_state — capture each turn to a Nowledge Mem thread (incremental)
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import hashlib
|
|
12
|
+
import json
|
|
13
|
+
import logging
|
|
14
|
+
import os
|
|
15
|
+
|
|
16
|
+
from bub import hookimpl
|
|
17
|
+
from bub.envelope import content_of
|
|
18
|
+
|
|
19
|
+
from .client import NmemClient, NmemError
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
# ---------------------------------------------------------------------------
|
|
24
|
+
# Behavioural guidance injected into the system prompt.
|
|
25
|
+
# Cost: ~50 tokens. Adjusts when session_context is on to avoid redundant
|
|
26
|
+
# tool calls for context that was already injected.
|
|
27
|
+
# ---------------------------------------------------------------------------
|
|
28
|
+
|
|
29
|
+
# Guidance used when session-context injection is OFF: the agent must pull
# context itself (search / save / follow thread links on demand).
_GUIDANCE_BASE = """\
You have access to the user's personal knowledge graph (Nowledge Mem).
It contains knowledge from all their tools — Claude Code, Cursor, ChatGPT, and others — not just this session.
When prior context would improve your response, search with mem.search.
When the conversation produces something worth keeping, save it with mem.save.
When a memory has source_thread_id, fetch the full conversation with mem.thread."""

# Guidance used when session-context injection is ON: context is already in
# the prompt, so the agent is steered away from redundant mem.search calls.
_GUIDANCE_WITH_CONTEXT = """\
You have access to the user's personal knowledge graph (Nowledge Mem).
It contains knowledge from all their tools — not just this session.
Relevant memories and Working Memory have already been injected into context.
Use mem.search only for specific follow-ups beyond what was auto-recalled.
When the conversation produces something worth keeping, save it with mem.save."""
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class NowledgeMemPlugin:
    """Nowledge Mem integration for Bub.

    Configuration (env vars):
        NMEM_SESSION_CONTEXT — "1"/"true" to inject WM + recall each turn
        NMEM_SESSION_DIGEST — "0"/"false" to disable thread capture
        NMEM_API_URL — remote server URL
        NMEM_API_KEY — API key (passed to nmem via env, never logged)
    """

    def __init__(self) -> None:
        # Shared CLI wrapper; URL/key resolution happens in its constructor.
        self.client = NmemClient()
        # Opt-in flag: anything other than 1/true/yes (case-insensitive) is off.
        self._session_context = os.environ.get(
            "NMEM_SESSION_CONTEXT", ""
        ).lower() in ("1", "true", "yes")
        # Opt-out flag: enabled unless explicitly 0/false/no.
        self._session_digest = os.environ.get(
            "NMEM_SESSION_DIGEST", "1"
        ).lower() not in ("0", "false", "no")
|
|
62
|
+
|
|
63
|
+
# ------------------------------------------------------------------
|
|
64
|
+
# system_prompt — sync, call_many, results joined with \n\n
|
|
65
|
+
# ------------------------------------------------------------------
|
|
66
|
+
|
|
67
|
+
@hookimpl
def system_prompt(self, prompt, state) -> str:
    """Inject behavioural guidance and, when session_context is on,
    Working Memory + recalled knowledge from state.

    Returns a string Bub joins into the system prompt. In default mode
    only the ~50-token guidance is returned; in session-context mode the
    guidance plus WM and recall sections (joined by blank lines).
    """
    sections: list[str] = []

    # Always: behavioural nudge (variant depends on whether context is
    # auto-injected, to discourage redundant mem.search calls).
    if self._session_context:
        sections.append(_GUIDANCE_WITH_CONTEXT)
    else:
        sections.append(_GUIDANCE_BASE)

    if not self._session_context:
        return "\n".join(sections)

    # Session context mode: include WM + recalled memories.
    # These keys are presumably populated by load_state — confirm there.
    wm = state.get("_nmem_working_memory")
    if wm:
        sections.append(f"<working-memory>\n{wm}\n</working-memory>")

    recalled = state.get("_nmem_recalled")
    if recalled:
        lines: list[str] = []
        for r in recalled:
            title = r.get("title", "")
            # Truncate each memory body to 300 chars to bound prompt size.
            text = r.get("content", "")[:300]
            mid = r.get("id", "")
            thread = r.get("source_thread_id", "")
            entry = f"- [{title}] {text}"
            if mid:
                entry += f" (id: {mid})"
            if thread:
                # Lets the agent follow up with mem.thread for full context.
                entry += f" (thread: {thread})"
            lines.append(entry)
        sections.append(
            "<recalled-knowledge>\n"
            + "\n".join(lines)
            + "\n</recalled-knowledge>"
        )

    return "\n\n".join(sections)
|
|
108
|
+
|
|
109
|
+
# ------------------------------------------------------------------
|
|
110
|
+
# load_state — async, call_many, dicts merged into state
|
|
111
|
+
# ------------------------------------------------------------------
|
|
112
|
+
|
|
113
|
+
@hookimpl
|
|
114
|
+
async def load_state(self, message, session_id) -> dict:
|
|
115
|
+
"""Load Working Memory and recalled memories (when session_context is on)."""
|
|
116
|
+
state: dict = {"_nmem_client": self.client}
|
|
117
|
+
|
|
118
|
+
if not self.client.is_available():
|
|
119
|
+
logger.debug("nmem not in PATH, skipping load_state")
|
|
120
|
+
return state
|
|
121
|
+
|
|
122
|
+
if not self._session_context:
|
|
123
|
+
# Default mode: no per-turn fetches. The agent calls
|
|
124
|
+
# mem.context or mem.search on demand.
|
|
125
|
+
return state
|
|
126
|
+
|
|
127
|
+
# Session context mode: fetch WM + recalled memories
|
|
128
|
+
try:
|
|
129
|
+
wm = await self.client.read_working_memory()
|
|
130
|
+
content = wm.get("content", "")
|
|
131
|
+
if content:
|
|
132
|
+
state["_nmem_working_memory"] = content
|
|
133
|
+
except Exception as exc:
|
|
134
|
+
logger.debug("working memory read failed: %s", exc)
|
|
135
|
+
|
|
136
|
+
# Recall: search for memories relevant to the current message
|
|
137
|
+
query = content_of(message)
|
|
138
|
+
if query and len(query.strip()) > 3:
|
|
139
|
+
try:
|
|
140
|
+
results = await self.client.search(query[:500], limit=5)
|
|
141
|
+
if results:
|
|
142
|
+
state["_nmem_recalled"] = results
|
|
143
|
+
except Exception as exc:
|
|
144
|
+
logger.debug("recall search failed: %s", exc)
|
|
145
|
+
|
|
146
|
+
return state
|
|
147
|
+
|
|
148
|
+
# ------------------------------------------------------------------
|
|
149
|
+
# save_state — async, call_many, always runs (finally block)
|
|
150
|
+
# ------------------------------------------------------------------
|
|
151
|
+
|
|
152
|
+
@hookimpl
|
|
153
|
+
async def save_state(self, session_id, state, message, model_output) -> None:
|
|
154
|
+
"""Append the turn (user + assistant) to a Nowledge Mem thread."""
|
|
155
|
+
if not self._session_digest:
|
|
156
|
+
return
|
|
157
|
+
if not self.client.is_available():
|
|
158
|
+
return
|
|
159
|
+
|
|
160
|
+
try:
|
|
161
|
+
user_content = content_of(message)
|
|
162
|
+
if not user_content or not model_output:
|
|
163
|
+
return
|
|
164
|
+
|
|
165
|
+
digest = hashlib.sha1(session_id.encode()).hexdigest()[:10]
|
|
166
|
+
thread_id = f"bub-{digest}"
|
|
167
|
+
|
|
168
|
+
messages = [
|
|
169
|
+
{"role": "user", "content": user_content[:800]},
|
|
170
|
+
{"role": "assistant", "content": str(model_output)[:800]},
|
|
171
|
+
]
|
|
172
|
+
messages_json = json.dumps(messages)
|
|
173
|
+
|
|
174
|
+
try:
|
|
175
|
+
await self.client.append_thread(thread_id, messages_json)
|
|
176
|
+
except NmemError:
|
|
177
|
+
# Thread doesn't exist yet — create it
|
|
178
|
+
title = f"Bub Session ({session_id[:30]})"
|
|
179
|
+
try:
|
|
180
|
+
await self.client.create_thread(
|
|
181
|
+
thread_id, title, messages_json
|
|
182
|
+
)
|
|
183
|
+
except Exception as exc:
|
|
184
|
+
logger.debug("thread create failed: %s", exc)
|
|
185
|
+
except Exception as exc:
|
|
186
|
+
# save_state must never raise — it runs in a finally block
|
|
187
|
+
logger.debug("session capture failed: %s", exc)
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
# Module-level singleton instance — presumably what Bub's plugin loader
# discovers; verify against the host's entry-point convention.
plugin = NowledgeMemPlugin()
|
|
@@ -0,0 +1,376 @@
|
|
|
1
|
+
"""Nowledge Mem tools for Bub agents.
|
|
2
|
+
|
|
3
|
+
Tools are registered in ``bub.tools.REGISTRY`` on import via the ``@tool``
|
|
4
|
+
decorator. The plugin's ``__init__.py`` imports this module to trigger
|
|
5
|
+
registration.
|
|
6
|
+
|
|
7
|
+
Naming follows Bub conventions: ``mem.*`` namespace, dot-separated.
|
|
8
|
+
In the model's tool list dots become underscores (``mem_search``, etc.).
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import logging
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
from pydantic import BaseModel, Field
|
|
17
|
+
|
|
18
|
+
from bub import tool
|
|
19
|
+
|
|
20
|
+
from .client import NmemClient, NmemError
|
|
21
|
+
|
|
22
|
+
logger = logging.getLogger(__name__)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
# ---------------------------------------------------------------------------
|
|
26
|
+
# Helpers
|
|
27
|
+
# ---------------------------------------------------------------------------
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _get_client(context: Any) -> NmemClient:
|
|
31
|
+
"""Retrieve NmemClient from tool context state, or create a fresh one."""
|
|
32
|
+
client = context.state.get("_nmem_client")
|
|
33
|
+
if client is None:
|
|
34
|
+
client = NmemClient()
|
|
35
|
+
return client
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _fmt_memory(m: dict) -> str:
|
|
39
|
+
"""Format one memory result for the agent."""
|
|
40
|
+
mid = m.get("id", "")
|
|
41
|
+
title = m.get("title", "")
|
|
42
|
+
text = m.get("content", "")
|
|
43
|
+
score = m.get("score", "")
|
|
44
|
+
labels = m.get("labels", [])
|
|
45
|
+
thread = m.get("source_thread_id", "")
|
|
46
|
+
parts = [f"[{title}]" if title else "", text[:400]]
|
|
47
|
+
if score:
|
|
48
|
+
parts.append(f"score: {score}")
|
|
49
|
+
if labels:
|
|
50
|
+
parts.append(f"labels: {', '.join(labels)}")
|
|
51
|
+
if mid:
|
|
52
|
+
parts.append(f"id: {mid}")
|
|
53
|
+
if thread:
|
|
54
|
+
parts.append(f"thread: {thread}")
|
|
55
|
+
return " · ".join(p for p in parts if p)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
# ---------------------------------------------------------------------------
|
|
59
|
+
# mem.search
|
|
60
|
+
# ---------------------------------------------------------------------------
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class MemSearchInput(BaseModel):
    # Parameter schema for mem.search; Field descriptions surface in the
    # tool's JSON schema shown to the model.
    query: str = Field(..., description="Search query.")
    # Declared cap is 1–20 per the description; not enforced here —
    # presumably validated server-side. TODO confirm.
    limit: int = Field(5, description="Max results (1–20).")
    labels: list[str] = Field(
        default_factory=list, description="Filter by labels."
    )
    event_from: str | None = Field(
        None, description="Event start date filter (ISO, e.g. 2025-01)."
    )
    event_to: str | None = Field(
        None, description="Event end date filter (ISO)."
    )
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
@tool(context=True, name="mem.search", model=MemSearchInput)
async def mem_search(param: MemSearchInput, *, context: Any) -> str:
    """Search your personal knowledge graph. Returns memories ranked by relevance. Use when prior context would help."""
    matches = await _get_client(context).search(
        param.query,
        limit=param.limit,
        labels=param.labels or None,
        event_from=param.event_from,
        event_to=param.event_to,
    )
    if not matches:
        return "(no matches)"
    # One formatted summary line per memory.
    return "\n".join(_fmt_memory(m) for m in matches)
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
# ---------------------------------------------------------------------------
|
|
95
|
+
# mem.save
|
|
96
|
+
# ---------------------------------------------------------------------------
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
class MemSaveInput(BaseModel):
    # Parameter schema for mem.save; Field descriptions surface in the
    # tool's JSON schema shown to the model.
    content: str = Field(..., description="Memory content to save.")
    title: str | None = Field(None, description="Short descriptive title.")
    importance: float = Field(
        0.7, description="Importance 0.0–1.0. 0.8+ critical, 0.5–0.7 useful."
    )
    labels: list[str] = Field(
        default_factory=list, description="Topic labels (0–3 recommended)."
    )
    # Free-form string; allowed values per description — presumably
    # validated by the backend. TODO confirm.
    unit_type: str = Field(
        "fact",
        description="Type: fact, decision, learning, preference, plan, procedure, context, event.",
    )
    event_start: str | None = Field(
        None, description="When it happened (ISO date)."
    )
    event_end: str | None = Field(
        None, description="End date if range (ISO)."
    )
    temporal_context: str = Field(
        "present", description="past, present, future, or timeless."
    )
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
@tool(context=True, name="mem.save", model=MemSaveInput)
async def mem_save(param: MemSaveInput, *, context: Any) -> str:
    """Save a memory (decision, insight, preference, plan, …) to your knowledge graph. Check for duplicates before saving."""
    client = _get_client(context)

    # Best-effort pre-save dedup: bail out when a near-identical memory
    # (similarity score >= 0.9) is already stored.
    try:
        probe = param.title or param.content[:200]
        for candidate in await client.search(probe, limit=3):
            sim = candidate.get("score", 0)
            if isinstance(sim, (int, float)) and sim >= 0.9:
                dup_id = candidate.get("id", "unknown")
                dup_title = candidate.get("title", "")
                return (
                    f"(duplicate detected — existing memory: [{dup_title}] id: {dup_id})"
                )
    except Exception:
        pass  # dedup is best-effort

    saved = await client.add_memory(
        param.content,
        title=param.title,
        importance=param.importance,
        labels=param.labels or None,
        unit_type=param.unit_type,
        event_start=param.event_start,
        event_end=param.event_end,
        temporal_context=param.temporal_context,
    )

    # Compact confirmation line for the agent.
    display_title = param.title or param.content[:40]
    summary = [f"Saved: {display_title} [{param.unit_type}]"]
    new_id = saved.get("id", "")
    if new_id:
        summary.append(f"id: {new_id}")
    label_str = ", ".join(param.labels) if param.labels else ""
    if label_str:
        summary.append(f"labels: {label_str}")
    return " · ".join(summary)
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
# ---------------------------------------------------------------------------
|
|
165
|
+
# mem.context — Working Memory
|
|
166
|
+
# ---------------------------------------------------------------------------
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
@tool(context=True, name="mem.context")
async def mem_context(*, context: Any) -> str:
    """Read today's Working Memory briefing: focus areas, priorities, recent activity."""
    # Prefer the copy load_state may already have placed in state.
    cached = context.state.get("_nmem_working_memory")
    if cached:
        return cached

    briefing = await _get_client(context).read_working_memory()
    return briefing.get("content", "") or "(no Working Memory available)"
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
# ---------------------------------------------------------------------------
|
|
184
|
+
# mem.connections — graph exploration
|
|
185
|
+
# ---------------------------------------------------------------------------
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
class MemConnectionsInput(BaseModel):
    # Parameter schema for mem.connections. Either field may be given;
    # the tool resolves query -> memory_id when the ID is omitted.
    memory_id: str | None = Field(
        None, description="Memory ID to explore. If omitted, searches by query."
    )
    query: str | None = Field(
        None,
        description="Search query (used when memory_id is not provided).",
    )
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
@tool(context=True, name="mem.connections", model=MemConnectionsInput)
async def mem_connections(param: MemConnectionsInput, *, context: Any) -> str:
    """Explore how a memory connects to other knowledge: related topics, version chains (EVOLVES), source documents."""
    client = _get_client(context)

    # Resolve the target memory ID, searching when only a query was given.
    target = param.memory_id
    if not target:
        if not param.query:
            return "(provide memory_id or query)"
        hits = await client.search(param.query, limit=1)
        if not hits:
            return "(no matching memory found)"
        target = hits[0].get("id", "")
        if not target:
            return "(no memory ID in result)"

    output: list[str] = []

    # Direct graph neighbours (either "nodes" or "neighbors" key).
    try:
        expansion = await client.graph_expand(target)
        neighbours = expansion.get("nodes", expansion.get("neighbors", []))
        if neighbours:
            rows: list[str] = []
            for node in neighbours[:15]:
                label = node.get("label", node.get("title", ""))
                nid = node.get("id", "")
                edge = node.get("edge_type", node.get("relationship", ""))
                rows.append(f" - {label} ({edge}) id: {nid}")
            output.append("Connections:\n" + "\n".join(rows))
    except Exception as exc:
        logger.debug("graph expand failed: %s", exc)

    # EVOLVES version chain (either "chain" or "evolves" key).
    try:
        evolution = await client.graph_evolves(target)
        chain = evolution.get("chain", evolution.get("evolves", []))
        if chain:
            rows = []
            for link in chain[:10]:
                title = link.get("title", "")
                eid = link.get("id", "")
                rel = link.get("relationship", link.get("type", ""))
                rows.append(f" - {title} ({rel}) id: {eid}")
            output.append("Evolution:\n" + "\n".join(rows))
    except Exception as exc:
        logger.debug("graph evolves failed: %s", exc)

    return "\n\n".join(output) if output else "(no connections found)"
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
# ---------------------------------------------------------------------------
|
|
250
|
+
# mem.timeline — activity feed
|
|
251
|
+
# ---------------------------------------------------------------------------
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
class MemTimelineInput(BaseModel):
    # Parameter schema for mem.timeline. Explicit date bounds are passed
    # through alongside last_n_days; precedence is decided by the client.
    last_n_days: int = Field(7, description="Number of recent days (1–90).")
    date_from: str | None = Field(None, description="Start date (ISO).")
    date_to: str | None = Field(None, description="End date (ISO).")
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
@tool(context=True, name="mem.timeline", model=MemTimelineInput)
async def mem_timeline(param: MemTimelineInput, *, context: Any) -> str:
    """Show recent activity: memories created, conversations captured, insights generated. Grouped by day."""
    events = await _get_client(context).feed_events(
        days=param.last_n_days,
        date_from=param.date_from,
        date_to=param.date_to,
    )
    if not events:
        return "(no recent activity)"

    out: list[str] = []
    # Emit a day header whenever the (ISO-prefix) date changes; cap at 50.
    day_marker = ""
    for ev in events[:50]:
        day = ev.get("date", ev.get("created_at", ""))[:10]
        if day != day_marker:
            day_marker = day
            out.append(f"\n## {day}")
        kind = ev.get("type", ev.get("event_type", ""))
        headline = ev.get("title", ev.get("summary", ""))
        row = f"- {kind}: {headline}"
        ref = ev.get("memory_id", "")
        if ref:
            row += f" (id: {ref})"
        out.append(row)
    return "\n".join(out)
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
# ---------------------------------------------------------------------------
|
|
290
|
+
# mem.forget
|
|
291
|
+
# ---------------------------------------------------------------------------
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
@tool(context=True, name="mem.forget")
async def mem_forget(memory_id: str, *, context: Any) -> str:
    """Delete a memory by ID."""
    await _get_client(context).delete_memory(memory_id)
    return f"deleted: {memory_id}"
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
# ---------------------------------------------------------------------------
|
|
303
|
+
# mem.threads — search past conversations
|
|
304
|
+
# ---------------------------------------------------------------------------
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
@tool(context=True, name="mem.threads")
async def mem_thread_search(
    query: str, limit: int = 5, *, context: Any
) -> str:
    """Search past conversations by keyword. Returns threads with matched message snippets."""
    hits = await _get_client(context).search_threads(query, limit=limit)
    if not hits:
        return "(no matching threads)"

    rows: list[str] = []
    for hit in hits:
        thread_id = hit.get("id", hit.get("thread_id", ""))
        heading = hit.get("title", "")
        count = hit.get("message_count", hit.get("total_messages", ""))
        excerpt = hit.get("snippet", hit.get("matched_text", ""))[:200]
        row = f"- [{heading}] ({count} messages) id: {thread_id}"
        if excerpt:
            row += f"\n {excerpt}"
        rows.append(row)
    return "\n".join(rows)
|
|
327
|
+
|
|
328
|
+
|
|
329
|
+
# ---------------------------------------------------------------------------
|
|
330
|
+
# mem.thread — fetch full conversation
|
|
331
|
+
# ---------------------------------------------------------------------------
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
@tool(context=True, name="mem.thread")
async def mem_thread_fetch(
    thread_id: str,
    offset: int = 0,
    limit: int = 20,
    *,
    context: Any,
) -> str:
    """Fetch messages from a specific thread (conversation). Supports pagination."""
    payload = await _get_client(context).fetch_thread(
        thread_id, limit=limit, offset=offset
    )

    heading = payload.get("title", "")
    total = payload.get("total_messages", payload.get("message_count", "?"))
    msgs = payload.get("messages", [])

    out = [f"Thread: {heading} ({total} messages)"]
    for msg in msgs:
        out.append(f"[{msg.get('role', '')}] {msg.get('content', '')[:600]}")

    # When the server omits has_more, infer it from a full page.
    if payload.get("has_more", len(msgs) >= limit):
        next_offset = offset + len(msgs)
        out.append(f"\n(more messages available — use offset={next_offset})")

    return "\n".join(out)
|
|
362
|
+
|
|
363
|
+
|
|
364
|
+
# ---------------------------------------------------------------------------
|
|
365
|
+
# mem.status — diagnostics
|
|
366
|
+
# ---------------------------------------------------------------------------
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
@tool(context=True, name="mem.status")
async def mem_status(*, context: Any) -> str:
    """Check Nowledge Mem connection status and configuration."""
    try:
        return await _get_client(context).status()
    except NmemError as exc:
        return f"(connection failed: {exc})"
|