transcendence-memory-server 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. transcendence_memory_server-0.3.0/.env.example +47 -0
  2. transcendence_memory_server-0.3.0/.github/workflows/ci.yml +67 -0
  3. transcendence_memory_server-0.3.0/.gitignore +48 -0
  4. transcendence_memory_server-0.3.0/CONTRIBUTING.md +43 -0
  5. transcendence_memory_server-0.3.0/Dockerfile +21 -0
  6. transcendence_memory_server-0.3.0/LICENSE +21 -0
  7. transcendence_memory_server-0.3.0/PKG-INFO +223 -0
  8. transcendence_memory_server-0.3.0/README.md +190 -0
  9. transcendence_memory_server-0.3.0/README.zh-CN.md +190 -0
  10. transcendence_memory_server-0.3.0/SECURITY.md +26 -0
  11. transcendence_memory_server-0.3.0/docker-compose.prod.yml +17 -0
  12. transcendence_memory_server-0.3.0/docker-compose.yml +25 -0
  13. transcendence_memory_server-0.3.0/docs/README.md +30 -0
  14. transcendence_memory_server-0.3.0/docs/api-contract.md +233 -0
  15. transcendence_memory_server-0.3.0/docs/deployment/docker-deployment.md +48 -0
  16. transcendence_memory_server-0.3.0/docs/deployment/environment-reference.md +64 -0
  17. transcendence_memory_server-0.3.0/docs/deployment/quickstart.md +100 -0
  18. transcendence_memory_server-0.3.0/docs/deployment/reverse-proxy.md +60 -0
  19. transcendence_memory_server-0.3.0/docs/deployment/systemd-deployment.md +67 -0
  20. transcendence_memory_server-0.3.0/docs/development-bootstrap.md +122 -0
  21. transcendence_memory_server-0.3.0/docs/identity/identity-backend.md +23 -0
  22. transcendence_memory_server-0.3.0/docs/identity/identity-both.md +23 -0
  23. transcendence_memory_server-0.3.0/docs/operations/backup-restore.md +49 -0
  24. transcendence_memory_server-0.3.0/docs/operations/health-check.md +81 -0
  25. transcendence_memory_server-0.3.0/docs/operations/troubleshooting.md +103 -0
  26. transcendence_memory_server-0.3.0/docs/operations/upgrade-migration.md +39 -0
  27. transcendence_memory_server-0.3.0/docs/server-boundary.md +45 -0
  28. transcendence_memory_server-0.3.0/pyproject.toml +52 -0
  29. transcendence_memory_server-0.3.0/scripts/arch_detect.py +114 -0
  30. transcendence_memory_server-0.3.0/scripts/bootstrap_dev.sh +35 -0
  31. transcendence_memory_server-0.3.0/scripts/entrypoint.sh +52 -0
  32. transcendence_memory_server-0.3.0/scripts/load_rag_config.sh +34 -0
  33. transcendence_memory_server-0.3.0/scripts/preflight_check.sh +105 -0
  34. transcendence_memory_server-0.3.0/scripts/rag_engine.py +157 -0
  35. transcendence_memory_server-0.3.0/scripts/run_task_rag_server.sh +17 -0
  36. transcendence_memory_server-0.3.0/scripts/smoke_test_client_ingest_search.py +75 -0
  37. transcendence_memory_server-0.3.0/scripts/task_rag_lancedb_ingest.py +254 -0
  38. transcendence_memory_server-0.3.0/scripts/task_rag_runtime.py +67 -0
  39. transcendence_memory_server-0.3.0/scripts/task_rag_search.py +80 -0
  40. transcendence_memory_server-0.3.0/scripts/task_rag_server.py +613 -0
  41. transcendence_memory_server-0.3.0/scripts/task_rag_server_models.py +213 -0
  42. transcendence_memory_server-0.3.0/scripts/task_rag_structured_ingest.py +176 -0
  43. transcendence_memory_server-0.3.0/src/tm_server/__init__.py +2 -0
  44. transcendence_memory_server-0.3.0/src/tm_server/__main__.py +4 -0
  45. transcendence_memory_server-0.3.0/src/tm_server/cli.py +96 -0
  46. transcendence_memory_server-0.3.0/tasks_rag/README.md +19 -0
  47. transcendence_memory_server-0.3.0/tests/conftest.py +57 -0
  48. transcendence_memory_server-0.3.0/tests/test_arch_detect.py +98 -0
  49. transcendence_memory_server-0.3.0/tests/test_connection_token.py +43 -0
  50. transcendence_memory_server-0.3.0/tests/test_container_management.py +58 -0
  51. transcendence_memory_server-0.3.0/tests/test_crud_endpoints.py +103 -0
  52. transcendence_memory_server-0.3.0/tests/test_error_scenarios.py +25 -0
  53. transcendence_memory_server-0.3.0/tests/test_health_modules.py +84 -0
  54. transcendence_memory_server-0.3.0/tests/test_task_rag_server_memory_objects.py +181 -0
@@ -0,0 +1,47 @@
1
+ # transcendence-memory-server 环境变量模板
2
+ # 复制为 .env 并填入实际值:cp .env.example .env
3
+
4
+ # ╔═══════════════════════════════════════════════════════╗
5
+ # ║ 必需配置(基础 LanceDB 向量检索) ║
6
+ # ╚═══════════════════════════════════════════════════════╝
7
+
8
+ # 服务端 API 认证密钥(客户端通过 X-API-KEY 或 Authorization: Bearer 传入)
9
+ RAG_API_KEY=your-rag-api-key
10
+
11
+ # Embedding 模型配置
12
+ EMBEDDING_BASE_URL=https://api.openai.com/v1
13
+ EMBEDDING_API_KEY=sk-your-api-key
14
+ EMBEDDING_MODEL=gemini-embedding-001
15
+ EMBEDDING_DIM=3072
16
+
17
+ # ╔═══════════════════════════════════════════════════════╗
18
+ # ║ 可选:启用 LightRAG 知识图谱 ║
19
+ # ║ 设置后架构升级为 lancedb+lightrag ║
20
+ # ╚═══════════════════════════════════════════════════════╝
21
+
22
+ LLM_MODEL=gemini-2.5-flash
23
+ LLM_BASE_URL=https://api.openai.com/v1
24
+ LLM_API_KEY=sk-your-api-key
25
+
26
+ # ╔═══════════════════════════════════════════════════════╗
27
+ # ║ 可选:启用多模态 RAG(PDF/图片/表格解析) ║
28
+ # ║ 需同时配置上方 LLM 密钥 ║
29
+ # ║ 设置后架构升级为 rag-everything ║
30
+ # ╚═══════════════════════════════════════════════════════╝
31
+
32
+ VLM_MODEL=qwen3-vl-plus
33
+ VLM_BASE_URL=https://api.openai.com/v1
34
+ VLM_API_KEY=sk-your-api-key
35
+
36
+ # ╔═══════════════════════════════════════════════════════╗
37
+ # ║ 服务配置 ║
38
+ # ╚═══════════════════════════════════════════════════════╝
39
+
40
+ # 对外 endpoint(connection token 中使用)
41
+ RAG_ADVERTISED_ENDPOINT=http://localhost:8711
42
+
43
+ # 工作区目录(Docker 中默认 /data,本地开发使用项目根目录)
44
+ WORKSPACE=/data
45
+
46
+ # 端口(仅 docker-compose 使用)
47
+ TM_PORT=8711
@@ -0,0 +1,67 @@
1
+ name: CI/CD
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ tags: ["v*.*.*"]
7
+ pull_request:
8
+ branches: [main]
9
+
10
+ jobs:
11
+ test:
12
+ runs-on: ubuntu-latest
13
+ strategy:
14
+ matrix:
15
+ python-version: ["3.11", "3.12", "3.13"]
16
+ steps:
17
+ - uses: actions/checkout@v4
18
+ - uses: actions/setup-python@v5
19
+ with:
20
+ python-version: ${{ matrix.python-version }}
21
+ - run: pip install -e ".[dev]"
22
+ - run: pytest tests/ -v
23
+
24
+ publish-pypi:
25
+ needs: test
26
+ if: startsWith(github.ref, 'refs/tags/v')
27
+ runs-on: ubuntu-latest
28
+ steps:
29
+ - uses: actions/checkout@v4
30
+ - uses: actions/setup-python@v5
31
+ with:
32
+ python-version: "3.11"
33
+ - name: Build and publish
34
+ env:
35
+ TWINE_USERNAME: __token__
36
+ TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
37
+ run: |
38
+ pip install build twine
39
+ python -m build
40
+ twine upload dist/*
41
+
42
+ publish-docker:
43
+ needs: test
44
+ if: startsWith(github.ref, 'refs/tags/v')
45
+ runs-on: ubuntu-latest
46
+ steps:
47
+ - uses: actions/checkout@v4
48
+ - name: Log in to Docker Hub
49
+ uses: docker/login-action@v3
50
+ with:
51
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
52
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
53
+ - name: Extract metadata
54
+ id: meta
55
+ uses: docker/metadata-action@v5
56
+ with:
57
+ images: ${{ secrets.DOCKERHUB_USERNAME }}/transcendence-memory-server
58
+ tags: |
59
+ type=semver,pattern={{version}}
60
+ type=raw,value=latest
61
+ - name: Build and push
62
+ uses: docker/build-push-action@v5
63
+ with:
64
+ context: .
65
+ push: true
66
+ tags: ${{ steps.meta.outputs.tags }}
67
+ labels: ${{ steps.meta.outputs.labels }}
@@ -0,0 +1,48 @@
1
+ __pycache__/
2
+ .pytest_cache/
3
+ .learnings/
4
+ .venv-task-rag-server/
5
+ .venv/
6
+ venv/
7
+ scripts/__pycache__/
8
+ tests/__pycache__/
9
+
10
+ # LightRAG runtime data
11
+ kv_store_*.json
12
+ vdb_*.json
13
+ graph_chunk_entity_relation.graphml
14
+
15
+ # Runtime data
16
+ tasks/rag/containers/
17
+ memory/
18
+ memory_archive/
19
+
20
+ # Environment
21
+ .env
22
+
23
+ # Build artifacts
24
+ dist/
25
+ build/
26
+ *.egg-info/
27
+ src/*.egg-info/
28
+
29
+ # IDE
30
+ .idea/
31
+ .vscode/
32
+ *.swp
33
+ *.swo
34
+
35
+ # OS
36
+ .DS_Store
37
+ Thumbs.db
38
+
39
+ # Coverage
40
+ htmlcov/
41
+ .coverage
42
+ coverage.xml
43
+
44
+ # Secrets (defensive)
45
+ .env.*
46
+ !.env.example
47
+ *.pem
48
+ *.key
@@ -0,0 +1,43 @@
1
+ # Contributing
2
+
3
+ Thanks for your interest in contributing to transcendence-memory-server!
4
+
5
+ ## Development Setup
6
+
7
+ ```bash
8
+ git clone https://github.com/leekkk2/transcendence-memory-server.git
9
+ cd transcendence-memory-server
10
+ python -m venv .venv
11
+ source .venv/bin/activate
12
+ pip install -e ".[dev]"
13
+ ```
14
+
15
+ ## Running Tests
16
+
17
+ ```bash
18
+ pytest tests/ -v
19
+ ```
20
+
21
+ ## Code Style
22
+
23
+ - Python 3.11+, type hints encouraged
24
+ - Keep functions under 50 lines
25
+ - Use meaningful names, avoid magic numbers
26
+
27
+ ## Pull Requests
28
+
29
+ 1. Fork the repo and create a feature branch
30
+ 2. Write tests for new functionality
31
+ 3. Ensure all tests pass
32
+ 4. Submit a PR with a clear description
33
+
34
+ ## Reporting Issues
35
+
36
+ Use [GitHub Issues](https://github.com/leekkk2/transcendence-memory-server/issues) with:
37
+ - Steps to reproduce
38
+ - Expected vs actual behavior
39
+ - Environment details (OS, Python version, Docker version)
40
+
41
+ ## License
42
+
43
+ By contributing, you agree that your contributions will be licensed under the MIT License.
@@ -0,0 +1,21 @@
1
+ FROM python:3.13-slim AS builder
2
+ WORKDIR /build
3
+ RUN pip install --no-cache-dir \
4
+ fastapi uvicorn httpx requests numpy \
5
+ lancedb pyarrow lightrag-hku
6
+ # raganything 从 GitHub 安装(可选,失败不阻塞构建)
7
+ RUN pip install --no-cache-dir git+https://github.com/HKUDS/RAG-Anything.git || true
8
+
9
+ FROM python:3.13-slim
10
+ RUN apt-get update && apt-get install -y --no-install-recommends curl && rm -rf /var/lib/apt/lists/*
11
+ WORKDIR /app
12
+ COPY --from=builder /usr/local/lib/python3.13/site-packages /usr/local/lib/python3.13/site-packages
13
+ COPY --from=builder /usr/local/bin /usr/local/bin
14
+ COPY scripts/ ./scripts/
15
+ COPY src/ ./src/
16
+ RUN chmod +x /app/scripts/entrypoint.sh
17
+ RUN mkdir -p /data/tasks/active /data/tasks/archived /data/tasks/rag/containers /data/memory /data/memory_archive
18
+ ENV WORKSPACE=/data
19
+ EXPOSE 8711
20
+ HEALTHCHECK --interval=30s --timeout=10s --retries=3 --start-period=15s CMD curl -f http://localhost:8711/health || exit 1
21
+ ENTRYPOINT ["/app/scripts/entrypoint.sh"]
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 leekkk2
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,223 @@
1
+ Metadata-Version: 2.4
2
+ Name: transcendence-memory-server
3
+ Version: 0.3.0
4
+ Summary: Self-hosted multimodal RAG memory service with LanceDB + RAG-Anything
5
+ Project-URL: Homepage, https://github.com/leekkk2/transcendence-memory-server
6
+ Project-URL: Repository, https://github.com/leekkk2/transcendence-memory-server
7
+ Project-URL: Issues, https://github.com/leekkk2/transcendence-memory-server/issues
8
+ Author: leekkk2
9
+ License-Expression: MIT
10
+ License-File: LICENSE
11
+ Keywords: fastapi,lancedb,lightrag,memory,multimodal,rag,vector-database
12
+ Classifier: Development Status :: 4 - Beta
13
+ Classifier: Framework :: FastAPI
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3.11
16
+ Classifier: Programming Language :: Python :: 3.12
17
+ Classifier: Programming Language :: Python :: 3.13
18
+ Requires-Python: <3.14,>=3.11
19
+ Requires-Dist: fastapi>=0.100.0
20
+ Requires-Dist: httpx>=0.24.0
21
+ Requires-Dist: lancedb>=0.4.0
22
+ Requires-Dist: lightrag-hku>=1.4.0
23
+ Requires-Dist: numpy>=1.24.0
24
+ Requires-Dist: pyarrow>=12.0.0
25
+ Requires-Dist: requests>=2.28.0
26
+ Requires-Dist: uvicorn>=0.20.0
27
+ Provides-Extra: dev
28
+ Requires-Dist: httpx>=0.24.0; extra == 'dev'
29
+ Requires-Dist: pytest>=8.0.0; extra == 'dev'
30
+ Provides-Extra: multimodal
31
+ Requires-Dist: raganything>=1.2.0; extra == 'multimodal'
32
+ Description-Content-Type: text/markdown
33
+
34
+ # Transcendence Memory Server
35
+
36
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
37
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/)
38
+ [![Docker](https://img.shields.io/badge/docker-ready-blue.svg)](Dockerfile)
39
+
40
+ > **Self-hosted multimodal RAG cloud memory service — a shared brain for your AI agents.**
41
+
42
+ [中文文档](README.zh-CN.md)
43
+
44
+ Transcendence Memory Server is a cloud memory backend that multiple AI agents connect to simultaneously. Each agent stores its own memories in isolated containers, while being able to cross-query other agents' knowledge — turning isolated AI sessions into a collaborative, persistent knowledge network.
45
+
46
+ ```
47
+ Agent A (Claude Code) Agent B (Codex CLI) Agent C (OpenClaw)
48
+ | | |
49
+ | store & search own | store & search own | store & search own
50
+ | cross-query B, C | cross-query A, C | cross-query A, B
51
+ | | |
52
+ +------------------------------+------------------------------+
53
+ |
54
+ Transcendence Memory Server
55
+ +-------------------------+
56
+ | Container: agent-a |
57
+ | Container: agent-b |
58
+ | Container: agent-c |
59
+ | Container: shared |
60
+ +-------------------------+
61
+ ```
62
+
63
+ ## Why Cloud Memory?
64
+
65
+ | Problem | Without | With Transcendence |
66
+ |---------|---------|-------------------|
67
+ | Session ends | Memory lost | Persisted to cloud, recoverable anytime |
68
+ | Switch agents | Start from zero | New agent inherits context via search |
69
+ | Cross-project | Knowledge siloed | Agent B queries Agent A's decisions |
70
+ | Team of agents | Each works in isolation | Shared container for collective knowledge |
71
+ | Onboarding | Re-explain everything | Agent reads past decisions and rationale |
72
+
73
+ ## Features
74
+
75
+ - **Multi-Agent Cloud Memory** — one server, many agents; each stores its own, each can query others
76
+ - **Container Isolation** — per-agent or per-project namespaces with full CRUD; shared containers for team knowledge
77
+ - **LanceDB Vector Search** — sub-second semantic retrieval over task cards, memory objects, and structured data
78
+ - **LightRAG Knowledge Graph** — entity/relation extraction with hybrid retrieval (local + global + keyword)
79
+ - **RAG-Anything Multimodal** — PDF, image, and table parsing with vision model support
80
+ - **Auto-Detect Architecture** — automatically enables capabilities based on configured API keys
81
+ - **Connection Token** — one-step client setup; give each agent a token and it's connected
82
+ - **Zero Permission Issues** — Docker named volumes, no bind mount headaches
83
+
84
+ ## Architecture Tiers
85
+
86
+ The server auto-detects its capability tier based on your `.env` configuration:
87
+
88
+ | Tier | Required Keys | Capabilities |
89
+ |------|--------------|-------------|
90
+ | `lancedb-only` | `EMBEDDING_API_KEY` | Vector search, typed objects, structured ingest |
91
+ | `lancedb+lightrag` | + `LLM_API_KEY` | + Knowledge graph, entity extraction, hybrid queries |
92
+ | `rag-everything` | + `VLM_API_KEY` | + PDF/image/table parsing, vision model queries |
93
+
94
+ ## Quick Start
95
+
96
+ ### Docker (recommended)
97
+
98
+ ```bash
99
+ git clone https://github.com/leekkk2/transcendence-memory-server.git
100
+ cd transcendence-memory-server
101
+ cp .env.example .env # edit with your API keys
102
+ docker compose up -d --build
103
+ curl http://localhost:8711/health
104
+ ```
105
+
106
+ ### Production (VPS + Nginx)
107
+
108
+ ```bash
109
+ # Preflight check
110
+ bash scripts/preflight_check.sh
111
+
112
+ # Deploy with localhost-only binding
113
+ docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d --build
114
+ ```
115
+
116
+ ### Connect Your Agents
117
+
118
+ Once the server is running, each agent gets its own connection token:
119
+
120
+ ```bash
121
+ # Export a token for Agent A
122
+ curl -sS "http://localhost:8711/export-connection-token?container=agent-a" \
123
+ -H "X-API-KEY: your-key"
124
+
125
+ # Export a token for Agent B (different container)
126
+ curl -sS "http://localhost:8711/export-connection-token?container=agent-b" \
127
+ -H "X-API-KEY: your-key"
128
+
129
+ # Export a shared container token (for cross-agent collaboration)
130
+ curl -sS "http://localhost:8711/export-connection-token?container=shared" \
131
+ -H "X-API-KEY: your-key"
132
+ ```
133
+
134
+ Give each token to the corresponding agent. With the [transcendence-memory](https://github.com/leekkk2/transcendence-memory) skill installed, the agent runs `/tm connect <token>` and it's ready.
135
+
136
+ ### Local Development
137
+
138
+ ```bash
139
+ ./scripts/bootstrap_dev.sh
140
+ export RAG_API_KEY="your-key"
141
+ export EMBEDDING_API_KEY="your-key"
142
+ ./scripts/run_task_rag_server.sh
143
+ ```
144
+
145
+ ## API Overview
146
+
147
+ ### Text Memory (Lightweight Path)
148
+
149
+ | Endpoint | Method | Description |
150
+ |----------|--------|-------------|
151
+ | `/health` | GET | Health check with module status (public) |
152
+ | `/search` | POST | Semantic vector search |
153
+ | `/embed` | POST | Rebuild LanceDB index |
154
+ | `/ingest-memory/objects` | POST | Store typed memory objects |
155
+ | `/ingest-structured` | POST | Structured JSON ingest |
156
+ | `/containers/{c}/memories/{id}` | PUT/DELETE | Update/delete individual memories |
157
+
158
+ ### Multimodal RAG (Knowledge Graph Path)
159
+
160
+ | Endpoint | Method | Description |
161
+ |----------|--------|-------------|
162
+ | `/documents/text` | POST | Ingest text into knowledge graph |
163
+ | `/documents/upload` | POST | Upload PDF/image/MD files |
164
+ | `/query` | POST | RAG query with LLM-generated answer |
165
+
166
+ ### Management
167
+
168
+ | Endpoint | Method | Description |
169
+ |----------|--------|-------------|
170
+ | `/containers` | GET | List all containers |
171
+ | `/containers/{name}` | DELETE | Delete a container |
172
+ | `/export-connection-token` | GET | Export credentials for client setup |
173
+ | `/jobs/{pid}` | GET | Check async task status |
174
+
175
+ All endpoints except `/health` require authentication via `X-API-KEY` or `Authorization: Bearer` header.
176
+
177
+ ## Configuration
178
+
179
+ All settings via `.env` file (see [.env.example](.env.example)):
180
+
181
+ | Variable | Required | Tier | Description |
182
+ |----------|----------|------|-------------|
183
+ | `RAG_API_KEY` | Yes | All | API authentication key |
184
+ | `EMBEDDING_API_KEY` | Yes | All | Embedding model API key |
185
+ | `EMBEDDING_BASE_URL` | No | All | Embedding endpoint (default: OpenAI) |
186
+ | `EMBEDDING_MODEL` | No | All | Model name (default: gemini-embedding-001) |
187
+ | `LLM_API_KEY` | No | lightrag+ | LLM API key for knowledge graph |
188
+ | `LLM_MODEL` | No | lightrag+ | LLM model (default: gemini-2.5-flash) |
189
+ | `VLM_API_KEY` | No | everything | Vision model API key |
190
+ | `VLM_MODEL` | No | everything | Vision model (default: qwen3-vl-plus) |
191
+
192
+ ## CLI
193
+
194
+ ```bash
195
+ pip install -e .
196
+ tm-server start # Start server (default 0.0.0.0:8711)
197
+ tm-server start --port 9000 # Custom port
198
+ tm-server health # Health check
199
+ tm-server export-token # Export connection token
200
+ ```
201
+
202
+ ## Client Skill
203
+
204
+ Pair with [transcendence-memory](https://github.com/leekkk2/transcendence-memory) — an agent skill that provides built-in commands (`/tm connect`, `/tm search`, `/tm remember`, `/tm query`) for Claude Code, OpenClaw, Codex CLI, and other AI coding agents.
205
+
206
+ ## Documentation
207
+
208
+ - [Quick Start](docs/deployment/quickstart.md)
209
+ - [Docker Deployment](docs/deployment/docker-deployment.md)
210
+ - [Reverse Proxy](docs/deployment/reverse-proxy.md)
211
+ - [Environment Reference](docs/deployment/environment-reference.md)
212
+ - [API Contract](docs/api-contract.md)
213
+ - [Health Check](docs/operations/health-check.md)
214
+ - [Troubleshooting](docs/operations/troubleshooting.md)
215
+ - [Development Bootstrap](docs/development-bootstrap.md)
216
+
217
+ ## Contributing
218
+
219
+ See [CONTRIBUTING.md](CONTRIBUTING.md). Pull requests welcome.
220
+
221
+ ## License
222
+
223
+ [MIT](LICENSE)
@@ -0,0 +1,190 @@
1
+ # Transcendence Memory Server
2
+
3
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
4
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/)
5
+ [![Docker](https://img.shields.io/badge/docker-ready-blue.svg)](Dockerfile)
6
+
7
+ > **Self-hosted multimodal RAG cloud memory service — a shared brain for your AI agents.**
8
+
9
+ [中文文档](README.zh-CN.md)
10
+
11
+ Transcendence Memory Server is a cloud memory backend that multiple AI agents connect to simultaneously. Each agent stores its own memories in isolated containers, while being able to cross-query other agents' knowledge — turning isolated AI sessions into a collaborative, persistent knowledge network.
12
+
13
+ ```
14
+ Agent A (Claude Code) Agent B (Codex CLI) Agent C (OpenClaw)
15
+ | | |
16
+ | store & search own | store & search own | store & search own
17
+ | cross-query B, C | cross-query A, C | cross-query A, B
18
+ | | |
19
+ +------------------------------+------------------------------+
20
+ |
21
+ Transcendence Memory Server
22
+ +-------------------------+
23
+ | Container: agent-a |
24
+ | Container: agent-b |
25
+ | Container: agent-c |
26
+ | Container: shared |
27
+ +-------------------------+
28
+ ```
29
+
30
+ ## Why Cloud Memory?
31
+
32
+ | Problem | Without | With Transcendence |
33
+ |---------|---------|-------------------|
34
+ | Session ends | Memory lost | Persisted to cloud, recoverable anytime |
35
+ | Switch agents | Start from zero | New agent inherits context via search |
36
+ | Cross-project | Knowledge siloed | Agent B queries Agent A's decisions |
37
+ | Team of agents | Each works in isolation | Shared container for collective knowledge |
38
+ | Onboarding | Re-explain everything | Agent reads past decisions and rationale |
39
+
40
+ ## Features
41
+
42
+ - **Multi-Agent Cloud Memory** — one server, many agents; each stores its own, each can query others
43
+ - **Container Isolation** — per-agent or per-project namespaces with full CRUD; shared containers for team knowledge
44
+ - **LanceDB Vector Search** — sub-second semantic retrieval over task cards, memory objects, and structured data
45
+ - **LightRAG Knowledge Graph** — entity/relation extraction with hybrid retrieval (local + global + keyword)
46
+ - **RAG-Anything Multimodal** — PDF, image, and table parsing with vision model support
47
+ - **Auto-Detect Architecture** — automatically enables capabilities based on configured API keys
48
+ - **Connection Token** — one-step client setup; give each agent a token and it's connected
49
+ - **Zero Permission Issues** — Docker named volumes, no bind mount headaches
50
+
51
+ ## Architecture Tiers
52
+
53
+ The server auto-detects its capability tier based on your `.env` configuration:
54
+
55
+ | Tier | Required Keys | Capabilities |
56
+ |------|--------------|-------------|
57
+ | `lancedb-only` | `EMBEDDING_API_KEY` | Vector search, typed objects, structured ingest |
58
+ | `lancedb+lightrag` | + `LLM_API_KEY` | + Knowledge graph, entity extraction, hybrid queries |
59
+ | `rag-everything` | + `VLM_API_KEY` | + PDF/image/table parsing, vision model queries |
60
+
61
+ ## Quick Start
62
+
63
+ ### Docker (recommended)
64
+
65
+ ```bash
66
+ git clone https://github.com/leekkk2/transcendence-memory-server.git
67
+ cd transcendence-memory-server
68
+ cp .env.example .env # edit with your API keys
69
+ docker compose up -d --build
70
+ curl http://localhost:8711/health
71
+ ```
72
+
73
+ ### Production (VPS + Nginx)
74
+
75
+ ```bash
76
+ # Preflight check
77
+ bash scripts/preflight_check.sh
78
+
79
+ # Deploy with localhost-only binding
80
+ docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d --build
81
+ ```
82
+
83
+ ### Connect Your Agents
84
+
85
+ Once the server is running, each agent gets its own connection token:
86
+
87
+ ```bash
88
+ # Export a token for Agent A
89
+ curl -sS "http://localhost:8711/export-connection-token?container=agent-a" \
90
+ -H "X-API-KEY: your-key"
91
+
92
+ # Export a token for Agent B (different container)
93
+ curl -sS "http://localhost:8711/export-connection-token?container=agent-b" \
94
+ -H "X-API-KEY: your-key"
95
+
96
+ # Export a shared container token (for cross-agent collaboration)
97
+ curl -sS "http://localhost:8711/export-connection-token?container=shared" \
98
+ -H "X-API-KEY: your-key"
99
+ ```
100
+
101
+ Give each token to the corresponding agent. With the [transcendence-memory](https://github.com/leekkk2/transcendence-memory) skill installed, the agent runs `/tm connect <token>` and it's ready.
102
+
103
+ ### Local Development
104
+
105
+ ```bash
106
+ ./scripts/bootstrap_dev.sh
107
+ export RAG_API_KEY="your-key"
108
+ export EMBEDDING_API_KEY="your-key"
109
+ ./scripts/run_task_rag_server.sh
110
+ ```
111
+
112
+ ## API Overview
113
+
114
+ ### Text Memory (Lightweight Path)
115
+
116
+ | Endpoint | Method | Description |
117
+ |----------|--------|-------------|
118
+ | `/health` | GET | Health check with module status (public) |
119
+ | `/search` | POST | Semantic vector search |
120
+ | `/embed` | POST | Rebuild LanceDB index |
121
+ | `/ingest-memory/objects` | POST | Store typed memory objects |
122
+ | `/ingest-structured` | POST | Structured JSON ingest |
123
+ | `/containers/{c}/memories/{id}` | PUT/DELETE | Update/delete individual memories |
124
+
125
+ ### Multimodal RAG (Knowledge Graph Path)
126
+
127
+ | Endpoint | Method | Description |
128
+ |----------|--------|-------------|
129
+ | `/documents/text` | POST | Ingest text into knowledge graph |
130
+ | `/documents/upload` | POST | Upload PDF/image/MD files |
131
+ | `/query` | POST | RAG query with LLM-generated answer |
132
+
133
+ ### Management
134
+
135
+ | Endpoint | Method | Description |
136
+ |----------|--------|-------------|
137
+ | `/containers` | GET | List all containers |
138
+ | `/containers/{name}` | DELETE | Delete a container |
139
+ | `/export-connection-token` | GET | Export credentials for client setup |
140
+ | `/jobs/{pid}` | GET | Check async task status |
141
+
142
+ All endpoints except `/health` require authentication via `X-API-KEY` or `Authorization: Bearer` header.
143
+
144
+ ## Configuration
145
+
146
+ All settings via `.env` file (see [.env.example](.env.example)):
147
+
148
+ | Variable | Required | Tier | Description |
149
+ |----------|----------|------|-------------|
150
+ | `RAG_API_KEY` | Yes | All | API authentication key |
151
+ | `EMBEDDING_API_KEY` | Yes | All | Embedding model API key |
152
+ | `EMBEDDING_BASE_URL` | No | All | Embedding endpoint (default: OpenAI) |
153
+ | `EMBEDDING_MODEL` | No | All | Model name (default: gemini-embedding-001) |
154
+ | `LLM_API_KEY` | No | lightrag+ | LLM API key for knowledge graph |
155
+ | `LLM_MODEL` | No | lightrag+ | LLM model (default: gemini-2.5-flash) |
156
+ | `VLM_API_KEY` | No | everything | Vision model API key |
157
+ | `VLM_MODEL` | No | everything | Vision model (default: qwen3-vl-plus) |
158
+
159
+ ## CLI
160
+
161
+ ```bash
162
+ pip install -e .
163
+ tm-server start # Start server (default 0.0.0.0:8711)
164
+ tm-server start --port 9000 # Custom port
165
+ tm-server health # Health check
166
+ tm-server export-token # Export connection token
167
+ ```
168
+
169
+ ## Client Skill
170
+
171
+ Pair with [transcendence-memory](https://github.com/leekkk2/transcendence-memory) — an agent skill that provides built-in commands (`/tm connect`, `/tm search`, `/tm remember`, `/tm query`) for Claude Code, OpenClaw, Codex CLI, and other AI coding agents.
172
+
173
+ ## Documentation
174
+
175
+ - [Quick Start](docs/deployment/quickstart.md)
176
+ - [Docker Deployment](docs/deployment/docker-deployment.md)
177
+ - [Reverse Proxy](docs/deployment/reverse-proxy.md)
178
+ - [Environment Reference](docs/deployment/environment-reference.md)
179
+ - [API Contract](docs/api-contract.md)
180
+ - [Health Check](docs/operations/health-check.md)
181
+ - [Troubleshooting](docs/operations/troubleshooting.md)
182
+ - [Development Bootstrap](docs/development-bootstrap.md)
183
+
184
+ ## Contributing
185
+
186
+ See [CONTRIBUTING.md](CONTRIBUTING.md). Pull requests welcome.
187
+
188
+ ## License
189
+
190
+ [MIT](LICENSE)