longmem-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
longmem-0.1.0/.gitignore ADDED
@@ -0,0 +1,40 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *.pyo
5
+ *.pyd
6
+ .Python
7
+ *.egg-info/
8
+ *.egg
9
+ .eggs/
10
+
11
+ # Build
12
+ dist/
13
+ build/
14
+ .hatch/
15
+
16
+ # Virtual environments
17
+ .venv/
18
+ venv/
19
+ env/
20
+
21
+ # uv
22
+ .uv/
23
+
24
+ # Testing
25
+ .pytest_cache/
26
+ .coverage
27
+ htmlcov/
28
+
29
+ # IDE
30
+ .idea/
31
+ .vscode/
32
+ *.swp
33
+ *.swo
34
+
35
+ # OS
36
+ .DS_Store
37
+ Thumbs.db
38
+
39
+ # longmem-cursor DB (local data, never commit)
40
+ .longmem-cursor/
longmem-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Mariia Eremina
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
longmem-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,243 @@
1
+ Metadata-Version: 2.4
2
+ Name: longmem
3
+ Version: 0.1.0
4
+ Summary: Persistent cross-project memory for Cursor and Claude Code — stores problem/solution pairs with semantic search
5
+ Project-URL: Homepage, https://github.com/marerem/longmem
6
+ Project-URL: Bug Tracker, https://github.com/marerem/longmem/issues
7
+ License: MIT
8
+ License-File: LICENSE
9
+ Keywords: claude-code,cursor,embeddings,lancedb,llm,mcp,memory,ollama,rag
10
+ Classifier: Development Status :: 3 - Alpha
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
16
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
17
+ Requires-Python: >=3.11
18
+ Requires-Dist: fastmcp<4,>=3.2.4
19
+ Requires-Dist: httpx>=0.27.0
20
+ Requires-Dist: lancedb>=0.17.0
21
+ Requires-Dist: numpy>=1.24.0
22
+ Requires-Dist: pyarrow>=14.0.0
23
+ Provides-Extra: openai
24
+ Requires-Dist: openai>=1.0.0; extra == 'openai'
25
+ Description-Content-Type: text/markdown
26
+
27
+ <div align="center">
28
+
29
+ <img src="longmem_github_logo.svg" alt="longmem" width="480"/>
30
+
31
+ **Cross-project memory for AI coding assistants.**
32
+ Stop solving the same problems twice.
33
+
34
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org)
35
+ [![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](LICENSE)
36
+ [![Tests](https://github.com/marerem/longmem/actions/workflows/test.yml/badge.svg)](https://github.com/marerem/longmem/actions)
37
+ [![Coverage](https://codecov.io/gh/marerem/longmem/branch/main/graph/badge.svg)](https://codecov.io/gh/marerem/longmem)
38
+ [![Open Issues](https://img.shields.io/github/issues/marerem/longmem)](https://github.com/marerem/longmem/issues)
39
+ [![Closed Issues](https://img.shields.io/github/issues-closed/marerem/longmem?color=green)](https://github.com/marerem/longmem/issues?q=is%3Aissue+is%3Aclosed)
40
+
41
+ </div>
42
+
43
+ ---
44
+
45
+ Your AI solves the same bug in a different project six months later. Writes the same boilerplate. Explains the same pattern. You already knew the answer.
46
+
47
+ **longmem** gives your AI a persistent memory that works across every project and every session. Before reasoning from scratch, it searches what you've already solved. After something works, it saves it. The longer you use it, the less you repeat yourself.
48
+
49
+ ```
50
+ You describe a problem
51
+
52
+
53
+ search_similar() ──── match found (≥85%) ────▶ cached solution + edge cases
54
+
55
+ no match
56
+
57
+
58
+ AI reasons from scratch
59
+
60
+ "it works"
61
+
62
+
63
+ confirm_solution() ──── saved for every future project
64
+ ```
65
+
66
+ ---
67
+
68
+ ## Why longmem
69
+
70
+ | | longmem | others |
71
+ |---|---|---|
72
+ | **Cost** | Free — local [Ollama](https://ollama.com) embeddings | Requires API calls per session |
73
+ | **Privacy** | Nothing leaves your machine | Sends observations to external APIs |
74
+ | **Process** | Starts on demand, no daemon | Background worker + open port required |
75
+ | **IDE support** | Cursor + Claude Code | Primarily one IDE |
76
+ | **Search** | Hybrid: semantic + keyword (FTS5) | Vector-only or keyword-only |
77
+ | **Teams** | Export / import / shared DB path / S3 | Single-user |
78
+ | **License** | MIT | AGPL / proprietary |
79
+
80
+ ---
81
+
82
+ ## Quickstart
83
+
84
+ **1. Install**
85
+
86
+ ```bash
87
+ pipx install longmem
88
+ ```
89
+
90
+ **2. Setup** — checks Ollama, pulls the embedding model, writes your IDE config
91
+
92
+ ```bash
93
+ longmem init
94
+ ```
95
+
96
+ **3. Activate in each project** — copies the rules file that tells the AI how to use memory
97
+
98
+ ```bash
99
+ cd your-project
100
+ longmem install
101
+ ```
102
+
103
+ **4. Restart your IDE.** Memory tools are now active on every chat.
104
+
105
+ > **Need Ollama?** Install from [ollama.com](https://ollama.com), then `ollama pull nomic-embed-text`. Or use OpenAI — see [Configuration](#configuration).
106
+
107
+ ---
108
+
109
+ ## How it works
110
+
111
+ longmem is an [MCP](https://modelcontextprotocol.io) server. Your IDE starts it on demand. Two rules drive the workflow:
112
+
113
+ **Rule 1 — search first.** Before the AI reasons about any bug or question, it calls `search_similar`. If a match is found (cosine similarity ≥ 85%), the cached solution is returned with any edge-case notes. Below the threshold, the AI solves normally.
114
+
115
+ **Rule 2 — save on success.** When you confirm something works, the AI calls `confirm_solution`. One parameter — just the solution text. Problem metadata is auto-filled from the earlier search.
116
+
117
+ The rules file (`longmem.mdc` for Cursor, `CLAUDE.md` for Claude Code) wires this up automatically. No manual prompting.
118
+
119
+ **AI forgot to save?** Run `longmem review` — an interactive CLI to save any solution in 30 seconds.
120
+
121
+ ---
122
+
123
+ ## CLI
124
+
125
+ | Command | What it does |
126
+ |---------|-------------|
127
+ | `longmem init` | One-time setup: Ollama check, model pull, writes IDE config |
128
+ | `longmem install` | Copy rules into the current project |
129
+ | `longmem status` | Config, Ollama reachability, entry count, DB size |
130
+ | `longmem export [file]` | Dump all entries to JSON — backup or share |
131
+ | `longmem import <file>` | Load a JSON export — onboard teammates or migrate machines |
132
+ | `longmem review` | Manually save a solution when the AI forgot |
133
+
134
+ `longmem` with no arguments starts the MCP server (used by your IDE).
135
+
136
+ ---
137
+
138
+ ## Configuration
139
+
140
+ Config lives at `~/.longmem/config.toml`. All fields are optional — defaults work with a local Ollama instance.
141
+
142
+ ### Switch to OpenAI embeddings
143
+
144
+ ```toml
145
+ embedder = "openai"
146
+ openai_model = "text-embedding-3-small"
147
+ openai_api_key = "sk-..." # or set OPENAI_API_KEY
148
+ ```
149
+
150
+ Install the extra: `pip install 'longmem[openai]'`
151
+
152
+ ### Team shared database
153
+
154
+ Point every team member's config at the same path:
155
+
156
+ ```toml
157
+ # NFS / shared drive
158
+ db_path = "/mnt/shared/longmem/db"
159
+ ```
160
+
161
+ Or use cloud storage:
162
+
163
+ ```toml
164
+ # S3 (uses AWS env vars)
165
+ db_uri = "s3://my-bucket/longmem"
166
+
167
+ # LanceDB Cloud
168
+ db_uri = "db://my-org/my-db"
169
+ lancedb_api_key = "ldb_..." # or set LANCEDB_API_KEY
170
+ ```
171
+
172
+ No shared mount? Use `longmem export` / `longmem import` to distribute a snapshot.
173
+
174
+ ### Tuning
175
+
176
+ ```toml
177
+ similarity_threshold = 0.85 # minimum score to surface a cached result (default 0.85)
178
+ duplicate_threshold = 0.95 # minimum score to block a save as a near-duplicate (default 0.95)
179
+ ```
180
+
181
+ ---
182
+
183
+ ## MCP tools
184
+
185
+ The server exposes 11 tools. The two you interact with most:
186
+
187
+ - **`search_similar`** — semantic + keyword hybrid search. Returns ranked matches with similarity scores, edge cases, and a `keyword_match` flag when the hit came from exact text rather than vector similarity.
188
+ - **`confirm_solution`** — saves a solution with one parameter. Problem metadata auto-filled from the preceding search.
189
+
190
+ Full list: `save_solution`, `correct_solution`, `enrich_solution`, `add_edge_case`, `search_by_project`, `delete_solution`, `rebuild_index`, `list_recent`, `stats`.
191
+
192
+ Call `rebuild_index` once you reach 256+ entries to compact the database and build the ANN index for faster search.
193
+
194
+ ---
195
+
196
+ ## Category reference
197
+
198
+ Categories pre-filter before vector search — keeps retrieval fast at any scale.
199
+
200
+ | Category | Use for |
201
+ |---|---|
202
+ | `ci_cd` | GitHub Actions, Jenkins, GitLab CI, build failures |
203
+ | `containers` | Docker, Kubernetes, Helm, OOM kills |
204
+ | `infrastructure` | Terraform, Pulumi, CDK, IaC drift |
205
+ | `cloud` | AWS/GCP/Azure SDK, IAM, quota errors |
206
+ | `networking` | DNS, TLS, load balancers, timeouts, proxies |
207
+ | `observability` | Logging, metrics, tracing, Prometheus, Grafana |
208
+ | `auth_security` | OAuth, JWT, RBAC, secrets, CVEs |
209
+ | `data_pipeline` | Airflow, Prefect, Dagster, ETL, data quality |
210
+ | `ml_training` | GPU/CUDA, distributed training, OOM |
211
+ | `model_serving` | vLLM, Triton, inference latency, batching |
212
+ | `experiment_tracking` | MLflow, W&B, DVC, reproducibility |
213
+ | `llm_rag` | Chunking, embedding, retrieval, reranking |
214
+ | `llm_api` | Rate limits, token cost, prompt engineering |
215
+ | `vector_db` | Pinecone, Weaviate, Qdrant, LanceDB |
216
+ | `agents` | LangChain, LlamaIndex, tool-calling, agent memory |
217
+ | `database` | SQL/NoSQL, migrations, slow queries |
218
+ | `api` | REST, GraphQL, gRPC, versioning |
219
+ | `async_concurrency` | Race conditions, event loops, deadlocks |
220
+ | `dependencies` | Version conflicts, packaging, lock files |
221
+ | `performance` | Profiling, memory leaks, caching |
222
+ | `testing` | Flaky tests, mocks, integration vs unit |
223
+ | `architecture` | Design patterns, service boundaries, refactoring |
224
+ | `other` | When nothing above fits |
225
+
226
+ ---
227
+
228
+ ## Contributing
229
+
230
+ ```bash
231
+ git clone https://github.com/marerem/longmem
232
+ cd longmem
233
+ uv sync --group dev
234
+ uv run pytest
235
+ ```
236
+
237
+ Pull requests welcome — bug fixes, features, docs, tests.
238
+
239
+ ---
240
+
241
+ ## License
242
+
243
+ MIT — see [LICENSE](LICENSE).
longmem-0.1.0/README.md ADDED
@@ -0,0 +1,217 @@
1
+ <div align="center">
2
+
3
+ <img src="longmem_github_logo.svg" alt="longmem" width="480"/>
4
+
5
+ **Cross-project memory for AI coding assistants.**
6
+ Stop solving the same problems twice.
7
+
8
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org)
9
+ [![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](LICENSE)
10
+ [![Tests](https://github.com/marerem/longmem/actions/workflows/test.yml/badge.svg)](https://github.com/marerem/longmem/actions)
11
+ [![Coverage](https://codecov.io/gh/marerem/longmem/branch/main/graph/badge.svg)](https://codecov.io/gh/marerem/longmem)
12
+ [![Open Issues](https://img.shields.io/github/issues/marerem/longmem)](https://github.com/marerem/longmem/issues)
13
+ [![Closed Issues](https://img.shields.io/github/issues-closed/marerem/longmem?color=green)](https://github.com/marerem/longmem/issues?q=is%3Aissue+is%3Aclosed)
14
+
15
+ </div>
16
+
17
+ ---
18
+
19
+ Your AI solves the same bug in a different project six months later. Writes the same boilerplate. Explains the same pattern. You already knew the answer.
20
+
21
+ **longmem** gives your AI a persistent memory that works across every project and every session. Before reasoning from scratch, it searches what you've already solved. After something works, it saves it. The longer you use it, the less you repeat yourself.
22
+
23
+ ```
24
+ You describe a problem
25
+
26
+
27
+ search_similar() ──── match found (≥85%) ────▶ cached solution + edge cases
28
+
29
+ no match
30
+
31
+
32
+ AI reasons from scratch
33
+
34
+ "it works"
35
+
36
+
37
+ confirm_solution() ──── saved for every future project
38
+ ```
39
+
40
+ ---
41
+
42
+ ## Why longmem
43
+
44
+ | | longmem | others |
45
+ |---|---|---|
46
+ | **Cost** | Free — local [Ollama](https://ollama.com) embeddings | Requires API calls per session |
47
+ | **Privacy** | Nothing leaves your machine | Sends observations to external APIs |
48
+ | **Process** | Starts on demand, no daemon | Background worker + open port required |
49
+ | **IDE support** | Cursor + Claude Code | Primarily one IDE |
50
+ | **Search** | Hybrid: semantic + keyword (FTS5) | Vector-only or keyword-only |
51
+ | **Teams** | Export / import / shared DB path / S3 | Single-user |
52
+ | **License** | MIT | AGPL / proprietary |
53
+
54
+ ---
55
+
56
+ ## Quickstart
57
+
58
+ **1. Install**
59
+
60
+ ```bash
61
+ pipx install longmem
62
+ ```
63
+
64
+ **2. Setup** — checks Ollama, pulls the embedding model, writes your IDE config
65
+
66
+ ```bash
67
+ longmem init
68
+ ```
69
+
70
+ **3. Activate in each project** — copies the rules file that tells the AI how to use memory
71
+
72
+ ```bash
73
+ cd your-project
74
+ longmem install
75
+ ```
76
+
77
+ **4. Restart your IDE.** Memory tools are now active on every chat.
78
+
79
+ > **Need Ollama?** Install from [ollama.com](https://ollama.com), then `ollama pull nomic-embed-text`. Or use OpenAI — see [Configuration](#configuration).
80
+
81
+ ---
82
+
83
+ ## How it works
84
+
85
+ longmem is an [MCP](https://modelcontextprotocol.io) server. Your IDE starts it on demand. Two rules drive the workflow:
86
+
87
+ **Rule 1 — search first.** Before the AI reasons about any bug or question, it calls `search_similar`. If a match is found (cosine similarity ≥ 85%), the cached solution is returned with any edge-case notes. Below the threshold, the AI solves normally.
88
+
89
+ **Rule 2 — save on success.** When you confirm something works, the AI calls `confirm_solution`. One parameter — just the solution text. Problem metadata is auto-filled from the earlier search.
90
+
91
+ The rules file (`longmem.mdc` for Cursor, `CLAUDE.md` for Claude Code) wires this up automatically. No manual prompting.
92
+
93
+ **AI forgot to save?** Run `longmem review` — an interactive CLI to save any solution in 30 seconds.
94
+
95
+ ---
96
+
97
+ ## CLI
98
+
99
+ | Command | What it does |
100
+ |---------|-------------|
101
+ | `longmem init` | One-time setup: Ollama check, model pull, writes IDE config |
102
+ | `longmem install` | Copy rules into the current project |
103
+ | `longmem status` | Config, Ollama reachability, entry count, DB size |
104
+ | `longmem export [file]` | Dump all entries to JSON — backup or share |
105
+ | `longmem import <file>` | Load a JSON export — onboard teammates or migrate machines |
106
+ | `longmem review` | Manually save a solution when the AI forgot |
107
+
108
+ `longmem` with no arguments starts the MCP server (used by your IDE).
109
+
110
+ ---
111
+
112
+ ## Configuration
113
+
114
+ Config lives at `~/.longmem/config.toml`. All fields are optional — defaults work with a local Ollama instance.
115
+
116
+ ### Switch to OpenAI embeddings
117
+
118
+ ```toml
119
+ embedder = "openai"
120
+ openai_model = "text-embedding-3-small"
121
+ openai_api_key = "sk-..." # or set OPENAI_API_KEY
122
+ ```
123
+
124
+ Install the extra: `pip install 'longmem[openai]'`
125
+
126
+ ### Team shared database
127
+
128
+ Point every team member's config at the same path:
129
+
130
+ ```toml
131
+ # NFS / shared drive
132
+ db_path = "/mnt/shared/longmem/db"
133
+ ```
134
+
135
+ Or use cloud storage:
136
+
137
+ ```toml
138
+ # S3 (uses AWS env vars)
139
+ db_uri = "s3://my-bucket/longmem"
140
+
141
+ # LanceDB Cloud
142
+ db_uri = "db://my-org/my-db"
143
+ lancedb_api_key = "ldb_..." # or set LANCEDB_API_KEY
144
+ ```
145
+
146
+ No shared mount? Use `longmem export` / `longmem import` to distribute a snapshot.
147
+
148
+ ### Tuning
149
+
150
+ ```toml
151
+ similarity_threshold = 0.85 # minimum score to surface a cached result (default 0.85)
152
+ duplicate_threshold = 0.95 # minimum score to block a save as a near-duplicate (default 0.95)
153
+ ```
154
+
155
+ ---
156
+
157
+ ## MCP tools
158
+
159
+ The server exposes 11 tools. The two you interact with most:
160
+
161
+ - **`search_similar`** — semantic + keyword hybrid search. Returns ranked matches with similarity scores, edge cases, and a `keyword_match` flag when the hit came from exact text rather than vector similarity.
162
+ - **`confirm_solution`** — saves a solution with one parameter. Problem metadata auto-filled from the preceding search.
163
+
164
+ Full list: `save_solution`, `correct_solution`, `enrich_solution`, `add_edge_case`, `search_by_project`, `delete_solution`, `rebuild_index`, `list_recent`, `stats`.
165
+
166
+ Call `rebuild_index` once you reach 256+ entries to compact the database and build the ANN index for faster search.
167
+
168
+ ---
169
+
170
+ ## Category reference
171
+
172
+ Categories pre-filter before vector search — keeps retrieval fast at any scale.
173
+
174
+ | Category | Use for |
175
+ |---|---|
176
+ | `ci_cd` | GitHub Actions, Jenkins, GitLab CI, build failures |
177
+ | `containers` | Docker, Kubernetes, Helm, OOM kills |
178
+ | `infrastructure` | Terraform, Pulumi, CDK, IaC drift |
179
+ | `cloud` | AWS/GCP/Azure SDK, IAM, quota errors |
180
+ | `networking` | DNS, TLS, load balancers, timeouts, proxies |
181
+ | `observability` | Logging, metrics, tracing, Prometheus, Grafana |
182
+ | `auth_security` | OAuth, JWT, RBAC, secrets, CVEs |
183
+ | `data_pipeline` | Airflow, Prefect, Dagster, ETL, data quality |
184
+ | `ml_training` | GPU/CUDA, distributed training, OOM |
185
+ | `model_serving` | vLLM, Triton, inference latency, batching |
186
+ | `experiment_tracking` | MLflow, W&B, DVC, reproducibility |
187
+ | `llm_rag` | Chunking, embedding, retrieval, reranking |
188
+ | `llm_api` | Rate limits, token cost, prompt engineering |
189
+ | `vector_db` | Pinecone, Weaviate, Qdrant, LanceDB |
190
+ | `agents` | LangChain, LlamaIndex, tool-calling, agent memory |
191
+ | `database` | SQL/NoSQL, migrations, slow queries |
192
+ | `api` | REST, GraphQL, gRPC, versioning |
193
+ | `async_concurrency` | Race conditions, event loops, deadlocks |
194
+ | `dependencies` | Version conflicts, packaging, lock files |
195
+ | `performance` | Profiling, memory leaks, caching |
196
+ | `testing` | Flaky tests, mocks, integration vs unit |
197
+ | `architecture` | Design patterns, service boundaries, refactoring |
198
+ | `other` | When nothing above fits |
199
+
200
+ ---
201
+
202
+ ## Contributing
203
+
204
+ ```bash
205
+ git clone https://github.com/marerem/longmem
206
+ cd longmem
207
+ uv sync --group dev
208
+ uv run pytest
209
+ ```
210
+
211
+ Pull requests welcome — bug fixes, features, docs, tests.
212
+
213
+ ---
214
+
215
+ ## License
216
+
217
+ MIT — see [LICENSE](LICENSE).
longmem-0.1.0/pyproject.toml ADDED
@@ -0,0 +1,60 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "longmem"
7
+ version = "0.1.0"
8
+ description = "Persistent cross-project memory for Cursor and Claude Code — stores problem/solution pairs with semantic search"
9
+ readme = "README.md"
10
+ requires-python = ">=3.11"
11
+ license = { text = "MIT" }
12
+ keywords = ["cursor", "claude-code", "mcp", "memory", "llm", "rag", "embeddings", "lancedb", "ollama"]
13
+ classifiers = [
14
+ "Development Status :: 3 - Alpha",
15
+ "Intended Audience :: Developers",
16
+ "License :: OSI Approved :: MIT License",
17
+ "Programming Language :: Python :: 3.11",
18
+ "Programming Language :: Python :: 3.12",
19
+ "Topic :: Software Development :: Libraries :: Python Modules",
20
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
21
+ ]
22
+ dependencies = [
23
+ "fastmcp>=3.2.4,<4",
24
+ "lancedb>=0.17.0",
25
+ "pyarrow>=14.0.0",
26
+ "httpx>=0.27.0",
27
+ "numpy>=1.24.0",
28
+ ]
29
+
30
+ [project.optional-dependencies]
31
+ openai = ["openai>=1.0.0"]
32
+
33
+ [project.urls]
34
+ "Homepage" = "https://github.com/marerem/longmem"
35
+ "Bug Tracker" = "https://github.com/marerem/longmem/issues"
36
+
37
+ [project.scripts]
38
+ longmem = "longmem.cli:main"
39
+
40
+ [tool.hatch.build.targets.wheel]
41
+ packages = ["src/longmem"]
42
+
43
+ # Ensure non-Python template files are included in the wheel
44
+ [tool.hatch.build.targets.wheel.shared-data]
45
+
46
+ [tool.hatch.build]
47
+ include = [
48
+ "src/longmem/**",
49
+ ]
50
+
51
+ [tool.pytest.ini_options]
52
+ asyncio_mode = "auto"
53
+ testpaths = ["tests"]
54
+
55
+ [dependency-groups]
56
+ dev = [
57
+ "pytest>=9.0.3",
58
+ "pytest-asyncio>=1.3.0",
59
+ "pytest-cov>=5.0.0",
60
+ ]
longmem-0.1.0/src/longmem/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ """longmem-cursor: persistent memory MCP server for Cursor IDE."""
2
+
3
+ __version__ = "0.1.0"