deepcontext 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. deepcontext-0.1.0/LICENSE +21 -0
  2. deepcontext-0.1.0/PKG-INFO +456 -0
  3. deepcontext-0.1.0/README.md +406 -0
  4. deepcontext-0.1.0/pyproject.toml +98 -0
  5. deepcontext-0.1.0/setup.cfg +4 -0
  6. deepcontext-0.1.0/src/deepcontext/__init__.py +37 -0
  7. deepcontext-0.1.0/src/deepcontext/api/__init__.py +1 -0
  8. deepcontext-0.1.0/src/deepcontext/api/server.py +233 -0
  9. deepcontext-0.1.0/src/deepcontext/core/__init__.py +1 -0
  10. deepcontext-0.1.0/src/deepcontext/core/clients.py +113 -0
  11. deepcontext-0.1.0/src/deepcontext/core/settings.py +147 -0
  12. deepcontext-0.1.0/src/deepcontext/core/types.py +149 -0
  13. deepcontext-0.1.0/src/deepcontext/db/__init__.py +5 -0
  14. deepcontext-0.1.0/src/deepcontext/db/database.py +84 -0
  15. deepcontext-0.1.0/src/deepcontext/db/models/__init__.py +1 -0
  16. deepcontext-0.1.0/src/deepcontext/db/models/base.py +9 -0
  17. deepcontext-0.1.0/src/deepcontext/db/models/graph.py +138 -0
  18. deepcontext-0.1.0/src/deepcontext/db/models/memory.py +152 -0
  19. deepcontext-0.1.0/src/deepcontext/extraction/__init__.py +1 -0
  20. deepcontext-0.1.0/src/deepcontext/extraction/extractor.py +291 -0
  21. deepcontext-0.1.0/src/deepcontext/extraction/prompts.py +120 -0
  22. deepcontext-0.1.0/src/deepcontext/graph/__init__.py +1 -0
  23. deepcontext-0.1.0/src/deepcontext/graph/knowledge_graph.py +286 -0
  24. deepcontext-0.1.0/src/deepcontext/lifecycle/__init__.py +1 -0
  25. deepcontext-0.1.0/src/deepcontext/lifecycle/manager.py +245 -0
  26. deepcontext-0.1.0/src/deepcontext/memory/__init__.py +1 -0
  27. deepcontext-0.1.0/src/deepcontext/memory/engine.py +535 -0
  28. deepcontext-0.1.0/src/deepcontext/retrieval/__init__.py +1 -0
  29. deepcontext-0.1.0/src/deepcontext/retrieval/hybrid.py +338 -0
  30. deepcontext-0.1.0/src/deepcontext/vectorstore/__init__.py +1 -0
  31. deepcontext-0.1.0/src/deepcontext/vectorstore/base.py +61 -0
  32. deepcontext-0.1.0/src/deepcontext/vectorstore/pgvector_store.py +276 -0
  33. deepcontext-0.1.0/src/deepcontext.egg-info/PKG-INFO +456 -0
  34. deepcontext-0.1.0/src/deepcontext.egg-info/SOURCES.txt +45 -0
  35. deepcontext-0.1.0/src/deepcontext.egg-info/dependency_links.txt +1 -0
  36. deepcontext-0.1.0/src/deepcontext.egg-info/requires.txt +27 -0
  37. deepcontext-0.1.0/src/deepcontext.egg-info/top_level.txt +1 -0
  38. deepcontext-0.1.0/tests/test_api.py +311 -0
  39. deepcontext-0.1.0/tests/test_core.py +229 -0
  40. deepcontext-0.1.0/tests/test_database.py +296 -0
  41. deepcontext-0.1.0/tests/test_edge_cases.py +379 -0
  42. deepcontext-0.1.0/tests/test_engine_integration.py +564 -0
  43. deepcontext-0.1.0/tests/test_extraction.py +252 -0
  44. deepcontext-0.1.0/tests/test_graph.py +190 -0
  45. deepcontext-0.1.0/tests/test_multi_user.py +304 -0
  46. deepcontext-0.1.0/tests/test_retrieval_lifecycle.py +608 -0
  47. deepcontext-0.1.0/tests/test_vectorstore.py +219 -0
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Umair Inayat
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,456 @@
1
+ Metadata-Version: 2.4
2
+ Name: deepcontext
3
+ Version: 0.1.0
4
+ Summary: Hierarchical memory system for AI agents - async, graph-aware, with hybrid retrieval and memory lifecycle management
5
+ Author-email: Umair Inayat <umairinayat@users.noreply.github.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://umairinayat.github.io/DeepContext/
8
+ Project-URL: Documentation, https://umairinayat.github.io/DeepContext/#/docs
9
+ Project-URL: Repository, https://github.com/umairinayat/DeepContext
10
+ Project-URL: Issues, https://github.com/umairinayat/DeepContext/issues
11
+ Keywords: ai,memory,llm,openai,context,conversation,long-term-memory,knowledge-graph,vector-search,pgvector,hierarchical-memory,agents
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Programming Language :: Python :: 3.13
20
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
21
+ Classifier: Framework :: FastAPI
22
+ Classifier: Typing :: Typed
23
+ Requires-Python: >=3.11
24
+ Description-Content-Type: text/markdown
25
+ License-File: LICENSE
26
+ Requires-Dist: openai>=1.40.0
27
+ Requires-Dist: sqlalchemy[asyncio]>=2.0.30
28
+ Requires-Dist: aiosqlite>=0.20.0
29
+ Requires-Dist: pydantic>=2.7.0
30
+ Requires-Dist: pydantic-settings>=2.3.0
31
+ Requires-Dist: instructor>=1.3.0
32
+ Requires-Dist: numpy>=1.26.0
33
+ Requires-Dist: python-dotenv>=1.0.0
34
+ Provides-Extra: postgres
35
+ Requires-Dist: asyncpg>=0.29.0; extra == "postgres"
36
+ Requires-Dist: pgvector>=0.3.0; extra == "postgres"
37
+ Provides-Extra: api
38
+ Requires-Dist: fastapi>=0.111.0; extra == "api"
39
+ Requires-Dist: uvicorn[standard]>=0.30.0; extra == "api"
40
+ Provides-Extra: dev
41
+ Requires-Dist: pytest>=8.0.0; extra == "dev"
42
+ Requires-Dist: pytest-asyncio>=0.23.0; extra == "dev"
43
+ Requires-Dist: ruff>=0.5.0; extra == "dev"
44
+ Requires-Dist: mypy>=1.10.0; extra == "dev"
45
+ Requires-Dist: alembic>=1.13.0; extra == "dev"
46
+ Requires-Dist: httpx>=0.27.0; extra == "dev"
47
+ Provides-Extra: all
48
+ Requires-Dist: deepcontext[api,dev,postgres]; extra == "all"
49
+ Dynamic: license-file
50
+
51
+ <p align="center">
52
+ <h1 align="center">DeepContext</h1>
53
+ <p align="center">Hierarchical memory system for AI agents -- async, graph-aware, with hybrid retrieval and memory lifecycle management.</p>
54
+ </p>
55
+
56
+ <p align="center">
57
+ <a href="https://github.com/umairinayat/DeepContext/blob/master/LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue.svg" alt="License: MIT"></a>
58
+ <a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.11%2B-blue.svg" alt="Python 3.11+"></a>
59
+ <a href="https://github.com/umairinayat/DeepContext"><img src="https://img.shields.io/badge/tests-110%20passed-brightgreen.svg" alt="Tests: 110 passed"></a>
60
+ <a href="https://fastapi.tiangolo.com/"><img src="https://img.shields.io/badge/API-FastAPI-009688.svg" alt="FastAPI"></a>
61
+ <a href="https://github.com/pgvector/pgvector"><img src="https://img.shields.io/badge/vector%20store-pgvector-orange.svg" alt="pgvector"></a>
62
+ <a href="https://pydantic-docs.helpmanual.io/"><img src="https://img.shields.io/badge/models-Pydantic%20v2-e92063.svg" alt="Pydantic v2"></a>
63
+ </p>
64
+
65
+ ---
66
+
67
+ DeepContext gives AI agents persistent, structured memory. Conversations are automatically broken into semantic facts, stored with embeddings, linked in a knowledge graph, and retrieved using a hybrid pipeline that fuses vector similarity, keyword search, and graph traversal.
68
+
69
+ ## Features
70
+
71
+ - **Hierarchical Memory** -- Working, short-term, and long-term tiers inspired by human cognition
72
+ - **Memory Types** -- Semantic (facts), episodic (events), and procedural (how-to) memories
73
+ - **Knowledge Graph** -- Entities and relationships extracted from conversations, stored in PostgreSQL (no Neo4j required)
74
+ - **Hybrid Retrieval** -- Reciprocal Rank Fusion (RRF) across vector, keyword, and graph search
75
+ - **Memory Lifecycle** -- Ebbinghaus forgetting curve decay, consolidation of short-term into long-term, automatic cleanup
76
+ - **Fully Async** -- Built on SQLAlchemy async, asyncpg, and AsyncOpenAI
77
+ - **Multi-user** -- All memories scoped by `user_id`
78
+ - **REST API** -- FastAPI server with 7 endpoints
79
+ - **Pluggable LLM** -- OpenAI and OpenRouter support out of the box
80
+ - **SQLite Fallback** -- Works without PostgreSQL for development
81
+
82
+ ---
83
+
84
+ ## Quick Start
85
+
86
+ ### Installation
87
+
88
+ ```bash
89
+ git clone https://github.com/umairinayat/DeepContext.git
90
+ cd DeepContext
91
+ python -m venv .venv
92
+
93
+ # Windows
94
+ .venv\Scripts\activate
95
+ # Linux/macOS
96
+ source .venv/bin/activate
97
+
98
+ pip install -e ".[all]"
99
+ ```
100
+
101
+ ### Configuration
102
+
103
+ Create a `.env` file in the project root:
104
+
105
+ ```env
106
+ DEEPCONTEXT_OPENAI_API_KEY=sk-your-key-here
107
+
108
+ # PostgreSQL (recommended for production)
109
+ # DEEPCONTEXT_DATABASE_URL=postgresql+asyncpg://user:pass@localhost:5432/deepcontext
110
+
111
+ # SQLite fallback (default, no setup needed)
112
+ # Automatically uses ~/.deepcontext/memory.db
113
+ ```
114
+
115
+ All settings can be passed as environment variables with the `DEEPCONTEXT_` prefix, or directly in code.
116
+
117
+ ### Basic Usage
118
+
119
+ ```python
120
+ import asyncio
121
+ from deepcontext import DeepContext
122
+
123
+ async def main():
124
+ ctx = DeepContext(openai_api_key="sk-...")
125
+ await ctx.init()
126
+
127
+ # Store memories from a conversation
128
+ response = await ctx.add(
129
+ messages=[
130
+ {"role": "user", "content": "I'm a Python developer working at Acme Corp"},
131
+ {"role": "assistant", "content": "Nice to meet you!"},
132
+ ],
133
+ user_id="user_1",
134
+ conversation_id="conv_1",
135
+ )
136
+ print(f"Stored {response.memories_added} memories, found {response.entities_found} entities")
137
+
138
+ # Search memories
139
+ results = await ctx.search("What does the user do for work?", user_id="user_1")
140
+ for r in results.results:
141
+ print(f" [{r.tier.value}] {r.text} (score: {r.score:.3f})")
142
+
143
+ # Explore the knowledge graph
144
+ neighbors = await ctx.get_entity_graph("user_1", "Acme Corp", depth=2)
145
+ for n in neighbors:
146
+ print(f" {n['entity']} --{n['relation']}--> (depth {n['depth']})")
147
+
148
+ # Run lifecycle maintenance (decay + consolidation + cleanup)
149
+ stats = await ctx.run_lifecycle("user_1")
150
+ print(f"Decayed: {stats['memories_decayed']}, Consolidated: {stats['memories_consolidated']}")
151
+
152
+ await ctx.close()
153
+
154
+ asyncio.run(main())
155
+ ```
156
+
157
+ ### Interactive Demo
158
+
159
+ ```bash
160
+ python examples/chat_demo.py
161
+ ```
162
+
163
+ An interactive chatbot that remembers conversations across turns. Special commands:
164
+
165
+ | Command | Description |
166
+ |---------|-------------|
167
+ | `memories` | Search stored memories |
168
+ | `graph <entity>` | Show knowledge graph for an entity |
169
+ | `lifecycle` | Run decay / consolidation / cleanup |
170
+ | `exit` | Quit |
171
+
172
+ ---
173
+
174
+ ## Architecture
175
+
176
+ ```
177
+ deepcontext/
178
+ __init__.py DeepContext (alias), exports
179
+ core/
180
+ settings.py Configuration (pydantic-settings, env vars, .env)
181
+ types.py Enums, Pydantic models (facts, entities, responses)
182
+ clients.py OpenAI/OpenRouter async client wrapper
183
+ memory/
184
+ engine.py MemoryEngine -- main orchestrator
185
+ extraction/
186
+ extractor.py LLM-based fact and entity extraction
187
+ prompts.py Prompt templates for extraction/classification
188
+ retrieval/
189
+ hybrid.py HybridRetriever (vector + keyword + graph + RRF)
190
+ graph/
191
+ knowledge_graph.py Entity/relationship CRUD, BFS traversal
192
+ lifecycle/
193
+ manager.py Ebbinghaus decay, consolidation, cleanup
194
+ vectorstore/
195
+ base.py Abstract vector store interface
196
+ pgvector_store.py pgvector implementation (SQLite cosine fallback)
197
+ db/
198
+ database.py Async SQLAlchemy engine manager
199
+ models/
200
+ base.py Base ORM model
201
+ memory.py Memory table (embeddings, tiers, types, decay)
202
+ graph.py Entity, Relationship, ConversationSummary tables
203
+ api/
204
+ server.py FastAPI REST API
205
+ ```
206
+
207
+ ---
208
+
209
+ ## How It Works
210
+
211
+ ### Memory Pipeline
212
+
213
+ When you call `ctx.add(messages, user_id)`:
214
+
215
+ ```
216
+ Conversation ──> LLM Extraction ──> Classification ──> Embedding ──> Storage
217
+ | | |
218
+ v v v
219
+ Facts, Entities ADD / UPDATE / Knowledge Graph
220
+ Relationships REPLACE / NOOP Update
221
+ ```
222
+
223
+ 1. **Extraction** -- The LLM analyzes the conversation and extracts semantic facts, episodic events, entities, and relationships
224
+ 2. **Classification** -- Each extracted fact is compared against existing memories. The LLM decides whether to ADD, UPDATE, REPLACE, or skip (NOOP)
225
+ 3. **Embedding** -- New/updated facts are embedded using the configured embedding model
226
+ 4. **Storage** -- Memories are stored with their embeddings, tier (short-term), type, importance, and confidence scores
227
+ 5. **Graph Update** -- Extracted entities and relationships are upserted into the knowledge graph
228
+ 6. **Auto-consolidation** -- If short-term memory count exceeds the threshold, consolidation is triggered
229
+
230
+ ### Hybrid Retrieval
231
+
232
+ When you call `ctx.search(query, user_id)`:
233
+
234
+ ```
235
+ Query ──> Embed ──> Vector Search (0.6) ──┐
236
+ | ├──> RRF Fusion ──> Scoring ──> Results
237
+ ├─────> Keyword Search (0.25) ──────────┤
238
+ | |
239
+ └─────> Graph Expansion (0.15) ─────────┘
240
+ ```
241
+
242
+ 1. **Vector search** -- Query is embedded and compared via cosine similarity (pgvector or Python fallback)
243
+ 2. **Keyword search** -- PostgreSQL `tsvector` full-text search (ILIKE fallback on SQLite)
244
+ 3. **Graph expansion** -- Entities mentioned in the query are found, their graph neighbors are traversed, and memories referencing those entities are boosted
245
+ 4. **RRF fusion** -- Results from all three strategies are combined using Reciprocal Rank Fusion (weights: vector 0.6, keyword 0.25, graph 0.15)
246
+ 5. **Scoring** -- Final score applies importance, recency decay, confidence, and access-count boost
247
+ 6. **Access tracking** -- Each returned memory's access count and timestamp are updated
248
+
249
+ ### Memory Lifecycle
250
+
251
+ When you call `ctx.run_lifecycle(user_id)`:
252
+
253
+ ```
254
+ Short-term Memories ──> Decay (Ebbinghaus) ──> Consolidation (LLM merge) ──> Long-term
255
+ | |
256
+ v v
257
+ Deactivate Group by entity
258
+ (importance < 0.05) overlap (Union-Find)
259
+ ```
260
+
261
+ 1. **Decay** -- Ebbinghaus forgetting curve: `R = e^(-0.693 * days / effective_half_life)`. Frequently accessed memories decay slower. Memories below 0.05 importance are deactivated
262
+ 2. **Consolidation** -- When short-term memory count >= threshold (default 20), memories are grouped by entity overlap (Union-Find), each group is merged by the LLM into a long-term fact, and source memories are deactivated
263
+ 3. **Cleanup** -- Remaining low-importance non-long-term memories are soft-deleted
264
+
265
+ ---
266
+
267
+ ## REST API
268
+
269
+ Start the server:
270
+
271
+ ```bash
272
+ uvicorn deepcontext.api.server:app --reload
273
+ ```
274
+
275
+ ### Endpoints
276
+
277
+ | Method | Path | Description |
278
+ |--------|------|-------------|
279
+ | `GET` | `/health` | Health check |
280
+ | `POST` | `/memory/add` | Extract and store memories from messages |
281
+ | `POST` | `/memory/search` | Hybrid search across memories |
282
+ | `PUT` | `/memory/update` | Update a memory's text and re-embed |
283
+ | `DELETE` | `/memory/delete` | Soft-delete a memory |
284
+ | `POST` | `/graph/neighbors` | Get knowledge graph neighborhood |
285
+ | `POST` | `/lifecycle/run` | Run decay + consolidation + cleanup |
286
+
287
+ ### Example: Add Memory
288
+
289
+ ```bash
290
+ curl -X POST http://localhost:8000/memory/add \
291
+ -H "Content-Type: application/json" \
292
+ -d '{
293
+ "messages": [
294
+ {"role": "user", "content": "I prefer Python over JavaScript"},
295
+ {"role": "assistant", "content": "Got it, Python is your go-to!"}
296
+ ],
297
+ "user_id": "user_1",
298
+ "conversation_id": "conv_1"
299
+ }'
300
+ ```
301
+
302
+ ### Example: Search
303
+
304
+ ```bash
305
+ curl -X POST http://localhost:8000/memory/search \
306
+ -H "Content-Type: application/json" \
307
+ -d '{
308
+ "query": "programming languages",
309
+ "user_id": "user_1",
310
+ "limit": 5
311
+ }'
312
+ ```
313
+
314
+ ---
315
+
316
+ ## Configuration Reference
317
+
318
+ All settings use the `DEEPCONTEXT_` env prefix. Set them in `.env` or pass directly to `DeepContext()`.
319
+
320
+ | Setting | Default | Description |
321
+ |---------|---------|-------------|
322
+ | `database_url` | SQLite fallback | PostgreSQL connection URL (`postgresql+asyncpg://...`) |
323
+ | `llm_provider` | `openai` | `openai` or `openrouter` |
324
+ | `openai_api_key` | -- | Required for OpenAI provider |
325
+ | `openrouter_api_key` | -- | Required for OpenRouter provider |
326
+ | `llm_model` | `gpt-4o-mini` | Model for fact extraction and classification |
327
+ | `embedding_model` | `text-embedding-3-small` | Embedding model |
328
+ | `embedding_dimensions` | `1536` | Embedding vector dimensions |
329
+ | `consolidation_threshold` | `20` | Short-term memories before auto-consolidation |
330
+ | `decay_half_life_days` | `7.0` | Ebbinghaus half-life for episodic decay |
331
+ | `connection_similarity_threshold` | `0.6` | Min cosine similarity for memory connections |
332
+ | `max_connections_per_memory` | `5` | Max connections per memory node |
333
+ | `debug` | `false` | Enable debug logging |
334
+ | `auto_consolidate` | `true` | Auto-consolidate on add |
335
+
336
+ ---
337
+
338
+ ## Development
339
+
340
+ ### Running Tests
341
+
342
+ ```bash
343
+ pip install -e ".[dev]"
344
+ pytest tests/ -v
345
+ ```
346
+
347
+ 110 tests covering all subsystems. Tests use in-memory SQLite and mock LLM clients -- no API keys or database needed.
348
+
349
+ ### PostgreSQL + pgvector Setup (Production)
350
+
351
+ DeepContext works with SQLite for development, but PostgreSQL with pgvector is recommended for production (native vector indexing, full-text search with `tsvector`, JSONB).
352
+
353
+ #### Windows
354
+
355
+ 1. **Install PostgreSQL 15+** from https://www.postgresql.org/download/windows/ (the installer includes pgAdmin)
356
+
357
+ 2. **Install pgvector** -- after PostgreSQL is installed:
358
+ ```powershell
359
+ # Option A: Using pgvector installer (recommended)
360
+ # Download the latest release from https://github.com/pgvector/pgvector/releases
361
+ # Run the .exe installer matching your PostgreSQL version
362
+
363
+ # Option B: Build from source (requires Visual Studio Build Tools)
364
+ git clone https://github.com/pgvector/pgvector.git
365
+ cd pgvector
366
+ # Set environment for your PG version, e.g.:
367
+ set "PGROOT=C:\Program Files\PostgreSQL\16"
368
+ nmake /F Makefile.win install
369
+ ```
370
+
371
+ 3. **Create the database and enable pgvector**:
372
+ ```powershell
373
+ psql -U postgres -c "CREATE DATABASE deepcontext;"
374
+ psql -U postgres -d deepcontext -c "CREATE EXTENSION IF NOT EXISTS vector;"
375
+ ```
376
+
377
+ 4. **Update `.env`**:
378
+ ```env
379
+ DEEPCONTEXT_DATABASE_URL=postgresql+asyncpg://postgres:yourpassword@localhost:5432/deepcontext
380
+ ```
381
+
382
+ 5. **Run Alembic migrations**:
383
+ ```bash
384
+ alembic upgrade head
385
+ ```
386
+
387
+ #### Linux / macOS
388
+
389
+ ```bash
390
+ # Ubuntu/Debian
391
+ sudo apt install postgresql-16 postgresql-16-pgvector
392
+
393
+ # macOS (Homebrew)
394
+ brew install postgresql@16 pgvector
395
+
396
+ # Create database
397
+ createdb deepcontext
398
+ psql deepcontext -c "CREATE EXTENSION IF NOT EXISTS vector;"
399
+
400
+ # Set connection URL
401
+ export DEEPCONTEXT_DATABASE_URL="postgresql+asyncpg://user:pass@localhost:5432/deepcontext"
402
+
403
+ # Run migrations
404
+ alembic upgrade head
405
+ ```
406
+
407
+ ### Database Migrations (Alembic)
408
+
409
+ The project uses Alembic for schema versioning. Migration files are in `alembic/versions/`.
410
+
411
+ ```bash
412
+ # Apply all migrations (requires PostgreSQL connection)
413
+ alembic upgrade head
414
+
415
+ # Check current migration status
416
+ alembic current
417
+
418
+ # Generate a new migration after model changes
419
+ alembic revision --autogenerate -m "description of changes"
420
+
421
+ # Rollback one migration
422
+ alembic downgrade -1
423
+
424
+ # Generate SQL without applying (offline mode)
425
+ alembic upgrade head --sql
426
+ ```
427
+
428
+ > **Note:** Alembic migrations target PostgreSQL. SQLite mode uses `Base.metadata.create_all()` at runtime and does not need Alembic.
429
+
430
+ ---
431
+
432
+ ## Tech Stack
433
+
434
+ | Component | Technology |
435
+ |-----------|------------|
436
+ | Language | Python 3.11+ with full type annotations |
437
+ | ORM | SQLAlchemy 2.0 (async) |
438
+ | Vector Store | pgvector (PostgreSQL) |
439
+ | LLM | OpenAI API / OpenRouter |
440
+ | API | FastAPI + Uvicorn |
441
+ | Validation | Pydantic v2 + pydantic-settings |
442
+ | Math | NumPy |
443
+ | Migrations | Alembic |
444
+ | Testing | pytest + pytest-asyncio + httpx |
445
+
446
+ ---
447
+
448
+ ## License
449
+
450
+ MIT -- see [LICENSE](LICENSE) for details.
451
+
452
+ ---
453
+
454
+ <p align="center">
455
+ Built by <a href="https://github.com/umairinayat">@umairinayat</a>
456
+ </p>