quantum-memory-graph 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- benchmarks/__init__.py +1 -0
- benchmarks/memcombine.py +236 -0
- quantum_memory_graph/__init__.py +16 -0
- quantum_memory_graph/__main__.py +3 -0
- quantum_memory_graph/api.py +132 -0
- quantum_memory_graph/graph.py +301 -0
- quantum_memory_graph/pipeline.py +237 -0
- quantum_memory_graph/subgraph_optimizer.py +262 -0
- quantum_memory_graph-0.1.0.dist-info/LICENSE +21 -0
- quantum_memory_graph-0.1.0.dist-info/METADATA +228 -0
- quantum_memory_graph-0.1.0.dist-info/RECORD +13 -0
- quantum_memory_graph-0.1.0.dist-info/WHEEL +5 -0
- quantum_memory_graph-0.1.0.dist-info/top_level.txt +2 -0
benchmarks/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""MemCombine benchmark suite."""
|
benchmarks/memcombine.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
"""
|
|
2
|
+
MemCombine Benchmark — Tests memory COMBINATION quality.
|
|
3
|
+
|
|
4
|
+
Unlike LongMemEval (needle-in-haystack retrieval), MemCombine tests whether
|
|
5
|
+
selected memories work TOGETHER to answer complex questions.
|
|
6
|
+
|
|
7
|
+
Questions require synthesizing information from multiple memories:
|
|
8
|
+
- "What was the decision AND its reasoning AND its outcome?"
|
|
9
|
+
- "How do project X and project Y relate?"
|
|
10
|
+
- "What changed between meeting A and meeting B?"
|
|
11
|
+
|
|
12
|
+
Metrics:
|
|
13
|
+
- Combination Score: Do selected memories cover all required facets?
|
|
14
|
+
- Synergy Score: Do memories reference/build on each other?
|
|
15
|
+
- Completeness: Can the question be fully answered from selected memories?
|
|
16
|
+
|
|
17
|
+
Copyright 2026 Coinkong (Chef's Attraction). MIT License.
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import json
|
|
21
|
+
import numpy as np
|
|
22
|
+
from typing import List, Dict
|
|
23
|
+
from dataclasses import dataclass, field
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class MemCombineQuestion:
    """A question requiring multiple related memories.

    One benchmark item: the question text, the pool of candidate
    memories, and the ground truth describing which memories and
    information facets are needed to answer it.
    """
    id: str  # unique scenario identifier, e.g. "synthesis_1"
    question: str  # the natural-language question to answer
    category: str  # synthesis, temporal, causal, multi-entity
    memories: List[Dict]  # All available memories
    evidence_ids: List[int]  # Which memories contain evidence
    facets: List[str]  # Required information facets
    facet_memory_map: Dict[str, int]  # Which facet comes from which memory
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# Built-in benchmark scenarios
# Each dict mirrors the MemCombineQuestion fields: "memories" is a pool of
# ten items (evidence mixed with distractors), "evidence_ids" lists the
# memories required to answer, "facets" names the information facets the
# answer must cover, and "facet_memory_map" maps facet -> memory id.
SCENARIOS = [
    # Synthesis: combine tech-stack decisions scattered across meetings.
    {
        "id": "synthesis_1",
        "question": "What technology stack was chosen for the project and why was each component selected?",
        "category": "synthesis",
        "memories": [
            {"id": 0, "text": "Team meeting: Decided to use React for the frontend. Sarah argued it has the best ecosystem for our use case."},
            {"id": 1, "text": "Architecture review: PostgreSQL chosen for the database. Need JSONB support for flexible schemas."},
            {"id": 2, "text": "Sprint planning: Set up CI/CD pipeline using GitHub Actions. Two-week sprint cycles."},
            {"id": 3, "text": "Team lunch at the Italian place. Good pasta. Bob told a funny joke about recursion."},
            {"id": 4, "text": "Backend discussion: FastAPI selected over Django. Need async support for real-time features."},
            {"id": 5, "text": "Deployment strategy: Going with Docker + Kubernetes on AWS. Auto-scaling is critical for launch."},
            {"id": 6, "text": "Budget review: Cloud costs estimated at $2000/month. Within budget allocation."},
            {"id": 7, "text": "Coffee chat about the new office layout. Open floor plan vs cubicles debate."},
            {"id": 8, "text": "Performance testing results: FastAPI handles 10K concurrent connections. Meets our requirements."},
            {"id": 9, "text": "Security audit: Need to add rate limiting and input validation before launch."},
        ],
        "evidence_ids": [0, 1, 4, 5],
        "facets": ["frontend_choice", "frontend_reason", "backend_choice", "backend_reason", "database_choice", "database_reason", "deployment_choice"],
        "facet_memory_map": {"frontend_choice": 0, "frontend_reason": 0, "backend_choice": 4, "backend_reason": 4, "database_choice": 1, "database_reason": 1, "deployment_choice": 5},
    },
    # Temporal: track how a policy evolved across three months.
    {
        "id": "temporal_1",
        "question": "How did the team's stance on remote work change over the three months?",
        "category": "temporal",
        "memories": [
            {"id": 0, "text": "January all-hands: CEO announced mandatory return to office 5 days a week starting February."},
            {"id": 1, "text": "Q4 revenue report showed 15% growth. Celebrated with team dinner."},
            {"id": 2, "text": "February survey results: 73% of employees reported decreased satisfaction with RTO policy."},
            {"id": 3, "text": "New coffee machine installed in the break room. Everyone loves it."},
            {"id": 4, "text": "February town hall: HR presented data showing 20% increase in turnover since RTO mandate."},
            {"id": 5, "text": "March policy update: CEO reversed course. Now hybrid 3 days in office, 2 remote. Cited retention data."},
            {"id": 6, "text": "IT upgraded the conference room AV equipment for better hybrid meetings."},
            {"id": 7, "text": "Quarterly OKR review. Team hit 4 of 5 objectives."},
            {"id": 8, "text": "March satisfaction survey: Employee satisfaction recovered to 85% after hybrid policy."},
            {"id": 9, "text": "Parking garage construction causing noise complaints from third floor."},
        ],
        "evidence_ids": [0, 2, 4, 5, 8],
        "facets": ["initial_policy", "employee_reaction", "turnover_impact", "policy_change", "final_outcome"],
        "facet_memory_map": {"initial_policy": 0, "employee_reaction": 2, "turnover_impact": 4, "policy_change": 5, "final_outcome": 8},
    },
    # Causal: trace incident -> root cause -> fix -> prevention.
    {
        "id": "causal_1",
        "question": "What caused the production outage, what was done to fix it, and what prevention measures were taken?",
        "category": "causal",
        "memories": [
            {"id": 0, "text": "Monday 2am alert: Production database hit 100% disk usage. All writes failing."},
            {"id": 1, "text": "Sprint retrospective: Team agreed to improve code review process."},
            {"id": 2, "text": "Root cause analysis: Logging table grew 500GB in 2 weeks due to debug logging left on after feature deploy."},
            {"id": 3, "text": "Incident response: DevOps team purged old log entries and increased disk from 1TB to 2TB."},
            {"id": 4, "text": "New hire orientation for three junior developers. HR handled logistics."},
            {"id": 5, "text": "Post-mortem action item 1: Implement log rotation with 30-day retention policy."},
            {"id": 6, "text": "Post-mortem action item 2: Add disk usage alerts at 70%, 80%, 90% thresholds."},
            {"id": 7, "text": "Post-mortem action item 3: Require removing debug logging before merging to main."},
            {"id": 8, "text": "Team building event at the escape room. Marketing team won."},
            {"id": 9, "text": "Client demo went well. They want to proceed with Phase 2."},
        ],
        "evidence_ids": [0, 2, 3, 5, 6, 7],
        "facets": ["what_happened", "root_cause", "immediate_fix", "prevention_1", "prevention_2", "prevention_3"],
        "facet_memory_map": {"what_happened": 0, "root_cause": 2, "immediate_fix": 3, "prevention_1": 5, "prevention_2": 6, "prevention_3": 7},
    },
    # Multi-entity: assemble roles and interactions across team members.
    {
        "id": "multi_entity_1",
        "question": "What are each team member's roles and how do their responsibilities interact?",
        "category": "multi_entity",
        "memories": [
            {"id": 0, "text": "Alice leads frontend development. She works closely with Bob on API contracts."},
            {"id": 1, "text": "Company picnic was fun. Great weather this year."},
            {"id": 2, "text": "Bob owns the backend services. He designs APIs that Alice's frontend consumes."},
            {"id": 3, "text": "Carol manages the infrastructure. She provisions the servers Bob's services run on."},
            {"id": 4, "text": "New ping pong table in the break room. Tournament next Friday."},
            {"id": 5, "text": "Dave handles QA. He writes integration tests that cover Alice's UI and Bob's APIs."},
            {"id": 6, "text": "Eve is the project manager. She coordinates between Alice, Bob, Carol, and Dave."},
            {"id": 7, "text": "Office plants are dying. Need to assign someone to water them."},
            {"id": 8, "text": "Alice and Carol paired on improving the CI/CD pipeline. Reduced deploy time by 40%."},
            {"id": 9, "text": "Dave found a critical bug in Bob's API. Bob fixed it same day."},
        ],
        "evidence_ids": [0, 2, 3, 5, 6, 8, 9],
        "facets": ["alice_role", "bob_role", "carol_role", "dave_role", "eve_role", "alice_bob_interaction", "bob_carol_interaction", "dave_integration"],
        "facet_memory_map": {"alice_role": 0, "bob_role": 2, "carol_role": 3, "dave_role": 5, "eve_role": 6, "alice_bob_interaction": 0, "bob_carol_interaction": 3, "dave_integration": 5},
    },
    # Synthesis: reconstruct an ordered process from step memories.
    {
        "id": "synthesis_2",
        "question": "What is the complete customer onboarding process from signup to first value?",
        "category": "synthesis",
        "memories": [
            {"id": 0, "text": "Step 1: Customer signs up via website. Auto-creates account and sends welcome email."},
            {"id": 1, "text": "Marketing team redesigned the landing page. Conversion rate up 12%."},
            {"id": 2, "text": "Step 2: Customer success rep schedules onboarding call within 24 hours of signup."},
            {"id": 3, "text": "Step 3: During onboarding call, rep helps customer import their data and configure integrations."},
            {"id": 4, "text": "Sales team hit quarterly target. Pizza party celebration."},
            {"id": 5, "text": "Step 4: Customer gets access to interactive tutorial. Must complete 3 core modules."},
            {"id": 6, "text": "Step 5: After tutorial completion, customer success checks in at day 7 and day 30."},
            {"id": 7, "text": "Office AC broken again. Facilities contacted."},
            {"id": 8, "text": "Churn analysis: Customers who complete onboarding tutorial have 3x higher retention."},
            {"id": 9, "text": "Support ticket about login issues. Resolved — was a password reset problem."},
        ],
        "evidence_ids": [0, 2, 3, 5, 6],
        "facets": ["signup", "scheduling", "data_import", "tutorial", "followup"],
        "facet_memory_map": {"signup": 0, "scheduling": 2, "data_import": 3, "tutorial": 5, "followup": 6},
    },
]
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def evaluate_combination(selected_ids: List[int], scenario: Dict) -> Dict:
    """Score a memory selection against a scenario's ground truth.

    Computes facet coverage, evidence recall/precision/F1, and the number
    of selected non-evidence ("noise") memories.
    """
    chosen = set(selected_ids)
    gold = set(scenario["evidence_ids"])
    facet_map = scenario["facet_memory_map"]
    facets = scenario["facets"]

    # A facet counts as covered when the memory that carries it was chosen.
    covered_facets = [f for f in facets if facet_map[f] in chosen]
    missing_facets = [f for f in facets if facet_map[f] not in chosen]
    coverage = len(covered_facets) / len(facets) if facets else 0

    # Set-overlap metrics between the selection and the evidence memories.
    hits = chosen & gold
    evidence_recall = len(hits) / len(gold) if gold else 0
    precision = len(hits) / len(chosen) if chosen else 0

    denom = precision + evidence_recall
    f1 = 2 * precision * evidence_recall / denom if denom > 0 else 0

    return {
        "coverage": coverage,
        "evidence_recall": evidence_recall,
        "precision": precision,
        "noise": len(chosen - gold),  # selected memories that are not evidence
        "covered_facets": covered_facets,
        "missing_facets": missing_facets,
        "f1": f1,
    }
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def run_benchmark(recall_fn, K: int = 5, scenarios: List[Dict] = None) -> Dict:
    """
    Run MemCombine benchmark against a recall function.

    Args:
        recall_fn: Function(memories, query, K) -> List[int] (selected indices)
        K: Number of memories to select
        scenarios: Custom scenarios (uses built-in if None)

    Returns:
        Benchmark results with per-scenario and aggregate scores.
        All aggregates are zero when the scenario list is empty.
    """
    if scenarios is None:
        scenarios = SCENARIOS

    n = len(scenarios)
    # Guard: an explicitly-empty scenario list would otherwise crash with
    # ZeroDivisionError when computing the aggregate averages below.
    if n == 0:
        return {
            "benchmark": "MemCombine",
            "n_scenarios": 0,
            "K": K,
            "avg_coverage": 0.0,
            "avg_evidence_recall": 0.0,
            "avg_f1": 0.0,
            "perfect_coverage": 0,
            "perfect_coverage_pct": 0.0,
            "per_scenario": [],
        }

    results = []
    total_coverage = 0
    total_recall = 0
    total_f1 = 0
    perfect = 0  # scenarios where every required facet was covered

    for scenario in scenarios:
        # The function under test sees only raw texts plus the question.
        memory_texts = [m["text"] for m in scenario["memories"]]
        selected = recall_fn(memory_texts, scenario["question"], K)

        eval_result = evaluate_combination(selected, scenario)

        results.append({
            "id": scenario["id"],
            "category": scenario["category"],
            "selected": selected,
            **eval_result,
        })

        total_coverage += eval_result["coverage"]
        total_recall += eval_result["evidence_recall"]
        total_f1 += eval_result["f1"]
        if eval_result["coverage"] == 1.0:
            perfect += 1

    return {
        "benchmark": "MemCombine",
        "n_scenarios": n,
        "K": K,
        "avg_coverage": total_coverage / n,
        "avg_evidence_recall": total_recall / n,
        "avg_f1": total_f1 / n,
        "perfect_coverage": perfect,
        "perfect_coverage_pct": perfect / n * 100,
        "per_scenario": results,
    }
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Quantum Memory Graph — Relationship-aware memory for AI agents.
|
|
3
|
+
|
|
4
|
+
Uses knowledge graphs to map relationships between memories,
|
|
5
|
+
then QAOA to find the optimal subgraph for any query.
|
|
6
|
+
|
|
7
|
+
Copyright 2026 Coinkong (Chef's Attraction). MIT License.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
__version__ = "0.1.0"
|
|
11
|
+
|
|
12
|
+
from .graph import MemoryGraph
|
|
13
|
+
from .subgraph_optimizer import optimize_subgraph
|
|
14
|
+
from .pipeline import recall, store
|
|
15
|
+
|
|
16
|
+
__all__ = ["MemoryGraph", "optimize_subgraph", "recall", "store"]
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Quantum Memory Graph API Server.
|
|
3
|
+
|
|
4
|
+
REST API for the graph + QAOA memory system.
|
|
5
|
+
|
|
6
|
+
Copyright 2026 Coinkong (Chef's Attraction). MIT License.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import os
import secrets
from typing import Optional, List

import uvicorn
from fastapi import FastAPI, Request, HTTPException, Depends
from pydantic import BaseModel
|
|
14
|
+
|
|
15
|
+
from .graph import MemoryGraph
|
|
16
|
+
from .pipeline import store, store_batch, recall, get_graph, set_graph
|
|
17
|
+
|
|
18
|
+
# Bearer token for API auth; an empty value disables authentication entirely.
API_TOKEN = os.environ.get("QMG_API_TOKEN", "")


async def verify_token(request: Request):
    """FastAPI dependency enforcing optional Bearer-token auth.

    Health and docs paths are always allowed. When QMG_API_TOKEN is unset,
    auth is disabled and every request passes.

    Raises:
        HTTPException: 401 when the Authorization header is missing or wrong.
    """
    if request.url.path in ("/", "/docs", "/openapi.json", "/redoc"):
        return
    if not API_TOKEN:
        return
    auth = request.headers.get("Authorization", "")
    # secrets.compare_digest is constant-time: a plain `!=` short-circuits on
    # the first differing byte, leaking token prefixes via response timing.
    if not auth.startswith("Bearer ") or not secrets.compare_digest(auth[7:], API_TOKEN):
        raise HTTPException(status_code=401, detail="Unauthorized")
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
app = FastAPI(
    title="Quantum Memory Graph API",
    version="0.1.0",
    description="Knowledge graph + QAOA subgraph optimization for AI agent memory",
    # App-level dependency: verify_token runs on every route, enforcing
    # the optional Bearer auth configured via QMG_API_TOKEN.
    dependencies=[Depends(verify_token)],
)

# Initialize graph on startup
# Module-level handle assigned by the startup hook; endpoints reach it
# through get_graph()/set_graph() rather than reading this directly.
_graph = None
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
# NOTE(review): @app.on_event is deprecated in newer FastAPI releases in
# favor of lifespan handlers — confirm the pinned FastAPI version before migrating.
@app.on_event("startup")
async def startup():
    """Build the shared MemoryGraph once at startup and register it with the pipeline.

    The similarity threshold (presumably used when linking memories — see
    graph.py) is configurable via QMG_SIMILARITY_THRESHOLD, defaulting to 0.3.
    """
    global _graph
    threshold = float(os.environ.get("QMG_SIMILARITY_THRESHOLD", "0.3"))
    _graph = MemoryGraph(similarity_threshold=threshold)
    set_graph(_graph)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@app.get("/")
|
|
51
|
+
async def health():
|
|
52
|
+
g = get_graph()
|
|
53
|
+
return {
|
|
54
|
+
"status": "operational",
|
|
55
|
+
"service": "Quantum Memory Graph",
|
|
56
|
+
"version": "0.1.0",
|
|
57
|
+
"graph": g.stats() if g and g.memories else {"nodes": 0, "edges": 0},
|
|
58
|
+
"auth": "enabled" if API_TOKEN else "disabled",
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class StoreRequest(BaseModel):
    """Payload for /store: a single memory to add to the graph."""
    text: str  # memory content
    entities: Optional[List[str]] = None  # presumably pre-extracted entity names — verify against pipeline.store
    source: str = ""  # free-form provenance tag
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
@app.post("/store")
|
|
69
|
+
async def api_store(req: StoreRequest):
|
|
70
|
+
result = store(
|
|
71
|
+
text=req.text,
|
|
72
|
+
entities=req.entities,
|
|
73
|
+
source=req.source,
|
|
74
|
+
)
|
|
75
|
+
return result
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class StoreBatchRequest(BaseModel):
    """Payload for /store-batch: several memories at once."""
    texts: List[str]  # memory contents
    sources: Optional[List[str]] = None  # provenance tags — presumably parallel to texts; verify against pipeline.store_batch
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
@app.post("/store-batch")
|
|
84
|
+
async def api_store_batch(req: StoreBatchRequest):
|
|
85
|
+
result = store_batch(
|
|
86
|
+
texts=req.texts,
|
|
87
|
+
sources=req.sources,
|
|
88
|
+
)
|
|
89
|
+
return result
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
class RecallRequest(BaseModel):
    """Parameters for /recall; defaults mirror pipeline.recall's tuning knobs."""
    query: str  # natural-language query to recall memories for
    k: int = 5  # number of memories to return (passed as K)
    hops: int = 2  # graph expansion depth — semantics live in pipeline.recall; TODO confirm
    top_seeds: int = 5  # number of seed nodes — TODO confirm against pipeline.recall
    alpha: float = 0.4  # objective weight — NOTE(review): presumably relevance; verify
    beta_conn: float = 0.35  # objective weight — presumably connectivity; verify
    gamma_cov: float = 0.25  # objective weight — presumably coverage; verify
    max_candidates: int = 14  # cap on candidates handed to the optimizer — TODO confirm
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
@app.post("/recall")
|
|
104
|
+
async def api_recall(req: RecallRequest):
|
|
105
|
+
result = recall(
|
|
106
|
+
query=req.query,
|
|
107
|
+
K=req.k,
|
|
108
|
+
hops=req.hops,
|
|
109
|
+
top_seeds=req.top_seeds,
|
|
110
|
+
alpha=req.alpha,
|
|
111
|
+
beta_conn=req.beta_conn,
|
|
112
|
+
gamma_cov=req.gamma_cov,
|
|
113
|
+
max_candidates=req.max_candidates,
|
|
114
|
+
)
|
|
115
|
+
return result
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
@app.get("/stats")
|
|
119
|
+
async def api_stats():
|
|
120
|
+
g = get_graph()
|
|
121
|
+
return g.stats() if g else {"nodes": 0, "edges": 0}
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def main():
    """Start the API server (console-script / `python -m` entry point)."""
    # Default 0.0.0.0 binds every interface — appropriate in a container,
    # but set QMG_HOST=127.0.0.1 for local-only use.
    host = os.environ.get("QMG_HOST", "0.0.0.0")
    port = int(os.environ.get("QMG_PORT", "8502"))
    print(f"⚛️🧠 Quantum Memory Graph API starting on {host}:{port}...")
    uvicorn.run(app, host=host, port=port)


if __name__ == "__main__":
    main()
|