@cybermem/mcp 0.5.1 → 0.6.0

This diff compares the content of publicly available package versions as released to the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (68)
  1. package/README.md +1 -1
  2. package/dist/index.js +187 -194
  3. package/package.json +29 -28
  4. package/requirements.txt +2 -0
  5. package/server.py +347 -0
  6. package/src/index.ts +227 -0
  7. package/test_mcp.py +111 -0
  8. package/tsconfig.json +14 -0
  9. package/dist/commands/deploy.js +0 -230
  10. package/dist/commands/init.js +0 -65
  11. package/dist/templates/ansible/inventory/hosts.ini +0 -3
  12. package/dist/templates/ansible/playbooks/deploy-cybermem.yml +0 -71
  13. package/dist/templates/ansible/playbooks/stop-cybermem.yml +0 -17
  14. package/dist/templates/charts/cybermem/Chart.yaml +0 -6
  15. package/dist/templates/charts/cybermem/templates/dashboard-deployment.yaml +0 -29
  16. package/dist/templates/charts/cybermem/templates/dashboard-service.yaml +0 -20
  17. package/dist/templates/charts/cybermem/templates/openmemory-deployment.yaml +0 -40
  18. package/dist/templates/charts/cybermem/templates/openmemory-pvc.yaml +0 -10
  19. package/dist/templates/charts/cybermem/templates/openmemory-service.yaml +0 -13
  20. package/dist/templates/charts/cybermem/values-vps.yaml +0 -18
  21. package/dist/templates/charts/cybermem/values.yaml +0 -42
  22. package/dist/templates/docker-compose.yml +0 -219
  23. package/dist/templates/envs/local.example +0 -27
  24. package/dist/templates/envs/rpi.example +0 -27
  25. package/dist/templates/envs/vps.example +0 -25
  26. package/dist/templates/monitoring/db_exporter/Dockerfile +0 -19
  27. package/dist/templates/monitoring/db_exporter/exporter.py +0 -313
  28. package/dist/templates/monitoring/db_exporter/requirements.txt +0 -2
  29. package/dist/templates/monitoring/grafana/dashboards/cybermem.json +0 -1088
  30. package/dist/templates/monitoring/grafana/provisioning/dashboards/default.yml +0 -12
  31. package/dist/templates/monitoring/grafana/provisioning/datasources/prometheus.yml +0 -9
  32. package/dist/templates/monitoring/log_exporter/Dockerfile +0 -13
  33. package/dist/templates/monitoring/log_exporter/exporter.py +0 -274
  34. package/dist/templates/monitoring/log_exporter/requirements.txt +0 -1
  35. package/dist/templates/monitoring/postgres_exporter/queries.yml +0 -22
  36. package/dist/templates/monitoring/prometheus/prometheus.yml +0 -22
  37. package/dist/templates/monitoring/traefik/traefik.yml +0 -32
  38. package/dist/templates/monitoring/vector/vector.toml/vector.yaml +0 -77
  39. package/dist/templates/monitoring/vector/vector.yaml +0 -106
  40. package/templates/ansible/inventory/hosts.ini +0 -3
  41. package/templates/ansible/playbooks/deploy-cybermem.yml +0 -71
  42. package/templates/ansible/playbooks/stop-cybermem.yml +0 -17
  43. package/templates/charts/cybermem/Chart.yaml +0 -6
  44. package/templates/charts/cybermem/templates/dashboard-deployment.yaml +0 -29
  45. package/templates/charts/cybermem/templates/dashboard-service.yaml +0 -20
  46. package/templates/charts/cybermem/templates/openmemory-deployment.yaml +0 -40
  47. package/templates/charts/cybermem/templates/openmemory-pvc.yaml +0 -10
  48. package/templates/charts/cybermem/templates/openmemory-service.yaml +0 -13
  49. package/templates/charts/cybermem/values-vps.yaml +0 -18
  50. package/templates/charts/cybermem/values.yaml +0 -42
  51. package/templates/docker-compose.yml +0 -219
  52. package/templates/envs/local.example +0 -27
  53. package/templates/envs/rpi.example +0 -27
  54. package/templates/envs/vps.example +0 -25
  55. package/templates/monitoring/db_exporter/Dockerfile +0 -19
  56. package/templates/monitoring/db_exporter/exporter.py +0 -313
  57. package/templates/monitoring/db_exporter/requirements.txt +0 -2
  58. package/templates/monitoring/grafana/dashboards/cybermem.json +0 -1088
  59. package/templates/monitoring/grafana/provisioning/dashboards/default.yml +0 -12
  60. package/templates/monitoring/grafana/provisioning/datasources/prometheus.yml +0 -9
  61. package/templates/monitoring/log_exporter/Dockerfile +0 -13
  62. package/templates/monitoring/log_exporter/exporter.py +0 -274
  63. package/templates/monitoring/log_exporter/requirements.txt +0 -1
  64. package/templates/monitoring/postgres_exporter/queries.yml +0 -22
  65. package/templates/monitoring/prometheus/prometheus.yml +0 -22
  66. package/templates/monitoring/traefik/traefik.yml +0 -32
  67. package/templates/monitoring/vector/vector.toml/vector.yaml +0 -77
  68. package/templates/monitoring/vector/vector.yaml +0 -106
@@ -1,313 +0,0 @@
- #!/usr/bin/env python3
- """
- CyberMem Database Exporter for Prometheus
-
- Queries OpenMemory's database and exports per-client metrics to Prometheus.
- Replaces the complex Vector + Traefik access logs pipeline with simple DB queries.
- """
-
- import os
- import time
- import sqlite3
- import json
- from prometheus_client import Gauge, Info, generate_latest, CONTENT_TYPE_LATEST
- from flask import Flask, Response, request, jsonify
- import logging
- import threading
-
- # Configuration
- DB_PATH = os.getenv("DB_PATH", "/data/openmemory.sqlite")
- SCRAPE_INTERVAL = int(os.getenv("SCRAPE_INTERVAL", "15"))  # seconds
- EXPORTER_PORT = int(os.getenv("EXPORTER_PORT", "8000"))
-
- # Setup logging
- logging.basicConfig(
-     level=logging.INFO,
-     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
- )
- logger = logging.getLogger("db_exporter")
-
- # Prometheus metrics
- info = Info('cybermem_exporter', 'CyberMem Database Exporter Info')
- info.info({'version': '1.0.0', 'db_path': DB_PATH})
-
- memories_total = Gauge(
-     'openmemory_memories_total',
-     'Total number of memories stored',
-     ['client']
- )
-
- memories_recent_24h = Gauge(
-     'openmemory_memories_recent_24h',
-     'Memories created in the last 24 hours',
-     ['client']
- )
-
- memories_recent_1h = Gauge(
-     'openmemory_memories_recent_1h',
-     'Memories created in the last hour',
-     ['client']
- )
-
- requests_by_operation = Gauge(
-     'openmemory_requests_total',
-     'Total requests by client and operation (from cybermem_stats table)',
-     ['client_name', 'operation']
- )
-
- errors_by_operation = Gauge(
-     'openmemory_errors_total',
-     'Total errors by client and operation (from cybermem_stats table)',
-     ['client_name', 'operation']
- )
-
- sectors_count = Gauge(
-     'openmemory_sectors_total',
-     'Number of unique sectors per client',
-     ['client']
- )
-
- avg_score = Gauge(
-     'openmemory_avg_score',
-     'Average score of memories',
-     ['client']
- )
-
- # Aggregate metrics (not per-client)
- total_requests_aggregate = Gauge(
-     'openmemory_requests_aggregate_total',
-     'Total API requests (aggregate, from stats table)'
- )
-
- total_errors_aggregate = Gauge(
-     'openmemory_errors_aggregate_total',
-     'Total API errors (aggregate, from stats table)'
- )
-
- success_rate_aggregate = Gauge(
-     'openmemory_success_rate_aggregate',
-     'API success rate percentage (aggregate)'
- )
-
-
- def get_db_connection():
-     """Get SQLite database connection."""
-     try:
-         conn = sqlite3.connect(DB_PATH)
-         conn.row_factory = sqlite3.Row
-         return conn
-     except Exception as e:
-         logger.error(f"Failed to connect to database: {e}")
-         raise
-
-
- def collect_metrics():
-     """Collect all metrics from OpenMemory database."""
-     try:
-         db = get_db_connection()
-         cursor = db.cursor()
-
-         # Metric 1: Total memories per client
-         cursor.execute('''
-             SELECT user_id as client, COUNT(*) as count
-             FROM memories
-             GROUP BY user_id
-         ''')
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             memories_total.labels(client=client).set(row['count'])
-
-         logger.debug(f"Collected total memories for {cursor.rowcount} clients")
-
-         # Metric 2: Recent memories (24h)
-         # Note: created_at is stored as milliseconds since epoch
-         cursor.execute('''
-             SELECT user_id as client, COUNT(*) as count
-             FROM memories
-             WHERE created_at > ?
-             GROUP BY user_id
-         ''', [int((time.time() - 86400) * 1000)])
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             memories_recent_24h.labels(client=client).set(row['count'])
-
-         logger.debug(f"Collected 24h memories for {cursor.rowcount} clients")
-
-         # Metric 3: Recent memories (1h)
-         cursor.execute('''
-             SELECT user_id as client, COUNT(*) as count
-             FROM memories
-             WHERE created_at > ?
-             GROUP BY user_id
-         ''', [int((time.time() - 3600) * 1000)])
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             memories_recent_1h.labels(client=client).set(row['count'])
-
-         logger.debug(f"Collected 1h memories for {cursor.rowcount} clients")
-
-         # Metric 4: Per-client request stats from cybermem_stats table
-         cursor.execute('''
-             SELECT client_name, operation, count, errors
-             FROM cybermem_stats
-         ''')
-         for row in cursor.fetchall():
-             client_name = row['client_name'] or 'unknown'
-             operation = row['operation']
-             count = row['count']
-             errors = row['errors']
-             requests_by_operation.labels(client_name=client_name, operation=operation).set(count)
-             errors_by_operation.labels(client_name=client_name, operation=operation).set(errors)
-
-         logger.debug(f"Collected request stats for {cursor.rowcount} client/operation pairs")
-
-         # Metric 5: Aggregate request stats from OpenMemory's stats table
-         # Note: stats table has no client_id, so these are aggregate only
-         hour_ago_ms = int((time.time() - 3600) * 1000)
-
-         # Get total requests (sum of qps snapshots)
-         cursor.execute('''
-             SELECT SUM(count) as total
-             FROM stats
-             WHERE type = 'qps' AND ts > ?
-         ''', [hour_ago_ms])
-         total_reqs = cursor.fetchone()['total'] or 0
-         total_requests_aggregate.set(total_reqs)
-
-         # Get total errors
-         cursor.execute('''
-             SELECT COUNT(*) as total
-             FROM stats
-             WHERE type = 'error' AND ts > ?
-         ''', [hour_ago_ms])
-         total_errs = cursor.fetchone()['total'] or 0
-         total_errors_aggregate.set(total_errs)
-
-         # Calculate success rate
-         if total_reqs > 0:
-             success_rate = ((total_reqs - total_errs) / total_reqs) * 100
-             success_rate_aggregate.set(max(0.0, success_rate))  # Cap at 0% minimum
-         else:
-             success_rate_aggregate.set(100.0)  # No requests = 100% success
-
-         logger.debug(f"Collected aggregate stats: {total_reqs} reqs, {total_errs} errs")
-
-         # Metric 5: Number of unique sectors per client
-         cursor.execute('''
-             SELECT user_id as client, COUNT(DISTINCT primary_sector) as count
-             FROM memories
-             WHERE primary_sector IS NOT NULL
-             GROUP BY user_id
-         ''')
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             sectors_count.labels(client=client).set(row['count'])
-
-         logger.debug(f"Collected sectors count for {cursor.rowcount} clients")
-
-         # Metric 6: Average feedback score per client
-         cursor.execute('''
-             SELECT user_id as client, AVG(feedback_score) as avg_score
-             FROM memories
-             WHERE feedback_score IS NOT NULL
-             GROUP BY user_id
-         ''')
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             avg_score.labels(client=client).set(row['avg_score'] or 0)
-
-         logger.debug(f"Collected average scores for {cursor.rowcount} clients")
-
-         db.close()
-         logger.info("Metrics collection completed successfully")
-
-     except Exception as e:
-         logger.error(f"Error collecting metrics: {e}", exc_info=True)
-
-
- def get_logs_from_db(start_ms: int, limit: int = 100):
-     """Get access logs from database"""
-     try:
-         db = get_db_connection()
-         cursor = db.cursor()
-
-         cursor.execute('''
-             SELECT timestamp, client_name, client_version, method, endpoint, operation, status, is_error
-             FROM cybermem_access_log
-             WHERE timestamp >= ?
-             ORDER BY timestamp DESC
-             LIMIT ?
-         ''', [start_ms, limit])
-
-         logs = []
-         for row in cursor.fetchall():
-             logs.append({
-                 'timestamp': row['timestamp'],
-                 'client_name': row['client_name'],
-                 'client_version': row['client_version'],
-                 'method': row['method'],
-                 'endpoint': row['endpoint'],
-                 'operation': row['operation'],
-                 'status': row['status'],
-                 'is_error': bool(row['is_error'])
-             })
-
-         db.close()
-         return logs
-     except Exception as e:
-         logger.error(f"Error fetching logs: {e}", exc_info=True)
-         return []
-
-
- # Create Flask app
- app = Flask(__name__)
-
- @app.route('/metrics')
- def metrics():
-     """Prometheus metrics endpoint"""
-     return Response(generate_latest(), mimetype=CONTENT_TYPE_LATEST)
-
- @app.route('/api/logs')
- def api_logs():
-     """Access logs API endpoint"""
-     try:
-         start_ms = int(request.args.get('start', 0))
-         limit = int(request.args.get('limit', 100))
-
-         logs = get_logs_from_db(start_ms, limit)
-         return jsonify({'logs': logs})
-     except Exception as e:
-         logger.error(f"Error in /api/logs: {e}", exc_info=True)
-         return jsonify({'error': str(e)}), 500
-
- def metrics_collection_loop():
-     """Background thread for collecting metrics"""
-     logger.info("Starting metrics collection loop")
-     while True:
-         try:
-             collect_metrics()
-             time.sleep(SCRAPE_INTERVAL)
-         except Exception as e:
-             logger.error(f"Error in metrics collection: {e}", exc_info=True)
-             time.sleep(SCRAPE_INTERVAL)
-
- def main():
-     """Start the exporter and metrics collection loop."""
-     logger.info(f"Starting CyberMem Database Exporter on port {EXPORTER_PORT}")
-     logger.info(f"Database path: {DB_PATH}")
-     logger.info(f"Scrape interval: {SCRAPE_INTERVAL}s")
-
-     # Start metrics collection in background thread
-     metrics_thread = threading.Thread(target=metrics_collection_loop, daemon=True)
-     metrics_thread.start()
-
-     # Start Flask HTTP server
-     logger.info(f"Starting HTTP server on http://0.0.0.0:{EXPORTER_PORT}")
-     logger.info(f"  Metrics: http://0.0.0.0:{EXPORTER_PORT}/metrics")
-     logger.info(f"  Logs API: http://0.0.0.0:{EXPORTER_PORT}/api/logs")
-
-     app.run(host='0.0.0.0', port=EXPORTER_PORT, threaded=True)
-
-
- if __name__ == '__main__':
-     main()
@@ -1,2 +0,0 @@
- prometheus-client==0.19.0
- flask==3.0.0
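
For anyone still running a 0.5.x deployment, a minimal smoke test of the removed exporter's two HTTP endpoints could look like the sketch below. This is hypothetical usage, not part of the package; it assumes the exporter is reachable at localhost:8000, the EXPORTER_PORT default in the exporter.py shown above.

#!/usr/bin/env python3
# Hypothetical smoke test for the db_exporter removed in 0.6.0.
# Assumes a running 0.5.x stack with the exporter on localhost:8000.
import json
import urllib.request

BASE = "http://localhost:8000"  # assumed address; adjust for your deployment

# /metrics serves the Prometheus text exposition format.
with urllib.request.urlopen(f"{BASE}/metrics") as resp:
    text = resp.read().decode()
for line in text.splitlines():
    # Print only the gauge families defined in exporter.py.
    if line.startswith("openmemory_"):
        print(line)

# /api/logs accepts ?start=<ms since epoch> and ?limit=<rows> (see api_logs()).
with urllib.request.urlopen(f"{BASE}/api/logs?start=0&limit=10") as resp:
    payload = json.load(resp)
for entry in payload["logs"]:
    print(entry["timestamp"], entry["client_name"], entry["operation"], entry["status"])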