@cybermem/mcp 0.5.3 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/README.md +1 -1
  2. package/dist/index.js +203 -28
  3. package/package.json +29 -28
  4. package/requirements.txt +2 -0
  5. package/server.py +347 -0
  6. package/src/index.ts +227 -0
  7. package/test_mcp.py +111 -0
  8. package/tsconfig.json +14 -0
  9. package/dist/commands/__tests__/backup.test.js +0 -75
  10. package/dist/commands/__tests__/restore.test.js +0 -70
  11. package/dist/commands/backup.js +0 -52
  12. package/dist/commands/deploy.js +0 -242
  13. package/dist/commands/init.js +0 -65
  14. package/dist/commands/restore.js +0 -62
  15. package/dist/templates/ansible/inventory/hosts.ini +0 -3
  16. package/dist/templates/ansible/playbooks/deploy-cybermem.yml +0 -71
  17. package/dist/templates/ansible/playbooks/stop-cybermem.yml +0 -17
  18. package/dist/templates/charts/cybermem/Chart.yaml +0 -6
  19. package/dist/templates/charts/cybermem/templates/dashboard-deployment.yaml +0 -29
  20. package/dist/templates/charts/cybermem/templates/dashboard-service.yaml +0 -20
  21. package/dist/templates/charts/cybermem/templates/openmemory-deployment.yaml +0 -40
  22. package/dist/templates/charts/cybermem/templates/openmemory-pvc.yaml +0 -10
  23. package/dist/templates/charts/cybermem/templates/openmemory-service.yaml +0 -13
  24. package/dist/templates/charts/cybermem/values-vps.yaml +0 -18
  25. package/dist/templates/charts/cybermem/values.yaml +0 -42
  26. package/dist/templates/docker-compose.yml +0 -236
  27. package/dist/templates/envs/local.example +0 -27
  28. package/dist/templates/envs/rpi.example +0 -27
  29. package/dist/templates/envs/vps.example +0 -25
  30. package/dist/templates/mcp-responder/Dockerfile +0 -6
  31. package/dist/templates/mcp-responder/server.js +0 -22
  32. package/dist/templates/monitoring/db_exporter/Dockerfile +0 -19
  33. package/dist/templates/monitoring/db_exporter/exporter.py +0 -313
  34. package/dist/templates/monitoring/db_exporter/requirements.txt +0 -2
  35. package/dist/templates/monitoring/grafana/dashboards/cybermem.json +0 -1088
  36. package/dist/templates/monitoring/grafana/provisioning/dashboards/default.yml +0 -12
  37. package/dist/templates/monitoring/grafana/provisioning/datasources/prometheus.yml +0 -9
  38. package/dist/templates/monitoring/log_exporter/Dockerfile +0 -13
  39. package/dist/templates/monitoring/log_exporter/exporter.py +0 -274
  40. package/dist/templates/monitoring/log_exporter/requirements.txt +0 -1
  41. package/dist/templates/monitoring/postgres_exporter/queries.yml +0 -22
  42. package/dist/templates/monitoring/prometheus/prometheus.yml +0 -22
  43. package/dist/templates/monitoring/traefik/dynamic/.gitkeep +0 -0
  44. package/dist/templates/monitoring/traefik/traefik.yml +0 -32
  45. package/dist/templates/monitoring/vector/vector.toml/vector.yaml +0 -77
  46. package/dist/templates/monitoring/vector/vector.yaml +0 -106
  47. package/dist/templates/openmemory/Dockerfile +0 -19
  48. package/templates/ansible/inventory/hosts.ini +0 -3
  49. package/templates/ansible/playbooks/deploy-cybermem.yml +0 -71
  50. package/templates/ansible/playbooks/stop-cybermem.yml +0 -17
  51. package/templates/charts/cybermem/Chart.yaml +0 -6
  52. package/templates/charts/cybermem/templates/dashboard-deployment.yaml +0 -29
  53. package/templates/charts/cybermem/templates/dashboard-service.yaml +0 -20
  54. package/templates/charts/cybermem/templates/openmemory-deployment.yaml +0 -40
  55. package/templates/charts/cybermem/templates/openmemory-pvc.yaml +0 -10
  56. package/templates/charts/cybermem/templates/openmemory-service.yaml +0 -13
  57. package/templates/charts/cybermem/values-vps.yaml +0 -18
  58. package/templates/charts/cybermem/values.yaml +0 -42
  59. package/templates/docker-compose.yml +0 -236
  60. package/templates/envs/local.example +0 -27
  61. package/templates/envs/rpi.example +0 -27
  62. package/templates/envs/vps.example +0 -25
  63. package/templates/mcp-responder/Dockerfile +0 -6
  64. package/templates/mcp-responder/server.js +0 -22
  65. package/templates/monitoring/db_exporter/Dockerfile +0 -19
  66. package/templates/monitoring/db_exporter/exporter.py +0 -313
  67. package/templates/monitoring/db_exporter/requirements.txt +0 -2
  68. package/templates/monitoring/grafana/dashboards/cybermem.json +0 -1088
  69. package/templates/monitoring/grafana/provisioning/dashboards/default.yml +0 -12
  70. package/templates/monitoring/grafana/provisioning/datasources/prometheus.yml +0 -9
  71. package/templates/monitoring/log_exporter/Dockerfile +0 -13
  72. package/templates/monitoring/log_exporter/exporter.py +0 -274
  73. package/templates/monitoring/log_exporter/requirements.txt +0 -1
  74. package/templates/monitoring/postgres_exporter/queries.yml +0 -22
  75. package/templates/monitoring/prometheus/prometheus.yml +0 -22
  76. package/templates/monitoring/traefik/dynamic/.gitkeep +0 -0
  77. package/templates/monitoring/traefik/traefik.yml +0 -32
  78. package/templates/monitoring/vector/vector.toml/vector.yaml +0 -77
  79. package/templates/monitoring/vector/vector.yaml +0 -106
  80. package/templates/openmemory/Dockerfile +0 -19
@@ -1,236 +0,0 @@
1
- # CyberMem - OpenMemory + DevOps monitoring stack
2
- # For local development only
3
- # Production deployment: use Helm chart (charts/cybermem/)
4
-
5
- services:
6
- traefik:
7
- image: traefik:v3.0
8
- container_name: cybermem-traefik
9
- command:
10
- - --api.dashboard=true
11
- - --api.insecure=true
12
- - --providers.docker=true
13
- - --providers.docker.exposedbydefault=false
14
- - --entrypoints.web.address=:8626
15
- - --accesslog=true
16
- - --accesslog.filepath=/var/log/traefik/access.log
17
- - --accesslog.format=json
18
- ports:
19
- - "8626:8626"
20
- volumes:
21
- - /var/run/docker.sock:/var/run/docker.sock:ro
22
- - ./monitoring/traefik/traefik.yml:/etc/traefik/traefik.yml:ro
23
- - ./monitoring/traefik/dynamic:/etc/traefik/dynamic:ro
24
- - traefik-logs:/var/log/traefik
25
- labels:
26
- - traefik.enable=true
27
- restart: unless-stopped
28
-
29
- # Workaround: responds 200 on GET /mcp for Perplexity validation
30
- mcp-responder:
31
- build:
32
- context: ./mcp-responder
33
- dockerfile: Dockerfile
34
- container_name: cybermem-mcp-responder
35
- labels:
36
- - traefik.enable=true
37
- - traefik.http.routers.mcp-get.entrypoints=web
38
- - traefik.http.routers.mcp-get.rule=Method(`GET`) && Path(`/mcp`)
39
- - traefik.http.routers.mcp-get.priority=200
40
- - traefik.http.services.mcp-get.loadbalancer.server.port=8081
41
- restart: unless-stopped
42
-
43
- openmemory:
44
- build:
45
- context: ./openmemory
46
- dockerfile: Dockerfile
47
- container_name: cybermem-openmemory
48
- ports: [] # Access via Traefik on 8626
49
- volumes:
50
- - openmemory-data:/data
51
- - ${CYBERMEM_ENV_PATH}:/.env
52
- environment:
53
- # Core settings
54
- OM_PORT: "8080"
55
- OM_TIER: "deep"
56
- # API Key is loaded from /.env file
57
-
58
- # Embeddings (local dev uses Ollama)
59
- OM_EMBEDDINGS: ${EMBEDDINGS_PROVIDER:-ollama}
60
- OLLAMA_URL: ${OLLAMA_URL:-http://ollama:11434}
61
- OPENAI_API_KEY: ${OPENAI_API_KEY:-}
62
-
63
- # Database (local dev uses SQLite)
64
- OM_METADATA_BACKEND: ${DB_BACKEND:-sqlite}
65
- OM_DB_PATH: ${DB_PATH:-/data/openmemory.sqlite}
66
- OM_VECTOR_BACKEND: ${VECTOR_BACKEND:-sqlite}
67
-
68
- # PostgreSQL (for production/testing)
69
- OM_PG_HOST: ${PG_HOST:-postgres}
70
- OM_PG_PORT: ${PG_PORT:-5432}
71
- OM_PG_DB: ${PG_DB:-openmemory}
72
- OM_PG_USER: ${PG_USER:-openmemory}
73
- OM_PG_PASSWORD: ${PG_PASSWORD:-}
74
-
75
- # Performance
76
- OM_RATE_LIMIT_ENABLED: "true"
77
- OM_RATE_LIMIT_MAX_REQUESTS: "1000"
78
-
79
- labels:
80
- - traefik.enable=true
81
- - traefik.http.routers.openmemory.entrypoints=web
82
- - traefik.http.routers.openmemory.rule=PathPrefix(`/memory`) || PathPrefix(`/health`) || PathPrefix(`/v1`) || PathPrefix(`/api`) || PathPrefix(`/all`) || PathPrefix(`/add`) || PathPrefix(`/mcp`) || PathPrefix(`/sse`)
83
- - traefik.http.services.openmemory.loadbalancer.server.port=8080
84
- healthcheck:
85
- test:
86
- [
87
- "CMD",
88
- "wget",
89
- "--quiet",
90
- "--tries=1",
91
- "--spider",
92
- "http://localhost:8080/health",
93
- ]
94
- interval: 30s
95
- timeout: 10s
96
- retries: 3
97
- start_period: 40s
98
- restart: unless-stopped
99
- depends_on:
100
- - traefik
101
-
102
- db-exporter:
103
- image: ghcr.io/mikhailkogan17/cybermem-db_exporter:latest
104
- container_name: cybermem-db-exporter
105
- environment:
106
- DB_PATH: /data/openmemory.sqlite
107
- SCRAPE_INTERVAL: "15"
108
- EXPORTER_PORT: "8000"
109
- ports:
110
- - "8000:8000"
111
- volumes:
112
- - openmemory-data:/data:ro
113
- restart: unless-stopped
114
- depends_on:
115
- - openmemory
116
-
117
- log-exporter:
118
- image: ghcr.io/mikhailkogan17/cybermem-log_exporter:latest
119
- container_name: cybermem-log-exporter
120
- environment:
121
- LOG_FILE: /var/log/traefik/access.log
122
- SCRAPE_INTERVAL: "5"
123
- EXPORTER_PORT: "8001"
124
- DB_PATH: /data/openmemory.sqlite
125
- volumes:
126
- - traefik-logs:/var/log/traefik:ro
127
- - openmemory-data:/data
128
- - ./monitoring/log_exporter/exporter.py:/app/exporter.py:ro
129
- restart: unless-stopped
130
- depends_on:
131
- - traefik
132
- - openmemory
133
-
134
- postgres:
135
- image: postgres:15-alpine
136
- container_name: cybermem-postgres
137
- environment:
138
- POSTGRES_DB: ${PG_DB:-openmemory}
139
- POSTGRES_USER: ${PG_USER:-openmemory}
140
- POSTGRES_PASSWORD: ${PG_PASSWORD:-postgres}
141
- ports:
142
- - "5432:5432"
143
- volumes:
144
- - postgres-data:/var/lib/postgresql/data
145
- healthcheck:
146
- test: ["CMD-SHELL", "pg_isready -U ${PG_USER:-openmemory}"]
147
- interval: 10s
148
- timeout: 5s
149
- retries: 5
150
- restart: unless-stopped
151
- profiles:
152
- - postgres
153
-
154
- postgres-exporter:
155
- image: prometheuscommunity/postgres-exporter:v0.15.0
156
- container_name: cybermem-postgres-exporter
157
- environment:
158
- DATA_SOURCE_NAME: postgresql://${PG_USER:-openmemory}:${PG_PASSWORD:-postgres}@postgres:5432/${PG_DB:-openmemory}?sslmode=disable
159
- PG_EXPORTER_EXTEND_QUERY_PATH: /queries.yml
160
- ports:
161
- - "9187:9187"
162
- volumes:
163
- - ./monitoring/postgres_exporter/queries.yml:/queries.yml:ro
164
- restart: unless-stopped
165
- depends_on:
166
- - postgres
167
- profiles:
168
- - postgres
169
-
170
- ollama:
171
- image: ollama/ollama:latest
172
- container_name: cybermem-ollama
173
- ports:
174
- - "11434:11434"
175
- volumes:
176
- - ollama-models:/root/.ollama
177
- restart: unless-stopped
178
- profiles:
179
- - ollama
180
-
181
- prometheus:
182
- image: prom/prometheus:v2.48.0
183
- container_name: cybermem-prometheus
184
- command:
185
- - --config.file=/etc/prometheus/prometheus.yml
186
- - --storage.tsdb.path=/prometheus
187
- - --storage.tsdb.retention.time=${PROM_RETENTION:-7d}
188
- - --web.console.libraries=/usr/share/prometheus/console_libraries
189
- - --web.console.templates=/usr/share/prometheus/consoles
190
- ports:
191
- - "9092:9090"
192
- volumes:
193
- - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
194
- - prometheus-data:/prometheus
195
- restart: unless-stopped
196
- depends_on:
197
- - db-exporter
198
-
199
- dashboard:
200
- image: ghcr.io/mikhailkogan17/cybermem-dashboard:latest
201
- container_name: cybermem-dashboard
202
- environment:
203
- NEXT_PUBLIC_PROMETHEUS_URL: http://prometheus:9090
204
- PROMETHEUS_URL: http://prometheus:9090
205
- OM_API_KEY: ${OM_API_KEY:-dev-secret-key}
206
- # WATCHPACK_POLLING: "true" # Enable if hot reload blocks (high CPU usage)
207
- ports:
208
- - "3000:3000"
209
- volumes:
210
- - openmemory-data:/data
211
- - /var/run/docker.sock:/var/run/docker.sock
212
- - ${CYBERMEM_ENV_PATH}:/app/shared.env
213
- restart: unless-stopped
214
- depends_on:
215
- - prometheus
216
-
217
- volumes:
218
- openmemory-data:
219
- name: cybermem-openmemory-data
220
- driver: local
221
- postgres-data:
222
- name: cybermem-postgres-data
223
- driver: local
224
- ollama-models:
225
- name: cybermem-ollama-models
226
- driver: local
227
- prometheus-data:
228
- name: cybermem-prometheus-data
229
- driver: local
230
- traefik-logs:
231
- name: cybermem-traefik-logs
232
- driver: local
233
-
234
- networks:
235
- default:
236
- name: cybermem-network
@@ -1,27 +0,0 @@
1
- # Example environment configuration
2
- # Copy to .env and customize
3
-
4
- # Embeddings provider: ollama (local) or openai (cloud)
5
- EMBEDDINGS_PROVIDER=ollama
6
- OLLAMA_URL=http://ollama:11434
7
- OPENAI_API_KEY=
8
-
9
- # Database backend: sqlite (local/rpi) or postgres (vps)
10
- DB_BACKEND=sqlite
11
- DB_PATH=/data/openmemory.sqlite
12
- VECTOR_BACKEND=sqlite
13
-
14
- # PostgreSQL settings (only for DB_BACKEND=postgres)
15
- PG_HOST=postgres
16
- PG_PORT=5432
17
- PG_DB=openmemory
18
- PG_USER=openmemory
19
- PG_PASSWORD=change-me
20
-
21
- # OpenMemory API key (Optional for local mode)
22
- # OM_API_KEY=
23
-
24
- # Monitoring
25
- PROM_RETENTION=7d
26
- GRAFANA_USER=admin
27
- GRAFANA_PASSWORD=admin
@@ -1,27 +0,0 @@
1
- # Raspberry Pi environment
2
- # Optimized for low memory (1GB total)
3
- DOCKER_PLATFORM=linux/arm64
4
-
5
-
6
- # Embeddings (use Ollama with small models)
7
- EMBEDDINGS_PROVIDER=ollama
8
- OLLAMA_URL=http://ollama:11434
9
- OPENAI_API_KEY=
10
-
11
- # Database (SQLite for RPi - low memory footprint)
12
- DB_BACKEND=sqlite
13
- DB_PATH=/data/openmemory.sqlite
14
-
15
- # PostgreSQL (not used)
16
- PG_HOST=postgres
17
- PG_DB=openmemory
18
- PG_USER=openmemory
19
- PG_PASSWORD=not-used
20
-
21
- # OpenMemory
22
- OM_API_KEY=key-change-me
23
-
24
- # Monitoring (short retention for disk space)
25
- PROM_RETENTION=3d
26
- GRAFANA_USER=admin
27
- GRAFANA_PASSWORD=admin
@@ -1,25 +0,0 @@
1
- # VPS production environment
2
- # For Hetzner CX22 or similar (2 vCPU, 4GB RAM)
3
-
4
- # Embeddings (use OpenAI for production)
5
- EMBEDDINGS_PROVIDER=openai
6
- OPENAI_API_KEY=sk-change-me-in-production
7
- OLLAMA_URL=
8
-
9
- # Database (PostgreSQL for production)
10
- DB_BACKEND=postgres
11
- VECTOR_BACKEND=postgres
12
-
13
- # PostgreSQL
14
- PG_HOST=postgres
15
- PG_DB=openmemory
16
- PG_USER=openmemory
17
- PG_PASSWORD=change-me-in-production-use-secrets
18
-
19
- # OpenMemory
20
- OM_API_KEY=change-me-in-production-use-secrets
21
-
22
- # Monitoring
23
- PROM_RETENTION=30d
24
- GRAFANA_USER=admin
25
- GRAFANA_PASSWORD=change-me-in-production-use-secrets
@@ -1,6 +0,0 @@
1
- FROM node:20-alpine
2
- WORKDIR /app
3
- COPY server.js .
4
- USER node
5
- EXPOSE 8081
6
- CMD ["node", "server.js"]
@@ -1,22 +0,0 @@
1
- const http = require('http');
2
-
3
- const server = http.createServer((req, res) => {
4
- if (req.method === 'GET' && req.url === '/mcp') {
5
- res.writeHead(200, { 'Content-Type': 'application/json' });
6
- res.end(JSON.stringify({
7
- jsonrpc: '2.0',
8
- result: {
9
- serverInfo: { name: 'openmemory-mcp', version: '1.3.2' },
10
- protocolVersion: '2025-06-18',
11
- capabilities: { tools: {}, resources: {}, logging: {} },
12
- message: 'Use POST /mcp for MCP requests'
13
- },
14
- id: null
15
- }));
16
- } else {
17
- res.writeHead(404);
18
- res.end();
19
- }
20
- });
21
-
22
- server.listen(8081, () => console.log('MCP responder on :8081'));
@@ -1,19 +0,0 @@
1
- FROM python:3.11-alpine
2
-
3
- WORKDIR /app
4
-
5
- # Install dependencies
6
- COPY requirements.txt .
7
- RUN pip install --no-cache-dir -r requirements.txt
8
-
9
- # Copy exporter script
10
- COPY exporter.py .
11
-
12
- # Make script executable
13
- RUN chmod +x exporter.py
14
-
15
- # Expose Prometheus metrics port
16
- EXPOSE 8000
17
-
18
- # Run exporter
19
- CMD ["python", "-u", "exporter.py"]
@@ -1,313 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- CyberMem Database Exporter for Prometheus
4
-
5
- Queries OpenMemory's database and exports per-client metrics to Prometheus.
6
- Replaces the complex Vector + Traefik access logs pipeline with simple DB queries.
7
- """
8
-
9
- import os
10
- import time
11
- import sqlite3
12
- import json
13
- from prometheus_client import Gauge, Info, generate_latest, CONTENT_TYPE_LATEST
14
- from flask import Flask, Response, request, jsonify
15
- import logging
16
- import threading
17
-
18
- # Configuration
19
- DB_PATH = os.getenv("DB_PATH", "/data/openmemory.sqlite")
20
- SCRAPE_INTERVAL = int(os.getenv("SCRAPE_INTERVAL", "15")) # seconds
21
- EXPORTER_PORT = int(os.getenv("EXPORTER_PORT", "8000"))
22
-
23
- # Setup logging
24
- logging.basicConfig(
25
- level=logging.INFO,
26
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
27
- )
28
- logger = logging.getLogger("db_exporter")
29
-
30
- # Prometheus metrics
31
- info = Info('cybermem_exporter', 'CyberMem Database Exporter Info')
32
- info.info({'version': '1.0.0', 'db_path': DB_PATH})
33
-
34
- memories_total = Gauge(
35
- 'openmemory_memories_total',
36
- 'Total number of memories stored',
37
- ['client']
38
- )
39
-
40
- memories_recent_24h = Gauge(
41
- 'openmemory_memories_recent_24h',
42
- 'Memories created in the last 24 hours',
43
- ['client']
44
- )
45
-
46
- memories_recent_1h = Gauge(
47
- 'openmemory_memories_recent_1h',
48
- 'Memories created in the last hour',
49
- ['client']
50
- )
51
-
52
- requests_by_operation = Gauge(
53
- 'openmemory_requests_total',
54
- 'Total requests by client and operation (from cybermem_stats table)',
55
- ['client_name', 'operation']
56
- )
57
-
58
- errors_by_operation = Gauge(
59
- 'openmemory_errors_total',
60
- 'Total errors by client and operation (from cybermem_stats table)',
61
- ['client_name', 'operation']
62
- )
63
-
64
- sectors_count = Gauge(
65
- 'openmemory_sectors_total',
66
- 'Number of unique sectors per client',
67
- ['client']
68
- )
69
-
70
- avg_score = Gauge(
71
- 'openmemory_avg_score',
72
- 'Average score of memories',
73
- ['client']
74
- )
75
-
76
- # Aggregate metrics (not per-client)
77
- total_requests_aggregate = Gauge(
78
- 'openmemory_requests_aggregate_total',
79
- 'Total API requests (aggregate, from stats table)'
80
- )
81
-
82
- total_errors_aggregate = Gauge(
83
- 'openmemory_errors_aggregate_total',
84
- 'Total API errors (aggregate, from stats table)'
85
- )
86
-
87
- success_rate_aggregate = Gauge(
88
- 'openmemory_success_rate_aggregate',
89
- 'API success rate percentage (aggregate)'
90
- )
91
-
92
-
93
- def get_db_connection():
94
- """Get SQLite database connection."""
95
- try:
96
- conn = sqlite3.connect(DB_PATH)
97
- conn.row_factory = sqlite3.Row
98
- return conn
99
- except Exception as e:
100
- logger.error(f"Failed to connect to database: {e}")
101
- raise
102
-
103
-
104
- def collect_metrics():
105
- """Collect all metrics from OpenMemory database."""
106
- try:
107
- db = get_db_connection()
108
- cursor = db.cursor()
109
-
110
- # Metric 1: Total memories per client
111
- cursor.execute('''
112
- SELECT user_id as client, COUNT(*) as count
113
- FROM memories
114
- GROUP BY user_id
115
- ''')
116
- for row in cursor.fetchall():
117
- client = row['client'] or 'anonymous'
118
- memories_total.labels(client=client).set(row['count'])
119
-
120
- logger.debug(f"Collected total memories for {cursor.rowcount} clients")
121
-
122
- # Metric 2: Recent memories (24h)
123
- # Note: created_at is stored as milliseconds since epoch
124
- cursor.execute('''
125
- SELECT user_id as client, COUNT(*) as count
126
- FROM memories
127
- WHERE created_at > ?
128
- GROUP BY user_id
129
- ''', [int((time.time() - 86400) * 1000)])
130
- for row in cursor.fetchall():
131
- client = row['client'] or 'anonymous'
132
- memories_recent_24h.labels(client=client).set(row['count'])
133
-
134
- logger.debug(f"Collected 24h memories for {cursor.rowcount} clients")
135
-
136
- # Metric 3: Recent memories (1h)
137
- cursor.execute('''
138
- SELECT user_id as client, COUNT(*) as count
139
- FROM memories
140
- WHERE created_at > ?
141
- GROUP BY user_id
142
- ''', [int((time.time() - 3600) * 1000)])
143
- for row in cursor.fetchall():
144
- client = row['client'] or 'anonymous'
145
- memories_recent_1h.labels(client=client).set(row['count'])
146
-
147
- logger.debug(f"Collected 1h memories for {cursor.rowcount} clients")
148
-
149
- # Metric 4: Per-client request stats from cybermem_stats table
150
- cursor.execute('''
151
- SELECT client_name, operation, count, errors
152
- FROM cybermem_stats
153
- ''')
154
- for row in cursor.fetchall():
155
- client_name = row['client_name'] or 'unknown'
156
- operation = row['operation']
157
- count = row['count']
158
- errors = row['errors']
159
- requests_by_operation.labels(client_name=client_name, operation=operation).set(count)
160
- errors_by_operation.labels(client_name=client_name, operation=operation).set(errors)
161
-
162
- logger.debug(f"Collected request stats for {cursor.rowcount} client/operation pairs")
163
-
164
- # Metric 5: Aggregate request stats from OpenMemory's stats table
165
- # Note: stats table has no client_id, so these are aggregate only
166
- hour_ago_ms = int((time.time() - 3600) * 1000)
167
-
168
- # Get total requests (sum of qps snapshots)
169
- cursor.execute('''
170
- SELECT SUM(count) as total
171
- FROM stats
172
- WHERE type = 'qps' AND ts > ?
173
- ''', [hour_ago_ms])
174
- total_reqs = cursor.fetchone()['total'] or 0
175
- total_requests_aggregate.set(total_reqs)
176
-
177
- # Get total errors
178
- cursor.execute('''
179
- SELECT COUNT(*) as total
180
- FROM stats
181
- WHERE type = 'error' AND ts > ?
182
- ''', [hour_ago_ms])
183
- total_errs = cursor.fetchone()['total'] or 0
184
- total_errors_aggregate.set(total_errs)
185
-
186
- # Calculate success rate
187
- if total_reqs > 0:
188
- success_rate = ((total_reqs - total_errs) / total_reqs) * 100
189
- success_rate_aggregate.set(max(0.0, success_rate)) # Cap at 0% minimum
190
- else:
191
- success_rate_aggregate.set(100.0) # No requests = 100% success
192
-
193
- logger.debug(f"Collected aggregate stats: {total_reqs} reqs, {total_errs} errs")
194
-
195
- # Metric 5: Number of unique sectors per client
196
- cursor.execute('''
197
- SELECT user_id as client, COUNT(DISTINCT primary_sector) as count
198
- FROM memories
199
- WHERE primary_sector IS NOT NULL
200
- GROUP BY user_id
201
- ''')
202
- for row in cursor.fetchall():
203
- client = row['client'] or 'anonymous'
204
- sectors_count.labels(client=client).set(row['count'])
205
-
206
- logger.debug(f"Collected sectors count for {cursor.rowcount} clients")
207
-
208
- # Metric 6: Average feedback score per client
209
- cursor.execute('''
210
- SELECT user_id as client, AVG(feedback_score) as avg_score
211
- FROM memories
212
- WHERE feedback_score IS NOT NULL
213
- GROUP BY user_id
214
- ''')
215
- for row in cursor.fetchall():
216
- client = row['client'] or 'anonymous'
217
- avg_score.labels(client=client).set(row['avg_score'] or 0)
218
-
219
- logger.debug(f"Collected average scores for {cursor.rowcount} clients")
220
-
221
- db.close()
222
- logger.info("Metrics collection completed successfully")
223
-
224
- except Exception as e:
225
- logger.error(f"Error collecting metrics: {e}", exc_info=True)
226
-
227
-
228
- def get_logs_from_db(start_ms: int, limit: int = 100):
229
- """Get access logs from database"""
230
- try:
231
- db = get_db_connection()
232
- cursor = db.cursor()
233
-
234
- cursor.execute('''
235
- SELECT timestamp, client_name, client_version, method, endpoint, operation, status, is_error
236
- FROM cybermem_access_log
237
- WHERE timestamp >= ?
238
- ORDER BY timestamp DESC
239
- LIMIT ?
240
- ''', [start_ms, limit])
241
-
242
- logs = []
243
- for row in cursor.fetchall():
244
- logs.append({
245
- 'timestamp': row['timestamp'],
246
- 'client_name': row['client_name'],
247
- 'client_version': row['client_version'],
248
- 'method': row['method'],
249
- 'endpoint': row['endpoint'],
250
- 'operation': row['operation'],
251
- 'status': row['status'],
252
- 'is_error': bool(row['is_error'])
253
- })
254
-
255
- db.close()
256
- return logs
257
- except Exception as e:
258
- logger.error(f"Error fetching logs: {e}", exc_info=True)
259
- return []
260
-
261
-
262
- # Create Flask app
263
- app = Flask(__name__)
264
-
265
- @app.route('/metrics')
266
- def metrics():
267
- """Prometheus metrics endpoint"""
268
- return Response(generate_latest(), mimetype=CONTENT_TYPE_LATEST)
269
-
270
- @app.route('/api/logs')
271
- def api_logs():
272
- """Access logs API endpoint"""
273
- try:
274
- start_ms = int(request.args.get('start', 0))
275
- limit = int(request.args.get('limit', 100))
276
-
277
- logs = get_logs_from_db(start_ms, limit)
278
- return jsonify({'logs': logs})
279
- except Exception as e:
280
- logger.error(f"Error in /api/logs: {e}", exc_info=True)
281
- return jsonify({'error': str(e)}), 500
282
-
283
- def metrics_collection_loop():
284
- """Background thread for collecting metrics"""
285
- logger.info("Starting metrics collection loop")
286
- while True:
287
- try:
288
- collect_metrics()
289
- time.sleep(SCRAPE_INTERVAL)
290
- except Exception as e:
291
- logger.error(f"Error in metrics collection: {e}", exc_info=True)
292
- time.sleep(SCRAPE_INTERVAL)
293
-
294
- def main():
295
- """Start the exporter and metrics collection loop."""
296
- logger.info(f"Starting CyberMem Database Exporter on port {EXPORTER_PORT}")
297
- logger.info(f"Database path: {DB_PATH}")
298
- logger.info(f"Scrape interval: {SCRAPE_INTERVAL}s")
299
-
300
- # Start metrics collection in background thread
301
- metrics_thread = threading.Thread(target=metrics_collection_loop, daemon=True)
302
- metrics_thread.start()
303
-
304
- # Start Flask HTTP server
305
- logger.info(f"Starting HTTP server on http://0.0.0.0:{EXPORTER_PORT}")
306
- logger.info(f" Metrics: http://0.0.0.0:{EXPORTER_PORT}/metrics")
307
- logger.info(f" Logs API: http://0.0.0.0:{EXPORTER_PORT}/api/logs")
308
-
309
- app.run(host='0.0.0.0', port=EXPORTER_PORT, threaded=True)
310
-
311
-
312
- if __name__ == '__main__':
313
- main()
@@ -1,2 +0,0 @@
1
- prometheus-client==0.19.0
2
- flask==3.0.0