@cybermem/mcp 0.5.1 → 0.6.0

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (68)
  1. package/README.md +1 -1
  2. package/dist/index.js +187 -194
  3. package/package.json +29 -28
  4. package/requirements.txt +2 -0
  5. package/server.py +347 -0
  6. package/src/index.ts +227 -0
  7. package/test_mcp.py +111 -0
  8. package/tsconfig.json +14 -0
  9. package/dist/commands/deploy.js +0 -230
  10. package/dist/commands/init.js +0 -65
  11. package/dist/templates/ansible/inventory/hosts.ini +0 -3
  12. package/dist/templates/ansible/playbooks/deploy-cybermem.yml +0 -71
  13. package/dist/templates/ansible/playbooks/stop-cybermem.yml +0 -17
  14. package/dist/templates/charts/cybermem/Chart.yaml +0 -6
  15. package/dist/templates/charts/cybermem/templates/dashboard-deployment.yaml +0 -29
  16. package/dist/templates/charts/cybermem/templates/dashboard-service.yaml +0 -20
  17. package/dist/templates/charts/cybermem/templates/openmemory-deployment.yaml +0 -40
  18. package/dist/templates/charts/cybermem/templates/openmemory-pvc.yaml +0 -10
  19. package/dist/templates/charts/cybermem/templates/openmemory-service.yaml +0 -13
  20. package/dist/templates/charts/cybermem/values-vps.yaml +0 -18
  21. package/dist/templates/charts/cybermem/values.yaml +0 -42
  22. package/dist/templates/docker-compose.yml +0 -219
  23. package/dist/templates/envs/local.example +0 -27
  24. package/dist/templates/envs/rpi.example +0 -27
  25. package/dist/templates/envs/vps.example +0 -25
  26. package/dist/templates/monitoring/db_exporter/Dockerfile +0 -19
  27. package/dist/templates/monitoring/db_exporter/exporter.py +0 -313
  28. package/dist/templates/monitoring/db_exporter/requirements.txt +0 -2
  29. package/dist/templates/monitoring/grafana/dashboards/cybermem.json +0 -1088
  30. package/dist/templates/monitoring/grafana/provisioning/dashboards/default.yml +0 -12
  31. package/dist/templates/monitoring/grafana/provisioning/datasources/prometheus.yml +0 -9
  32. package/dist/templates/monitoring/log_exporter/Dockerfile +0 -13
  33. package/dist/templates/monitoring/log_exporter/exporter.py +0 -274
  34. package/dist/templates/monitoring/log_exporter/requirements.txt +0 -1
  35. package/dist/templates/monitoring/postgres_exporter/queries.yml +0 -22
  36. package/dist/templates/monitoring/prometheus/prometheus.yml +0 -22
  37. package/dist/templates/monitoring/traefik/traefik.yml +0 -32
  38. package/dist/templates/monitoring/vector/vector.toml/vector.yaml +0 -77
  39. package/dist/templates/monitoring/vector/vector.yaml +0 -106
  40. package/templates/ansible/inventory/hosts.ini +0 -3
  41. package/templates/ansible/playbooks/deploy-cybermem.yml +0 -71
  42. package/templates/ansible/playbooks/stop-cybermem.yml +0 -17
  43. package/templates/charts/cybermem/Chart.yaml +0 -6
  44. package/templates/charts/cybermem/templates/dashboard-deployment.yaml +0 -29
  45. package/templates/charts/cybermem/templates/dashboard-service.yaml +0 -20
  46. package/templates/charts/cybermem/templates/openmemory-deployment.yaml +0 -40
  47. package/templates/charts/cybermem/templates/openmemory-pvc.yaml +0 -10
  48. package/templates/charts/cybermem/templates/openmemory-service.yaml +0 -13
  49. package/templates/charts/cybermem/values-vps.yaml +0 -18
  50. package/templates/charts/cybermem/values.yaml +0 -42
  51. package/templates/docker-compose.yml +0 -219
  52. package/templates/envs/local.example +0 -27
  53. package/templates/envs/rpi.example +0 -27
  54. package/templates/envs/vps.example +0 -25
  55. package/templates/monitoring/db_exporter/Dockerfile +0 -19
  56. package/templates/monitoring/db_exporter/exporter.py +0 -313
  57. package/templates/monitoring/db_exporter/requirements.txt +0 -2
  58. package/templates/monitoring/grafana/dashboards/cybermem.json +0 -1088
  59. package/templates/monitoring/grafana/provisioning/dashboards/default.yml +0 -12
  60. package/templates/monitoring/grafana/provisioning/datasources/prometheus.yml +0 -9
  61. package/templates/monitoring/log_exporter/Dockerfile +0 -13
  62. package/templates/monitoring/log_exporter/exporter.py +0 -274
  63. package/templates/monitoring/log_exporter/requirements.txt +0 -1
  64. package/templates/monitoring/postgres_exporter/queries.yml +0 -22
  65. package/templates/monitoring/prometheus/prometheus.yml +0 -22
  66. package/templates/monitoring/traefik/traefik.yml +0 -32
  67. package/templates/monitoring/vector/vector.toml/vector.yaml +0 -77
  68. package/templates/monitoring/vector/vector.yaml +0 -106
package/dist/templates/docker-compose.yml
@@ -1,219 +0,0 @@
- # CyberMem - OpenMemory + DevOps monitoring stack
- # For local development only
- # Production deployment: use Helm chart (charts/cybermem/)
-
- services:
-   traefik:
-     image: traefik:v3.0
-     container_name: cybermem-traefik
-     command:
-       - --api.dashboard=true
-       - --api.insecure=true
-       - --providers.docker=true
-       - --providers.docker.exposedbydefault=false
-       - --entrypoints.web.address=:8626
-       - --accesslog=true
-       - --accesslog.filepath=/var/log/traefik/access.log
-       - --accesslog.format=json
-     ports:
-       - "8626:8626"
-     volumes:
-       - /var/run/docker.sock:/var/run/docker.sock:ro
-       - ./monitoring/traefik/traefik.yml:/etc/traefik/traefik.yml:ro
-       - traefik-logs:/var/log/traefik
-     labels:
-       - traefik.enable=true
-     restart: unless-stopped
-
-   openmemory:
-     image: ghcr.io/mikhailkogan17/cybermem-openmemory:latest
-     container_name: cybermem-openmemory
-     ports: [] # Access via Traefik on 8626
-     volumes:
-       - openmemory-data:/data
-       - ${CYBERMEM_ENV_PATH}:/.env
-     environment:
-       # Core settings
-       OM_PORT: "8080"
-       OM_TIER: "deep"
-       # API Key is loaded from /.env file
-
-       # Embeddings (local dev uses Ollama)
-       OM_EMBEDDINGS: ${EMBEDDINGS_PROVIDER:-ollama}
-       OLLAMA_URL: ${OLLAMA_URL:-http://ollama:11434}
-       OPENAI_API_KEY: ${OPENAI_API_KEY:-}
-
-       # Database (local dev uses SQLite)
-       OM_METADATA_BACKEND: ${DB_BACKEND:-sqlite}
-       OM_DB_PATH: ${DB_PATH:-/data/openmemory.sqlite}
-       OM_VECTOR_BACKEND: ${VECTOR_BACKEND:-sqlite}
-
-       # PostgreSQL (for production/testing)
-       OM_PG_HOST: ${PG_HOST:-postgres}
-       OM_PG_PORT: ${PG_PORT:-5432}
-       OM_PG_DB: ${PG_DB:-openmemory}
-       OM_PG_USER: ${PG_USER:-openmemory}
-       OM_PG_PASSWORD: ${PG_PASSWORD:-}
-
-       # Performance
-       OM_RATE_LIMIT_ENABLED: "true"
-       OM_RATE_LIMIT_MAX_REQUESTS: "1000"
-
-     labels:
-       - traefik.enable=true
-       - traefik.http.routers.openmemory.entrypoints=web
-       - traefik.http.routers.openmemory.rule=PathPrefix(`/memory`) || PathPrefix(`/health`) || PathPrefix(`/v1`) || PathPrefix(`/api`) || PathPrefix(`/all`) || PathPrefix(`/add`) || PathPrefix(`/mcp`) || PathPrefix(`/sse`)
-       - traefik.http.services.openmemory.loadbalancer.server.port=8080
-     healthcheck:
-       test:
-         [
-           "CMD",
-           "wget",
-           "--quiet",
-           "--tries=1",
-           "--spider",
-           "http://localhost:8080/health",
-         ]
-       interval: 30s
-       timeout: 10s
-       retries: 3
-       start_period: 40s
-     restart: unless-stopped
-     depends_on:
-       - traefik
-
-   db-exporter:
-     image: ghcr.io/mikhailkogan17/cybermem-db_exporter:latest
-     container_name: cybermem-db-exporter
-     environment:
-       DB_PATH: /data/openmemory.sqlite
-       SCRAPE_INTERVAL: "15"
-       EXPORTER_PORT: "8000"
-     ports:
-       - "8000:8000"
-     volumes:
-       - openmemory-data:/data:ro
-     restart: unless-stopped
-     depends_on:
-       - openmemory
-
-   log-exporter:
-     image: ghcr.io/mikhailkogan17/cybermem-log_exporter:latest
-     container_name: cybermem-log-exporter
-     environment:
-       LOG_FILE: /var/log/traefik/access.log
-       SCRAPE_INTERVAL: "5"
-       EXPORTER_PORT: "8001"
-       DB_PATH: /data/openmemory.sqlite
-     volumes:
-       - traefik-logs:/var/log/traefik:ro
-       - openmemory-data:/data
-       - ./monitoring/log_exporter/exporter.py:/app/exporter.py:ro
-     restart: unless-stopped
-     depends_on:
-       - traefik
-       - openmemory
-
-   postgres:
-     image: postgres:15-alpine
-     container_name: cybermem-postgres
-     environment:
-       POSTGRES_DB: ${PG_DB:-openmemory}
-       POSTGRES_USER: ${PG_USER:-openmemory}
-       POSTGRES_PASSWORD: ${PG_PASSWORD:-postgres}
-     ports:
-       - "5432:5432"
-     volumes:
-       - postgres-data:/var/lib/postgresql/data
-     healthcheck:
-       test: ["CMD-SHELL", "pg_isready -U ${PG_USER:-openmemory}"]
-       interval: 10s
-       timeout: 5s
-       retries: 5
-     restart: unless-stopped
-     profiles:
-       - postgres
-
-   postgres-exporter:
-     image: prometheuscommunity/postgres-exporter:v0.15.0
-     container_name: cybermem-postgres-exporter
-     environment:
-       DATA_SOURCE_NAME: postgresql://${PG_USER:-openmemory}:${PG_PASSWORD:-postgres}@postgres:5432/${PG_DB:-openmemory}?sslmode=disable
-       PG_EXPORTER_EXTEND_QUERY_PATH: /queries.yml
-     ports:
-       - "9187:9187"
-     volumes:
-       - ./monitoring/postgres_exporter/queries.yml:/queries.yml:ro
-     restart: unless-stopped
-     depends_on:
-       - postgres
-     profiles:
-       - postgres
-
-   ollama:
-     image: ollama/ollama:latest
-     container_name: cybermem-ollama
-     ports:
-       - "11434:11434"
-     volumes:
-       - ollama-models:/root/.ollama
-     restart: unless-stopped
-     profiles:
-       - ollama
-
-   prometheus:
-     image: prom/prometheus:v2.48.0
-     container_name: cybermem-prometheus
-     command:
-       - --config.file=/etc/prometheus/prometheus.yml
-       - --storage.tsdb.path=/prometheus
-       - --storage.tsdb.retention.time=${PROM_RETENTION:-7d}
-       - --web.console.libraries=/usr/share/prometheus/console_libraries
-       - --web.console.templates=/usr/share/prometheus/consoles
-     ports:
-       - "9092:9090"
-     volumes:
-       - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
-       - prometheus-data:/prometheus
-     restart: unless-stopped
-     depends_on:
-       - db-exporter
-
-   dashboard:
-     image: ghcr.io/mikhailkogan17/cybermem-dashboard:latest
-     container_name: cybermem-dashboard
-     environment:
-       NEXT_PUBLIC_PROMETHEUS_URL: http://prometheus:9090
-       PROMETHEUS_URL: http://prometheus:9090
-       OM_API_KEY: ${OM_API_KEY:-dev-secret-key}
-       # WATCHPACK_POLLING: "true" # Enable if hot reload hangs (high CPU usage)
-     ports:
-       - "3000:3000"
-     volumes:
-       - openmemory-data:/data
-       - /var/run/docker.sock:/var/run/docker.sock
-       - ${CYBERMEM_ENV_PATH}:/app/shared.env
-     restart: unless-stopped
-     depends_on:
-       - prometheus
-
- volumes:
-   openmemory-data:
-     name: cybermem-openmemory-data
-     driver: local
-   postgres-data:
-     name: cybermem-postgres-data
-     driver: local
-   ollama-models:
-     name: cybermem-ollama-models
-     driver: local
-   prometheus-data:
-     name: cybermem-prometheus-data
-     driver: local
-   traefik-logs:
-     name: cybermem-traefik-logs
-     driver: local
-
- networks:
-   default:
-     name: cybermem-network
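With the default profile, the compose file above publishes four host ports: Traefik on 8626 (routing /health, /memory, /mcp and the other listed prefixes to OpenMemory's internal port 8080), the db-exporter on 8000, Prometheus on 9092 and the dashboard on 3000. A minimal liveness sketch in Python, assuming a local stack with those defaults unchanged:

import urllib.request

# Endpoints implied by the compose file above: Traefik forwards /health to
# OpenMemory on host port 8626; the db-exporter serves Prometheus text on 8000.
CHECKS = [
    ("openmemory (via traefik)", "http://localhost:8626/health"),
    ("db-exporter", "http://localhost:8000/metrics"),
]

for name, url in CHECKS:
    try:
        with urllib.request.urlopen(url, timeout=5) as resp:
            print(f"{name}: HTTP {resp.status}")
    except OSError as exc:  # URLError/HTTPError both subclass OSError
        print(f"{name}: unreachable ({exc})")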
package/dist/templates/envs/local.example
@@ -1,27 +0,0 @@
- # Example environment configuration
- # Copy to .env and customize
-
- # Embeddings provider: ollama (local) or openai (cloud)
- EMBEDDINGS_PROVIDER=ollama
- OLLAMA_URL=http://ollama:11434
- OPENAI_API_KEY=
-
- # Database backend: sqlite (local/rpi) or postgres (vps)
- DB_BACKEND=sqlite
- DB_PATH=/data/openmemory.sqlite
- VECTOR_BACKEND=sqlite
-
- # PostgreSQL settings (only for DB_BACKEND=postgres)
- PG_HOST=postgres
- PG_PORT=5432
- PG_DB=openmemory
- PG_USER=openmemory
- PG_PASSWORD=change-me
-
- # OpenMemory API key (optional for local mode)
- # CYBERMEM_API_KEY=
-
- # Monitoring
- PROM_RETENTION=7d
- GRAFANA_USER=admin
- GRAFANA_PASSWORD=admin
package/dist/templates/envs/rpi.example
@@ -1,27 +0,0 @@
- # Raspberry Pi environment
- # Optimized for low memory (1GB total)
- DOCKER_PLATFORM=linux/arm64
-
-
- # Embeddings (use Ollama with small models)
- EMBEDDINGS_PROVIDER=ollama
- OLLAMA_URL=http://ollama:11434
- OPENAI_API_KEY=
-
- # Database (SQLite for RPi - low memory footprint)
- DB_BACKEND=sqlite
- DB_PATH=/data/openmemory.sqlite
-
- # PostgreSQL (not used)
- PG_HOST=postgres
- PG_DB=openmemory
- PG_USER=openmemory
- PG_PASSWORD=not-used
-
- # OpenMemory
- CYBERMEM_API_KEY=key-change-me
-
- # Monitoring (short retention for disk space)
- PROM_RETENTION=3d
- GRAFANA_USER=admin
- GRAFANA_PASSWORD=admin
package/dist/templates/envs/vps.example
@@ -1,25 +0,0 @@
- # VPS production environment
- # For Hetzner CX22 or similar (2 vCPU, 4GB RAM)
-
- # Embeddings (use OpenAI for production)
- EMBEDDINGS_PROVIDER=openai
- OPENAI_API_KEY=sk-change-me-in-production
- OLLAMA_URL=
-
- # Database (PostgreSQL for production)
- DB_BACKEND=postgres
- VECTOR_BACKEND=postgres
-
- # PostgreSQL
- PG_HOST=postgres
- PG_DB=openmemory
- PG_USER=openmemory
- PG_PASSWORD=change-me-in-production-use-secrets
-
- # OpenMemory
- CYBERMEM_API_KEY=change-me-in-production-use-secrets
-
- # Monitoring
- PROM_RETENTION=30d
- GRAFANA_USER=admin
- GRAFANA_PASSWORD=change-me-in-production-use-secrets
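All three env templates above follow the same pattern: copy to .env, then replace the placeholder secrets (change-me, change-me-in-production-use-secrets) before deploying. A small pre-deploy check, as a hypothetical helper that is not part of the package, which parses a dotenv-style file and flags placeholders left in place:

import sys

def parse_env(path: str) -> dict:
    """Parse simple KEY=VALUE lines, skipping blanks and # comments."""
    values = {}
    with open(path) as fh:
        for raw in fh:
            line = raw.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            key, _, value = line.partition("=")
            values[key.strip()] = value.strip()
    return values

if __name__ == "__main__":
    env = parse_env(sys.argv[1] if len(sys.argv) > 1 else ".env")
    for key, value in env.items():
        # "change-me" is the placeholder marker used by the templates above.
        if "change-me" in value:
            print(f"WARNING: {key} still set to placeholder {value!r}")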
package/dist/templates/monitoring/db_exporter/Dockerfile
@@ -1,19 +0,0 @@
- FROM python:3.11-alpine
-
- WORKDIR /app
-
- # Install dependencies
- COPY requirements.txt .
- RUN pip install --no-cache-dir -r requirements.txt
-
- # Copy exporter script
- COPY exporter.py .
-
- # Make script executable
- RUN chmod +x exporter.py
-
- # Expose Prometheus metrics port
- EXPOSE 8000
-
- # Run exporter
- CMD ["python", "-u", "exporter.py"]
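The Dockerfile above builds the db-exporter image. For illustration, a sketch using the Docker SDK for Python (pip install docker) to build and run it locally; the context path, tag, and volume name are assumptions matching the templates above, not identifiers shipped by the package (the published image was ghcr.io/mikhailkogan17/cybermem-db_exporter):

import docker  # Docker SDK for Python: pip install docker

client = docker.from_env()

# Build from the db_exporter template directory; tag is illustrative.
image, _build_logs = client.images.build(
    path="monitoring/db_exporter",
    tag="cybermem-db-exporter:dev",
)

# Run it the way the compose file did: metrics on 8000, SQLite data
# mounted read-only from the named volume.
container = client.containers.run(
    image.id,
    detach=True,
    ports={"8000/tcp": 8000},
    environment={"DB_PATH": "/data/openmemory.sqlite", "SCRAPE_INTERVAL": "15"},
    volumes={"cybermem-openmemory-data": {"bind": "/data", "mode": "ro"}},
)
print(f"db-exporter started: {container.short_id}")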
package/dist/templates/monitoring/db_exporter/exporter.py
@@ -1,313 +0,0 @@
- #!/usr/bin/env python3
- """
- CyberMem Database Exporter for Prometheus
-
- Queries OpenMemory's database and exports per-client metrics to Prometheus.
- Replaces the complex Vector + Traefik access logs pipeline with simple DB queries.
- """
-
- import os
- import time
- import sqlite3
- import json
- from prometheus_client import Gauge, Info, generate_latest, CONTENT_TYPE_LATEST
- from flask import Flask, Response, request, jsonify
- import logging
- import threading
-
- # Configuration
- DB_PATH = os.getenv("DB_PATH", "/data/openmemory.sqlite")
- SCRAPE_INTERVAL = int(os.getenv("SCRAPE_INTERVAL", "15"))  # seconds
- EXPORTER_PORT = int(os.getenv("EXPORTER_PORT", "8000"))
-
- # Setup logging
- logging.basicConfig(
-     level=logging.INFO,
-     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
- )
- logger = logging.getLogger("db_exporter")
-
- # Prometheus metrics
- info = Info('cybermem_exporter', 'CyberMem Database Exporter Info')
- info.info({'version': '1.0.0', 'db_path': DB_PATH})
-
- memories_total = Gauge(
-     'openmemory_memories_total',
-     'Total number of memories stored',
-     ['client']
- )
-
- memories_recent_24h = Gauge(
-     'openmemory_memories_recent_24h',
-     'Memories created in the last 24 hours',
-     ['client']
- )
-
- memories_recent_1h = Gauge(
-     'openmemory_memories_recent_1h',
-     'Memories created in the last hour',
-     ['client']
- )
-
- requests_by_operation = Gauge(
-     'openmemory_requests_total',
-     'Total requests by client and operation (from cybermem_stats table)',
-     ['client_name', 'operation']
- )
-
- errors_by_operation = Gauge(
-     'openmemory_errors_total',
-     'Total errors by client and operation (from cybermem_stats table)',
-     ['client_name', 'operation']
- )
-
- sectors_count = Gauge(
-     'openmemory_sectors_total',
-     'Number of unique sectors per client',
-     ['client']
- )
-
- avg_score = Gauge(
-     'openmemory_avg_score',
-     'Average score of memories',
-     ['client']
- )
-
- # Aggregate metrics (not per-client)
- total_requests_aggregate = Gauge(
-     'openmemory_requests_aggregate_total',
-     'Total API requests (aggregate, from stats table)'
- )
-
- total_errors_aggregate = Gauge(
-     'openmemory_errors_aggregate_total',
-     'Total API errors (aggregate, from stats table)'
- )
-
- success_rate_aggregate = Gauge(
-     'openmemory_success_rate_aggregate',
-     'API success rate percentage (aggregate)'
- )
-
-
- def get_db_connection():
-     """Get SQLite database connection."""
-     try:
-         conn = sqlite3.connect(DB_PATH)
-         conn.row_factory = sqlite3.Row
-         return conn
-     except Exception as e:
-         logger.error(f"Failed to connect to database: {e}")
-         raise
-
-
- def collect_metrics():
-     """Collect all metrics from OpenMemory database."""
-     try:
-         db = get_db_connection()
-         cursor = db.cursor()
-
-         # Metric 1: Total memories per client
-         cursor.execute('''
-             SELECT user_id as client, COUNT(*) as count
-             FROM memories
-             GROUP BY user_id
-         ''')
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             memories_total.labels(client=client).set(row['count'])
-
-         logger.debug(f"Collected total memories for {cursor.rowcount} clients")
-
-         # Metric 2: Recent memories (24h)
-         # Note: created_at is stored as milliseconds since epoch
-         cursor.execute('''
-             SELECT user_id as client, COUNT(*) as count
-             FROM memories
-             WHERE created_at > ?
-             GROUP BY user_id
-         ''', [int((time.time() - 86400) * 1000)])
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             memories_recent_24h.labels(client=client).set(row['count'])
-
-         logger.debug(f"Collected 24h memories for {cursor.rowcount} clients")
-
-         # Metric 3: Recent memories (1h)
-         cursor.execute('''
-             SELECT user_id as client, COUNT(*) as count
-             FROM memories
-             WHERE created_at > ?
-             GROUP BY user_id
-         ''', [int((time.time() - 3600) * 1000)])
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             memories_recent_1h.labels(client=client).set(row['count'])
-
-         logger.debug(f"Collected 1h memories for {cursor.rowcount} clients")
-
-         # Metric 4: Per-client request stats from cybermem_stats table
-         cursor.execute('''
-             SELECT client_name, operation, count, errors
-             FROM cybermem_stats
-         ''')
-         for row in cursor.fetchall():
-             client_name = row['client_name'] or 'unknown'
-             operation = row['operation']
-             count = row['count']
-             errors = row['errors']
-             requests_by_operation.labels(client_name=client_name, operation=operation).set(count)
-             errors_by_operation.labels(client_name=client_name, operation=operation).set(errors)
-
-         logger.debug(f"Collected request stats for {cursor.rowcount} client/operation pairs")
-
-         # Metric 5: Aggregate request stats from OpenMemory's stats table
-         # Note: stats table has no client_id, so these are aggregate only
-         hour_ago_ms = int((time.time() - 3600) * 1000)
-
-         # Get total requests (sum of qps snapshots)
-         cursor.execute('''
-             SELECT SUM(count) as total
-             FROM stats
-             WHERE type = 'qps' AND ts > ?
-         ''', [hour_ago_ms])
-         total_reqs = cursor.fetchone()['total'] or 0
-         total_requests_aggregate.set(total_reqs)
-
-         # Get total errors
-         cursor.execute('''
-             SELECT COUNT(*) as total
-             FROM stats
-             WHERE type = 'error' AND ts > ?
-         ''', [hour_ago_ms])
-         total_errs = cursor.fetchone()['total'] or 0
-         total_errors_aggregate.set(total_errs)
-
-         # Calculate success rate
-         if total_reqs > 0:
-             success_rate = ((total_reqs - total_errs) / total_reqs) * 100
-             success_rate_aggregate.set(max(0.0, success_rate))  # Cap at 0% minimum
-         else:
-             success_rate_aggregate.set(100.0)  # No requests = 100% success
-
-         logger.debug(f"Collected aggregate stats: {total_reqs} reqs, {total_errs} errs")
-
-         # Metric 5: Number of unique sectors per client
-         cursor.execute('''
-             SELECT user_id as client, COUNT(DISTINCT primary_sector) as count
-             FROM memories
-             WHERE primary_sector IS NOT NULL
-             GROUP BY user_id
-         ''')
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             sectors_count.labels(client=client).set(row['count'])
-
-         logger.debug(f"Collected sectors count for {cursor.rowcount} clients")
-
-         # Metric 6: Average feedback score per client
-         cursor.execute('''
-             SELECT user_id as client, AVG(feedback_score) as avg_score
-             FROM memories
-             WHERE feedback_score IS NOT NULL
-             GROUP BY user_id
-         ''')
-         for row in cursor.fetchall():
-             client = row['client'] or 'anonymous'
-             avg_score.labels(client=client).set(row['avg_score'] or 0)
-
-         logger.debug(f"Collected average scores for {cursor.rowcount} clients")
-
-         db.close()
-         logger.info("Metrics collection completed successfully")
-
-     except Exception as e:
-         logger.error(f"Error collecting metrics: {e}", exc_info=True)
-
-
- def get_logs_from_db(start_ms: int, limit: int = 100):
-     """Get access logs from database"""
-     try:
-         db = get_db_connection()
-         cursor = db.cursor()
-
-         cursor.execute('''
-             SELECT timestamp, client_name, client_version, method, endpoint, operation, status, is_error
-             FROM cybermem_access_log
-             WHERE timestamp >= ?
-             ORDER BY timestamp DESC
-             LIMIT ?
-         ''', [start_ms, limit])
-
-         logs = []
-         for row in cursor.fetchall():
-             logs.append({
-                 'timestamp': row['timestamp'],
-                 'client_name': row['client_name'],
-                 'client_version': row['client_version'],
-                 'method': row['method'],
-                 'endpoint': row['endpoint'],
-                 'operation': row['operation'],
-                 'status': row['status'],
-                 'is_error': bool(row['is_error'])
-             })
-
-         db.close()
-         return logs
-     except Exception as e:
-         logger.error(f"Error fetching logs: {e}", exc_info=True)
-         return []
-
-
- # Create Flask app
- app = Flask(__name__)
-
- @app.route('/metrics')
- def metrics():
-     """Prometheus metrics endpoint"""
-     return Response(generate_latest(), mimetype=CONTENT_TYPE_LATEST)
-
- @app.route('/api/logs')
- def api_logs():
-     """Access logs API endpoint"""
-     try:
-         start_ms = int(request.args.get('start', 0))
-         limit = int(request.args.get('limit', 100))
-
-         logs = get_logs_from_db(start_ms, limit)
-         return jsonify({'logs': logs})
-     except Exception as e:
-         logger.error(f"Error in /api/logs: {e}", exc_info=True)
-         return jsonify({'error': str(e)}), 500
-
- def metrics_collection_loop():
-     """Background thread for collecting metrics"""
-     logger.info("Starting metrics collection loop")
-     while True:
-         try:
-             collect_metrics()
-             time.sleep(SCRAPE_INTERVAL)
-         except Exception as e:
-             logger.error(f"Error in metrics collection: {e}", exc_info=True)
-             time.sleep(SCRAPE_INTERVAL)
-
- def main():
-     """Start the exporter and metrics collection loop."""
-     logger.info(f"Starting CyberMem Database Exporter on port {EXPORTER_PORT}")
-     logger.info(f"Database path: {DB_PATH}")
-     logger.info(f"Scrape interval: {SCRAPE_INTERVAL}s")
-
-     # Start metrics collection in background thread
-     metrics_thread = threading.Thread(target=metrics_collection_loop, daemon=True)
-     metrics_thread.start()
-
-     # Start Flask HTTP server
-     logger.info(f"Starting HTTP server on http://0.0.0.0:{EXPORTER_PORT}")
-     logger.info(f"  Metrics: http://0.0.0.0:{EXPORTER_PORT}/metrics")
-     logger.info(f"  Logs API: http://0.0.0.0:{EXPORTER_PORT}/api/logs")
-
-     app.run(host='0.0.0.0', port=EXPORTER_PORT, threaded=True)
-
-
- if __name__ == '__main__':
-     main()
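The deleted exporter served two HTTP endpoints: /metrics (Prometheus text format from the Gauges above) and /api/logs (JSON rows from cybermem_access_log, filtered by a start timestamp in milliseconds and a limit). One caveat in the code above: sqlite3 sets cursor.rowcount to -1 for SELECT statements, so the per-metric debug lines always report -1 clients. A minimal client sketch, assuming an exporter reachable on localhost:8000:

import json
import time
import urllib.request

BASE = "http://localhost:8000"

# Scrape the Prometheus endpoint and keep only the per-client memory totals.
with urllib.request.urlopen(f"{BASE}/metrics", timeout=5) as resp:
    for line in resp.read().decode().splitlines():
        if line.startswith("openmemory_memories_total"):
            print(line)

# Fetch up to 20 access-log entries from the last hour; the exporter expects
# 'start' as milliseconds since epoch, matching its timestamp column.
hour_ago_ms = int((time.time() - 3600) * 1000)
url = f"{BASE}/api/logs?start={hour_ago_ms}&limit=20"
with urllib.request.urlopen(url, timeout=5) as resp:
    for entry in json.load(resp)["logs"]:
        print(entry["timestamp"], entry["client_name"], entry["endpoint"], entry["status"])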
package/dist/templates/monitoring/db_exporter/requirements.txt
@@ -1,2 +0,0 @@
- prometheus-client==0.19.0
- flask==3.0.0