coppermind-cmo 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. coppermind_cmo-0.1.0/PKG-INFO +17 -0
  2. coppermind_cmo-0.1.0/pyproject.toml +38 -0
  3. coppermind_cmo-0.1.0/setup.cfg +4 -0
  4. coppermind_cmo-0.1.0/src/coppermind_cmo/__init__.py +3 -0
  5. coppermind_cmo-0.1.0/src/coppermind_cmo/__main__.py +6 -0
  6. coppermind_cmo-0.1.0/src/coppermind_cmo/config.py +82 -0
  7. coppermind_cmo-0.1.0/src/coppermind_cmo/db.py +121 -0
  8. coppermind_cmo-0.1.0/src/coppermind_cmo/embedding_client.py +120 -0
  9. coppermind_cmo-0.1.0/src/coppermind_cmo/embedding_server.py +71 -0
  10. coppermind_cmo-0.1.0/src/coppermind_cmo/errors.py +44 -0
  11. coppermind_cmo-0.1.0/src/coppermind_cmo/llm_client.py +197 -0
  12. coppermind_cmo-0.1.0/src/coppermind_cmo/log.py +58 -0
  13. coppermind_cmo-0.1.0/src/coppermind_cmo/server.py +203 -0
  14. coppermind_cmo-0.1.0/src/coppermind_cmo/supabase-ca.pem +23 -0
  15. coppermind_cmo-0.1.0/src/coppermind_cmo/tools/__init__.py +12 -0
  16. coppermind_cmo-0.1.0/src/coppermind_cmo/tools/brand.py +221 -0
  17. coppermind_cmo-0.1.0/src/coppermind_cmo/tools/intelligence.py +350 -0
  18. coppermind_cmo-0.1.0/src/coppermind_cmo/tools/memories.py +306 -0
  19. coppermind_cmo-0.1.0/src/coppermind_cmo/tools/minds.py +141 -0
  20. coppermind_cmo-0.1.0/src/coppermind_cmo/utils.py +13 -0
  21. coppermind_cmo-0.1.0/src/coppermind_cmo.egg-info/PKG-INFO +17 -0
  22. coppermind_cmo-0.1.0/src/coppermind_cmo.egg-info/SOURCES.txt +39 -0
  23. coppermind_cmo-0.1.0/src/coppermind_cmo.egg-info/dependency_links.txt +1 -0
  24. coppermind_cmo-0.1.0/src/coppermind_cmo.egg-info/requires.txt +15 -0
  25. coppermind_cmo-0.1.0/src/coppermind_cmo.egg-info/top_level.txt +1 -0
  26. coppermind_cmo-0.1.0/tests/test_api_contracts.py +1027 -0
  27. coppermind_cmo-0.1.0/tests/test_brand.py +284 -0
  28. coppermind_cmo-0.1.0/tests/test_config.py +169 -0
  29. coppermind_cmo-0.1.0/tests/test_db.py +327 -0
  30. coppermind_cmo-0.1.0/tests/test_embedding_client.py +203 -0
  31. coppermind_cmo-0.1.0/tests/test_embedding_server.py +130 -0
  32. coppermind_cmo-0.1.0/tests/test_errors.py +106 -0
  33. coppermind_cmo-0.1.0/tests/test_integration.py +364 -0
  34. coppermind_cmo-0.1.0/tests/test_integration_smoke.py +29 -0
  35. coppermind_cmo-0.1.0/tests/test_intelligence.py +467 -0
  36. coppermind_cmo-0.1.0/tests/test_llm_client.py +289 -0
  37. coppermind_cmo-0.1.0/tests/test_log.py +54 -0
  38. coppermind_cmo-0.1.0/tests/test_memories.py +426 -0
  39. coppermind_cmo-0.1.0/tests/test_minds.py +89 -0
  40. coppermind_cmo-0.1.0/tests/test_performance.py +214 -0
  41. coppermind_cmo-0.1.0/tests/test_server.py +285 -0
@@ -0,0 +1,17 @@
1
+ Metadata-Version: 2.4
2
+ Name: coppermind-cmo
3
+ Version: 0.1.0
4
+ Summary: AI memory for fractional CMOs
5
+ Requires-Python: >=3.10
6
+ Requires-Dist: mcp>=1.0.0
7
+ Requires-Dist: psycopg2-binary>=2.9.11
8
+ Requires-Dist: httpx>=0.27.0
9
+ Provides-Extra: self-hosted
10
+ Requires-Dist: flask>=3.0.0; extra == "self-hosted"
11
+ Requires-Dist: sentence-transformers>=3.0.0; extra == "self-hosted"
12
+ Requires-Dist: waitress>=3.0.0; extra == "self-hosted"
13
+ Provides-Extra: llm
14
+ Requires-Dist: anthropic>=0.80.0; extra == "llm"
15
+ Provides-Extra: dev
16
+ Requires-Dist: pytest>=8.0; extra == "dev"
17
+ Requires-Dist: pytest-cov>=5.0; extra == "dev"
@@ -0,0 +1,38 @@
1
+ [build-system]
2
+ requires = ["setuptools>=68.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "coppermind-cmo"
7
+ version = "0.1.0"
8
+ description = "AI memory for fractional CMOs"
9
+ requires-python = ">=3.10"
10
+ dependencies = [
11
+ "mcp>=1.0.0",
12
+ "psycopg2-binary>=2.9.11",
13
+ "httpx>=0.27.0",
14
+ ]
15
+
16
+ [project.optional-dependencies]
17
+ self-hosted = [
18
+ "flask>=3.0.0",
19
+ "sentence-transformers>=3.0.0",
20
+ "waitress>=3.0.0",
21
+ ]
22
+ llm = [
23
+ "anthropic>=0.80.0",
24
+ ]
25
+ dev = [
26
+ "pytest>=8.0",
27
+ "pytest-cov>=5.0",
28
+ ]
29
+
30
+ [tool.pytest.ini_options]
31
+ testpaths = ["tests", "benchmark/tests"]
32
+ pythonpath = ["src", "."]
33
+
34
+ [tool.setuptools.packages.find]
35
+ where = ["src"]
36
+
37
+ [tool.setuptools.package-data]
38
+ coppermind_cmo = ["supabase-ca.pem"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,3 @@
1
+ """Coppermind CMO — AI memory for fractional CMOs."""
2
+
3
+ __version__ = "0.1.0"
@@ -0,0 +1,6 @@
1
+ """Entry point: python -m coppermind_cmo runs the MCP server."""
2
+
3
+ from coppermind_cmo.server import main
4
+
5
# Module executed via `python -m coppermind_cmo` — hand off to the server CLI.
if __name__ == "__main__":
    main()
@@ -0,0 +1,82 @@
1
+ """Configuration from COPPERMIND_* environment variables.
2
+
3
+ All numeric env vars are validated as integers at parse time (not on first use)
4
+ per SERVER_SPEC.md startup requirements.
5
+
6
+ Provider detection:
7
+ COPPERMIND_API_KEY set → hosted mode (gateway for LLM+embedding)
8
+ COPPERMIND_OLLAMA_HOST set → self-hosted LLM (Ollama)
9
+ COPPERMIND_EMBEDDING_HOST set → self-hosted embeddings (local server)
10
+ Neither → degraded mode (same as Sprint 1 when services are down)
11
+ """
12
+
13
+ import os
14
+
15
+
16
+ def _int_env(name: str, default: int) -> int:
17
+ val = os.environ.get(name)
18
+ if val is None:
19
+ return default
20
+ try:
21
+ return int(val)
22
+ except ValueError:
23
+ raise ValueError(f"{name} must be an integer, got: {val!r}")
24
+
25
+
26
class Config:
    """Parsed configuration from COPPERMIND_* environment variables.

    Provider detection happens once at construction time: an API key
    selects hosted mode (gateway fronts LLM and embeddings); otherwise
    each self-hosted service is opted into by setting its host variable.
    """

    def __init__(self):
        env = os.environ.get

        # Hosted mode credentials and endpoint.
        self.api_key: str = env("COPPERMIND_API_KEY", "")
        self.gateway_url: str = env("COPPERMIND_GATEWAY_URL", "https://api.coppermind.app")

        # Self-hosted PostgreSQL connection settings.
        self.pg_host: str = env("COPPERMIND_PG_HOST", "localhost")
        self.pg_port: int = _int_env("COPPERMIND_PG_PORT", 5432)
        self.pg_db: str = env("COPPERMIND_PG_DB", "coppermind_cmo")
        self.pg_user: str = env("COPPERMIND_PG_USER", "coppermind")
        self.pg_password: str = env("COPPERMIND_PG_PASSWORD", "")

        # Self-hosted embedding service endpoint.
        self.embedding_host: str = env("COPPERMIND_EMBEDDING_HOST", "localhost")
        self.embedding_port: int = _int_env("COPPERMIND_EMBEDDING_PORT", 8400)

        # Self-hosted Ollama endpoint and model.
        self.ollama_host: str = env("COPPERMIND_OLLAMA_HOST", "localhost")
        self.ollama_port: int = _int_env("COPPERMIND_OLLAMA_PORT", 11434)
        self.ollama_model: str = env("COPPERMIND_OLLAMA_MODEL", "gemma3:12b")

        # Claude model selection (used in hosted mode).
        self.claude_fast_model: str = env(
            "COPPERMIND_CLAUDE_FAST_MODEL", "claude-haiku-4-5-20251001"
        )
        self.claude_synthesis_model: str = env(
            "COPPERMIND_CLAUDE_SYNTHESIS_MODEL", "claude-sonnet-4-20250514"
        )

        # Provider detection.
        if self.api_key:
            # Hosted: the gateway handles both LLM and embeddings (512 dims).
            self.llm_provider: str | None = "gateway"
            self.embedding_provider: str | None = "gateway"
            self.embedding_dims: int = 512
            self.max_prompt_chars: int = 40000
        else:
            # Self-hosted: each service is enabled only when its host var is set
            # (an empty value counts as unset). Unset providers → degraded mode.
            self.llm_provider = "ollama" if env("COPPERMIND_OLLAMA_HOST") else None
            self.embedding_provider = "local" if env("COPPERMIND_EMBEDDING_HOST") else None
            self.embedding_dims = 384
            self.max_prompt_chars = 13000

    @property
    def embedding_url(self) -> str:
        """Base URL of the local embedding service."""
        return f"http://{self.embedding_host}:{self.embedding_port}"

    @property
    def ollama_url(self) -> str:
        """Base URL of the Ollama server."""
        return f"http://{self.ollama_host}:{self.ollama_port}"
@@ -0,0 +1,121 @@
1
+ """Database connection pool and query helpers.
2
+
3
+ Adapted from old Coppermind lib/database.py. Simplified for CMO use case:
4
+ - Single config source (COPPERMIND_PG_* env vars via Config)
5
+ - psycopg2 ThreadedConnectionPool
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import os
11
+ import threading
12
+ from contextlib import contextmanager
13
+ from typing import Any
14
+
15
+ import psycopg2
16
+ from psycopg2.pool import PoolError, ThreadedConnectionPool
17
+
18
+ from coppermind_cmo.config import Config
19
+ from coppermind_cmo.log import ToolLogger
20
+
21
logger = ToolLogger("db")

# Connection pool bounds passed to psycopg2.ThreadedConnectionPool:
# keep one warm connection, allow bursts up to ten.
POOL_MIN = 1
POOL_MAX = 10
25
+
26
+
27
class Database:
    """PostgreSQL connection pool wrapper.

    The pool is created lazily on first use, so constructing a Database
    never touches the network. Every query helper borrows a connection,
    executes, commits, and returns the connection to the pool.
    """

    def __init__(self, config: Config):
        self._config = config
        # Created lazily by _ensure_pool().
        self._pool: ThreadedConnectionPool | None = None
        # Guards pool creation (double-checked locking in _ensure_pool).
        self._pool_lock = threading.Lock()

    def _ensure_pool(self):
        """Create the connection pool on first call. Thread-safe."""
        # Fast path: pool already exists, no locking needed.
        if self._pool is not None:
            return
        with self._pool_lock:
            # Re-check under the lock — another thread may have won the race.
            if self._pool is None:
                ssl_kwargs = {}
                if self._config.api_key:
                    # Hosted mode: pin the bundled CA certificate and require
                    # server-certificate verification for the remote database.
                    ca_path = os.path.join(
                        os.path.dirname(__file__), "supabase-ca.pem"
                    )
                    ssl_kwargs = {
                        "sslmode": "verify-ca",
                        "sslrootcert": ca_path,
                    }
                self._pool = ThreadedConnectionPool(
                    POOL_MIN, POOL_MAX,
                    host=self._config.pg_host,
                    port=self._config.pg_port,
                    dbname=self._config.pg_db,
                    user=self._config.pg_user,
                    password=self._config.pg_password,
                    connect_timeout=5,
                    # TCP keepalives so idle pooled connections are detected
                    # as dead quickly instead of hanging a later query.
                    keepalives=1,
                    keepalives_idle=30,
                    keepalives_interval=10,
                    keepalives_count=5,
                    **ssl_kwargs,
                )

    @contextmanager
    def connection(self):
        """Borrow a connection from the pool. Auto-returned on exit.

        If the caller's block raises, the transaction is rolled back; if
        even the rollback fails the connection is presumed dead and is
        closed rather than returned to the pool.

        Raises:
            PoolError: when every pooled connection is already in use.
        """
        self._ensure_pool()
        try:
            conn = self._pool.getconn()
        except PoolError:
            logger.warn("Connection pool exhausted")
            raise
        _force_close = False
        try:
            yield conn
        except Exception:
            try:
                conn.rollback()
            except Exception:
                _force_close = True  # rollback failed — connection is dead
            raise
        finally:
            if conn.closed or _force_close:
                # close=True discards the broken connection; the pool opens
                # a fresh one on the next getconn().
                self._pool.putconn(conn, close=True)
            else:
                self._pool.putconn(conn)

    def fetch_all(self, sql: str, params: list[Any] | None = None) -> list[tuple]:
        """Execute *sql* and return all result rows."""
        with self.connection() as conn:
            with conn.cursor() as cur:
                cur.execute(sql, params)
                result = cur.fetchall()
            conn.commit()
            return result

    def fetch_one(self, sql: str, params: list[Any] | None = None) -> tuple | None:
        """Execute *sql* and return the first result row, or None."""
        with self.connection() as conn:
            with conn.cursor() as cur:
                cur.execute(sql, params)
                result = cur.fetchone()
            conn.commit()
            return result

    def execute(self, sql: str, params: list[Any] | None = None):
        """Execute a statement that returns no rows (INSERT/UPDATE/DELETE)."""
        with self.connection() as conn:
            with conn.cursor() as cur:
                cur.execute(sql, params)
            conn.commit()

    def execute_returning(self, sql: str, params: list[Any] | None = None) -> tuple | None:
        """Execute a statement with a RETURNING clause; return its first row."""
        with self.connection() as conn:
            with conn.cursor() as cur:
                cur.execute(sql, params)
                row = cur.fetchone()
            conn.commit()
            return row

    def close(self):
        """Close every pooled connection and reset to the lazy (unopened) state."""
        if self._pool is not None:
            self._pool.closeall()
            self._pool = None
@@ -0,0 +1,120 @@
1
+ """HTTP client for embedding services.
2
+
3
+ Supports two providers:
4
+ - Gateway (hosted): routes through Coppermind gateway → Voyage AI (512 dims)
5
+ - Local (self-hosted): sentence-transformers service on localhost:8400 (384 dims)
6
+
7
+ Returns None on any failure — callers handle degraded mode.
8
+ """
9
+
10
+ from typing import Optional
11
+
12
+ import httpx
13
+
14
+ from coppermind_cmo.config import Config
15
+ from coppermind_cmo.log import ToolLogger
16
+
17
+ logger = ToolLogger("embedding")
18
+
19
+ # Legacy constant for backward compatibility in tests
20
+ EXPECTED_DIMS = 384
21
+
22
+
23
def _get_embedding_local(text: str, base_url: str, expected_dims: int, timeout: float = 10) -> Optional[list[float]]:
    """Fetch an embedding from the local sentence-transformers service.

    Returns None on any transport or validation failure; callers treat a
    None result as degraded mode.
    """
    endpoint = f"{base_url.rstrip('/')}/embed"
    try:
        response = httpx.post(endpoint, json={"text": text}, timeout=timeout)
        response.raise_for_status()
        vector = response.json()["embedding"]

        # Validate shape, then element types, then dimensionality — in that
        # order, so the warning names the first problem found.
        if not isinstance(vector, list) or not vector:
            logger.warn("Invalid embedding shape: expected non-empty list")
            return None
        if any(not isinstance(v, (int, float)) for v in vector):
            logger.warn("Invalid embedding: contains non-numeric elements")
            return None
        if len(vector) != expected_dims:
            logger.warn(f"Embedding dimension mismatch: got {len(vector)}, expected {expected_dims}")
            return None

        return vector
    except Exception as exc:
        logger.warn(f"Embedding request failed: {exc}")
        return None
48
+
49
+
50
def _get_embedding_gateway(text: str, config: Config, timeout: float = 10) -> Optional[list[float]]:
    """Fetch an embedding through the Coppermind gateway (Voyage AI upstream).

    Returns None on any transport or validation failure.
    """
    try:
        response = httpx.post(
            f"{config.gateway_url.rstrip('/')}/v1/embed",
            json={"text": text},
            headers={"Authorization": f"Bearer {config.api_key}"},
            timeout=timeout,
        )
        response.raise_for_status()
        payload = response.json()
        vector = payload["embedding"]

        if not isinstance(vector, list) or not vector:
            logger.warn("Invalid gateway embedding shape: expected non-empty list")
            return None
        if any(not isinstance(v, (int, float)) for v in vector):
            logger.warn("Invalid gateway embedding: contains non-numeric elements")
            return None
        # Prefer the dimensionality the gateway reports; fall back to the
        # configured expectation when the field is absent.
        expected = payload.get("dimensions", config.embedding_dims)
        if len(vector) != expected:
            logger.warn(f"Gateway embedding dimension mismatch: got {len(vector)}, expected {expected}")
            return None

        return vector
    except Exception as exc:
        logger.warn(f"Gateway embedding request failed: {exc}")
        return None
78
+
79
+
80
def get_embedding(text: str, config: Config, timeout: float = 10) -> Optional[list[float]]:
    """Embed *text* with whichever provider the config selected.

    Returns None when no embedding provider is configured (degraded mode).
    """
    provider = config.embedding_provider
    if provider == "gateway":
        return _get_embedding_gateway(text, config, timeout)
    if provider == "local":
        return _get_embedding_local(text, config.embedding_url, config.embedding_dims, timeout)
    return None
88
+
89
+
90
def check_health(config: Config, timeout: float = 5) -> bool:
    """Return True when the configured embedding provider responds healthily.

    For the local provider this also verifies that the service's reported
    dimensionality matches the configured expectation.
    """
    provider = config.embedding_provider

    if provider == "gateway":
        try:
            response = httpx.get(
                f"{config.gateway_url.rstrip('/')}/v1/health",
                headers={"Authorization": f"Bearer {config.api_key}"},
                timeout=timeout,
            )
            return response.status_code == 200
        except Exception:
            return False

    if provider == "local":
        try:
            response = httpx.get(f"{config.embedding_url.rstrip('/')}/health", timeout=timeout)
            if response.status_code != 200:
                return False
            reported = response.json().get("dimensions")
            if reported is not None and reported != config.embedding_dims:
                logger.warn(
                    f"Embedding service dimension mismatch: reports {reported}, expected {config.embedding_dims}"
                )
                return False
            return True
        except Exception:
            return False

    # No provider configured: report unhealthy.
    return False
@@ -0,0 +1,71 @@
1
+ """Standalone embedding HTTP server using sentence-transformers.
2
+
3
+ Run: python -m coppermind_cmo.embedding_server
4
+ Serves on port 8400 by default.
5
+ """
6
+
7
+ import os
8
+ import logging
9
+ import threading
10
+
11
+ from flask import Flask, request, jsonify
12
+
13
logger = logging.getLogger(__name__)
app = Flask(__name__)

# Shared model singleton, loaded lazily on first use (see _get_model).
_model = None
_model_lock = threading.Lock()
# all-MiniLM-L6-v2 produces 384-dimensional embeddings.
MODEL_NAME = "all-MiniLM-L6-v2"
DIMENSIONS = 384
20
+
21
+
22
def _get_model():
    """Return the shared SentenceTransformer, loading it on first call.

    Uses double-checked locking so concurrent first requests load the
    model exactly once.
    """
    global _model
    if _model is None:
        with _model_lock:
            if _model is None:
                from sentence_transformers import SentenceTransformer
                _model = SentenceTransformer(MODEL_NAME)
                logger.info("Loaded embedding model: %s", MODEL_NAME)
    return _model
32
+
33
+
34
+ @app.route("/health")
35
+ def health():
36
+ try:
37
+ _get_model()
38
+ return jsonify({"status": "ok", "model": MODEL_NAME, "dimensions": DIMENSIONS})
39
+ except Exception as e:
40
+ logger.error("Model load failed: %s", e)
41
+ return jsonify({"status": "error", "message": "Embedding model unavailable"}), 503
42
+
43
+
44
+ @app.route("/embed", methods=["POST"])
45
+ def embed():
46
+ data = request.get_json()
47
+ if not data or "text" not in data:
48
+ return jsonify({"error": "Missing 'text' field"}), 400
49
+
50
+ text = data["text"]
51
+ if not isinstance(text, str) or not text.strip():
52
+ return jsonify({"error": "'text' must be a non-empty string"}), 400
53
+ if len(text) > 50000:
54
+ return jsonify({"error": "Text exceeds 50,000 character limit"}), 400
55
+
56
+ model = _get_model()
57
+ embedding = model.encode(text, convert_to_numpy=True).tolist()
58
+ return jsonify({"embedding": embedding})
59
+
60
+
61
def main():
    """Entry point: pre-load the embedding model, then serve forever."""
    listen_port = int(os.environ.get("COPPERMIND_EMBEDDING_PORT", "8400"))
    logger.info("Starting embedding server on port %d", listen_port)
    _get_model()  # Pay the model-load cost up front, not on the first request.
    from waitress import serve
    # Binds to loopback only, so the service is reachable solely from this host.
    serve(app, host="127.0.0.1", port=listen_port, threads=4)
67
+
68
+
69
+ if __name__ == "__main__":
70
+ logging.basicConfig(level=logging.INFO)
71
+ main()
@@ -0,0 +1,44 @@
1
+ """Error response helpers per SERVER_SPEC.md error format."""
2
+
3
+ from typing import Any
4
+
5
+
6
def tool_error(code: str, message: str) -> dict[str, Any]:
    """Build the standard tool error payload: error flag, code, and message."""
    payload: dict[str, Any] = {"error": True}
    payload["code"] = code
    payload["message"] = message
    return payload
8
+
9
+
10
def NO_ACTIVE_MIND() -> dict[str, Any]:
    """Error: no client has been selected via switch_client yet."""
    return tool_error(
        "NO_ACTIVE_MIND",
        "No active client. Call switch_client first.",
    )
12
+
13
+
14
def MIND_NOT_FOUND() -> dict[str, Any]:
    """Error: the requested mind does not exist."""
    return tool_error(
        "MIND_NOT_FOUND",
        "Mind not found. Use list_minds to see available clients.",
    )
16
+
17
+
18
def MIND_ARCHIVED(name: str) -> dict[str, Any]:
    """Error: the named mind exists but is archived."""
    detail = f"Mind '{name}' is archived. Unarchive via direct DB access."
    return tool_error("MIND_ARCHIVED", detail)
20
+
21
+
22
def INVALID_INPUT(message: str) -> dict[str, Any]:
    """Error: caller-supplied input failed validation; *message* explains why."""
    return tool_error(code="INVALID_INPUT", message=message)
24
+
25
+
26
def INVALID_MEMORY_TYPE(value: str) -> dict[str, Any]:
    """Error: *value* is not one of the recognised memory types."""
    valid_types = "decision, preference, campaign_outcome, commitment, stakeholder, fact"
    return tool_error(
        "INVALID_MEMORY_TYPE",
        f"Invalid memory_type '{value}'. Valid types: {valid_types}",
    )
29
+
30
+
31
def INVALID_BRAND_DNA(message: str) -> dict[str, Any]:
    """Error: brand DNA payload failed validation; *message* explains why."""
    return tool_error(code="INVALID_BRAND_DNA", message=message)
33
+
34
+
35
def SERVICE_UNAVAILABLE(service: str) -> dict[str, Any]:
    """Error: a backing service cannot be reached."""
    detail = f"{service} is unreachable."
    return tool_error("SERVICE_UNAVAILABLE", detail)
37
+
38
+
39
def MEMORY_NOT_FOUND(memory_id: str) -> dict[str, Any]:
    """Error: no such memory visible to the active mind."""
    detail = f"Memory '{memory_id}' not found or belongs to a different mind."
    return tool_error("MEMORY_NOT_FOUND", detail)
41
+
42
+
43
def INVALID_QUERY() -> dict[str, Any]:
    """Error: the search query string was empty."""
    return tool_error("INVALID_QUERY", "Query cannot be empty.")