shared-context-cache-mcp-server 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,8 @@
1
+ __pycache__/
2
+ *.py[cod]
3
+ *.egg-info/
4
+ dist/
5
+ build/
6
+ .venv/
7
+ .env
8
+ *.env
@@ -0,0 +1,91 @@
1
+ Metadata-Version: 2.4
2
+ Name: shared-context-cache-mcp-server
3
+ Version: 0.1.0
4
+ Summary: MCP server for shared context caching — AI agents share computed results to reduce token cost and latency
5
+ Project-URL: Homepage, https://github.com/AiAgentKarl/shared-context-cache-mcp-server
6
+ Project-URL: Repository, https://github.com/AiAgentKarl/shared-context-cache-mcp-server
7
+ Author: AiAgentKarl
8
+ License: MIT
9
+ Keywords: agent-economy,ai-agents,cache,context-sharing,mcp,model-context-protocol,shared-context,token-optimization
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3.10
15
+ Classifier: Programming Language :: Python :: 3.11
16
+ Classifier: Programming Language :: Python :: 3.12
17
+ Classifier: Programming Language :: Python :: 3.13
18
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
19
+ Classifier: Topic :: Software Development :: Libraries
20
+ Requires-Python: >=3.10
21
+ Requires-Dist: httpx>=0.27.0
22
+ Requires-Dist: mcp>=1.0.0
23
+ Description-Content-Type: text/markdown
24
+
25
+ # shared-context-cache-mcp-server
26
+
27
+ **MCP server for shared context caching** — AI agents share computed results to reduce token cost and latency.
28
+
29
+ [![PyPI](https://img.shields.io/pypi/v/shared-context-cache-mcp-server)](https://pypi.org/project/shared-context-cache-mcp-server/)
30
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
31
+
32
+ ## Why?
33
+
34
+ Every AI agent constantly re-computes the same results: weather lookups, price checks, document summaries, research queries. With this MCP server, agents share their computed results through a common cache.
35
+
36
+ **Network effect:** More agents caching → more cache hits → everyone saves tokens and latency.
37
+
38
+ > Like a CDN, but for agent intelligence.
39
+
40
+ ## Install
41
+
42
+ ```bash
43
+ pip install shared-context-cache-mcp-server
44
+ ```
45
+
46
+ ## Tools
47
+
48
+ | Tool | Description |
49
+ |------|-------------|
50
+ | `cache_lookup` | Look up a cached result by key — check before computing |
51
+ | `cache_search` | Search cache by keywords — find relevant precomputed results |
52
+ | `cache_store` | Store a computed result for other agents to reuse |
53
+ | `cache_stats` | Get cache performance stats (hits, misses, cost savings) |
54
+ | `cache_list` | List available cache entries, optionally filtered by tags |
55
+
56
+ ## Usage Pattern
57
+
58
+ ```
59
+ 1. Before computing: cache_search("weather berlin") → cache_lookup("weather:berlin:today")
60
+ 2. Cache hit? → Use the stored result directly, no API call needed
61
+ 3. Cache miss? → Compute the result, then: cache_store(key, value, tags="weather,berlin")
62
+ 4. Other agents now benefit from your computation
63
+ ```
64
+
65
+ ## Claude Desktop Config
66
+
67
+ ```json
68
+ {
69
+ "mcpServers": {
70
+ "shared-context-cache": {
71
+ "command": "shared-context-cache-mcp-server"
72
+ }
73
+ }
74
+ }
75
+ ```
76
+
77
+ ## Cache Key Conventions
78
+
79
+ Use descriptive, hierarchical keys:
80
+ - `weather:berlin:2026-03-28`
81
+ - `research:arxiv:2501.00001:summary`
82
+ - `price:bitcoin:usd:2026-03-28`
83
+ - `analysis:company:AAPL:q1-2026`
84
+
85
+ ## Backend
86
+
87
+ Powered by [agent-apis.vercel.app/api/cache](https://agent-apis.vercel.app/api/cache) — a shared cache API built for the agent economy.
88
+
89
+ ## License
90
+
91
+ MIT — [AiAgentKarl](https://github.com/AiAgentKarl)
@@ -0,0 +1,67 @@
1
+ # shared-context-cache-mcp-server
2
+
3
+ **MCP server for shared context caching** — AI agents share computed results to reduce token cost and latency.
4
+
5
+ [![PyPI](https://img.shields.io/pypi/v/shared-context-cache-mcp-server)](https://pypi.org/project/shared-context-cache-mcp-server/)
6
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
7
+
8
+ ## Why?
9
+
10
+ Every AI agent constantly re-computes the same results: weather lookups, price checks, document summaries, research queries. With this MCP server, agents share their computed results through a common cache.
11
+
12
+ **Network effect:** More agents caching → more cache hits → everyone saves tokens and latency.
13
+
14
+ > Like a CDN, but for agent intelligence.
15
+
16
+ ## Install
17
+
18
+ ```bash
19
+ pip install shared-context-cache-mcp-server
20
+ ```
21
+
22
+ ## Tools
23
+
24
+ | Tool | Description |
25
+ |------|-------------|
26
+ | `cache_lookup` | Look up a cached result by key — check before computing |
27
+ | `cache_search` | Search cache by keywords — find relevant precomputed results |
28
+ | `cache_store` | Store a computed result for other agents to reuse |
29
+ | `cache_stats` | Get cache performance stats (hits, misses, cost savings) |
30
+ | `cache_list` | List available cache entries, optionally filtered by tags |
31
+
32
+ ## Usage Pattern
33
+
34
+ ```
35
+ 1. Before computing: cache_search("weather berlin") → cache_lookup("weather:berlin:today")
36
+ 2. Cache hit? → Use the stored result directly, no API call needed
37
+ 3. Cache miss? → Compute the result, then: cache_store(key, value, tags="weather,berlin")
38
+ 4. Other agents now benefit from your computation
39
+ ```
40
+
41
+ ## Claude Desktop Config
42
+
43
+ ```json
44
+ {
45
+ "mcpServers": {
46
+ "shared-context-cache": {
47
+ "command": "shared-context-cache-mcp-server"
48
+ }
49
+ }
50
+ }
51
+ ```
52
+
53
+ ## Cache Key Conventions
54
+
55
+ Use descriptive, hierarchical keys:
56
+ - `weather:berlin:2026-03-28`
57
+ - `research:arxiv:2501.00001:summary`
58
+ - `price:bitcoin:usd:2026-03-28`
59
+ - `analysis:company:AAPL:q1-2026`
60
+
61
+ ## Backend
62
+
63
+ Powered by [agent-apis.vercel.app/api/cache](https://agent-apis.vercel.app/api/cache) — a shared cache API built for the agent economy.
64
+
65
+ ## License
66
+
67
+ MIT — [AiAgentKarl](https://github.com/AiAgentKarl)
@@ -0,0 +1,45 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "shared-context-cache-mcp-server"
7
+ version = "0.1.0"
8
+ description = "MCP server for shared context caching — AI agents share computed results to reduce token cost and latency"
9
+ readme = "README.md"
10
+ requires-python = ">=3.10"
11
+ license = { text = "MIT" }
12
+ authors = [{ name = "AiAgentKarl" }]
13
+ keywords = [
14
+ "mcp",
15
+ "model-context-protocol",
16
+ "ai-agents",
17
+ "cache",
18
+ "shared-context",
19
+ "agent-economy",
20
+ "context-sharing",
21
+ "token-optimization",
22
+ ]
23
+ classifiers = [
24
+ "Development Status :: 4 - Beta",
25
+ "Intended Audience :: Developers",
26
+ "License :: OSI Approved :: MIT License",
27
+ "Programming Language :: Python :: 3",
28
+ "Programming Language :: Python :: 3.10",
29
+ "Programming Language :: Python :: 3.11",
30
+ "Programming Language :: Python :: 3.12",
31
+ "Programming Language :: Python :: 3.13",
32
+ "Topic :: Software Development :: Libraries",
33
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
34
+ ]
35
+ dependencies = ["mcp>=1.0.0", "httpx>=0.27.0"]
36
+
37
+ [project.urls]
38
+ Homepage = "https://github.com/AiAgentKarl/shared-context-cache-mcp-server"
39
+ Repository = "https://github.com/AiAgentKarl/shared-context-cache-mcp-server"
40
+
41
+ [project.scripts]
42
+ shared-context-cache-mcp-server = "src.server:main"
43
+
44
+ [tool.hatch.build.targets.wheel]
45
+ packages = ["src"]
File without changes
@@ -0,0 +1,69 @@
1
+ """HTTP-Client fuer den Shared Context Cache (agent-apis.vercel.app/api/cache)."""
2
+
3
+ import httpx
4
+
5
+ CACHE_BASE_URL = "https://agent-apis.vercel.app/api/cache"
6
+
7
+ # Standard-Timeout in Sekunden
8
+ TIMEOUT = 15
9
+
10
+
11
async def get_cache_entry(key: str) -> dict:
    """Fetch a single cache entry by its key.

    Args:
        key: The cache key to look up.

    Returns:
        The parsed JSON response from the cache API.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    query = {"action": "get", "key": key}
    async with httpx.AsyncClient(timeout=TIMEOUT) as client:
        response = await client.get(CACHE_BASE_URL, params=query)
        response.raise_for_status()
        return response.json()
17
+
18
+
19
async def search_cache(query: str, limit: int = 10) -> dict:
    """Run a keyword search against the shared cache.

    Args:
        query: Keywords to match against cached entries.
        limit: Maximum number of matching entries to return.

    Returns:
        The parsed JSON response from the cache API.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    search_params = {"action": "search", "query": query, "limit": limit}
    async with httpx.AsyncClient(timeout=TIMEOUT) as client:
        response = await client.get(CACHE_BASE_URL, params=search_params)
        response.raise_for_status()
        return response.json()
28
+
29
+
30
async def store_cache_entry(
    key: str,
    value: dict | str | list,
    ttl: int = 86400,
    tags: list[str] | None = None,
    agent_id: str = "mcp-agent",
) -> dict:
    """Write a new entry to the shared cache.

    Args:
        key: Unique cache key for the entry.
        value: The result payload to store.
        ttl: Time-to-live in seconds (default: one day).
        tags: Optional discovery tags; omitted from the request when empty.
        agent_id: Identifier of the storing agent, used for attribution.

    Returns:
        The parsed JSON response from the cache API.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    body: dict = {
        "action": "store",
        "key": key,
        "value": value,
        "ttl": ttl,
        "agent_id": agent_id,
    }
    # Tags are optional; only include the field when at least one tag is present.
    if tags:
        body["tags"] = tags
    async with httpx.AsyncClient(timeout=TIMEOUT) as client:
        response = await client.post(CACHE_BASE_URL, json=body)
        response.raise_for_status()
        return response.json()
51
+
52
+
53
async def get_cache_stats() -> dict:
    """Return cache-wide statistics (hits, misses, top queries).

    Returns:
        The parsed JSON response from the cache API.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    stats_params = {"action": "stats"}
    async with httpx.AsyncClient(timeout=TIMEOUT) as client:
        response = await client.get(CACHE_BASE_URL, params=stats_params)
        response.raise_for_status()
        return response.json()
59
+
60
+
61
async def list_cache_entries(limit: int = 20, tags: list[str] | None = None) -> dict:
    """List cache entries, optionally filtered by tags.

    Args:
        limit: Maximum number of entries to return.
        tags: Optional tag filter; sent as a comma-separated string when present.

    Returns:
        The parsed JSON response from the cache API.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    query: dict = {"action": "list", "limit": limit}
    if tags:
        # The API expects the tag filter as a single comma-separated value.
        query["tags"] = ",".join(tags)
    async with httpx.AsyncClient(timeout=TIMEOUT) as client:
        response = await client.get(CACHE_BASE_URL, params=query)
        response.raise_for_status()
        return response.json()
@@ -0,0 +1,28 @@
1
+ """Shared Context Cache MCP-Server -- Agents teilen berechnete Ergebnisse."""
2
+
3
+ from mcp.server.fastmcp import FastMCP
4
+ from src.tools.cache_tools import register_cache_tools
5
+
6
# Initialize the FastMCP server; the instructions text is surfaced to connecting agents.
mcp = FastMCP(
    "Shared Context Cache",
    instructions=(
        "Shared context cache for AI agents. Before computing expensive results "
        "(web searches, analyses, API calls), use cache_search or cache_lookup to "
        "check if another agent has already computed the answer. After computing, "
        "use cache_store to share your result with other agents. "
        "Network effect: more agents caching = more cache hits = everyone saves tokens."
    ),
)

# Register the cache tools (lookup, search, store, stats, list) on the server.
register_cache_tools(mcp)
20
+
21
+
22
def main() -> None:
    """Entry point for the MCP server (stdio transport)."""
    mcp.run()


if __name__ == "__main__":
    main()
@@ -0,0 +1,184 @@
1
+ """MCP-Tools fuer Shared Context Cache -- Agents teilen berechnete Ergebnisse."""
2
+
3
+ import json
4
+ from mcp.server.fastmcp import FastMCP
5
+ from src.clients.cache_client import (
6
+ get_cache_entry,
7
+ search_cache,
8
+ store_cache_entry,
9
+ get_cache_stats,
10
+ list_cache_entries,
11
+ )
12
+
13
+
14
def register_cache_tools(mcp: FastMCP) -> None:
    """Register all shared-cache tools on the given MCP server.

    Each tool wraps one async cache-client call and returns a JSON-encoded
    string. Backend failures are caught and returned as JSON objects with an
    "error" field so a failing cache never crashes the tool call.

    Note: the nested tool docstrings are user-facing tool descriptions that
    FastMCP sends to connecting agents.
    """

    @mcp.tool()
    async def cache_lookup(key: str) -> str:
        """Look up a cached result by key. Returns the stored value if found.

        Use this BEFORE computing expensive results — another agent may have
        already computed and cached the answer, saving tokens and latency.

        Args:
            key: Cache key (e.g. 'weather:berlin:2026-03-28', 'research:quantum-computing')
        """
        try:
            data = await get_cache_entry(key)
            if data.get("found"):
                entry = data.get("entry", {})
                result = {
                    "found": True,
                    "key": key,
                    "value": entry.get("value"),
                    "cached_by": entry.get("agent_id", "unknown"),
                    "hits": entry.get("hits", 0),
                    "expires_in": entry.get("ttl_remaining", "unknown"),
                    "tags": entry.get("tags", []),
                    "message": "Cache HIT — result retrieved, no computation needed",
                }
            else:
                result = {
                    "found": False,
                    "key": key,
                    "message": "Cache MISS — compute the result and use cache_store to share it",
                }
            return json.dumps(result, ensure_ascii=False)
        except Exception as e:
            # ensure_ascii=False keeps non-ASCII error text readable, matching the success path.
            return json.dumps({"error": str(e), "key": key}, ensure_ascii=False)

    @mcp.tool()
    async def cache_search(query: str, limit: int = 10) -> str:
        """Search the shared cache by keywords. Find relevant cached results from other agents.

        Search before computing — if another agent has cached a similar result,
        you can reuse it directly. More agents caching = more cache hits for everyone.

        Args:
            query: Keywords to search for (e.g. 'weather berlin', 'bitcoin price', 'quantum computing summary')
            limit: Max number of results to return (default: 10, max: 50)
        """
        try:
            # Clamp to the documented maximum of 50 results.
            data = await search_cache(query, min(limit, 50))
            entries = data.get("entries", [])
            result = {
                "query": query,
                "total_found": len(entries),
                "entries": [
                    {
                        "key": e.get("key"),
                        "tags": e.get("tags", []),
                        "hits": e.get("hits", 0),
                        "agent_id": e.get("agent_id"),
                        # Truncate previews so large cached values don't bloat the response.
                        "preview": str(e.get("value", ""))[:200],
                    }
                    for e in entries
                ],
                "tip": "Use cache_lookup with a specific key to retrieve the full value",
            }
            return json.dumps(result, ensure_ascii=False)
        except Exception as e:
            return json.dumps({"error": str(e), "query": query}, ensure_ascii=False)

    @mcp.tool()
    async def cache_store(
        key: str,
        value: str,
        ttl_seconds: int = 86400,
        tags: str = "",
        agent_id: str = "mcp-agent",
    ) -> str:
        """Store a computed result in the shared cache so other agents can reuse it.

        After computing an expensive result (web search, analysis, API call),
        store it here. Other agents using cache_lookup or cache_search will
        find your result and save their own computation costs.

        Network effect: More agents caching = more cache hits = everyone benefits.

        Args:
            key: Unique cache key (e.g. 'weather:berlin:2026-03-28', 'summary:arxiv:2501.00001')
            value: The result to cache (JSON string, text, or any serializable content)
            ttl_seconds: Time-to-live in seconds (default: 86400 = 24h, max: 604800 = 7 days)
            tags: Comma-separated tags for discovery (e.g. 'weather,berlin,temperature')
            agent_id: Your agent identifier for attribution (e.g. 'weather-agent-v2')
        """
        try:
            # Parse the value as JSON when possible; otherwise store the raw string.
            try:
                parsed_value = json.loads(value)
            except (json.JSONDecodeError, TypeError):
                parsed_value = value

            tag_list = [t.strip() for t in tags.split(",") if t.strip()] if tags else []
            ttl = min(max(ttl_seconds, 60), 604800)  # Clamp between 1 minute and 7 days.

            # The backend response carries no extra fields we surface, so it is not bound.
            await store_cache_entry(
                key=key,
                value=parsed_value,
                ttl=ttl,
                tags=tag_list,
                agent_id=agent_id,
            )
            result = {
                "stored": True,
                "key": key,
                "ttl_seconds": ttl,
                "tags": tag_list,
                "agent_id": agent_id,
                "message": "Result cached successfully — other agents can now reuse this",
                "expires_in": f"{ttl // 3600}h {(ttl % 3600) // 60}m",
            }
            return json.dumps(result, ensure_ascii=False)
        except Exception as e:
            return json.dumps({"error": str(e), "key": key}, ensure_ascii=False)

    @mcp.tool()
    async def cache_stats() -> str:
        """Get statistics about the shared cache — hits, misses, top queries, cost savings.

        Shows overall cache performance and which keys are most frequently accessed.
        Use this to understand the network effect: how much computation has been
        saved across all agents.
        """
        try:
            data = await get_cache_stats()
            return json.dumps(data, ensure_ascii=False)
        except Exception as e:
            return json.dumps({"error": str(e)}, ensure_ascii=False)

    @mcp.tool()
    async def cache_list(limit: int = 20, tags: str = "") -> str:
        """List available cache entries, optionally filtered by tags.

        Browse what other agents have cached. Good for discovering available
        precomputed results before running your own queries.

        Args:
            limit: Max entries to return (default: 20, max: 100)
            tags: Filter by tags, comma-separated (e.g. 'weather,temperature')
        """
        try:
            tag_list = [t.strip() for t in tags.split(",") if t.strip()] if tags else None
            data = await list_cache_entries(limit=min(limit, 100), tags=tag_list)
            entries = data.get("entries", [])
            result = {
                "total_entries": data.get("total", len(entries)),
                "showing": len(entries),
                "filter_tags": tag_list,
                "entries": [
                    {
                        "key": e.get("key"),
                        "tags": e.get("tags", []),
                        "hits": e.get("hits", 0),
                        "agent_id": e.get("agent_id"),
                        "ttl_remaining": e.get("ttl_remaining"),
                    }
                    for e in entries
                ],
                "tip": "Use cache_lookup to retrieve the full value for any key",
            }
            return json.dumps(result, ensure_ascii=False)
        except Exception as e:
            return json.dumps({"error": str(e)}, ensure_ascii=False)