iflow-mcp_hulupeep_ruvscan-mcp 0.5.0__py3-none-any.whl

@@ -0,0 +1,282 @@
+ """
+ SAFLA (Symbolic Analogical Framework for Lateral Analysis) Agent
+ Generates outside-the-box reasoning and creative leverage insights
+ """
+
+ from typing import Dict, List, Any, Optional
+ import logging
+ import json
+
+ logger = logging.getLogger(__name__)
+
+ class SAFLAAgent:
+     """
+     Analogical reasoning agent for generating creative reuse insights
+     """
+
+     def __init__(self, fact_cache=None):
+         self.fact_cache = fact_cache
+         self.reasoning_domains = [
+             "algorithmic",
+             "architectural",
+             "performance",
+             "scalability",
+             "integration",
+             "domain_transfer"
+         ]
+
+     async def generate_outside_box_reasoning(
+         self,
+         repo_summary: str,
+         query_intent: str,
+         repo_capabilities: List[str]
+     ) -> Dict[str, Any]:
+         """
+         Generate outside-the-box reasoning for how a repo could be reused
+
+         Args:
+             repo_summary: Repository summary
+             query_intent: User's intent/problem statement
+             repo_capabilities: List of repo capabilities
+
+         Returns:
+             Reasoning result with insights
+         """
+         logger.info(f"Generating SAFLA reasoning for query: {query_intent[:50]}...")
+
+         # Check FACT cache first
+         cache_key = f"safla:{query_intent}:{repo_summary[:100]}"
+         if self.fact_cache:
+             cached = self.fact_cache.get(cache_key)
+             if cached:
+                 logger.info("SAFLA reasoning retrieved from FACT cache")
+                 return json.loads(cached['response'])
+
+         # Generate reasoning (placeholder - would use LLM in production)
+         reasoning = await self._analogical_inference(
+             repo_summary,
+             query_intent,
+             repo_capabilities
+         )
+
+         result = {
+             "outside_box_reasoning": reasoning['primary_insight'],
+             "integration_hint": reasoning['integration_strategy'],
+             "analogical_domains": reasoning['domains'],
+             "confidence": reasoning['confidence'],
+             "reasoning_chain": reasoning['chain']
+         }
+
+         # Store in FACT cache
+         if self.fact_cache:
+             self.fact_cache.set(
+                 cache_key,
+                 json.dumps(result),
+                 metadata={"type": "safla_reasoning"}
+             )
+
+         return result
+
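For orientation, a minimal driver for the method above might look like the following sketch. The argument values are illustrative and no FACT cache is wired in.

import asyncio

agent = SAFLAAgent()  # no FACT cache for this sketch

insights = asyncio.run(agent.generate_outside_box_reasoning(
    repo_summary="TRUE O(log n) matrix solver with WASM acceleration",
    query_intent="speed up context similarity search",
    repo_capabilities=["O(log n) solver", "MCP integration"],
))
print(insights["outside_box_reasoning"])
print(insights["integration_hint"])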
+     async def _analogical_inference(
+         self,
+         repo_summary: str,
+         query_intent: str,
+         capabilities: List[str]
+     ) -> Dict[str, Any]:
+         """
+         Perform analogical inference using cross-domain mapping
+
+         Args:
+             repo_summary: Repository summary
+             query_intent: User intent
+             capabilities: Repository capabilities
+
+         Returns:
+             Inference result
+         """
+         # TODO: Integrate with LLM for actual analogical reasoning
+         # This is a placeholder implementation
+
+         # Extract key concepts from intent
+         intent_concepts = self._extract_concepts(query_intent)
+
+         # Map capabilities to domains
+         domain_mappings = self._map_to_domains(capabilities)
+
+         # Generate creative transfer insights
+         insights = self._generate_transfer_insights(
+             intent_concepts,
+             domain_mappings,
+             repo_summary
+         )
+
+         return {
+             "primary_insight": insights['primary'],
+             "integration_strategy": insights['strategy'],
+             "domains": domain_mappings,
+             "confidence": insights['confidence'],
+             "chain": insights['reasoning_steps']
+         }
+
+     def _extract_concepts(self, text: str) -> List[str]:
+         """Extract key concepts from text"""
+         # Simplified keyword-based concept extraction
+         keywords = [
+             "speed", "performance", "optimize", "scale",
+             "context", "memory", "recall", "search",
+             "api", "latency", "throughput", "real-time"
+         ]
+
+         text_lower = text.lower()
+         return [keyword for keyword in keywords if keyword in text_lower]
+
+     def _map_to_domains(self, capabilities: List[str]) -> List[str]:
+         """Map capabilities to reasoning domains"""
+         # Keys are lowercase so the substring match against the lowercased
+         # capability text works; mixed-case keys like "O(log n)" or "MCP"
+         # would never match and would silently drop their domains.
+         domain_map = {
+             "solver": ["algorithmic", "performance"],
+             "o(log n)": ["algorithmic", "scalability"],
+             "context": ["architectural", "integration"],
+             "caching": ["performance", "scalability"],
+             "mcp": ["integration", "architectural"]
+         }
+
+         domains = set()
+         for cap in capabilities:
+             cap_lower = cap.lower()
+             for key, mapped_domains in domain_map.items():
+                 if key in cap_lower:
+                     domains.update(mapped_domains)
+
+         return list(domains)
+
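A quick spot check of this mapping (illustrative input; the method builds its result from a set, so ordering varies):

agent = SAFLAAgent()
print(sorted(agent._map_to_domains(["O(log n) solver", "MCP server"])))
# ['algorithmic', 'architectural', 'integration', 'performance', 'scalability']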
+     def _generate_transfer_insights(
+         self,
+         intent_concepts: List[str],
+         domains: List[str],
+         repo_summary: str
+     ) -> Dict[str, Any]:
+         """Generate creative transfer insights"""
+
+         # Placeholder implementation
+         # In production, this would use LLM with carefully crafted prompts
+
+         primary_insight = (
+             "This technology could be repurposed by applying its core algorithmic "
+             "approach to your use case, even though the original implementation "
+             "was designed for a different domain."
+         )
+
+         if "performance" in intent_concepts and "algorithmic" in domains:
+             primary_insight = (
+                 "The sublinear algorithmic approach could replace linear operations "
+                 "in your system, achieving exponential speedup through dimensional "
+                 "reduction and probabilistic guarantees."
+             )
+
+         strategy = "Integrate as MCP tool or standalone service with API bridge"
+
+         # Match case-insensitively so summaries mentioning "mcp" also qualify
+         if "mcp" in repo_summary.lower() or "integration" in domains:
+             strategy = "Direct MCP integration - install via npx and call from agent workflow"
+
+         return {
+             "primary": primary_insight,
+             "strategy": strategy,
+             "confidence": 0.85,
+             "reasoning_steps": [
+                 "Identified domain overlap",
+                 "Mapped algorithmic primitives",
+                 "Generated creative transfer",
+                 "Validated integration path"
+             ]
+         }
+
+     def generate_leverage_card(
+         self,
+         repo_data: Dict[str, Any],
+         query_intent: str,
+         similarity_score: float
+     ) -> Dict[str, Any]:
+         """
+         Generate complete leverage card with SAFLA reasoning
+
+         Args:
+             repo_data: Repository data
+             query_intent: User intent
+             similarity_score: Sublinear similarity score
+
+         Returns:
+             Leverage card dictionary
+         """
+         # Extract capabilities
+         capabilities = repo_data.get('capabilities', [])
+         if not capabilities:
+             capabilities = self._infer_capabilities(repo_data)
+
+         # Generate outside-box reasoning
+         # This would be async in production with LLM calls
+         reasoning_result = {
+             "outside_box_reasoning": "Creative reuse insight",
+             "integration_hint": "Integration strategy",
+             "analogical_domains": ["algorithmic"],
+             "confidence": 0.85,
+             "reasoning_chain": []
+         }
+
+         card = {
+             "repo": repo_data['full_name'],
+             "capabilities": capabilities,
+             "summary": repo_data.get('description', 'No description'),
+             "outside_box_reasoning": reasoning_result['outside_box_reasoning'],
+             "integration_hint": reasoning_result['integration_hint'],
+             "relevance_score": similarity_score,
+             "runtime_complexity": self._infer_complexity(repo_data),
+             "cached": True
+         }
+
+         return card
+
+     def _infer_capabilities(self, repo_data: Dict[str, Any]) -> List[str]:
+         """Infer capabilities from repo data"""
+         capabilities = []
+
+         description = (repo_data.get('description', '') + ' ' +
+                        repo_data.get('readme', '')[:500]).lower()
+
+         capability_keywords = {
+             "solver": ["solve", "solver", "solution"],
+             "O(log n)": ["sublinear", "logarithmic", "o(log"],
+             "caching": ["cache", "caching", "memoiz"],
+             "MCP": ["mcp", "model context protocol"],
+             "API": ["api", "rest", "graphql"],
+             "ML": ["machine learning", "neural", "model"]
+         }
+
+         for cap, keywords in capability_keywords.items():
+             if any(kw in description for kw in keywords):
+                 capabilities.append(cap)
+
+         return capabilities or ["general purpose"]
+
+     def _infer_complexity(self, repo_data: Dict[str, Any]) -> Optional[str]:
+         """Infer runtime complexity from repo data"""
+         text = (repo_data.get('description', '') + ' ' +
+                 repo_data.get('readme', '')[:500]).lower()
+
+         complexity_patterns = [
+             ("O(log n)", ["o(log", "sublinear", "logarithmic"]),
+             ("O(n)", ["linear time", "o(n)"]),
+             ("O(n²)", ["quadratic", "o(n^2)", "o(n2)"]),
+             ("O(1)", ["constant time", "o(1)"])
+         ]
+
+         for complexity, patterns in complexity_patterns:
+             if any(p in text for p in patterns):
+                 return complexity
+
+         return None
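A sketch of the synchronous card path end to end; the repo_data dict here is invented for illustration (only full_name is accessed directly, the other keys are read with .get):

agent = SAFLAAgent()
card = agent.generate_leverage_card(
    repo_data={
        "full_name": "ruvnet/sublinear-time-solver",
        "description": "Sublinear matrix solver with MCP support",
        "readme": "TRUE O(log n) solving over sparse systems...",
    },
    query_intent="speed up context similarity search",
    similarity_score=0.92,
)
print(card["capabilities"])        # inferred: ['solver', 'O(log n)', 'MCP']
print(card["runtime_complexity"])  # 'O(log n)'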
mcp/server.py ADDED
@@ -0,0 +1,268 @@
+ """
+ RuvScan MCP Server
+ Main FastAPI orchestrator for sublinear-intelligence scanning
+ """
+
+ from fastapi import FastAPI, HTTPException
+ from fastapi.middleware.cors import CORSMiddleware
+ from pydantic import BaseModel, Field
+ from typing import Optional, List, Dict, Any
+ import logging
+ import uvicorn
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ # Create FastAPI app
+ app = FastAPI(
+     title="RuvScan MCP Server",
+     description="Sublinear-intelligence scanning for GitHub repositories",
+     version="0.5.0"
+ )
+
+ # Add CORS middleware (wide open for development; restrict allow_origins
+ # before exposing this server publicly)
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Pydantic models
+ class ScanRequest(BaseModel):
+     """Request to scan GitHub org/user/topic"""
+     source_type: str = Field(..., description="Type: 'org', 'user', or 'topic'")
+     source_name: str = Field(..., description="Name of org/user or topic keyword")
+     limit: Optional[int] = Field(50, description="Max repos to scan")
+
+ class QueryRequest(BaseModel):
+     """Request to query for leverage"""
+     intent: str = Field(..., description="User's intent or problem statement")
+     max_results: Optional[int] = Field(10, description="Max leverage cards to return")
+     min_score: Optional[float] = Field(0.7, description="Minimum relevance score")
+
+ class CompareRequest(BaseModel):
+     """Request to compare two repositories"""
+     repo_a: str = Field(..., description="First repo (org/name)")
+     repo_b: str = Field(..., description="Second repo (org/name)")
+
+ class LeverageCard(BaseModel):
+     """Schema for leverage card response"""
+     repo: str
+     capabilities: List[str]
+     summary: str
+     outside_box_reasoning: str
+     integration_hint: str
+     relevance_score: float
+     runtime_complexity: Optional[str] = None
+     cached: bool = False
+
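As a quick illustration of the request models, constructing a QueryRequest with only the required field picks up the declared defaults:

req = QueryRequest(intent="speed up context similarity search")
print(req.max_results, req.min_score)  # 10 0.7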
+ # Health check endpoint
+ @app.get("/health")
+ async def health_check():
+     """Health check endpoint"""
+     return {
+         "status": "healthy",
+         "version": "0.5.0",
+         "service": "RuvScan MCP Server"
+     }
+
+ # MCP Endpoints
+ @app.post("/scan")
+ async def scan_repos(request: ScanRequest):
+     """
+     Trigger GitHub scanning for org/user/topic
+
+     This endpoint initiates concurrent Go workers to fetch and analyze repos
+     """
+     logger.info(f"Scanning {request.source_type}: {request.source_name}")
+
+     try:
+         # TODO: Trigger Go scanning workers
+         # TODO: Store results in database
+         # TODO: Return scan summary
+
+         return {
+             "status": "initiated",
+             "source_type": request.source_type,
+             "source_name": request.source_name,
+             "estimated_repos": request.limit,
+             "message": "Scan initiated - workers processing in background"
+         }
+     except Exception as e:
+         logger.error(f"Scan error: {str(e)}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @app.post("/query", response_model=List[LeverageCard])
+ async def query_leverage(request: QueryRequest):
+     """
+     Query for leverage cards based on user intent
+
+     Uses sublinear similarity and SAFLA reasoning to find relevant repos
+     """
+     logger.info(f"Querying intent: {request.intent[:100]}...")
+
+     try:
+         # TODO: Generate embedding for intent
+         # TODO: Call Rust sublinear engine for similarity
+         # TODO: Apply SAFLA reasoning for outside-the-box insights
+         # TODO: Return ranked leverage cards
+
+         # Placeholder response
+         return [
+             LeverageCard(
+                 repo="ruvnet/sublinear-time-solver",
+                 capabilities=["O(log n) solving", "WASM acceleration", "MCP integration"],
+                 summary="TRUE O(log n) matrix solver with consciousness exploration",
+                 outside_box_reasoning="Could accelerate context similarity search by replacing vector comparisons with sublinear clustering",
+                 integration_hint="Use as MCP tool via npx sublinear-time-solver mcp",
+                 relevance_score=0.92,
+                 runtime_complexity="O(log n)",
+                 cached=False
+             )
+         ]
+     except Exception as e:
+         logger.error(f"Query error: {str(e)}")
+         raise HTTPException(status_code=500, detail=str(e))
+
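With the server running locally on the default port, the endpoint above can be exercised from any HTTP client; a minimal sketch using httpx (hypothetical client code, not part of the package):

import httpx

resp = httpx.post(
    "http://localhost:8000/query",
    json={"intent": "speed up context similarity search", "max_results": 5},
)
for card in resp.json():
    print(card["repo"], card["relevance_score"])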
+ @app.get("/cards")
+ async def get_cards(
+     limit: int = 50,
+     min_score: float = 0.0,
+     cached_only: bool = False
+ ):
+     """
+     List or filter saved leverage cards
+     """
+     logger.info(f"Fetching cards: limit={limit}, min_score={min_score}")
+
+     try:
+         # TODO: Query database for leverage cards
+         # TODO: Apply filters
+         # TODO: Return results
+
+         return {
+             "cards": [],
+             "total": 0,
+             "limit": limit
+         }
+     except Exception as e:
+         logger.error(f"Cards fetch error: {str(e)}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @app.post("/compare")
+ async def compare_repos(request: CompareRequest):
+     """
+     Compare two repos using sublinear solver
+     """
+     logger.info(f"Comparing {request.repo_a} vs {request.repo_b}")
+
+     try:
+         # TODO: Fetch repo embeddings
+         # TODO: Call Rust sublinear comparison
+         # TODO: Return similarity score and analysis
+
+         return {
+             "repo_a": request.repo_a,
+             "repo_b": request.repo_b,
+             "similarity_score": 0.0,
+             "complexity": "O(log n)",
+             "analysis": "Comparison not yet implemented"
+         }
+     except Exception as e:
+         logger.error(f"Compare error: {str(e)}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @app.post("/analyze")
+ async def analyze_reasoning(repo: str):
+     """
+     Explain reasoning chain using FACT replay
+
+     Note: repo is received as a query parameter (?repo=...), not a JSON body.
+     """
+     logger.info(f"Analyzing reasoning for: {repo}")
+
+     try:
+         # TODO: Fetch FACT cache entry
+         # TODO: Replay reasoning trace
+         # TODO: Return deterministic reasoning chain
+
+         return {
+             "repo": repo,
+             "reasoning_trace": [],
+             "cached": False,
+             "message": "Analysis not yet implemented"
+         }
+     except Exception as e:
+         logger.error(f"Analyze error: {str(e)}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ # MCP protocol endpoints
+ @app.get("/mcp/tools")
+ async def list_mcp_tools():
+     """List available MCP tools"""
+     return {
+         "tools": [
+             {
+                 "name": "scan",
+                 "description": "Scan GitHub org/user/topic for repos",
+                 "inputSchema": {
+                     "type": "object",
+                     "properties": {
+                         "source_type": {"type": "string", "enum": ["org", "user", "topic"]},
+                         "source_name": {"type": "string"},
+                         "limit": {"type": "integer", "default": 50}
+                     },
+                     "required": ["source_type", "source_name"]
+                 }
+             },
+             {
+                 "name": "query",
+                 "description": "Query for leverage based on intent",
+                 "inputSchema": {
+                     "type": "object",
+                     "properties": {
+                         "intent": {"type": "string"},
+                         "max_results": {"type": "integer", "default": 10},
+                         "min_score": {"type": "number", "default": 0.7}
+                     },
+                     "required": ["intent"]
+                 }
+             },
+             {
+                 "name": "compare",
+                 "description": "Compare two repos using sublinear solver",
+                 "inputSchema": {
+                     "type": "object",
+                     "properties": {
+                         "repo_a": {"type": "string"},
+                         "repo_b": {"type": "string"}
+                     },
+                     "required": ["repo_a", "repo_b"]
+                 }
+             },
+             {
+                 "name": "analyze",
+                 "description": "Analyze reasoning chain using FACT replay",
+                 "inputSchema": {
+                     "type": "object",
+                     "properties": {
+                         "repo": {"type": "string"}
+                     },
+                     "required": ["repo"]
+                 }
+             }
+         ]
+     }
+
+ if __name__ == "__main__":
+     uvicorn.run(
+         "server:app",
+         host="0.0.0.0",
+         port=8000,
+         reload=True,
+         log_level="info"
+     )
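Once the server is up, a client can discover the tool manifest from /mcp/tools; again a hypothetical httpx sketch assuming the default port:

import httpx

tools = httpx.get("http://localhost:8000/mcp/tools").json()["tools"]
print([t["name"] for t in tools])  # ['scan', 'query', 'compare', 'analyze']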
@@ -0,0 +1,21 @@
+ """Storage layer for RuvScan"""
+
+ from .db import RuvScanDB
+ from .models import (
+     Repository,
+     LeverageCard,
+     FACTCacheEntry,
+     ScanJob,
+     SublinearComparison,
+     ReasoningTrace
+ )
+
+ __all__ = [
+     'RuvScanDB',
+     'Repository',
+     'LeverageCard',
+     'FACTCacheEntry',
+     'ScanJob',
+     'SublinearComparison',
+     'ReasoningTrace'
+ ]