mcp-vector-search 0.12.6__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. mcp_vector_search/__init__.py +3 -3
  2. mcp_vector_search/analysis/__init__.py +111 -0
  3. mcp_vector_search/analysis/baseline/__init__.py +68 -0
  4. mcp_vector_search/analysis/baseline/comparator.py +462 -0
  5. mcp_vector_search/analysis/baseline/manager.py +621 -0
  6. mcp_vector_search/analysis/collectors/__init__.py +74 -0
  7. mcp_vector_search/analysis/collectors/base.py +164 -0
  8. mcp_vector_search/analysis/collectors/cohesion.py +463 -0
  9. mcp_vector_search/analysis/collectors/complexity.py +743 -0
  10. mcp_vector_search/analysis/collectors/coupling.py +1162 -0
  11. mcp_vector_search/analysis/collectors/halstead.py +514 -0
  12. mcp_vector_search/analysis/collectors/smells.py +325 -0
  13. mcp_vector_search/analysis/debt.py +516 -0
  14. mcp_vector_search/analysis/interpretation.py +685 -0
  15. mcp_vector_search/analysis/metrics.py +414 -0
  16. mcp_vector_search/analysis/reporters/__init__.py +7 -0
  17. mcp_vector_search/analysis/reporters/console.py +646 -0
  18. mcp_vector_search/analysis/reporters/markdown.py +480 -0
  19. mcp_vector_search/analysis/reporters/sarif.py +377 -0
  20. mcp_vector_search/analysis/storage/__init__.py +93 -0
  21. mcp_vector_search/analysis/storage/metrics_store.py +762 -0
  22. mcp_vector_search/analysis/storage/schema.py +245 -0
  23. mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
  24. mcp_vector_search/analysis/trends.py +308 -0
  25. mcp_vector_search/analysis/visualizer/__init__.py +90 -0
  26. mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
  27. mcp_vector_search/analysis/visualizer/exporter.py +484 -0
  28. mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
  29. mcp_vector_search/analysis/visualizer/schemas.py +525 -0
  30. mcp_vector_search/cli/commands/analyze.py +1062 -0
  31. mcp_vector_search/cli/commands/chat.py +1455 -0
  32. mcp_vector_search/cli/commands/index.py +621 -5
  33. mcp_vector_search/cli/commands/index_background.py +467 -0
  34. mcp_vector_search/cli/commands/init.py +13 -0
  35. mcp_vector_search/cli/commands/install.py +597 -335
  36. mcp_vector_search/cli/commands/install_old.py +8 -4
  37. mcp_vector_search/cli/commands/mcp.py +78 -6
  38. mcp_vector_search/cli/commands/reset.py +68 -26
  39. mcp_vector_search/cli/commands/search.py +224 -8
  40. mcp_vector_search/cli/commands/setup.py +1184 -0
  41. mcp_vector_search/cli/commands/status.py +339 -5
  42. mcp_vector_search/cli/commands/uninstall.py +276 -357
  43. mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
  44. mcp_vector_search/cli/commands/visualize/cli.py +292 -0
  45. mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
  46. mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
  47. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +33 -0
  48. mcp_vector_search/cli/commands/visualize/graph_builder.py +647 -0
  49. mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
  50. mcp_vector_search/cli/commands/visualize/server.py +600 -0
  51. mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
  52. mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
  53. mcp_vector_search/cli/commands/visualize/templates/base.py +234 -0
  54. mcp_vector_search/cli/commands/visualize/templates/scripts.py +4542 -0
  55. mcp_vector_search/cli/commands/visualize/templates/styles.py +2522 -0
  56. mcp_vector_search/cli/didyoumean.py +27 -2
  57. mcp_vector_search/cli/main.py +127 -160
  58. mcp_vector_search/cli/output.py +158 -13
  59. mcp_vector_search/config/__init__.py +4 -0
  60. mcp_vector_search/config/default_thresholds.yaml +52 -0
  61. mcp_vector_search/config/settings.py +12 -0
  62. mcp_vector_search/config/thresholds.py +273 -0
  63. mcp_vector_search/core/__init__.py +16 -0
  64. mcp_vector_search/core/auto_indexer.py +3 -3
  65. mcp_vector_search/core/boilerplate.py +186 -0
  66. mcp_vector_search/core/config_utils.py +394 -0
  67. mcp_vector_search/core/database.py +406 -94
  68. mcp_vector_search/core/embeddings.py +24 -0
  69. mcp_vector_search/core/exceptions.py +11 -0
  70. mcp_vector_search/core/git.py +380 -0
  71. mcp_vector_search/core/git_hooks.py +4 -4
  72. mcp_vector_search/core/indexer.py +632 -54
  73. mcp_vector_search/core/llm_client.py +756 -0
  74. mcp_vector_search/core/models.py +91 -1
  75. mcp_vector_search/core/project.py +17 -0
  76. mcp_vector_search/core/relationships.py +473 -0
  77. mcp_vector_search/core/scheduler.py +11 -11
  78. mcp_vector_search/core/search.py +179 -29
  79. mcp_vector_search/mcp/server.py +819 -9
  80. mcp_vector_search/parsers/python.py +285 -5
  81. mcp_vector_search/utils/__init__.py +2 -0
  82. mcp_vector_search/utils/gitignore.py +0 -3
  83. mcp_vector_search/utils/gitignore_updater.py +212 -0
  84. mcp_vector_search/utils/monorepo.py +66 -4
  85. mcp_vector_search/utils/timing.py +10 -6
  86. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +184 -53
  87. mcp_vector_search-1.1.22.dist-info/RECORD +120 -0
  88. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +1 -1
  89. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +1 -0
  90. mcp_vector_search/cli/commands/visualize.py +0 -1467
  91. mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
  92. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,600 @@
1
+ """HTTP server for visualization with streaming JSON support.
2
+
3
+ This module handles running the local HTTP server to serve the
4
+ D3.js visualization interface with chunked transfer for large JSON files.
5
+ Uses orjson for 5-10x faster JSON serialization.
6
+ """
7
+
8
+ import asyncio
9
+ import socket
10
+ import webbrowser
11
+ from collections.abc import AsyncGenerator
12
+ from pathlib import Path
13
+
14
+ import orjson
15
+ import uvicorn
16
+ from fastapi import FastAPI, Response
17
+ from fastapi.responses import FileResponse, StreamingResponse
18
+ from fastapi.staticfiles import StaticFiles
19
+ from rich.console import Console
20
+ from rich.panel import Panel
21
+
22
+ from mcp_vector_search import __version__
23
+
24
+ console = Console()
25
+
26
+
27
def find_free_port(start_port: int = 8501, end_port: int = 8599) -> int:
    """Return the first TCP port in the given range that accepts a bind.

    Args:
        start_port: First port number to try (default: 8501)
        end_port: Last port number to try, inclusive (default: 8599)

    Returns:
        The first port in the range that could be bound

    Raises:
        OSError: If every port in the range is already in use
    """
    candidate = start_port
    while candidate <= end_port:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # A successful bind proves the port is currently free; the probe
            # socket is closed immediately so the caller can re-bind it.
            probe.bind(("", candidate))
        except OSError:
            candidate += 1
        else:
            return candidate
        finally:
            probe.close()
    raise OSError(f"No free ports available in range {start_port}-{end_port}")
48
+
49
+
50
def create_app(viz_dir: Path) -> FastAPI:
    """Create FastAPI application for visualization server.

    Args:
        viz_dir: Directory containing visualization files

    Returns:
        Configured FastAPI application

    Design Decision: Streaming JSON with chunked transfer

    Rationale: Safari's JSON.parse() cannot handle 6.3MB files in memory.
    Selected streaming approach to send JSON in 100KB chunks, avoiding
    browser memory limits and parser crashes.

    Trade-offs:
    - Memory: Constant memory usage vs. 6.3MB loaded at once
    - Complexity: Requires streaming parser vs. simple JSON.parse()
    - Performance: Slightly slower parsing but prevents crashes

    Alternatives Considered:
    1. Compress JSON (gzip): Rejected - still requires full parse after decompression
    2. Split into multiple files: Rejected - requires graph structure changes
    3. Binary format (protobuf): Rejected - requires major refactoring

    Error Handling:
    - File not found: Returns 404 with clear error message
    - Read errors: Logs exception and returns 500
    - Connection interruption: Stream closes gracefully

    Performance:
    - Time: O(n) single file read pass
    - Space: O(1) constant memory (100KB buffer)
    - Expected: <10s for 6.3MB file on localhost
    """
    import ast

    app = FastAPI(title="MCP Vector Search Visualization")

    # Every data endpoint reads from this one file, written by the exporter.
    graph_path = viz_dir / "chunk-graph.json"

    def _load_graph() -> dict:
        """Read and parse chunk-graph.json fresh (file may be regenerated)."""
        with open(graph_path, "rb") as f:
            return orjson.loads(f.read())

    def _find_node(data: dict, chunk_id: str):
        """Return the node dict with the given id, or None if absent."""
        for node in data.get("nodes", []):
            if node.get("id") == chunk_id:
                return node
        return None

    def _extract_calls(code: str) -> set[str]:
        """Extract called function/method names from Python source using AST.

        Returns an empty set for code that does not parse (non-Python chunks).
        """
        calls: set[str] = set()
        try:
            tree = ast.parse(code)
        except SyntaxError:
            return calls
        for node in ast.walk(tree):
            if isinstance(node, ast.Call):
                if isinstance(node.func, ast.Name):
                    calls.add(node.func.id)
                elif isinstance(node.func, ast.Attribute):
                    calls.add(node.func.attr)
        return calls

    def _find_callers(data: dict, function_name: str, target_file: str) -> list[dict]:
        """Scan chunks in OTHER files for calls to function_name.

        Same-file chunks are skipped (intra-file calls are noise for the
        graph view), as are __init__ callers.
        """
        callers: list[dict] = []
        for node in data.get("nodes", []):
            if node.get("type") != "chunk":
                continue
            node_file = node.get("file_path", "")
            if node_file == target_file:
                continue
            content = node.get("content", "")
            if function_name in _extract_calls(content):
                caller_name = node.get("function_name") or node.get("class_name")
                if caller_name == "__init__":
                    continue  # Skip noise
                callers.append(
                    {
                        "id": node.get("id"),
                        "name": caller_name or f"chunk_{node.get('start_line', 0)}",
                        "file": node_file,
                        "type": node.get("chunk_type", "code"),
                    }
                )
        return callers

    @app.get("/api/graph-status")
    async def graph_status() -> Response:
        """Get graph data generation status.

        Returns:
            JSON response with ready flag and file size
        """
        if not graph_path.exists():
            return Response(
                content='{"ready": false, "size": 0}',
                media_type="application/json",
                headers={"Cache-Control": "no-cache"},
            )

        try:
            size = graph_path.stat().st_size
            # Consider graph ready if file exists and has content (>100 bytes)
            is_ready = size > 100
            return Response(
                content=f'{{"ready": {str(is_ready).lower()}, "size": {size}}}',
                media_type="application/json",
                headers={"Cache-Control": "no-cache"},
            )
        except Exception as e:
            console.print(f"[red]Error checking graph status: {e}[/red]")
            return Response(
                content='{"ready": false, "size": 0}',
                media_type="application/json",
                headers={"Cache-Control": "no-cache"},
            )

    @app.get("/api/graph")
    async def get_graph_data() -> Response:
        """Get graph data for D3 tree visualization.

        Returns:
            JSON response with nodes and links
        """
        if not graph_path.exists():
            return Response(
                content='{"error": "Graph data not found", "nodes": [], "links": []}',
                status_code=404,
                media_type="application/json",
            )

        try:
            data = _load_graph()
            # Return nodes and links using orjson for fast serialization
            return Response(
                content=orjson.dumps(
                    {"nodes": data.get("nodes", []), "links": data.get("links", [])}
                ),
                media_type="application/json",
                headers={"Cache-Control": "no-cache"},
            )
        except Exception as e:
            console.print(f"[red]Error loading graph data: {e}[/red]")
            return Response(
                content='{"error": "Failed to load graph data", "nodes": [], "links": []}',
                status_code=500,
                media_type="application/json",
            )

    @app.get("/api/relationships/{chunk_id}")
    async def get_chunk_relationships(chunk_id: str) -> Response:
        """Get all relationships for a chunk (semantic + callers) on-demand.

        Lazy loads relationships when user expands a node, avoiding expensive
        upfront computation.

        Args:
            chunk_id: The chunk ID to find relationships for

        Returns:
            JSON response with semantic neighbors and callers
        """
        if not graph_path.exists():
            return Response(
                content='{"error": "Graph data not found"}',
                status_code=404,
                media_type="application/json",
            )

        try:
            data = _load_graph()

            target_node = _find_node(data, chunk_id)
            if not target_node:
                return Response(
                    content='{"error": "Chunk not found"}',
                    status_code=404,
                    media_type="application/json",
                )

            function_name = target_node.get("function_name") or target_node.get(
                "class_name"
            )
            target_file = target_node.get("file_path", "")
            target_content = target_node.get("content", "")

            # Callers can only be computed for named functions/classes.
            callers = (
                _find_callers(data, function_name, target_file)
                if function_name
                else []
            )

            # Compute semantic neighbors (similar code)
            # Simple approach: Jaccard word-overlap between chunk contents.
            semantic = []
            target_words = set(target_content.lower().split())

            for node in data.get("nodes", []):
                if node.get("type") != "chunk" or node.get("id") == chunk_id:
                    continue
                content = node.get("content", "")
                node_words = set(content.lower().split())
                # Jaccard similarity
                if target_words and node_words:
                    intersection = len(target_words & node_words)
                    union = len(target_words | node_words)
                    similarity = intersection / union if union > 0 else 0
                    if similarity > 0.3:  # 30% threshold
                        semantic.append(
                            {
                                "id": node.get("id"),
                                "name": node.get("function_name")
                                or node.get("class_name")
                                or "chunk",
                                "file": node.get("file_path", ""),
                                "similarity": round(similarity, 2),
                            }
                        )

            # Sort by similarity and limit
            semantic.sort(key=lambda x: x["similarity"], reverse=True)
            semantic = semantic[:10]

            return Response(
                content=orjson.dumps(
                    {
                        "chunk_id": chunk_id,
                        "callers": callers,
                        "caller_count": len(callers),
                        "semantic": semantic,
                        "semantic_count": len(semantic),
                    }
                ),
                media_type="application/json",
                headers={"Cache-Control": "max-age=300"},
            )
        except Exception as e:
            console.print(f"[red]Error computing relationships: {e}[/red]")
            return Response(
                content='{"error": "Failed to compute relationships"}',
                status_code=500,
                media_type="application/json",
            )

    @app.get("/api/callers/{chunk_id}")
    async def get_chunk_callers(chunk_id: str) -> Response:
        """Get callers for a specific code chunk (lazy loaded on-demand).

        This computes callers for a single chunk instantly instead of
        pre-computing all relationships (which takes 20+ minutes).

        Args:
            chunk_id: The chunk ID to find callers for

        Returns:
            JSON response with callers array
        """
        if not graph_path.exists():
            return Response(
                content='{"error": "Graph data not found", "callers": []}',
                status_code=404,
                media_type="application/json",
            )

        try:
            data = _load_graph()

            target_node = _find_node(data, chunk_id)
            if not target_node:
                return Response(
                    content='{"error": "Chunk not found", "callers": []}',
                    status_code=404,
                    media_type="application/json",
                )

            # Get the function/class name from the target
            function_name = target_node.get("function_name") or target_node.get(
                "class_name"
            )
            if not function_name:
                return Response(
                    content=orjson.dumps({"callers": [], "function_name": None}),
                    media_type="application/json",
                )

            callers = _find_callers(data, function_name, target_node.get("file_path", ""))

            return Response(
                content=orjson.dumps(
                    {
                        "callers": callers,
                        "function_name": function_name,
                        "count": len(callers),
                    }
                ),
                media_type="application/json",
                headers={"Cache-Control": "max-age=300"},  # Cache for 5 minutes
            )
        except Exception as e:
            console.print(f"[red]Error computing callers: {e}[/red]")
            return Response(
                content='{"error": "Failed to compute callers", "callers": []}',
                status_code=500,
                media_type="application/json",
            )

    @app.get("/api/chunks")
    async def get_file_chunks(file_id: str) -> Response:
        """Get code chunks for a specific file.

        Args:
            file_id: File node ID

        Returns:
            JSON response with chunks array
        """
        if not graph_path.exists():
            return Response(
                content='{"error": "Graph data not found", "chunks": []}',
                status_code=404,
                media_type="application/json",
            )

        try:
            data = _load_graph()

            # Find chunks associated with this file via their file_id field.
            chunks = [
                {
                    "id": node.get("id"),
                    "type": node.get("chunk_type", "code"),
                    "content": node.get("content", ""),
                    "start_line": node.get("start_line"),
                    "end_line": node.get("end_line"),
                }
                for node in data.get("nodes", [])
                if node.get("type") == "chunk" and node.get("file_id") == file_id
            ]

            return Response(
                content=orjson.dumps({"chunks": chunks}),
                media_type="application/json",
                headers={"Cache-Control": "no-cache"},
            )
        except Exception as e:
            console.print(f"[red]Error loading chunks: {e}[/red]")
            return Response(
                content='{"error": "Failed to load chunks", "chunks": []}',
                status_code=500,
                media_type="application/json",
            )

    @app.get("/api/graph-data")
    async def stream_graph_data() -> StreamingResponse:
        """Stream chunk-graph.json in 100KB chunks (legacy endpoint).

        Returns:
            StreamingResponse with chunked transfer encoding

        Performance:
            - Chunk Size: 100KB (optimal for localhost transfer)
            - Memory: O(1) constant buffer, not O(n) file size
            - Transfer: Progressive, allows incremental parsing
        """
        if not graph_path.exists():
            return Response(
                content='{"error": "Graph data not found"}',
                status_code=404,
                media_type="application/json",
            )

        async def generate_chunks() -> AsyncGenerator[bytes, None]:
            """Yield 100KB byte chunks of JSON data from the graph file."""
            try:
                # Read file in chunks to avoid loading entire file in memory
                chunk_size = 100 * 1024  # 100KB chunks
                with open(graph_path, "rb") as f:
                    while chunk := f.read(chunk_size):
                        yield chunk
                        # Small delay to prevent overwhelming the browser
                        await asyncio.sleep(0.01)
            except Exception as e:
                console.print(f"[red]Error streaming graph data: {e}[/red]")
                raise

        return StreamingResponse(
            generate_chunks(),
            media_type="application/json",
            headers={"Cache-Control": "no-cache", "X-Content-Type-Options": "nosniff"},
        )

    @app.get("/")
    async def serve_index() -> FileResponse:
        """Serve index.html with no-cache headers to prevent stale content."""
        return FileResponse(
            viz_dir / "index.html",
            headers={
                "Cache-Control": "no-cache, no-store, must-revalidate",
                "Pragma": "no-cache",
                "Expires": "0",
            },
        )

    # Mount static files AFTER API routes are defined
    # Using /static prefix to avoid conflicts with API routes
    app.mount("/static", StaticFiles(directory=str(viz_dir)), name="static")

    # Also serve files directly at root level for backward compatibility
    # BUT place this after explicit routes so /api/graph-data works
    @app.get("/{path:path}")
    async def serve_static(path: str) -> FileResponse:
        """Serve static files from visualization directory.

        Security: the requested path is resolved and must remain inside
        viz_dir; traversal attempts (e.g. "../../etc/passwd") fall through
        to the SPA index.html instead of leaking files outside viz_dir.
        """
        resolved = (viz_dir / path).resolve()
        if (
            resolved.is_file()
            and resolved.is_relative_to(viz_dir.resolve())
        ):
            return FileResponse(resolved)
        # Fallback to index.html for SPA routing (and for rejected paths)
        return FileResponse(
            viz_dir / "index.html",
            headers={
                "Cache-Control": "no-cache, no-store, must-revalidate",
                "Pragma": "no-cache",
                "Expires": "0",
            },
        )

    return app
543
+
544
+
545
def start_visualization_server(
    port: int, viz_dir: Path, auto_open: bool = True
) -> None:
    """Start HTTP server for visualization with streaming support.

    Args:
        port: Port number to use
        viz_dir: Directory containing visualization files
        auto_open: Whether to automatically open browser

    Raises:
        typer.Exit: If server fails to start
    """
    try:
        application = create_app(viz_dir)
        url = f"http://localhost:{port}"

        # Announce the server before blocking in uvicorn's run loop.
        banner = Panel.fit(
            f"[green]✓[/green] Visualization server running\n\n"
            f"URL: [cyan]{url}[/cyan]\n"
            f"Directory: [dim]{viz_dir}[/dim]\n\n"
            f"[dim]Press Ctrl+C to stop[/dim]",
            title=f"Server Started v{__version__}",
            border_style="green",
        )
        console.print()
        console.print(banner)

        if auto_open:
            webbrowser.open(url)

        # Blocks until the server is stopped (Ctrl+C).
        uvicorn.Server(
            uvicorn.Config(
                application,
                host="127.0.0.1",
                port=port,
                log_level="warning",  # Reduce noise
                access_log=False,
            )
        ).run()

    except KeyboardInterrupt:
        console.print("\n[yellow]Stopping server...[/yellow]")
    except OSError as e:
        if "Address already in use" in str(e):
            console.print(
                f"[red]✗ Port {port} is already in use. Try a different port with --port[/red]"
            )
        else:
            console.print(f"[red]✗ Server error: {e}[/red]")
        import typer

        raise typer.Exit(1)