mcp-code-indexer 1.2.4__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- mcp_code_indexer/__init__.py +1 -1
- mcp_code_indexer/database/database.py +118 -7
- mcp_code_indexer/git_hook_handler.py +542 -0
- mcp_code_indexer/logging_config.py +76 -8
- mcp_code_indexer/main.py +567 -111
- mcp_code_indexer/server/mcp_server.py +9 -0
- {mcp_code_indexer-1.2.4.dist-info → mcp_code_indexer-1.4.1.dist-info}/METADATA +160 -38
- {mcp_code_indexer-1.2.4.dist-info → mcp_code_indexer-1.4.1.dist-info}/RECORD +12 -11
- {mcp_code_indexer-1.2.4.dist-info → mcp_code_indexer-1.4.1.dist-info}/WHEEL +0 -0
- {mcp_code_indexer-1.2.4.dist-info → mcp_code_indexer-1.4.1.dist-info}/entry_points.txt +0 -0
- {mcp_code_indexer-1.2.4.dist-info → mcp_code_indexer-1.4.1.dist-info}/licenses/LICENSE +0 -0
- {mcp_code_indexer-1.2.4.dist-info → mcp_code_indexer-1.4.1.dist-info}/top_level.txt +0 -0
mcp_code_indexer/main.py
CHANGED
@@ -80,11 +80,31 @@ def parse_arguments() -> argparse.Namespace:
         help="Export descriptions for a project. Usage: --dumpdescriptions PROJECT_ID [BRANCH]"
     )
 
+    parser.add_argument(
+        "--githook",
+        action="store_true",
+        help="Git hook mode: auto-update descriptions based on git diff using OpenRouter API"
+    )
+
+    parser.add_argument(
+        "--cleanup",
+        action="store_true",
+        help="Remove empty projects (no descriptions and no project overview)"
+    )
+
+    parser.add_argument(
+        "--map",
+        type=str,
+        metavar="PROJECT_NAME_OR_ID",
+        help="Generate a markdown project map for the specified project (by name or ID)"
+    )
+
     return parser.parse_args()
 
 
 async def handle_getprojects(args: argparse.Namespace) -> None:
     """Handle --getprojects command."""
+    db_manager = None
     try:
         from .database.database import DatabaseManager
 
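A quick sanity check of what these three flags contribute to the parsed namespace. This is a minimal standalone sketch that mirrors only the hunk above; the real parser defines many more options, and the sample value is made up:

import argparse

parser = argparse.ArgumentParser(prog="mcp-code-indexer")
parser.add_argument("--githook", action="store_true")
parser.add_argument("--cleanup", action="store_true")
parser.add_argument("--map", type=str, metavar="PROJECT_NAME_OR_ID")

# store_true flags default to False; --map carries a string payload.
args = parser.parse_args(["--map", "my-project"])
print(args.githook, args.cleanup, args.map)  # False False my-project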
@@ -126,19 +146,43 @@ async def handle_getprojects(args: argparse.Namespace) -> None:
     except Exception as e:
         print(f"Error: {e}", file=sys.stderr)
         sys.exit(1)
+    finally:
+        # Clean up database connections
+        if db_manager:
+            await db_manager.close_pool()
 
 
 async def handle_runcommand(args: argparse.Namespace) -> None:
     """Handle --runcommand command."""
     from .server.mcp_server import MCPCodeIndexServer
+    from .logging_config import setup_command_logger
+
+    # Set up dedicated logging for runcommand
+    cache_dir = Path(args.cache_dir).expanduser()
+    logger = setup_command_logger("runcommand", cache_dir)
+
+    logger.info("Starting runcommand execution", extra={
+        "structured_data": {
+            "command": args.runcommand,
+            "args": {
+                "token_limit": args.token_limit,
+                "db_path": str(args.db_path),
+                "cache_dir": str(args.cache_dir)
+            }
+        }
+    })
 
     try:
         # Parse JSON (handle both single-line and multi-line)
+        logger.debug("Parsing JSON command")
         json_data = json.loads(args.runcommand)
+        logger.debug("JSON parsed successfully", extra={"structured_data": {"parsed_json": json_data}})
     except json.JSONDecodeError as e:
+        logger.warning("Initial JSON parse failed", extra={"structured_data": {"error": str(e)}})
         print(f"Initial JSON parse failed: {e}", file=sys.stderr)
 
         # Try to repair the JSON
+        logger.debug("Attempting JSON repair")
         try:
             import re
             repaired = args.runcommand
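For context, --runcommand accepts either a full JSON-RPC envelope or a bare argument object; the dispatch logic added further down in this file infers the tool in the second case. A sketch of both accepted shapes, with made-up project values:

import json

# Full MCP form: the tool name is explicit.
rpc_payload = json.dumps({
    "method": "tools/call",
    "params": {
        "name": "check_codebase_size",
        "arguments": {"projectName": "demo", "folderPath": "/tmp/demo", "branch": "main"},
    },
})

# Bare-argument form: handle_runcommand requires projectName and folderPath,
# then infers the tool from the remaining keys (filePath + description ->
# update_file_description; branch -> check_codebase_size).
bare_payload = json.dumps({"projectName": "demo", "folderPath": "/tmp/demo", "branch": "main"})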
@@ -156,10 +200,22 @@ async def handle_runcommand(args: argparse.Namespace) -> None:
             repaired = re.sub(r',(\s*[}\]])', r'\1', repaired)
 
             json_data = json.loads(repaired)
+            logger.info("JSON repaired successfully", extra={
+                "structured_data": {
+                    "original": args.runcommand,
+                    "repaired": repaired
+                }
+            })
             print(f"JSON repaired successfully", file=sys.stderr)
             print(f"Original: {args.runcommand}", file=sys.stderr)
             print(f"Repaired: {repaired}", file=sys.stderr)
         except json.JSONDecodeError as repair_error:
+            logger.error("JSON repair failed", extra={
+                "structured_data": {
+                    "repair_error": str(repair_error),
+                    "original_json": args.runcommand
+                }
+            })
             print(f"JSON repair also failed: {repair_error}", file=sys.stderr)
             print(f"Original JSON: {args.runcommand}", file=sys.stderr)
             sys.exit(1)
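The repair shown in this hunk is deliberately narrow: the regex only strips trailing commas before a closing brace or bracket, a common artifact of model-generated JSON. A standalone illustration of the same substitution, on a made-up payload:

import json
import re

broken = '{"name": "search_descriptions", "args": {"query": "auth",},}'

# Drop any comma that directly precedes } or ], exactly the substitution
# applied to `repaired` in the hunk above.
fixed = re.sub(r',(\s*[}\]])', r'\1', broken)

print(json.loads(fixed))  # {'name': 'search_descriptions', 'args': {'query': 'auth'}}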
@@ -168,93 +224,167 @@ async def handle_runcommand(args: argparse.Namespace) -> None:
     db_path = Path(args.db_path).expanduser()
     cache_dir = Path(args.cache_dir).expanduser()
 
+    logger.info("Initializing MCP server", extra={
+        "structured_data": {
+            "db_path": str(db_path),
+            "cache_dir": str(cache_dir),
+            "token_limit": args.token_limit
+        }
+    })
+
     server = MCPCodeIndexServer(
         token_limit=args.token_limit,
         db_path=db_path,
         cache_dir=cache_dir
     )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    try:
+        logger.debug("Initializing server database connection")
+        await server.initialize()
+        logger.debug("Server initialized successfully")
+
+        # Extract the tool call information from the JSON
+        if "method" in json_data and json_data["method"] == "tools/call":
+            tool_name = json_data["params"]["name"]
+            tool_arguments = json_data["params"]["arguments"]
+            logger.info("JSON-RPC format detected", extra={
+                "structured_data": {
+                    "tool_name": tool_name,
+                    "arguments_keys": list(tool_arguments.keys())
+                }
+            })
+        elif "projectName" in json_data and "folderPath" in json_data:
+            # Auto-detect: user provided just arguments, try to infer the tool
+            if "filePath" in json_data and "description" in json_data:
+                tool_name = "update_file_description"
+                tool_arguments = json_data
+                logger.info("Auto-detected tool: update_file_description")
+                print("Auto-detected tool: update_file_description", file=sys.stderr)
+            elif "branch" in json_data:
+                tool_name = "check_codebase_size"
+                tool_arguments = json_data
+                logger.info("Auto-detected tool: check_codebase_size")
+                print("Auto-detected tool: check_codebase_size", file=sys.stderr)
+            else:
+                logger.error("Could not auto-detect tool from arguments", extra={
+                    "structured_data": {"provided_keys": list(json_data.keys())}
+                })
+                print("Error: Could not auto-detect tool from arguments. Please use full MCP format:", file=sys.stderr)
+                print('{"method": "tools/call", "params": {"name": "TOOL_NAME", "arguments": {...}}}', file=sys.stderr)
+                sys.exit(1)
         else:
-
-
+            logger.error("Invalid JSON format", extra={
+                "structured_data": {"provided_keys": list(json_data.keys())}
+            })
+            print("Error: JSON must contain a valid MCP tool call", file=sys.stderr)
             sys.exit(1)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Map tool names to handler methods - use the same mapping as MCP server
+        tool_handlers = {
+            "get_file_description": server._handle_get_file_description,
+            "update_file_description": server._handle_update_file_description,
+            "check_codebase_size": server._handle_check_codebase_size,
+            "find_missing_descriptions": server._handle_find_missing_descriptions,
+            "search_descriptions": server._handle_search_descriptions,
+            "get_all_descriptions": server._handle_get_codebase_overview,
+            "get_codebase_overview": server._handle_get_condensed_overview,
+            "update_codebase_overview": server._handle_update_codebase_overview,
+            "get_word_frequency": server._handle_get_word_frequency,
+            "merge_branch_descriptions": server._handle_merge_branch_descriptions,
+        }
+
+        if tool_name not in tool_handlers:
+            logger.error("Unknown tool requested", extra={
+                "structured_data": {
+                    "tool_name": tool_name,
+                    "available_tools": list(tool_handlers.keys())
                 }
-
-        return
-
-    # Clean HTML entities from arguments before execution
-    def clean_html_entities(text: str) -> str:
-        if not text:
-            return text
-        import html
-        return html.unescape(text)
-
-    def clean_arguments(arguments: dict) -> dict:
-        cleaned = {}
-        for key, value in arguments.items():
-            if isinstance(value, str):
-                cleaned[key] = clean_html_entities(value)
-            elif isinstance(value, list):
-                cleaned[key] = [
-                    clean_html_entities(item) if isinstance(item, str) else item
-                    for item in value
-                ]
-            elif isinstance(value, dict):
-                cleaned[key] = clean_arguments(value)
-            else:
-                cleaned[key] = value
-        return cleaned
-
-    cleaned_tool_arguments = clean_arguments(tool_arguments)
-
-    # Execute the tool handler directly
-    result = await tool_handlers[tool_name](cleaned_tool_arguments)
-    print(json.dumps(result, indent=2, default=str))
-    except Exception as e:
+            })
         error_result = {
             "error": {
-                    "code": -
-                    "message":
+                "code": -32601,
+                "message": f"Unknown tool: {tool_name}"
             }
         }
         print(json.dumps(error_result, indent=2))
-
-
-
+            return
+
+        # Clean HTML entities from arguments before execution
+        def clean_html_entities(text: str) -> str:
+            if not text:
+                return text
+            import html
+            return html.unescape(text)
+
+        def clean_arguments(arguments: dict) -> dict:
+            cleaned = {}
+            for key, value in arguments.items():
+                if isinstance(value, str):
+                    cleaned[key] = clean_html_entities(value)
+                elif isinstance(value, list):
+                    cleaned[key] = [
+                        clean_html_entities(item) if isinstance(item, str) else item
+                        for item in value
+                    ]
+                elif isinstance(value, dict):
+                    cleaned[key] = clean_arguments(value)
+                else:
+                    cleaned[key] = value
+            return cleaned
+
+        cleaned_tool_arguments = clean_arguments(tool_arguments)
+
+        logger.info("Executing tool", extra={
+            "structured_data": {
+                "tool_name": tool_name,
+                "arguments": {k: v for k, v in cleaned_tool_arguments.items() if k not in ['description']}  # Exclude long descriptions
+            }
+        })
+
+        # Execute the tool handler directly
+        import time
+        start_time = time.time()
+        result = await tool_handlers[tool_name](cleaned_tool_arguments)
+        execution_time = time.time() - start_time
+
+        logger.info("Tool execution completed", extra={
+            "structured_data": {
+                "tool_name": tool_name,
+                "execution_time_seconds": execution_time,
+                "result_type": type(result).__name__,
+                "result_size": len(json.dumps(result, default=str)) if result else 0
+            }
+        })
+
+        print(json.dumps(result, indent=2, default=str))
+
+    except Exception as e:
+        logger.error("Tool execution failed", extra={
+            "structured_data": {
+                "tool_name": tool_name if 'tool_name' in locals() else 'unknown',
+                "error_type": type(e).__name__,
+                "error_message": str(e)
+            }
+        })
+        error_result = {
+            "error": {
+                "code": -32603,
+                "message": str(e)
+            }
+        }
+        print(json.dumps(error_result, indent=2))
+    finally:
+        # Clean up database connections
+        if hasattr(server, 'db_manager') and server.db_manager:
+            logger.debug("Closing database connections")
+            await server.db_manager.close_pool()
+            logger.debug("Database connections closed")
+        logger.info("=== RUNCOMMAND SESSION ENDED ===")
+
+        # Close logger handlers to flush any remaining logs
+        for handler in logger.handlers[:]:
+            handler.close()
+            logger.removeHandler(handler)
 
 
 async def handle_dumpdescriptions(args: argparse.Namespace) -> None:
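The clean_arguments helper, moved inside the try block in this hunk, guards against string values that arrive HTML-escaped, for example after passing through a shell wrapper or web form. A flat sketch of the same idea on a made-up argument dict, without the recursive list/dict handling:

import html

raw = {"filePath": "src/a.py", "description": "Parses &lt;config&gt; &amp; validates paths"}

# html.unescape reverses entity encoding; only string values are touched.
cleaned = {k: html.unescape(v) if isinstance(v, str) else v for k, v in raw.items()}
print(cleaned["description"])  # Parses <config> & validates paths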
@@ -269,54 +399,372 @@ async def handle_dumpdescriptions(args: argparse.Namespace) -> None:
     project_id = args.dumpdescriptions[0]
     branch = args.dumpdescriptions[1] if len(args.dumpdescriptions) > 1 else None
 
-
-
-
-
+    db_manager = None
+    try:
+        # Initialize database and token counter
+        db_path = Path(args.db_path).expanduser()
+        db_manager = DatabaseManager(db_path)
+        await db_manager.initialize()
+
+        token_counter = TokenCounter(args.token_limit)
+
+        # Get file descriptions
+        if branch:
+            file_descriptions = await db_manager.get_all_file_descriptions(
+                project_id=project_id,
+                branch=branch
+            )
+            print(f"File descriptions for project {project_id}, branch {branch}:")
+        else:
+            file_descriptions = await db_manager.get_all_file_descriptions(
+                project_id=project_id
+            )
+            print(f"File descriptions for project {project_id} (all branches):")
+
+        print("=" * 80)
+
+        if not file_descriptions:
+            print("No descriptions found.")
+            total_tokens = 0
+        else:
+            total_tokens = 0
+            for desc in file_descriptions:
+                print(f"File: {desc.file_path}")
+                if branch is None:
+                    print(f"Branch: {desc.branch}")
+                print(f"Description: {desc.description}")
+                print("-" * 40)
+
+                # Count tokens for this description
+                desc_tokens = token_counter.count_file_description_tokens(desc)
+                total_tokens += desc_tokens
+
+        print("=" * 80)
+        print(f"Total descriptions: {len(file_descriptions)}")
+        print(f"Total tokens: {total_tokens}")
+
+    finally:
+        # Clean up database connections
+        if db_manager:
+            await db_manager.close_pool()
+
+
+
+async def handle_githook(args: argparse.Namespace) -> None:
+    """Handle --githook command."""
+    from .logging_config import setup_command_logger
 
-
+    # Set up dedicated logging for githook
+    cache_dir = Path(args.cache_dir).expanduser()
+    logger = setup_command_logger("githook", cache_dir)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        from .database.database import DatabaseManager
+        from .git_hook_handler import GitHookHandler
+
+        logger.info("Starting git hook execution", extra={
+            "structured_data": {
+                "args": {
+                    "db_path": str(args.db_path),
+                    "cache_dir": str(args.cache_dir),
+                    "token_limit": args.token_limit
+                }
+            }
+        })
+
+        # Initialize database
+        db_path = Path(args.db_path).expanduser()
+        cache_dir = Path(args.cache_dir).expanduser()
+
+        logger.info("Setting up directories and database", extra={
+            "structured_data": {
+                "db_path": str(db_path),
+                "cache_dir": str(cache_dir)
+            }
+        })
+
+        # Create directories if they don't exist
+        db_path.parent.mkdir(parents=True, exist_ok=True)
+        cache_dir.mkdir(parents=True, exist_ok=True)
+
+        db_manager = DatabaseManager(db_path)
+        await db_manager.initialize()
+        logger.debug("Database initialized successfully")
+
+        # Initialize git hook handler
+        git_handler = GitHookHandler(db_manager, cache_dir)
+        logger.debug("Git hook handler initialized")
+
+        # Run git hook analysis
+        logger.info("Starting git hook analysis")
+        await git_handler.run_githook_mode()
+        logger.info("Git hook analysis completed successfully")
+
+    except Exception as e:
+        logger.error("Git hook execution failed", extra={
+            "structured_data": {
+                "error_type": type(e).__name__,
+                "error_message": str(e)
+            }
+        })
+        print(f"Git hook error: {e}", file=sys.stderr)
+        sys.exit(1)
+    finally:
+        logger.info("=== GITHOOK SESSION ENDED ===")
+
+        # Close logger handlers to flush any remaining logs
+        for handler in logger.handlers[:]:
+            handler.close()
+            logger.removeHandler(handler)
+
+
+async def handle_cleanup(args: argparse.Namespace) -> None:
+    """Handle --cleanup command."""
+    from .logging_config import setup_command_logger
 
-
-
-
+    # Set up dedicated logging for cleanup
+    cache_dir = Path(args.cache_dir).expanduser()
+    logger = setup_command_logger("cleanup", cache_dir)
+
+    db_manager = None
+    try:
+        from .database.database import DatabaseManager
+
+        logger.info("Starting database cleanup", extra={
+            "structured_data": {
+                "args": {
+                    "db_path": str(args.db_path),
+                    "cache_dir": str(args.cache_dir)
+                }
+            }
+        })
+
+        # Initialize database
+        db_path = Path(args.db_path).expanduser()
+        db_manager = DatabaseManager(db_path)
+        await db_manager.initialize()
+        logger.debug("Database initialized successfully")
+
+        # Perform cleanup
+        logger.info("Removing empty projects")
+        removed_count = await db_manager.cleanup_empty_projects()
+
+        if removed_count > 0:
+            print(f"Removed {removed_count} empty project(s)")
+            logger.info("Cleanup completed", extra={
+                "structured_data": {"removed_projects": removed_count}
+            })
+        else:
+            print("No empty projects found")
+            logger.info("No empty projects found")
+
+    except Exception as e:
+        logger.error("Cleanup failed", extra={
+            "structured_data": {
+                "error_type": type(e).__name__,
+                "error_message": str(e)
+            }
+        })
+        print(f"Cleanup error: {e}", file=sys.stderr)
+        sys.exit(1)
+    finally:
+        # Clean up database connections
+        if db_manager:
+            logger.debug("Closing database connections")
+            await db_manager.close_pool()
+            logger.debug("Database connections closed")
+        logger.info("=== CLEANUP SESSION ENDED ===")
+
+        # Close logger handlers to flush any remaining logs
+        for handler in logger.handlers[:]:
+            handler.close()
+            logger.removeHandler(handler)
 
 
+async def handle_map(args: argparse.Namespace) -> None:
+    """Handle --map command."""
+    from .logging_config import setup_command_logger
+    import re
+    from collections import defaultdict
+    from pathlib import Path as PathLib
+
+    # Set up dedicated logging for map
+    cache_dir = Path(args.cache_dir).expanduser()
+    logger = setup_command_logger("map", cache_dir)
+
+    db_manager = None
+    try:
+        from .database.database import DatabaseManager
+
+        logger.info("Starting project map generation", extra={
+            "structured_data": {
+                "project_identifier": args.map,
+                "args": {
+                    "db_path": str(args.db_path),
+                    "cache_dir": str(args.cache_dir)
+                }
+            }
+        })
+
+        # Initialize database
+        db_path = Path(args.db_path).expanduser()
+        db_manager = DatabaseManager(db_path)
+        await db_manager.initialize()
+        logger.debug("Database initialized successfully")
+
+        # Get project data
+        logger.info("Retrieving project data")
+        project_data = await db_manager.get_project_map_data(args.map)
+
+        if not project_data:
+            print(f"Error: Project '{args.map}' not found", file=sys.stderr)
+            logger.error("Project not found", extra={"structured_data": {"identifier": args.map}})
+            sys.exit(1)
+
+        project = project_data['project']
+        branch = project_data['branch']
+        overview = project_data['overview']
+        files = project_data['files']
+
+        logger.info("Generating markdown map", extra={
+            "structured_data": {
+                "project_name": project.name,
+                "branch": branch,
+                "file_count": len(files),
+                "has_overview": overview is not None
+            }
+        })
+
+        # Generate markdown
+        markdown_content = generate_project_markdown(project, branch, overview, files, logger)
+
+        # Output the markdown
+        print(markdown_content)
+
+        logger.info("Project map generated successfully")
+
+    except Exception as e:
+        logger.error("Map generation failed", extra={
+            "structured_data": {
+                "error_type": type(e).__name__,
+                "error_message": str(e)
+            }
+        })
+        print(f"Map generation error: {e}", file=sys.stderr)
+        sys.exit(1)
+    finally:
+        # Clean up database connections
+        if db_manager:
+            logger.debug("Closing database connections")
+            await db_manager.close_pool()
+            logger.debug("Database connections closed")
+        logger.info("=== MAP SESSION ENDED ===")
+
+        # Close logger handlers to flush any remaining logs
+        for handler in logger.handlers[:]:
+            handler.close()
+            logger.removeHandler(handler)
+
+
+def generate_project_markdown(project, branch, overview, files, logger):
+    """Generate the markdown content for the project map."""
+    import re
+    from collections import defaultdict
+    from pathlib import Path as PathLib
+
+    markdown_lines = []
+
+    # Project header
+    markdown_lines.append(f"# {project.name}")
+    markdown_lines.append("")
+
+    # Project metadata
+    if project.remote_origin:
+        markdown_lines.append(f"**Repository:** {project.remote_origin}")
+    if project.upstream_origin:
+        markdown_lines.append(f"**Upstream:** {project.upstream_origin}")
+    markdown_lines.append(f"**Branch:** {branch}")
+    markdown_lines.append("")
+
+    # Project overview (with header demotion if needed)
+    if overview and overview.overview:
+        markdown_lines.append("## Project Overview")
+        markdown_lines.append("")
+
+        # Check if overview contains H1 headers and demote if needed
+        overview_content = overview.overview
+        if re.search(r'^#\s', overview_content, re.MULTILINE):
+            logger.debug("H1 headers found in overview, demoting all headers")
+            # Demote all headers by one level
+            overview_content = re.sub(r'^(#{1,6})', r'#\1', overview_content, flags=re.MULTILINE)
+
+        markdown_lines.append(overview_content)
+        markdown_lines.append("")
+
+    # File structure
+    if files:
+        markdown_lines.append("## Codebase Structure")
+        markdown_lines.append("")
+
+        # Organize files by directory
+        directories = defaultdict(list)
+        for file_desc in files:
+            file_path = PathLib(file_desc.file_path)
+            if len(file_path.parts) == 1:
+                # Root level file
+                directories["(root)"].append(file_desc)
+            else:
+                # File in subdirectory
+                directory = str(file_path.parent)
+                directories[directory].append(file_desc)
+
+        # Sort directories (root first, then alphabetically)
+        sorted_dirs = sorted(directories.keys(), key=lambda x: ("" if x == "(root)" else x))
+
+        for directory in sorted_dirs:
+            dir_files = directories[directory]
+
+            # Directory header
+            if directory == "(root)":
+                markdown_lines.append("### Root Directory")
+            else:
+                # Create nested headers based on directory depth
+                depth = len(PathLib(directory).parts)
+                header_level = "#" * min(depth + 2, 6)  # Cap at H6
+                markdown_lines.append(f"{header_level} {directory}/")
+
+            markdown_lines.append("")
+
+            # Files table
+            markdown_lines.append("| File | Description |")
+            markdown_lines.append("|------|-------------|")
+
+            for file_desc in sorted(dir_files, key=lambda x: x.file_path):
+                file_name = PathLib(file_desc.file_path).name
+                # Escape pipe characters in descriptions for markdown table
+                description = file_desc.description.replace("|", "\\|").replace("\n", " ").strip()
+                markdown_lines.append(f"| `{file_name}` | {description} |")
+
+            markdown_lines.append("")
+
+    # Footer with generation info
+    from datetime import datetime
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    markdown_lines.append("---")
+    markdown_lines.append(f"*Generated by MCP Code Indexer on {timestamp}*")
+
+    return "\n".join(markdown_lines)
+
 
 async def main() -> None:
     """Main entry point for the MCP server."""
     args = parse_arguments()
 
+    # Handle git hook command
+    if args.githook:
+        await handle_githook(args)
+        return
+
     # Handle utility commands
     if args.getprojects:
         await handle_getprojects(args)
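One detail worth calling out in generate_project_markdown above is the header demotion: the map already claims the H1 for the project name, so any H1 inside the stored overview would break the document outline, and every header gains one '#'. The regex pair in isolation:

import re

overview = "# Overview\nIntro text.\n## Details\nMore text."

# Matches 1 to 6 leading '#' characters at the start of each line and
# prefixes one more, the same substitution used in generate_project_markdown.
if re.search(r'^#\s', overview, re.MULTILINE):
    overview = re.sub(r'^(#{1,6})', r'#\1', overview, flags=re.MULTILINE)

print(overview)  # "## Overview\nIntro text.\n### Details\nMore text."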
@@ -330,6 +778,14 @@ async def main() -> None:
         await handle_dumpdescriptions(args)
         return
 
+    if args.cleanup:
+        await handle_cleanup(args)
+        return
+
+    if args.map:
+        await handle_map(args)
+        return
+
     # Setup structured logging
     log_file = Path(args.cache_dir).expanduser() / "server.log" if args.cache_dir else None
     logger = setup_logging(
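Finally, the directory grouping that drives the per-directory file tables in the map: paths are bucketed by parent directory, with top-level files collected under a "(root)" pseudo-directory that sorts first. A self-contained sketch with made-up paths:

from collections import defaultdict
from pathlib import Path

paths = ["README.md", "src/main.py", "src/db/database.py"]

# Bucket by parent directory; "(root)" holds files with no parent directory.
directories = defaultdict(list)
for p in paths:
    key = "(root)" if len(Path(p).parts) == 1 else str(Path(p).parent)
    directories[key].append(p)

# "(root)" maps to "" so it sorts ahead of every real directory name.
for d in sorted(directories, key=lambda x: "" if x == "(root)" else x):
    print(d, directories[d])
# (root) ['README.md']
# src ['src/main.py']
# src/db ['src/db/database.py']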