mcp-code-indexer 3.1.3.tar.gz → 3.1.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_code_indexer-3.1.3/src/mcp_code_indexer.egg-info → mcp_code_indexer-3.1.5}/PKG-INFO +3 -3
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/README.md +2 -2
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/pyproject.toml +1 -1
- mcp_code_indexer-3.1.5/setup.cfg +17 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/setup.py +4 -2
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/__init__.py +8 -6
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/ask_handler.py +105 -75
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/claude_api_handler.py +125 -82
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/cleanup_manager.py +107 -81
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/database/connection_health.py +212 -161
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/database/database.py +529 -415
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/database/exceptions.py +167 -118
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/database/models.py +54 -19
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/database/retry_executor.py +139 -103
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/deepask_handler.py +178 -140
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/error_handler.py +88 -76
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/file_scanner.py +163 -141
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/git_hook_handler.py +352 -261
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/logging_config.py +76 -94
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/main.py +406 -320
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/middleware/error_middleware.py +106 -71
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/query_preprocessor.py +40 -40
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/server/mcp_server.py +785 -469
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/token_counter.py +54 -47
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5/src/mcp_code_indexer.egg-info}/PKG-INFO +3 -3
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer.egg-info/SOURCES.txt +1 -0
- mcp_code_indexer-3.1.3/setup.cfg +0 -4
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/LICENSE +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/MANIFEST.in +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/docs/api-reference.md +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/docs/architecture.md +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/docs/configuration.md +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/docs/contributing.md +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/docs/database-resilience.md +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/docs/git-hook-setup.md +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/docs/monitoring.md +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/docs/performance-tuning.md +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/requirements.txt +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/__main__.py +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/data/stop_words_english.txt +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/database/__init__.py +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/middleware/__init__.py +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/migrations/001_initial.sql +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/migrations/002_performance_indexes.sql +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/migrations/003_project_overviews.sql +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/migrations/004_remove_branch_dependency.sql +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/migrations/005_remove_git_remotes.sql +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/server/__init__.py +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/tiktoken_cache/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer/tools/__init__.py +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer.egg-info/dependency_links.txt +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer.egg-info/entry_points.txt +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer.egg-info/requires.txt +0 -0
- {mcp_code_indexer-3.1.3 → mcp_code_indexer-3.1.5}/src/mcp_code_indexer.egg-info/top_level.txt +0 -0
PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-code-indexer
-Version: 3.1.3
+Version: 3.1.5
 Summary: MCP server that tracks file descriptions across codebases, enabling AI agents to efficiently navigate and understand code through searchable summaries and token-aware overviews.
 Author: MCP Code Indexer Contributors
 Maintainer: MCP Code Indexer Contributors
@@ -59,8 +59,8 @@ Dynamic: requires-python
 
 # MCP Code Indexer 🚀
 
-[](https://badge.fury.io/py/mcp-code-indexer)
+[](https://pypi.org/project/mcp-code-indexer/)
 [](https://opensource.org/licenses/MIT)
 
 A production-ready **Model Context Protocol (MCP) server** that revolutionizes how AI agents navigate and understand codebases. Built for high-concurrency environments with advanced database resilience, the server provides instant access to intelligent descriptions, semantic search, and context-aware recommendations while maintaining 800+ writes/sec throughput.
```
README.md

```diff
@@ -1,7 +1,7 @@
 # MCP Code Indexer 🚀
 
-[](https://badge.fury.io/py/mcp-code-indexer)
+[](https://pypi.org/project/mcp-code-indexer/)
 [](https://opensource.org/licenses/MIT)
 
 A production-ready **Model Context Protocol (MCP) server** that revolutionizes how AI agents navigate and understand codebases. Built for high-concurrency environments with advanced database resilience, the server provides instant access to intelligent descriptions, semantic search, and context-aware recommendations while maintaining 800+ writes/sec throughput.
```
pyproject.toml

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "mcp-code-indexer"
-version = "3.1.3"
+version = "3.1.5"
 description = "MCP server that tracks file descriptions across codebases, enabling AI agents to efficiently navigate and understand code through searchable summaries and token-aware overviews."
 readme = "README.md"
 license = {text = "MIT"}
```
setup.py

```diff
@@ -8,6 +8,7 @@ import sys
 this_directory = Path(__file__).parent
 long_description = (this_directory / "README.md").read_text()
 
+
 # Read version from pyproject.toml
 def get_version():
     try:
@@ -15,7 +16,7 @@ def get_version():
             import tomllib
         else:
             import tomli as tomllib
-
+
         with open(this_directory / "pyproject.toml", "rb") as f:
             data = tomllib.load(f)
             return data["project"]["version"]
@@ -23,6 +24,7 @@ def get_version():
         # Fail hard if version reading fails
         raise RuntimeError(f"Could not read version from pyproject.toml: {e}")
 
+
 setup(
     name="mcp-code-indexer",
     version=get_version(),
@@ -34,7 +36,7 @@ setup(
     python_requires=">=3.9",
     install_requires=[
         "tiktoken>=0.9.0",
-        "mcp>=1.9.0",
+        "mcp>=1.9.0",
         "gitignore_parser==0.1.11",
         "pydantic>=2.8.0",
         "aiofiles==23.2.0",
```
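The setup.py hunks above are formatting-only (blank-line and whitespace normalization); the behavior they touch is the build-time version lookup, which reads the version from pyproject.toml and fails hard rather than silently defaulting. For reference, a minimal standalone sketch of that lookup, reconstructed from the hunks above rather than copied verbatim from the packaged file:

```python
# Sketch of setup.py's version lookup (reconstructed from the diff above; not verbatim).
import sys
from pathlib import Path

this_directory = Path(__file__).parent


def get_version() -> str:
    try:
        if sys.version_info >= (3, 11):
            import tomllib  # stdlib TOML parser on Python 3.11+
        else:
            import tomli as tomllib  # backport for older interpreters

        with open(this_directory / "pyproject.toml", "rb") as f:
            data = tomllib.load(f)
            return data["project"]["version"]
    except Exception as e:
        # Fail hard if version reading fails, mirroring the hunk above.
        raise RuntimeError(f"Could not read version from pyproject.toml: {e}")
```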
src/mcp_code_indexer/__init__.py

```diff
@@ -6,6 +6,9 @@ intelligent codebase navigation through searchable file descriptions,
 token-aware overviews, and advanced merge capabilities.
 """
 
+from .server.mcp_server import MCPCodeIndexServer
+
+
 def _get_version() -> str:
     """Get version from package metadata or pyproject.toml."""
     # First try to get version from installed package metadata
@@ -15,7 +18,7 @@ def _get_version() -> str:
     except ImportError:
         # Python < 3.8 fallback
         from importlib_metadata import version
-
+
     # Try different package name variations
     for pkg_name in ["mcp-code-indexer", "mcp_code_indexer"]:
         try:
@@ -24,12 +27,12 @@ def _get_version() -> str:
             continue
         except Exception:
             pass
-
+
     # Fallback to reading from pyproject.toml (for development)
     try:
         from pathlib import Path
         import sys
-
+
         if sys.version_info >= (3, 11):
             import tomllib
         else:
@@ -37,7 +40,7 @@ def _get_version() -> str:
                 import tomli as tomllib
             except ImportError:
                 return "dev"
-
+
         pyproject_path = Path(__file__).parent.parent.parent / "pyproject.toml"
         with open(pyproject_path, "rb") as f:
             data = tomllib.load(f)
@@ -45,11 +48,10 @@ def _get_version() -> str:
     except Exception:
         return "dev"
 
+
 __version__ = _get_version()
 __author__ = "MCP Code Indexer Contributors"
 __email__ = ""
 __license__ = "MIT"
 
-from .server.mcp_server import MCPCodeIndexServer
-
 __all__ = ["MCPCodeIndexServer", "__version__"]
```
src/mcp_code_indexer/ask_handler.py

```diff
@@ -16,23 +16,29 @@ from .database.database import DatabaseManager
 
 class AskError(ClaudeAPIError):
     """Exception specific to Ask operations."""
+
     pass
 
 
 class AskHandler(ClaudeAPIHandler):
     """
     Handler for simple Q&A operations using Claude API.
-
+
     Provides functionality to:
     - Combine project overview with user questions
     - Send combined prompt to Claude for analysis
     - Return formatted responses for CLI consumption
     """
-
-    def __init__(self, db_manager: DatabaseManager, cache_dir: Path, logger: Optional[logging.Logger] = None):
+
+    def __init__(
+        self,
+        db_manager: DatabaseManager,
+        cache_dir: Path,
+        logger: Optional[logging.Logger] = None,
+    ):
         """
         Initialize AskHandler.
-
+
         Args:
             db_manager: Database manager instance
             cache_dir: Cache directory for temporary files
@@ -40,64 +46,69 @@ class AskHandler(ClaudeAPIHandler):
         """
         super().__init__(db_manager, cache_dir, logger)
         self.logger = logger if logger is not None else logging.getLogger(__name__)
-
+
     async def ask_question(
-        self,
-        project_info: Dict[str, str],
-        question: str,
-        include_overview: bool = True
+        self, project_info: Dict[str, str], question: str, include_overview: bool = True
     ) -> Dict[str, Any]:
         """
         Ask a question about the project using Claude API.
-
+
         Args:
             project_info: Project information dict with projectName, folderPath, etc.
             question: User's question about the project
             include_overview: Whether to include project overview in context
-
+
         Returns:
             Dict containing response and metadata
         """
         try:
-            self.logger.info(f"Processing ask question for project: {project_info['projectName']}")
+            self.logger.info(
+                f"Processing ask question for project: {project_info['projectName']}"
+            )
             self.logger.info(f"Question: {question}")
-
+
             # Validate inputs
             if not question or not question.strip():
                 raise AskError("Question cannot be empty")
-
+
             if not project_info.get("projectName"):
                 raise AskError("Project name is required")
-
+
             # Get project overview if requested
             overview = ""
             if include_overview:
                 overview = await self.get_project_overview(project_info)
                 if not overview:
-                    self.logger.warning(f"No project overview found for {project_info['projectName']}")
+                    self.logger.warning(
+                        f"No project overview found for {project_info['projectName']}"
+                    )
                     overview = "No project overview available."
-
+
             # Build the prompt
             prompt = self._build_ask_prompt(project_info, question, overview)
-
+
             # Validate token limits
             if not self.validate_token_limit(prompt):
                 raise AskError(
-                    f"Question and project context exceed token limit of {self.config.token_limit}. "
-                    "Please ask a more specific question or use --deepask for enhanced search."
+                    f"Question and project context exceed token limit of "
+                    f"{self.config.token_limit}. Please ask a more specific "
+                    "question or use --deepask for enhanced search."
                 )
-
+
             # Get token counts for reporting
             overview_tokens = self.get_token_count(overview) if overview else 0
             question_tokens = self.get_token_count(question)
             total_prompt_tokens = self.get_token_count(prompt)
-
-            self.logger.info(f"Token usage: overview={overview_tokens}, "
-                             f"question={question_tokens}, total={total_prompt_tokens}")
+
+            self.logger.info(
+                f"Token usage: overview={overview_tokens}, "
+                f"question={question_tokens}, total={total_prompt_tokens}"
+            )
+
             # Call Claude API
             system_prompt = self._get_system_prompt()
             response = await self._call_claude_api(prompt, system_prompt)
-
+
             # Format response
             result = {
                 "answer": response.content,
@@ -109,16 +120,24 @@ class AskHandler(ClaudeAPIHandler):
                     "overview_tokens": overview_tokens,
                     "question_tokens": question_tokens,
                     "total_prompt_tokens": total_prompt_tokens,
-                    "response_tokens": response.usage.get("completion_tokens") if response.usage else None,
-                    "total_tokens": response.usage.get("total_tokens") if response.usage else None,
+                    "response_tokens": (
+                        response.usage.get("completion_tokens")
+                        if response.usage
+                        else None
+                    ),
+                    "total_tokens": (
+                        response.usage.get("total_tokens")
+                        if response.usage
+                        else None
+                    ),
                 },
-                "include_overview": include_overview
-            }
+                "include_overview": include_overview,
+            },
             }
-
-            self.logger.info(f"Ask question completed successfully")
+
+            self.logger.info("Ask question completed successfully")
             return result
-
+
         except Exception as e:
             error_msg = f"Failed to process ask question: {str(e)}"
             self.logger.error(error_msg)
@@ -126,76 +145,85 @@ class AskHandler(ClaudeAPIHandler):
                 raise
             else:
                 raise AskError(error_msg)
-
-    def _build_ask_prompt(self, project_info: Dict[str, str], question: str, overview: str) -> str:
+
+    def _build_ask_prompt(
+        self, project_info: Dict[str, str], question: str, overview: str
+    ) -> str:
         """
         Build the prompt for Claude API.
-
+
         Args:
             project_info: Project information
             question: User's question
             overview: Project overview (may be empty)
-
+
         Returns:
             Formatted prompt string
         """
         project_name = project_info["projectName"]
-
-        if overview.strip():
-            prompt = f"""Please answer the following question about the codebase "{project_name}".
-
-PROJECT OVERVIEW:
-{overview}
 
-QUESTION:
-{question}
-
-Please provide a clear, detailed answer based on the project overview above. If the overview doesn't contain enough information to fully answer the question, please say so and suggest what additional information might be needed."""
+        if overview.strip():
+            prompt = (
+                f"Please answer the following question about the codebase "
+                f'"{project_name}".\n\n'
+                f"PROJECT OVERVIEW:\n{overview}\n\n"
+                f"QUESTION:\n{question}\n\n"
+                f"Please provide a clear, detailed answer based on the project "
+                f"overview above. If the overview doesn't contain enough "
+                f"information to fully answer the question, please say so and "
+                f"suggest what additional information might be needed."
+            )
         else:
-            prompt = f"""Please answer the following question about the codebase "{project_name}".
-
-Note: No project overview is available for this codebase.
-
-QUESTION:
-{question}
+            prompt = (
+                f"Please answer the following question about the codebase "
+                f'"{project_name}".\n\n'
+                f"Note: No project overview is available for this codebase.\n\n"
+                f"QUESTION:\n{question}\n\n"
+                f"Please provide the best answer you can based on the project "
+                f"name and general software development knowledge. If you need "
+                f"more specific information about this codebase to provide a "
+                f"complete answer, please mention what would be helpful."
+            )
 
-Please provide the best answer you can based on the project name and general software development knowledge. If you need more specific information about this codebase to provide a complete answer, please mention what would be helpful."""
-
         return prompt
-
+
     def _get_system_prompt(self) -> str:
         """Get system prompt for Claude API."""
-        return """You are a helpful software engineering assistant that analyzes codebases and answers questions about them.
-
-When answering questions:
-1. Be specific and technical when appropriate
-2. Reference the project overview when available
-3. If information is missing, clearly state what you don't know
-4. Provide actionable suggestions when possible
-5. Use clear, professional language
-6. Focus on the specific question asked
-
-If the project overview is insufficient to answer the question completely, explain what additional information would be needed and suggest using --deepask for more detailed analysis."""
-
+        return (
+            "You are a helpful software engineering assistant that analyzes "
+            "codebases and answers questions about them.\n\n"
+            "When answering questions:\n"
+            "1. Be specific and technical when appropriate\n"
+            "2. Reference the project overview when available\n"
+            "3. If information is missing, clearly state what you don't know\n"
+            "4. Provide actionable suggestions when possible\n"
+            "5. Use clear, professional language\n"
+            "6. Focus on the specific question asked\n\n"
+            "If the project overview is insufficient to answer the question "
+            "completely, explain what additional information would be needed "
+            "and suggest using --deepask for more detailed analysis."
+        )
+
     def format_response(self, result: Dict[str, Any], format_type: str = "text") -> str:
         """
         Format response for CLI output.
-
+
         Args:
             result: Result from ask_question
             format_type: Output format ("text" or "json")
-
+
         Returns:
             Formatted response string
         """
         if format_type == "json":
             import json
+
             return json.dumps(result, indent=2)
-
+
         # Text format
         answer = result["answer"]
         metadata = result["metadata"]
-
+
         output = []
         output.append(f"Question: {result['question']}")
         output.append(f"Project: {result['project_name']}")
@@ -206,10 +234,12 @@ If the project overview is insufficient to answer the question completely, expla
         output.append("Metadata:")
         output.append(f" Model: {metadata['model']}")
         output.append(f" Overview included: {metadata['include_overview']}")
-
-        if metadata['token_usage']['total_tokens']:
+
+        if metadata["token_usage"]["total_tokens"]:
             output.append(f" Total tokens: {metadata['token_usage']['total_tokens']}")
         else:
-            output.append(f" Prompt tokens: {metadata['token_usage']['total_prompt_tokens']}")
-
+            output.append(
+                f" Prompt tokens: {metadata['token_usage']['total_prompt_tokens']}"
+            )
+
         return "\n".join(output)
```