sirchmunk 0.0.1.post1__py3-none-any.whl → 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sirchmunk/api/__init__.py +1 -0
- sirchmunk/api/chat.py +1123 -0
- sirchmunk/api/components/__init__.py +0 -0
- sirchmunk/api/components/history_storage.py +402 -0
- sirchmunk/api/components/monitor_tracker.py +518 -0
- sirchmunk/api/components/settings_storage.py +353 -0
- sirchmunk/api/history.py +254 -0
- sirchmunk/api/knowledge.py +411 -0
- sirchmunk/api/main.py +120 -0
- sirchmunk/api/monitor.py +219 -0
- sirchmunk/api/run_server.py +54 -0
- sirchmunk/api/search.py +230 -0
- sirchmunk/api/settings.py +309 -0
- sirchmunk/api/tools.py +315 -0
- sirchmunk/cli/__init__.py +11 -0
- sirchmunk/cli/cli.py +789 -0
- sirchmunk/learnings/knowledge_base.py +5 -2
- sirchmunk/llm/prompts.py +12 -1
- sirchmunk/retrieve/text_retriever.py +186 -2
- sirchmunk/scan/file_scanner.py +2 -2
- sirchmunk/schema/knowledge.py +119 -35
- sirchmunk/search.py +384 -26
- sirchmunk/storage/__init__.py +2 -2
- sirchmunk/storage/{knowledge_manager.py → knowledge_storage.py} +265 -60
- sirchmunk/utils/constants.py +7 -5
- sirchmunk/utils/embedding_util.py +217 -0
- sirchmunk/utils/tokenizer_util.py +36 -1
- sirchmunk/version.py +1 -1
- {sirchmunk-0.0.1.post1.dist-info → sirchmunk-0.0.2.dist-info}/METADATA +124 -9
- sirchmunk-0.0.2.dist-info/RECORD +69 -0
- {sirchmunk-0.0.1.post1.dist-info → sirchmunk-0.0.2.dist-info}/WHEEL +1 -1
- sirchmunk-0.0.2.dist-info/top_level.txt +2 -0
- sirchmunk_mcp/__init__.py +25 -0
- sirchmunk_mcp/cli.py +478 -0
- sirchmunk_mcp/config.py +276 -0
- sirchmunk_mcp/server.py +355 -0
- sirchmunk_mcp/service.py +327 -0
- sirchmunk_mcp/setup.py +15 -0
- sirchmunk_mcp/tools.py +410 -0
- sirchmunk-0.0.1.post1.dist-info/RECORD +0 -45
- sirchmunk-0.0.1.post1.dist-info/top_level.txt +0 -1
- {sirchmunk-0.0.1.post1.dist-info → sirchmunk-0.0.2.dist-info}/entry_points.txt +0 -0
- {sirchmunk-0.0.1.post1.dist-info → sirchmunk-0.0.2.dist-info}/licenses/LICENSE +0 -0
sirchmunk/api/monitor.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
# Copyright (c) ModelScope Contributors. All rights reserved.
"""
Real-time system monitoring API endpoints
Provides actual system metrics and activity tracking
"""

from fastapi import APIRouter, HTTPException
from datetime import datetime

from sirchmunk.api.components.monitor_tracker import get_monitor_tracker, llm_usage_tracker

# All monitoring endpoints are mounted under /api/v1/monitor.
router = APIRouter(prefix="/api/v1/monitor", tags=["monitor"])

# === API Endpoints ===
|
|
15
|
+
|
|
16
|
+
@router.get("/overview")
async def get_monitoring_overview():
    """Return a comprehensive monitoring overview.

    The payload bundles system metrics (CPU, memory, disk), chat activity
    statistics, knowledge cluster statistics, storage information, and
    health status, as assembled by the monitor tracker.
    """
    try:
        overview_data = get_monitor_tracker().get_overview()
    except Exception as exc:
        # Surface any tracker failure as a 500 with the error text.
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "data": overview_data}
|
|
38
|
+
|
|
39
|
+
@router.get("/system")
async def get_system_metrics():
    """Return current system metrics.

    Includes CPU usage and count, memory usage and capacity, disk usage
    and capacity, network connections, system uptime, and
    process-specific metrics (all produced by the monitor tracker).
    """
    try:
        current_metrics = get_monitor_tracker().get_system_metrics()
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "data": current_metrics}
|
|
62
|
+
|
|
63
|
+
@router.get("/health")
async def get_health_status():
    """Return the comprehensive health status.

    The tracker's report covers an overall health score, a status label
    (excellent/good/warning/critical), a list of issues, and service
    availability.
    """
    try:
        report = get_monitor_tracker().get_health_status()
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "data": report}
|
|
84
|
+
|
|
85
|
+
@router.get("/chat")
async def get_chat_activity(hours: int = 24):
    """Return chat activity statistics for a recent time window.

    Args:
        hours: Time window in hours (default: 24), passed through to the
            tracker.

    Returns:
        Total sessions, total messages, the ten most recent sessions, and
        the count of active sessions.
    """
    try:
        window_stats = get_monitor_tracker().get_chat_activity(hours=hours)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "data": window_stats}
|
|
109
|
+
|
|
110
|
+
@router.get("/knowledge")
async def get_knowledge_activity():
    """Return knowledge cluster activity statistics.

    The tracker reports total clusters, the ten most recent clusters,
    the lifecycle distribution, and the average confidence.
    """
    try:
        cluster_stats = get_monitor_tracker().get_knowledge_activity()
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "data": cluster_stats}
|
|
131
|
+
|
|
132
|
+
@router.get("/storage")
async def get_storage_info():
    """Return storage information.

    Includes the work path, cache path, database sizes, and total cache
    size as reported by the monitor tracker.
    """
    try:
        storage_details = get_monitor_tracker().get_storage_info()
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "data": storage_details}
|
|
153
|
+
|
|
154
|
+
@router.get("/llm")
async def get_llm_usage():
    """Return LLM usage statistics.

    The usage tracker reports total calls, input/output/total token
    counts, calls per minute, and a per-model usage breakdown.
    """
    try:
        usage = llm_usage_tracker.get_stats()
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"success": True, "data": usage}
|
|
173
|
+
|
|
174
|
+
@router.get("/status")
async def get_simple_status():
    """Return a compact status summary for quick health checks.

    Combines the tracker's health report with a few headline system
    metrics and a current timestamp.
    """
    try:
        tracker = get_monitor_tracker()
        health = tracker.get_health_status()
        metrics = tracker.get_system_metrics()

        # Missing metric sections default to 0 / "" rather than erroring.
        summary = {
            "success": True,
            "status": health["overall_status"],
            "health_score": health["health_score"],
        }
        summary["cpu_usage"] = metrics.get("cpu", {}).get("usage_percent", 0)
        summary["memory_usage"] = metrics.get("memory", {}).get("usage_percent", 0)
        summary["disk_usage"] = metrics.get("disk", {}).get("usage_percent", 0)
        summary["uptime"] = metrics.get("uptime", "")
        summary["timestamp"] = datetime.now().isoformat()
        return summary
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
|
|
199
|
+
|
|
200
|
+
@router.post("/refresh")
async def refresh_metrics():
    """Force a refresh of monitoring metrics.

    Returns:
        The updated overview data along with a confirmation message.
    """
    try:
        # A refresh is simply a fresh read of the overview data.
        fresh_overview = get_monitor_tracker().get_overview()
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {
        "success": True,
        "message": "Metrics refreshed successfully",
        "data": fresh_overview,
    }
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""
FastAPI server runner for Sirchmunk API
Provides a simple way to start the API server with proper configuration
"""

import os
import sys
from pathlib import Path

# Add the project root to sys.path so `sirchmunk` is importable when this
# script is executed directly (rather than as an installed package).
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))
|
|
14
|
+
|
|
15
|
+
def main():
    """Start the Sirchmunk FastAPI server via uvicorn.

    Configuration is read from environment variables:
        API_HOST (default "0.0.0.0"),
        API_PORT / BACKEND_PORT (default "8584"),
        API_RELOAD (default "true"),
        API_LOG_LEVEL (default "info").

    Exits with status 1 if required dependencies are missing or the
    server fails to start.
    """
    try:
        import uvicorn
        # Imported eagerly so a broken app module is reported as an
        # ImportError up front, even when reload mode re-imports it later.
        from sirchmunk.api.main import app

        # Get configuration from environment
        host = os.environ.get("API_HOST", "0.0.0.0")
        port = int(os.environ.get("API_PORT", os.environ.get("BACKEND_PORT", "8584")))
        reload = os.environ.get("API_RELOAD", "true").lower() == "true"
        log_level = os.environ.get("API_LOG_LEVEL", "info")

        print("🚀 Starting Sirchmunk API server...")
        print(f" Host: {host}")
        print(f" Port: {port}")
        print(f" Reload: {reload}")
        print(f" Log Level: {log_level}")
        print(f" Docs: http://{host}:{port}/docs")

        # BUG FIX: uvicorn requires the application as an *import string*
        # when reload is enabled; passing the app object with reload=True
        # makes uvicorn refuse to start. Fall back to the object only when
        # reload is off.
        target = "sirchmunk.api.main:app" if reload else app

        # Start the server
        uvicorn.run(
            target,
            host=host,
            port=port,
            reload=reload,
            log_level=log_level,
            access_log=True
        )

    except ImportError as e:
        print(f"❌ Failed to import required modules: {e}")
        print(" Please install required dependencies:")
        print(" pip install fastapi uvicorn")
        sys.exit(1)
    except Exception as e:
        print(f"❌ Failed to start server: {e}")
        sys.exit(1)
|
|
52
|
+
|
|
53
|
+
# Allow running this module directly: `python run_server.py`.
if __name__ == "__main__":
    main()
|
sirchmunk/api/search.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
# Copyright (c) ModelScope Contributors. All rights reserved.
"""
Search API endpoints for CLI and programmatic access.

Provides HTTP endpoints for executing AgenticSearch queries,
designed for CLI client mode and external integrations.
"""

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
from typing import List, Optional, Literal
import logging

from sirchmunk.search import AgenticSearch
from sirchmunk.llm.openai_chat import OpenAIChat
from sirchmunk.utils.constants import (
    LLM_BASE_URL,
    LLM_API_KEY,
    LLM_MODEL_NAME,
    DEFAULT_SIRCHMUNK_WORK_PATH,
)


# Module-level logger for this endpoint module.
logger = logging.getLogger(__name__)

# All search endpoints are mounted under /api/v1.
router = APIRouter(prefix="/api/v1", tags=["search"])


# === Request/Response Models ===
|
|
30
|
+
|
|
31
|
+
class SearchRequest(BaseModel):
    """Request model for search endpoint.

    Fields map onto the keyword arguments of ``AgenticSearch.search``;
    optional fields left as ``None`` are simply not forwarded, so the
    searcher's own defaults apply.
    """
    # The natural-language question or keyword query to run.
    query: str = Field(..., description="Search query or question")
    # Empty list means "search the current directory" (see execute_search).
    search_paths: List[str] = Field(
        default_factory=list,
        description="Paths to search (directories or files)"
    )
    mode: Literal["FAST", "DEEP", "FILENAME_ONLY"] = Field(
        default="DEEP",
        description="Search mode: FAST, DEEP, or FILENAME_ONLY"
    )
    max_depth: Optional[int] = Field(
        default=None,
        description="Maximum directory depth to search"
    )
    top_k_files: Optional[int] = Field(
        default=None,
        description="Number of top files to return"
    )
    keyword_levels: Optional[int] = Field(
        default=None,
        description="Number of keyword granularity levels"
    )
    include_patterns: Optional[List[str]] = Field(
        default=None,
        description="File patterns to include (glob)"
    )
    exclude_patterns: Optional[List[str]] = Field(
        default=None,
        description="File patterns to exclude (glob)"
    )
    # When True and the result supports it, the full KnowledgeCluster
    # dict is returned instead of a text summary.
    return_cluster: bool = Field(
        default=False,
        description="Return full KnowledgeCluster object"
    )
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class SearchResponse(BaseModel):
    """Response model for search endpoint.

    ``data`` carries a ``type`` discriminator ("cluster", "files", or
    "summary") alongside the matching payload; ``error`` holds an
    optional error message.
    """
    success: bool
    data: dict
    error: Optional[str] = None
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
# === Cached Search Instance ===

# Module-level cache: created lazily by _get_search_instance() and then
# reused across requests for the lifetime of the process.
_search_instance: Optional[AgenticSearch] = None
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _get_search_instance() -> AgenticSearch:
    """Return the module-cached AgenticSearch instance, creating it lazily.

    Returns:
        AgenticSearch instance

    Raises:
        HTTPException: If the LLM API key is not configured.
    """
    global _search_instance

    # Fast path: instance already built on a previous call.
    if _search_instance is not None:
        return _search_instance

    if not LLM_API_KEY:
        raise HTTPException(
            status_code=500,
            detail="LLM_API_KEY is not configured. Set it in your environment or .env file."
        )

    chat_model = OpenAIChat(
        base_url=LLM_BASE_URL,
        api_key=LLM_API_KEY,
        model=LLM_MODEL_NAME,
    )
    _search_instance = AgenticSearch(
        llm=chat_model,
        work_path=DEFAULT_SIRCHMUNK_WORK_PATH,
        verbose=False,
    )
    logger.info("AgenticSearch instance created for API")
    return _search_instance
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
# === API Endpoints ===
|
|
118
|
+
|
|
119
|
+
@router.post("/search")
async def execute_search(request: SearchRequest) -> SearchResponse:
    """Execute an AgenticSearch query.

    Runs the full pipeline — keyword extraction, file retrieval, content
    analysis, and summary generation — via the shared AgenticSearch
    instance.

    Args:
        request: Search request parameters

    Returns:
        SearchResponse with search results

    Raises:
        HTTPException: If search fails or configuration is invalid
    """
    try:
        searcher = _get_search_instance()

        # Fall back to the current directory when no paths are supplied.
        search_paths = request.search_paths or ["."]

        logger.info(f"Executing search: query='{request.query}', mode={request.mode}, paths={search_paths}")

        search_kwargs = {
            "input": request.query,
            "search_paths": search_paths,
            "mode": request.mode,
            "return_cluster": request.return_cluster,
        }

        # Forward optional tuning parameters only when explicitly set,
        # so the searcher's own defaults apply otherwise.
        for key, value in (
            ("max_depth", request.max_depth),
            ("top_k_files", request.top_k_files),
            ("keyword_levels", request.keyword_levels),
        ):
            if value is not None:
                search_kwargs[key] = value
        if request.include_patterns:
            search_kwargs["include"] = request.include_patterns
        if request.exclude_patterns:
            search_kwargs["exclude"] = request.exclude_patterns

        result = await searcher.search(**search_kwargs)

        # Shape the response according to what the searcher produced.
        if request.return_cluster and hasattr(result, "to_dict"):
            # Full KnowledgeCluster payload was requested and is available.
            return SearchResponse(
                success=True,
                data={"type": "cluster", "cluster": result.to_dict()},
            )
        if isinstance(result, list):
            # FILENAME_ONLY mode yields a plain list of files.
            return SearchResponse(
                success=True,
                data={"type": "files", "files": result, "count": len(result)},
            )
        # Anything else is treated as a text summary.
        return SearchResponse(
            success=True,
            data={
                "type": "summary",
                "summary": str(result) if result else "No results found.",
            },
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Search failed: {e}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Search failed: {str(e)}"
        )
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
@router.get("/search/status")
async def get_search_status():
    """Report whether the search service is ready to accept queries.

    Returns:
        Service status information (readiness, LLM configuration, model
        name, and work path).
    """
    try:
        configured = bool(LLM_API_KEY)
        status_data = {
            "status": "ready" if configured else "not_configured",
            "llm_configured": configured,
            "llm_model": LLM_MODEL_NAME if configured else None,
            "work_path": DEFAULT_SIRCHMUNK_WORK_PATH,
        }
        return {"success": True, "data": status_data}
    except Exception as e:
        # This endpoint never raises; errors are reported in-band.
        return {
            "success": False,
            "error": str(e),
        }
|