local-deep-research 0.5.0__py3-none-any.whl → 0.5.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- local_deep_research/__version__.py +1 -1
- local_deep_research/config/llm_config.py +61 -1
- local_deep_research/error_handling/__init__.py +13 -0
- local_deep_research/error_handling/error_reporter.py +236 -0
- local_deep_research/error_handling/report_generator.py +403 -0
- local_deep_research/web/database/migrations.py +16 -1
- local_deep_research/web/database/models.py +31 -31
- local_deep_research/web/models/database.py +13 -23
- local_deep_research/web/routes/history_routes.py +1 -1
- local_deep_research/web/routes/research_routes.py +65 -113
- local_deep_research/web/services/research_service.py +218 -160
- local_deep_research/web/static/js/components/progress.js +19 -13
- local_deep_research/web/static/js/components/results.js +1 -1
- local_deep_research/web/templates/pages/research.html +2 -2
- {local_deep_research-0.5.0.dist-info → local_deep_research-0.5.3.dist-info}/METADATA +1 -1
- {local_deep_research-0.5.0.dist-info → local_deep_research-0.5.3.dist-info}/RECORD +19 -17
- local_deep_research/test_migration.py +0 -188
- {local_deep_research-0.5.0.dist-info → local_deep_research-0.5.3.dist-info}/WHEEL +0 -0
- {local_deep_research-0.5.0.dist-info → local_deep_research-0.5.3.dist-info}/entry_points.txt +0 -0
- {local_deep_research-0.5.0.dist-info → local_deep_research-0.5.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,403 @@
|
|
1
|
+
"""
|
2
|
+
ErrorReportGenerator - Create user-friendly error reports
|
3
|
+
"""
|
4
|
+
|
5
|
+
from typing import Dict, Any, Optional
|
6
|
+
from loguru import logger
|
7
|
+
|
8
|
+
from .error_reporter import ErrorReporter
|
9
|
+
|
10
|
+
|
11
|
+
class ErrorReportGenerator:
    """
    Generates comprehensive, user-friendly error reports.

    Uses :class:`ErrorReporter` to categorize a raw error message, then
    renders a Markdown report aimed at end users, including support links
    and any partial research results collected before the failure.
    """

    # Map of regex pattern -> user-friendly replacement text.  Patterns are
    # matched case-insensitively against the raw error message in insertion
    # order; the first match wins.  Defined once at class level so the table
    # is not rebuilt on every call to _make_error_user_friendly().
    _ERROR_REPLACEMENTS: Dict[str, str] = {
        "max_workers must be greater than 0": (
            "The LLM failed to generate search questions. This usually means the LLM service isn't responding properly.\n\n"
            "**Try this:**\n"
            "- Check if your LLM service (Ollama/LM Studio) is running\n"
            "- Restart the LLM service\n"
            "- Try a different model"
        ),
        "POST predict.*EOF": (
            "Lost connection to Ollama. This usually means Ollama stopped responding or there's a network issue.\n\n"
            "**Try this:**\n"
            "- Restart Ollama: `ollama serve`\n"
            "- Check if Ollama is still running: `ps aux | grep ollama`\n"
            "- Try a different port if 11434 is in use"
        ),
        "HTTP error 404.*research results": (
            "The research completed but the results can't be displayed. The files were likely generated successfully.\n\n"
            "**Try this:**\n"
            "- Check the `research_outputs` folder for your report\n"
            "- Ensure the folder has proper read/write permissions\n"
            "- Restart the LDR web interface"
        ),
        "Connection refused|\\[Errno 111\\]": (
            "Cannot connect to the LLM service. The service might not be running or is using a different address.\n\n"
            "**Try this:**\n"
            "- Start your LLM service (Ollama: `ollama serve`, LM Studio: launch the app)\n"
            "- **Docker on Mac/Windows:** Change URL from `http://localhost:1234` to `http://host.docker.internal:1234`\n"
            "- **Docker on Linux:** Use your host IP instead of localhost (find with `hostname -I`)\n"
            "- Check the service URL in settings matches where your LLM is running\n"
            "- Verify the port number is correct (Ollama: 11434, LM Studio: 1234)"
        ),
        "The search is longer than 256 characters": (
            "Your search query is too long for GitHub's API (max 256 characters).\n\n"
            "**Try this:**\n"
            "- Shorten your research query\n"
            "- Use a different search engine (DuckDuckGo, Searx, etc.)\n"
            "- Break your research into smaller, focused queries"
        ),
        "No module named.*local_deep_research": (
            "Installation issue detected. The package isn't properly installed.\n\n"
            "**Try this:**\n"
            "- Reinstall: `pip install -e .` from the project directory\n"
            "- Check you're using the right Python environment\n"
            "- For Docker users: rebuild the container"
        ),
        "Failed to create search engine|could not be found": (
            "Search engine configuration problem.\n\n"
            "**Try this:**\n"
            "- Use the default search engine (auto)\n"
            "- Check search engine settings in Advanced Options\n"
            "- Ensure required API keys are set for external search engines"
        ),
        "TypeError.*Context.*Size|'<' not supported between": (
            "Model configuration issue. The context size setting might not be compatible with your model.\n\n"
            "**Try this:**\n"
            "- Check your model's maximum context size\n"
            "- Leave context size settings at default\n"
            "- Try a different model"
        ),
        "Model.*not found in Ollama": (
            "The specified model isn't available in Ollama.\n\n"
            "**Try this:**\n"
            "- Check available models: `ollama list`\n"
            "- Pull the model: `ollama pull <model-name>`\n"
            "- Use the exact model name shown in `ollama list` (e.g., 'gemma2:9b' not 'gemma:latest')"
        ),
        "No auth credentials found|401.*API key": (
            "API key is missing or incorrectly configured.\n\n"
            "**Try this:**\n"
            "- Set API key in the web UI settings (not in .env files)\n"
            "- Go to Settings → Advanced → enter your API key\n"
            "- For custom endpoints, ensure the key format matches what your provider expects"
        ),
        "Attempt to write readonly database": (
            "Permission issue with the database file.\n\n"
            "**Try this:**\n"
            "- On Windows: Run as Administrator\n"
            "- On Linux/Mac: Check folder permissions\n"
            "- Delete and recreate the database file if corrupted"
        ),
        "Invalid value.*SearXNG|database.*locked": (
            "SearXNG configuration or rate limiting issue.\n\n"
            "**Try this:**\n"
            "- Keep 'Search snippets only' enabled (don't turn it off)\n"
            "- Restart SearXNG: `docker restart searxng`\n"
            "- If rate limited, wait a few minutes or use a VPN"
        ),
        "host.*localhost.*Docker|127\\.0\\.0\\.1.*Docker|localhost.*1234.*Docker|LM.*Studio.*Docker.*Mac": (
            "Docker networking issue - can't connect to services on host.\n\n"
            "**Try this:**\n"
            "- **On Mac/Windows Docker:** Replace 'localhost' or '127.0.0.1' with 'host.docker.internal'\n"
            "- **On Linux Docker:** Use your host's actual IP address (find with `hostname -I`)\n"
            "- **Example:** Change `http://localhost:1234` to `http://host.docker.internal:1234`\n"
            "- Ensure the service port isn't blocked by firewall\n"
            "- Alternative: Use host networking mode (see wiki for setup)"
        ),
    }

    def __init__(self, llm=None):
        """
        Initialize error report generator.

        Args:
            llm: Optional LLM instance (unused, kept for compatibility)
        """
        self.error_reporter = ErrorReporter()

    def generate_error_report(
        self,
        error_message: str,
        query: str,
        partial_results: Optional[Dict[str, Any]] = None,
        search_iterations: int = 0,
        research_id: Optional[int] = None,
    ) -> str:
        """
        Generate a comprehensive error report.

        Args:
            error_message: The error that occurred
            query: The research query
            partial_results: Any partial results that were collected
            search_iterations: Number of search iterations completed
            research_id: Research ID for reference

        Returns:
            str: Formatted error report in Markdown. Never raises: if report
            generation itself fails, a minimal fallback report is returned.
        """
        try:
            # Analyze the error
            context = {
                "query": query,
                "search_iterations": search_iterations,
                "research_id": research_id,
                "partial_results": partial_results,
            }

            # Flatten partial result keys into the context so the analyzer
            # can see e.g. "search_results" / "findings" directly.
            if partial_results:
                context.update(partial_results)

            error_analysis = self.error_reporter.analyze_error(
                error_message, context
            )

            # Build the simplified report
            report_parts = []

            # Header with user-friendly error message and logs reference
            user_friendly_message = self._make_error_user_friendly(
                error_message
            )
            category_title = error_analysis.get("title", "Error")

            report_parts.append("# ⚠️ Research Failed")
            report_parts.append(f"\n**Error Type:** {category_title}")
            report_parts.append(f"\n**What happened:** {user_friendly_message}")
            report_parts.append(
                '\n*For detailed error information, scroll down to the research logs and select "Errors" from the filter.*'
            )

            # Support links - moved up for better visibility
            report_parts.append("\n## 💬 Get Help")
            report_parts.append("We're here to help you get this working:")
            report_parts.append(
                "- 📖 **Documentation & guides:** [Wiki](https://github.com/LearningCircuit/local-deep-research/wiki)"
            )
            report_parts.append(
                "- 💬 **Chat with the community:** [Discord #help-and-support](https://discord.gg/ttcqQeFcJ3)"
            )
            report_parts.append(
                "- 🐛 **Report bugs or get help:** [GitHub Issues](https://github.com/LearningCircuit/local-deep-research/issues) *(don't hesitate to ask if you're stuck!)*"
            )
            report_parts.append(
                "- 💭 **Join discussions:** [Reddit r/LocalDeepResearch](https://www.reddit.com/r/LocalDeepResearch/) *(checked less frequently)*"
            )

            # Show partial results if available (in expandable section)
            if error_analysis.get("has_partial_results"):
                partial_content = self._format_partial_results(partial_results)
                if partial_content:
                    report_parts.append(
                        f"\n<details>\n<summary>📊 Partial Results Available</summary>\n\n{partial_content}\n</details>"
                    )

            return "\n".join(report_parts)

        except Exception:
            # Fallback: always return something, even if error report
            # generation fails.  logger.exception records the traceback, so
            # the exception object need not be interpolated into the message.
            logger.exception("Failed to generate error report")
            return f"""# ⚠️ Research Failed

**What happened:** {error_message}

## 💬 Get Help
We're here to help you get this working:
- 📖 **Documentation & guides:** [Wiki](https://github.com/LearningCircuit/local-deep-research/wiki)
- 💬 **Chat with the community:** [Discord #help-and-support](https://discord.gg/ttcqQeFcJ3)
- 🐛 **Report bugs or get help:** [GitHub Issues](https://github.com/LearningCircuit/local-deep-research/issues) *(don't hesitate to ask if you're stuck!)*

*Note: Error report generation failed - showing basic error information.*"""

    def _format_partial_results(
        self, partial_results: Optional[Dict[str, Any]]
    ) -> str:
        """
        Format partial results for display.

        Args:
            partial_results: Partial results data

        Returns:
            str: Formatted partial results ("" when there is nothing to show)
        """
        if not partial_results:
            return ""

        formatted_parts = []

        # Current knowledge summary (only shown when substantive: > 50 chars)
        if "current_knowledge" in partial_results:
            knowledge = partial_results["current_knowledge"]
            if knowledge and len(knowledge.strip()) > 50:
                formatted_parts.append("### Research Summary\n")
                formatted_parts.append(
                    knowledge[:1000] + "..."
                    if len(knowledge) > 1000
                    else knowledge
                )
                formatted_parts.append("")

        # Search results
        if "search_results" in partial_results:
            results = partial_results["search_results"]
            if results:
                formatted_parts.append("### Search Results Found\n")
                for i, result in enumerate(results[:5], 1):  # Show top 5
                    title = result.get("title", "Untitled")
                    url = result.get("url", "")
                    formatted_parts.append(f"{i}. **{title}**")
                    if url:
                        formatted_parts.append(f"   - URL: {url}")
                formatted_parts.append("")

        # Findings (skip entries that are themselves error placeholders)
        if "findings" in partial_results:
            findings = partial_results["findings"]
            if findings:
                formatted_parts.append("### Research Findings\n")
                for i, finding in enumerate(findings[:3], 1):  # Show top 3
                    content = finding.get("content", "")
                    if content and not content.startswith("Error:"):
                        phase = finding.get("phase", f"Finding {i}")
                        formatted_parts.append(f"**{phase}:**")
                        formatted_parts.append(
                            content[:500] + "..."
                            if len(content) > 500
                            else content
                        )
                        formatted_parts.append("")

        if formatted_parts:
            formatted_parts.append(
                "*Note: The above results were successfully collected before the error occurred.*"
            )

        return "\n".join(formatted_parts) if formatted_parts else ""

    def _get_technical_context(
        self,
        error_analysis: Dict[str, Any],
        partial_results: Optional[Dict[str, Any]],
    ) -> str:
        """
        Get additional technical context for the error.

        Args:
            error_analysis: Error analysis results
            partial_results: Partial results if available

        Returns:
            str: Technical context information ("" when nothing applies)
        """
        context_parts = []

        # Add timing information if available
        if partial_results:
            if "start_time" in partial_results:
                context_parts.append(
                    f"- **Start Time:** {partial_results['start_time']}"
                )

            if "last_activity" in partial_results:
                context_parts.append(
                    f"- **Last Activity:** {partial_results['last_activity']}"
                )

            # Add model information
            if "model_config" in partial_results:
                config = partial_results["model_config"]
                context_parts.append(
                    f"- **Model:** {config.get('model_name', 'Unknown')}"
                )
                context_parts.append(
                    f"- **Provider:** {config.get('provider', 'Unknown')}"
                )

            # Add search information
            if "search_config" in partial_results:
                search_config = partial_results["search_config"]
                context_parts.append(
                    f"- **Search Engine:** {search_config.get('engine', 'Unknown')}"
                )
                context_parts.append(
                    f"- **Max Results:** {search_config.get('max_results', 'Unknown')}"
                )

            # Add any error codes or HTTP status
            if "status_code" in partial_results:
                context_parts.append(
                    f"- **Status Code:** {partial_results['status_code']}"
                )

            if "error_code" in partial_results:
                context_parts.append(
                    f"- **Error Code:** {partial_results['error_code']}"
                )

        # Add error-specific context based on category.  NOTE(review):
        # `category` is expected to be an enum (``.value`` is read) — confirm
        # against ErrorReporter.analyze_error.
        category = error_analysis.get("category")
        if category:
            if "connection" in category.value.lower():
                context_parts.append(
                    "- **Network Error:** Connection-related issue detected"
                )
                context_parts.append(
                    "- **Retry Recommended:** Check service status and try again"
                )
            elif "model" in category.value.lower():
                context_parts.append(
                    "- **Model Error:** Issue with AI model or configuration"
                )
                context_parts.append(
                    "- **Check:** Model service availability and parameters"
                )

        return "\n".join(context_parts) if context_parts else ""

    def generate_quick_error_summary(
        self, error_message: str
    ) -> Dict[str, str]:
        """
        Generate a quick error summary for API responses.

        Args:
            error_message: The error message

        Returns:
            dict: Quick error summary (title, category, severity, recoverable)
        """
        error_analysis = self.error_reporter.analyze_error(error_message)

        return {
            "title": error_analysis["title"],
            "category": error_analysis["category"].value,
            "severity": error_analysis["severity"],
            "recoverable": error_analysis["recoverable"],
        }

    def _make_error_user_friendly(self, error_message: str) -> str:
        """
        Replace cryptic technical error messages with user-friendly versions.

        Args:
            error_message: The original technical error message

        Returns:
            str: User-friendly message with the technical error appended,
            or the original message if no known pattern matches.
        """
        # Local import kept to match the module's original style; hoisted out
        # of the loop so it runs once per call rather than once per pattern.
        import re

        # Check each known pattern; first match wins.
        for pattern, replacement in self._ERROR_REPLACEMENTS.items():
            if re.search(pattern, error_message, re.IGNORECASE):
                return f"{replacement}\n\nTechnical error: {error_message}"

        # If no specific replacement found, return original message
        return error_message
@@ -2,7 +2,14 @@ from loguru import logger
|
|
2
2
|
from sqlalchemy import inspect
|
3
3
|
|
4
4
|
from ..services.settings_manager import SettingsManager
|
5
|
-
from .models import
|
5
|
+
from .models import (
|
6
|
+
Base,
|
7
|
+
Journal,
|
8
|
+
Setting,
|
9
|
+
ResearchLog,
|
10
|
+
Research,
|
11
|
+
ResearchHistory,
|
12
|
+
)
|
6
13
|
|
7
14
|
|
8
15
|
def import_default_settings_file(db_session):
|
@@ -52,6 +59,14 @@ def run_migrations(engine, db_session=None):
|
|
52
59
|
logger.info("Creating research logs table.")
|
53
60
|
Base.metadata.create_all(engine, tables=[ResearchLog.__table__])
|
54
61
|
|
62
|
+
if not inspector.has_table(Research.__tablename__):
|
63
|
+
logger.info("Creating research table.")
|
64
|
+
Base.metadata.create_all(engine, tables=[Research.__table__])
|
65
|
+
|
66
|
+
if not inspector.has_table(ResearchHistory.__tablename__):
|
67
|
+
logger.info("Creating research table.")
|
68
|
+
Base.metadata.create_all(engine, tables=[ResearchHistory.__table__])
|
69
|
+
|
55
70
|
# Import existing settings from files
|
56
71
|
if db_session:
|
57
72
|
import_default_settings_file(db_session)
|
@@ -34,6 +34,37 @@ class ResearchStatus(enum.Enum):
|
|
34
34
|
CANCELLED = "cancelled"
|
35
35
|
|
36
36
|
|
37
|
+
class ResearchHistory(Base):
    """ORM model for the ``research_history`` table.

    Mirrors the legacy sqlite schema previously created with raw SQL in
    ``web/models/database.py``.  NOTE(review): timestamps are stored as
    ``Text`` rather than ``DateTime`` — presumably ISO-8601 strings written
    by the routes; confirm the format against the callers before changing.
    """

    __tablename__ = "research_history"

    # Unique identifier for each record.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # The search query.
    query = Column(Text, nullable=False)
    # The mode of research (e.g., 'quick_summary', 'detailed_report').
    mode = Column(Text, nullable=False)
    # Current status of the research.
    status = Column(Text, nullable=False)
    # The timestamp when the research started.
    created_at = Column(Text, nullable=False)
    # The timestamp when the research was completed (NULL while running).
    completed_at = Column(Text)
    # Duration of the research in seconds.
    duration_seconds = Column(Integer)
    # Path to the generated report.
    report_path = Column(Text)
    # Additional metadata about the research.  Named ``research_meta`` because
    # ``metadata`` is reserved by SQLAlchemy's declarative base.
    research_meta = Column(JSON)
    # Latest progress log message.
    progress_log = Column(JSON)
    # Current progress of the research (as a percentage).
    progress = Column(Integer)
    # Title of the research report.
    title = Column(Text)
|
66
|
+
|
67
|
+
|
37
68
|
class Research(Base):
|
38
69
|
__tablename__ = "research"
|
39
70
|
|
@@ -54,14 +85,6 @@ class Research(Base):
|
|
54
85
|
end_time = Column(DateTime, nullable=True)
|
55
86
|
error_message = Column(Text, nullable=True)
|
56
87
|
|
57
|
-
# Relationships
|
58
|
-
report = relationship(
|
59
|
-
"ResearchReport",
|
60
|
-
back_populates="research",
|
61
|
-
uselist=False,
|
62
|
-
cascade="all, delete-orphan",
|
63
|
-
)
|
64
|
-
|
65
88
|
|
66
89
|
class ResearchLog(Base):
|
67
90
|
__tablename__ = "app_logs"
|
@@ -88,29 +111,6 @@ class ResearchLog(Base):
|
|
88
111
|
)
|
89
112
|
|
90
113
|
|
91
|
-
class ResearchReport(Base):
|
92
|
-
__tablename__ = "research_report"
|
93
|
-
|
94
|
-
id = Column(Integer, primary_key=True, index=True)
|
95
|
-
research_id = Column(
|
96
|
-
Integer,
|
97
|
-
ForeignKey("research.id", ondelete="CASCADE"),
|
98
|
-
nullable=False,
|
99
|
-
unique=True,
|
100
|
-
)
|
101
|
-
content = Column(Text, nullable=True)
|
102
|
-
created_at = Column(DateTime, server_default=func.now(), nullable=False)
|
103
|
-
updated_at = Column(
|
104
|
-
DateTime, server_default=func.now(), onupdate=func.now(), nullable=False
|
105
|
-
)
|
106
|
-
report_metadata = Column(
|
107
|
-
JSON, nullable=True
|
108
|
-
) # Additional metadata about the report
|
109
|
-
|
110
|
-
# Relationships
|
111
|
-
research = relationship("Research", back_populates="report")
|
112
|
-
|
113
|
-
|
114
114
|
class SettingType(enum.Enum):
|
115
115
|
APP = "app"
|
116
116
|
LLM = "llm"
|
@@ -43,26 +43,6 @@ def init_db():
|
|
43
43
|
conn = get_db_connection()
|
44
44
|
cursor = conn.cursor()
|
45
45
|
|
46
|
-
# Create the table if it doesn't exist
|
47
|
-
cursor.execute(
|
48
|
-
"""
|
49
|
-
CREATE TABLE IF NOT EXISTS research_history (
|
50
|
-
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
51
|
-
query TEXT NOT NULL,
|
52
|
-
mode TEXT NOT NULL,
|
53
|
-
status TEXT NOT NULL,
|
54
|
-
created_at TEXT NOT NULL,
|
55
|
-
completed_at TEXT,
|
56
|
-
duration_seconds INTEGER,
|
57
|
-
report_path TEXT,
|
58
|
-
metadata TEXT,
|
59
|
-
progress_log TEXT,
|
60
|
-
progress INTEGER,
|
61
|
-
title TEXT
|
62
|
-
)
|
63
|
-
"""
|
64
|
-
)
|
65
|
-
|
66
46
|
# Create a dedicated table for research logs
|
67
47
|
cursor.execute(
|
68
48
|
"""
|
@@ -101,7 +81,7 @@ def init_db():
|
|
101
81
|
columns = [column[1] for column in cursor.fetchall()]
|
102
82
|
|
103
83
|
if "duration_seconds" not in columns:
|
104
|
-
|
84
|
+
logger.info(
|
105
85
|
"Adding missing 'duration_seconds' column to research_history table"
|
106
86
|
)
|
107
87
|
cursor.execute(
|
@@ -110,16 +90,26 @@ def init_db():
|
|
110
90
|
|
111
91
|
# Check if the progress column exists, add it if missing
|
112
92
|
if "progress" not in columns:
|
113
|
-
|
93
|
+
logger.info(
|
94
|
+
"Adding missing 'progress' column to research_history table"
|
95
|
+
)
|
114
96
|
cursor.execute(
|
115
97
|
"ALTER TABLE research_history ADD COLUMN progress INTEGER"
|
116
98
|
)
|
117
99
|
|
118
100
|
# Check if the title column exists, add it if missing
|
119
101
|
if "title" not in columns:
|
120
|
-
|
102
|
+
logger.info("Adding missing 'title' column to research_history table")
|
121
103
|
cursor.execute("ALTER TABLE research_history ADD COLUMN title TEXT")
|
122
104
|
|
105
|
+
# Check if the metadata column exists, and rename it to "research_meta"
|
106
|
+
# if it does.
|
107
|
+
if "metadata" in columns:
|
108
|
+
logger.info("Renaming 'metadata' column to 'research_meta'")
|
109
|
+
cursor.execute(
|
110
|
+
"ALTER TABLE research_history RENAME COLUMN metadata TO research_meta"
|
111
|
+
)
|
112
|
+
|
123
113
|
# Enable foreign key support
|
124
114
|
cursor.execute("PRAGMA foreign_keys = ON")
|
125
115
|
|
@@ -254,7 +254,7 @@ def get_research_details(research_id):
|
|
254
254
|
)
|
255
255
|
|
256
256
|
|
257
|
-
@history_bp.route("/report/<int:research_id>")
|
257
|
+
@history_bp.route("/history/report/<int:research_id>")
|
258
258
|
def get_report(research_id):
|
259
259
|
conn = get_db_connection()
|
260
260
|
conn.row_factory = lambda cursor, row: {
|