local-deep-research 0.5.0__py3-none-any.whl → 0.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
1
- __version__ = "0.5.0"
1
+ __version__ = "0.5.3"
@@ -397,6 +397,24 @@ def get_llm(
397
397
  llm = ChatOllama(
398
398
  model=model_name, base_url=base_url, **common_params
399
399
  )
400
+
401
+ # Log the actual client configuration after creation
402
+ logger.debug(
403
+ f"ChatOllama created - base_url attribute: {getattr(llm, 'base_url', 'not found')}"
404
+ )
405
+ if hasattr(llm, "_client"):
406
+ client = llm._client
407
+ logger.debug(f"ChatOllama _client type: {type(client)}")
408
+ if hasattr(client, "_client"):
409
+ inner_client = client._client
410
+ logger.debug(
411
+ f"ChatOllama inner client type: {type(inner_client)}"
412
+ )
413
+ if hasattr(inner_client, "base_url"):
414
+ logger.debug(
415
+ f"ChatOllama inner client base_url: {inner_client.base_url}"
416
+ )
417
+
400
418
  # Test invoke to validate model works
401
419
  logger.info("Testing Ollama model with simple invocation")
402
420
  test_result = llm.invoke("Hello")
@@ -545,7 +563,49 @@ def wrap_llm_without_think_tags(
545
563
  self.base_llm = base_llm
546
564
 
547
565
  def invoke(self, *args, **kwargs):
548
- response = self.base_llm.invoke(*args, **kwargs)
566
+ # Log detailed request information for Ollama models
567
+ if hasattr(self.base_llm, "base_url"):
568
+ logger.debug(
569
+ f"LLM Request - Base URL: {self.base_llm.base_url}"
570
+ )
571
+ logger.debug(
572
+ f"LLM Request - Model: {getattr(self.base_llm, 'model', 'unknown')}"
573
+ )
574
+ logger.debug(
575
+ f"LLM Request - Args count: {len(args)}, Kwargs: {list(kwargs.keys())}"
576
+ )
577
+
578
+ # Log the prompt if it's in args
579
+ if args and len(args) > 0:
580
+ prompt_text = (
581
+ str(args[0])[:200] + "..."
582
+ if len(str(args[0])) > 200
583
+ else str(args[0])
584
+ )
585
+ logger.debug(f"LLM Request - Prompt preview: {prompt_text}")
586
+
587
+ # Check if there's any client configuration
588
+ if hasattr(self.base_llm, "_client"):
589
+ client = self.base_llm._client
590
+ if hasattr(client, "_client") and hasattr(
591
+ client._client, "base_url"
592
+ ):
593
+ logger.debug(
594
+ f"LLM Request - Client base URL: {client._client.base_url}"
595
+ )
596
+
597
+ try:
598
+ response = self.base_llm.invoke(*args, **kwargs)
599
+ logger.debug(f"LLM Response - Success, type: {type(response)}")
600
+ except Exception as e:
601
+ logger.error(f"LLM Request - Failed with error: {str(e)}")
602
+ # Log any URL information from the error
603
+ error_str = str(e)
604
+ if "http://" in error_str or "https://" in error_str:
605
+ logger.error(
606
+ f"LLM Request - Error contains URL info: {error_str}"
607
+ )
608
+ raise
549
609
 
550
610
  # Process the response content if it has a content attribute
551
611
  if hasattr(response, "content"):
@@ -0,0 +1,13 @@
1
+ """
2
+ Error Handling Module for Local Deep Research
3
+
4
+ This module provides comprehensive error handling capabilities including:
5
+ - Error categorization and analysis
6
+ - User-friendly error report generation
7
+ - Integration with partial research results
8
+ """
9
+
10
+ from .error_reporter import ErrorReporter
11
+ from .report_generator import ErrorReportGenerator
12
+
13
+ __all__ = ["ErrorReporter", "ErrorReportGenerator"]
@@ -0,0 +1,236 @@
1
+ """
2
+ ErrorReporter - Main error categorization and handling logic
3
+ """
4
+
5
+ import re
6
+ from enum import Enum
7
+ from typing import Dict, Optional, Any
8
+ from loguru import logger
9
+
10
+
11
class ErrorCategory(Enum):
    """Buckets that research-run failures are sorted into for user reporting.

    The string values are stable identifiers (they appear in serialized
    error reports), so they must not be renamed casually.
    """

    # Network / service reachability problems (Ollama, LM Studio, Docker...)
    CONNECTION_ERROR = "connection_error"
    # LLM-side problems: bad model name, auth failure, invalid parameters.
    MODEL_ERROR = "model_error"
    # Search-engine failures: rate limits, empty results, misconfiguration.
    SEARCH_ERROR = "search_error"
    # Report-generation failures; partial results are usually still available.
    SYNTHESIS_ERROR = "synthesis_error"
    # Local filesystem / database problems: permissions, disk space, paths.
    FILE_ERROR = "file_error"
    # Fallback when no known pattern matches the error message.
    UNKNOWN_ERROR = "unknown_error"
20
+
21
+
22
class ErrorReporter:
    """
    Analyzes and categorizes errors to provide better user feedback.

    Matching is pattern-based: each ``ErrorCategory`` owns a list of regex
    patterns, and the first pattern that matches an error message (case-
    insensitively) decides the category.
    """

    def __init__(self):
        # Regex patterns per category. Order matters: categories are scanned
        # in insertion order and the first match wins, so more specific
        # categories (connection, model) come before the generic ones.
        self.error_patterns = {
            ErrorCategory.CONNECTION_ERROR: [
                r"POST predict.*EOF",
                r"Connection refused",
                r"timeout",
                r"Connection.*failed",
                r"HTTP error \d+",
                r"network.*error",
                r"\[Errno 111\]",
                r"host\.docker\.internal",
                r"host.*localhost.*Docker",
                r"127\.0\.0\.1.*Docker",
                r"localhost.*1234.*Docker",
                r"LM.*Studio.*Docker.*Mac",
            ],
            ErrorCategory.MODEL_ERROR: [
                r"Model.*not found",
                r"Invalid.*model",
                r"Ollama.*not available",
                r"API key.*invalid",
                r"Authentication.*error",
                r"max_workers must be greater than 0",
                r"TypeError.*Context.*Size",
                r"'<' not supported between",
                r"No auth credentials found",
                r"401.*API key",
            ],
            ErrorCategory.SEARCH_ERROR: [
                r"Search.*failed",
                r"No search results",
                r"Search engine.*error",
                r"Rate limit.*exceeded",
                r"The search is longer than 256 characters",
                r"Failed to create search engine",
                r"could not be found",
                r"GitHub API error",
                r"database.*locked",
            ],
            ErrorCategory.SYNTHESIS_ERROR: [
                r"Error.*synthesis",
                r"Failed.*generate",
                r"Synthesis.*timeout",
                r"detailed.*report.*stuck",
                r"report.*taking.*long",
                r"progress.*100.*stuck",
            ],
            ErrorCategory.FILE_ERROR: [
                r"Permission denied",
                r"File.*not found",
                r"Cannot write.*file",
                r"Disk.*full",
                r"No module named.*local_deep_research",
                r"HTTP error 404.*research results",
                r"Attempt to write readonly database",
            ],
        }

    def categorize_error(self, error_message: str) -> ErrorCategory:
        """
        Categorize an error based on its message.

        Args:
            error_message: The error message to categorize

        Returns:
            ErrorCategory: The categorized error type, or
            ``ErrorCategory.UNKNOWN_ERROR`` when no pattern matches
        """
        # Coerce to str so exception objects can be passed in directly.
        message = str(error_message)

        for category, patterns in self.error_patterns.items():
            for pattern in patterns:
                # Use re.IGNORECASE instead of lowercasing the pattern:
                # lowering a regex would silently invert escape classes
                # (\D -> \d, \S -> \s, \B -> \b) if such a pattern were
                # ever added to the table.
                if re.search(pattern, message, re.IGNORECASE):
                    logger.debug(
                        f"Categorized error as {category.value}: {pattern}"
                    )
                    return category

        return ErrorCategory.UNKNOWN_ERROR

    def get_user_friendly_title(self, category: ErrorCategory) -> str:
        """
        Get a user-friendly title for an error category.

        Args:
            category: The error category

        Returns:
            str: User-friendly title ("Error" for unrecognized categories)
        """
        titles = {
            ErrorCategory.CONNECTION_ERROR: "Connection Issue",
            ErrorCategory.MODEL_ERROR: "LLM Service Error",
            ErrorCategory.SEARCH_ERROR: "Search Service Error",
            ErrorCategory.SYNTHESIS_ERROR: "Report Generation Error",
            ErrorCategory.FILE_ERROR: "File System Error",
            ErrorCategory.UNKNOWN_ERROR: "Unexpected Error",
        }
        return titles.get(category, "Error")

    def get_suggested_actions(self, category: ErrorCategory) -> list:
        """
        Get suggested actions for resolving an error.

        Args:
            category: The error category

        Returns:
            list: List of suggested action strings; a generic fallback is
            returned for unrecognized categories
        """
        suggestions = {
            ErrorCategory.CONNECTION_ERROR: [
                "Check if the LLM service (Ollama/LM Studio) is running",
                "Verify network connectivity",
                "Try switching to a different model provider",
                "Check the service logs for more details",
            ],
            ErrorCategory.MODEL_ERROR: [
                "Verify the model name is correct",
                "Check if the model is downloaded and available",
                "Validate API keys if using external services",
                "Try switching to a different model",
            ],
            ErrorCategory.SEARCH_ERROR: [
                "Check internet connectivity",
                "Try reducing the number of search results",
                "Wait a moment and try again",
                "Check if search service is configured correctly",
                "For local documents: ensure the path is absolute and folder exists",
                "Try a different search engine if one is failing",
            ],
            ErrorCategory.SYNTHESIS_ERROR: [
                "The research data was collected successfully",
                "Try switching to a different model for report generation",
                "Check the partial results below",
                "Review the detailed logs for more information",
            ],
            ErrorCategory.FILE_ERROR: [
                "Check disk space availability",
                "Verify write permissions",
                "Try changing the output directory",
                "Restart the application",
            ],
            ErrorCategory.UNKNOWN_ERROR: [
                "Check the detailed logs below for more information",
                "Try running the research again",
                "Report this issue if it persists",
                "Contact support with the error details",
            ],
        }
        return suggestions.get(category, ["Check the logs for more details"])

    def analyze_error(
        self, error_message: str, context: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Perform comprehensive error analysis.

        Args:
            error_message: The error message to analyze
            context: Optional context information (e.g. partial research
                state; keys like "findings", "current_knowledge",
                "search_results" are used to flag partial results)

        Returns:
            dict: Analysis with keys "category", "title", "original_error",
            "suggestions", "severity", "recoverable"; when ``context`` is
            given, also "context" and "has_partial_results"
        """
        category = self.categorize_error(error_message)

        analysis = {
            "category": category,
            "title": self.get_user_friendly_title(category),
            "original_error": error_message,
            "suggestions": self.get_suggested_actions(category),
            "severity": self._determine_severity(category),
            "recoverable": self._is_recoverable(category),
        }

        # Add context-specific information: partial results are present if
        # any of the intermediate research artifacts are non-empty.
        if context:
            analysis["context"] = context
            analysis["has_partial_results"] = bool(
                context.get("findings")
                or context.get("current_knowledge")
                or context.get("search_results")
            )

        return analysis

    def _determine_severity(self, category: ErrorCategory) -> str:
        """Determine error severity level ("high" / "medium" / "low")."""
        severity_map = {
            ErrorCategory.CONNECTION_ERROR: "high",
            ErrorCategory.MODEL_ERROR: "high",
            ErrorCategory.SEARCH_ERROR: "medium",
            ErrorCategory.SYNTHESIS_ERROR: "low",  # Can often show partial results
            ErrorCategory.FILE_ERROR: "medium",
            ErrorCategory.UNKNOWN_ERROR: "high",
        }
        return severity_map.get(category, "medium")

    def _is_recoverable(self, category: ErrorCategory) -> bool:
        """Determine if error is recoverable with user action."""
        # Unknown errors are the only non-recoverable category: with no
        # diagnosis there is no concrete user action to suggest.
        recoverable = {
            ErrorCategory.CONNECTION_ERROR: True,
            ErrorCategory.MODEL_ERROR: True,
            ErrorCategory.SEARCH_ERROR: True,
            ErrorCategory.SYNTHESIS_ERROR: True,
            ErrorCategory.FILE_ERROR: True,
            ErrorCategory.UNKNOWN_ERROR: False,
        }
        return recoverable.get(category, False)