fusesell-1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fusesell might be problematic. Click here for more details.
- fusesell-1.2.0.dist-info/METADATA +872 -0
- fusesell-1.2.0.dist-info/RECORD +31 -0
- fusesell-1.2.0.dist-info/WHEEL +5 -0
- fusesell-1.2.0.dist-info/entry_points.txt +2 -0
- fusesell-1.2.0.dist-info/licenses/LICENSE +21 -0
- fusesell-1.2.0.dist-info/top_level.txt +2 -0
- fusesell.py +15 -0
- fusesell_local/__init__.py +37 -0
- fusesell_local/api.py +341 -0
- fusesell_local/cli.py +1450 -0
- fusesell_local/config/__init__.py +11 -0
- fusesell_local/config/prompts.py +245 -0
- fusesell_local/config/settings.py +277 -0
- fusesell_local/pipeline.py +932 -0
- fusesell_local/stages/__init__.py +19 -0
- fusesell_local/stages/base_stage.py +602 -0
- fusesell_local/stages/data_acquisition.py +1820 -0
- fusesell_local/stages/data_preparation.py +1231 -0
- fusesell_local/stages/follow_up.py +1590 -0
- fusesell_local/stages/initial_outreach.py +2337 -0
- fusesell_local/stages/lead_scoring.py +1452 -0
- fusesell_local/tests/test_api.py +65 -0
- fusesell_local/tests/test_cli.py +37 -0
- fusesell_local/utils/__init__.py +15 -0
- fusesell_local/utils/birthday_email_manager.py +467 -0
- fusesell_local/utils/data_manager.py +4050 -0
- fusesell_local/utils/event_scheduler.py +618 -0
- fusesell_local/utils/llm_client.py +283 -0
- fusesell_local/utils/logger.py +203 -0
- fusesell_local/utils/timezone_detector.py +914 -0
- fusesell_local/utils/validators.py +416 -0
|
@@ -0,0 +1,283 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LLM Client for OpenAI API integration
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
try:
|
|
6
|
+
import openai
|
|
7
|
+
OPENAI_AVAILABLE = True
|
|
8
|
+
except ImportError:
|
|
9
|
+
OPENAI_AVAILABLE = False
|
|
10
|
+
openai = None
|
|
11
|
+
|
|
12
|
+
from typing import Dict, Any, List, Optional
|
|
13
|
+
import logging
|
|
14
|
+
import time
|
|
15
|
+
import json
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class LLMClient:
    """
    Client for interacting with OpenAI's API.
    Handles authentication, rate limiting, and error handling.
    """

    def __init__(self, api_key: str, model: str = "gpt-4o-mini", base_url: Optional[str] = None):
        """
        Initialize LLM client.

        Args:
            api_key: OpenAI API key
            model: Model to use for completions
            base_url: Optional base URL for API (for custom endpoints)

        Raises:
            ImportError: If the openai package is not installed
        """
        if not OPENAI_AVAILABLE:
            raise ImportError("OpenAI package not installed. Run: pip install openai")

        self.api_key = api_key
        self.model = model
        self.logger = logging.getLogger("fusesell.llm_client")

        # Initialize OpenAI client; a custom base_url enables OpenAI-compatible endpoints.
        if base_url:
            self.client = openai.OpenAI(api_key=api_key, base_url=base_url)
        else:
            self.client = openai.OpenAI(api_key=api_key)

    @staticmethod
    def _looks_like_html(text: str) -> bool:
        """
        Heuristically detect an HTML document in place of an API payload.

        Misconfigured endpoints and auth proxies often return an HTML error
        page; the check is case-insensitive so '<!DOCTYPE html>' is caught too.

        Args:
            text: Candidate payload text

        Returns:
            True if the text looks like an HTML document
        """
        stripped = text.strip().lower()
        return stripped.startswith(("<!doctype html", "<html"))

    def chat_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        response_format: Optional[Dict[str, str]] = None,
        **kwargs
    ) -> str:
        """
        Create a chat completion.

        Args:
            messages: List of message dictionaries with 'role' and 'content'
            temperature: Sampling temperature (0-2)
            max_tokens: Maximum tokens in response
            response_format: Optional response format specification
            **kwargs: Additional parameters for the API call

        Returns:
            Response content as string

        Raises:
            ValueError: If the endpoint returns HTML instead of a payload
            Exception: If API call fails after retries
        """
        try:
            # Prepare API call parameters
            api_params = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature,
                **kwargs
            }

            # Explicit None check so a legitimate max_tokens=0 is still forwarded.
            if max_tokens is not None:
                api_params["max_tokens"] = max_tokens

            if response_format:
                api_params["response_format"] = response_format

            self.logger.debug(f"Making API call with {len(messages)} messages")

            # Make API call with retry logic
            response = self._make_api_call_with_retry(api_params)

            # Extract content from response.
            # Handle both OpenAI response objects and direct string responses.
            if isinstance(response, str):
                if self._looks_like_html(response):
                    raise ValueError("Received HTML response instead of JSON from LLM endpoint. This usually indicates an authentication or endpoint configuration issue.")
                content = response
            elif hasattr(response, 'choices') and len(response.choices) > 0:
                content = response.choices[0].message.content
                if content and self._looks_like_html(content):
                    raise ValueError("Received HTML response instead of text from LLM endpoint. This usually indicates an authentication or endpoint configuration issue.")
            else:
                # Fallback: stringify whatever came back and re-check it.
                content = str(response)
                if self._looks_like_html(content):
                    raise ValueError("Received HTML response instead of JSON from LLM endpoint. This usually indicates an authentication or endpoint configuration issue.")

            # Log token usage if available
            if hasattr(response, 'usage'):
                self.logger.debug(f"Token usage - Prompt: {response.usage.prompt_tokens}, "
                                  f"Completion: {response.usage.completion_tokens}, "
                                  f"Total: {response.usage.total_tokens}")

            return content

        except Exception as e:
            self.logger.error(f"Chat completion failed: {str(e)}")
            raise

    def _make_api_call_with_retry(self, api_params: Dict[str, Any], max_retries: int = 3) -> Any:
        """
        Make API call with exponential backoff retry logic.

        Rate-limit errors and 5xx server errors are retried; everything else
        is raised immediately.

        Args:
            api_params: Parameters for the API call
            max_retries: Maximum number of retry attempts

        Returns:
            API response object

        Raises:
            Exception: If all retry attempts fail
        """
        last_exception = None

        for attempt in range(max_retries + 1):
            try:
                return self.client.chat.completions.create(**api_params)

            except openai.RateLimitError as e:
                last_exception = e
                if attempt < max_retries:
                    wait_time = (2 ** attempt) + 1  # Exponential backoff
                    self.logger.warning(f"Rate limit hit, waiting {wait_time}s before retry {attempt + 1}")
                    time.sleep(wait_time)
                    continue
                self.logger.error("Rate limit exceeded, max retries reached")
                raise

            except openai.APIError as e:
                last_exception = e
                # Not every APIError carries a status_code (e.g. connection
                # errors do not), so read it defensively instead of letting an
                # AttributeError mask the real failure. Only 5xx responses are
                # transient enough to be worth retrying.
                status_code = getattr(e, "status_code", None)
                if attempt < max_retries and status_code is not None and status_code >= 500:
                    wait_time = (2 ** attempt) + 1
                    self.logger.warning(f"API error {status_code}, retrying in {wait_time}s")
                    time.sleep(wait_time)
                    continue
                self.logger.error(f"API error: {str(e)}")
                raise

            except Exception as e:
                last_exception = e
                self.logger.error(f"Unexpected error in API call: {str(e)}")
                raise

        # If we get here, all retries failed
        raise last_exception

    def structured_completion(
        self,
        prompt: str,
        schema: Dict[str, Any],
        temperature: float = 0.3
    ) -> Dict[str, Any]:
        """
        Get structured JSON response from LLM.

        Args:
            prompt: The prompt to send
            schema: JSON schema for the expected response
            temperature: Sampling temperature

        Returns:
            Parsed JSON response

        Raises:
            ValueError: If response doesn't match schema or isn't valid JSON
        """
        # Add JSON formatting instruction to prompt
        json_prompt = f"""{prompt}

Please respond with valid JSON that matches this schema:
{json.dumps(schema, indent=2)}

Response:"""

        messages = [{"role": "user", "content": json_prompt}]

        try:
            # Try with JSON response format if supported
            response = self.chat_completion(
                messages=messages,
                temperature=temperature,
                response_format={"type": "json_object"}
            )
        except Exception:
            # Fallback to regular completion for endpoints without JSON mode
            response = self.chat_completion(
                messages=messages,
                temperature=temperature
            )

        # Parse JSON response
        try:
            return json.loads(response)
        except json.JSONDecodeError:
            # Try to extract JSON embedded in surrounding prose
            return self._extract_json_from_response(response)

    def _extract_json_from_response(self, response: str) -> Dict[str, Any]:
        """
        Extract JSON from LLM response that may contain additional text.

        Candidate spans are tried in order: a ```json fenced block, the
        outermost {...} span, then the outermost [...] span. The first one
        that parses wins.

        Args:
            response: Raw LLM response

        Returns:
            Extracted JSON dictionary (or list, if the payload is an array)

        Raises:
            ValueError: If no valid JSON found
        """
        candidates = []

        # Candidate 1: fenced ```json code block.
        if "```json" in response:
            start = response.find("```json") + 7
            end = response.find("```", start)
            if end != -1:
                candidates.append(response[start:end].strip())

        # Candidate 2: outermost object delimited by braces.
        start = response.find("{")
        end = response.rfind("}") + 1
        if start != -1 and end > start:
            candidates.append(response[start:end])

        # Candidate 3: outermost array delimited by brackets.
        start = response.find("[")
        end = response.rfind("]") + 1
        if start != -1 and end > start:
            candidates.append(response[start:end])

        for json_str in candidates:
            try:
                return json.loads(json_str)
            except json.JSONDecodeError:
                continue

        raise ValueError(f"Could not extract valid JSON from response: {response[:200]}...")

    def validate_api_key(self) -> bool:
        """
        Validate that the API key works by making a simple test call.

        Returns:
            True if API key is valid, False otherwise
        """
        try:
            response = self.chat_completion(
                messages=[{"role": "user", "content": "Hello"}],
                max_tokens=5
            )
            return len(response) > 0
        except Exception as e:
            self.logger.error(f"API key validation failed: {str(e)}")
            return False
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Logging configuration for FuseSell Local
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import sys
|
|
7
|
+
from typing import Optional
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# Module-level guard so repeated setup calls don't stack duplicate handlers.
_logging_configured = False

def setup_logging(
    level: str = "INFO",
    log_file: Optional[str] = None,
    verbose: bool = False,
    force_reconfigure: bool = False
) -> logging.Logger:
    """
    Set up logging configuration for FuseSell.

    Args:
        level: Logging level (DEBUG, INFO, WARNING, ERROR)
        log_file: Optional log file path
        verbose: Enable verbose logging
        force_reconfigure: Force reconfiguration even if already configured

    Returns:
        Configured logger instance
    """
    global _logging_configured

    # Short-circuit when a previous call already wired everything up.
    if _logging_configured and not force_reconfigure:
        logger = logging.getLogger("fusesell")
        logger.debug("Logging already configured, skipping setup")
        return logger

    # Translate the textual level into its numeric constant (default INFO).
    numeric_level = getattr(logging, level.upper(), logging.INFO)

    # Verbose mode adds the source location to every record.
    fmt = (
        '%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s'
        if verbose
        else '%(asctime)s - %(levelname)s - %(message)s'
    )
    formatter = logging.Formatter(fmt)

    # Reconfigure the root logger from scratch.
    root_logger = logging.getLogger()
    root_logger.setLevel(numeric_level)

    # Drop any handlers left over from an earlier configuration.
    for stale_handler in list(root_logger.handlers):
        root_logger.removeHandler(stale_handler)

    # Always emit to stdout.
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(numeric_level)
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)

    # Optionally mirror output to a file; failure here is non-fatal.
    if log_file:
        try:
            log_path = Path(log_file)
            log_path.parent.mkdir(parents=True, exist_ok=True)

            file_handler = logging.FileHandler(log_file, encoding='utf-8')
            file_handler.setLevel(numeric_level)
            file_handler.setFormatter(formatter)
            root_logger.addHandler(file_handler)
        except Exception as e:
            print(f"Warning: Could not set up file logging: {e}", file=sys.stderr)

    logger = logging.getLogger("fusesell")
    logger.info(f"Logging initialized at {level} level")
    if log_file:
        logger.info(f"Logging to file: {log_file}")

    # Remember that setup ran so later calls become no-ops.
    _logging_configured = True
    return logger
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def get_logger(name: str) -> logging.Logger:
    """
    Get a logger instance for a specific component.

    Args:
        name: Logger name (will be prefixed with 'fusesell.')

    Returns:
        Logger instance
    """
    return logging.getLogger("fusesell." + name)
|
106
|
+
|
|
107
|
+
|
|
108
|
+
class LoggerMixin:
    """
    Mixin class to add logging capabilities to other classes.
    """

    @property
    def logger(self) -> logging.Logger:
        """Get logger for this class."""
        # Logger name is derived from the concrete class, lowercased.
        return get_logger(self.__class__.__name__.lower())
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def log_execution_start(execution_id: str, config: dict) -> None:
    """
    Log the start of a FuseSell execution.

    Args:
        execution_id: Unique execution identifier
        config: Execution configuration
    """
    logger = get_logger("execution")
    # Emit a summary of the run's key configuration at INFO level.
    for line in (
        f"Starting execution {execution_id}",
        f"Organization: {config.get('org_name')} ({config.get('org_id')})",
        f"Customer: {config.get('customer_website')}",
        f"Language: {config.get('language', 'english')}",
    ):
        logger.info(line)
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def log_execution_complete(execution_id: str, status: str, duration: float) -> None:
    """
    Log the completion of a FuseSell execution.

    Args:
        execution_id: Unique execution identifier
        status: Execution status (completed, failed, etc.)
        duration: Execution duration in seconds
    """
    exec_logger = get_logger("execution")
    exec_logger.info(f"Execution {execution_id} {status} in {duration:.2f} seconds")
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def log_stage_start(stage_name: str, execution_id: str) -> None:
    """
    Log the start of a pipeline stage.

    Args:
        stage_name: Name of the stage
        execution_id: Execution identifier
    """
    stage_logger = get_logger("stage")
    stage_logger.info(f"Starting {stage_name} stage for execution {execution_id}")
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def log_stage_complete(stage_name: str, execution_id: str, status: str, duration: float) -> None:
    """
    Log the completion of a pipeline stage.

    Args:
        stage_name: Name of the stage
        execution_id: Execution identifier
        status: Stage status
        duration: Stage duration in seconds
    """
    stage_logger = get_logger("stage")
    stage_logger.info(f"Stage {stage_name} {status} for execution {execution_id} in {duration:.2f} seconds")
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def log_api_call(service: str, endpoint: str, status_code: int, duration: float) -> None:
    """
    Log API call details.

    Args:
        service: Service name (e.g., 'openai', 'serper')
        endpoint: API endpoint
        status_code: HTTP status code
        duration: Call duration in seconds
    """
    # DEBUG level: per-call telemetry is too chatty for INFO.
    api_logger = get_logger("api")
    api_logger.debug(f"{service} API call to {endpoint}: {status_code} in {duration:.3f}s")
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def log_error(component: str, error: Exception, context: Optional[dict] = None) -> None:
    """
    Log error with context information.

    Args:
        component: Component where error occurred
        error: Exception instance
        context: Optional context information
    """
    err_logger = get_logger("error")
    err_logger.error(f"Error in {component}: {str(error)}")

    if context:
        err_logger.error(f"Context: {context}")

    # Full traceback only at DEBUG; relies on being called from an
    # active except block for exc_info to carry the exception.
    err_logger.debug("Exception details:", exc_info=True)