fusesell-1.3.42-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (35)
  1. fusesell-1.3.42.dist-info/METADATA +873 -0
  2. fusesell-1.3.42.dist-info/RECORD +35 -0
  3. fusesell-1.3.42.dist-info/WHEEL +5 -0
  4. fusesell-1.3.42.dist-info/entry_points.txt +2 -0
  5. fusesell-1.3.42.dist-info/licenses/LICENSE +21 -0
  6. fusesell-1.3.42.dist-info/top_level.txt +2 -0
  7. fusesell.py +20 -0
  8. fusesell_local/__init__.py +37 -0
  9. fusesell_local/api.py +343 -0
  10. fusesell_local/cli.py +1480 -0
  11. fusesell_local/config/__init__.py +11 -0
  12. fusesell_local/config/default_email_templates.json +34 -0
  13. fusesell_local/config/default_prompts.json +19 -0
  14. fusesell_local/config/default_scoring_criteria.json +154 -0
  15. fusesell_local/config/prompts.py +245 -0
  16. fusesell_local/config/settings.py +277 -0
  17. fusesell_local/pipeline.py +978 -0
  18. fusesell_local/stages/__init__.py +19 -0
  19. fusesell_local/stages/base_stage.py +603 -0
  20. fusesell_local/stages/data_acquisition.py +1820 -0
  21. fusesell_local/stages/data_preparation.py +1238 -0
  22. fusesell_local/stages/follow_up.py +1728 -0
  23. fusesell_local/stages/initial_outreach.py +2972 -0
  24. fusesell_local/stages/lead_scoring.py +1452 -0
  25. fusesell_local/utils/__init__.py +36 -0
  26. fusesell_local/utils/agent_context.py +552 -0
  27. fusesell_local/utils/auto_setup.py +361 -0
  28. fusesell_local/utils/birthday_email_manager.py +467 -0
  29. fusesell_local/utils/data_manager.py +4857 -0
  30. fusesell_local/utils/event_scheduler.py +959 -0
  31. fusesell_local/utils/llm_client.py +342 -0
  32. fusesell_local/utils/logger.py +203 -0
  33. fusesell_local/utils/output_helpers.py +2443 -0
  34. fusesell_local/utils/timezone_detector.py +914 -0
  35. fusesell_local/utils/validators.py +436 -0
fusesell_local/utils/llm_client.py
@@ -0,0 +1,342 @@
+ """
+ LLM Client for OpenAI API integration
+ """
+
+ try:
+     import openai
+     OPENAI_AVAILABLE = True
+ except ImportError:
+     OPENAI_AVAILABLE = False
+     openai = None
+
+ from typing import Dict, Any, List, Optional
+ import logging
+ import time
+ import json
+ from urllib.parse import urlsplit, urlunsplit
+
+
+ def normalize_llm_base_url(base_url: Optional[str], provider: Optional[str] = None) -> Optional[str]:
+     """
+     Ensure LLM base URLs point to the OpenAI-compatible /v1 endpoint unless they already target
+     Azure deployment paths that do not expect the suffix.
+
+     Args:
+         base_url: User-provided base URL.
+         provider: Optional provider hint (e.g., 'azure-openai').
+
+     Returns:
+         Normalized base URL with `/v1` appended when needed, or ``None`` if input is empty.
+     """
+     if not base_url:
+         return None
+
+     normalized = base_url.strip()
+     if not normalized:
+         return None
+
+     provider_hint = (provider or "").lower()
+     if provider_hint.startswith("azure") or "openai.azure.com" in normalized.lower():
+         return normalized.rstrip("/")
+
+     try:
+         parsed = urlsplit(normalized)
+     except ValueError:
+         parsed = None
+
+     if parsed and parsed.scheme and parsed.netloc:
+         path = parsed.path.rstrip("/")
+         segments = [segment for segment in path.split("/") if segment]
+
+         if not segments:
+             new_path = "/v1"
+         elif segments[-1] in {"v1", "v1beta"} or "v1" in segments or "deployments" in segments:
+             new_path = "/" + "/".join(segments)
+         else:
+             new_path = f"{path}/v1" if path else "/v1"
+
+         rebuilt = urlunsplit(
+             (
+                 parsed.scheme,
+                 parsed.netloc,
+                 new_path,
+                 parsed.query,
+                 parsed.fragment,
+             )
+         )
+         return rebuilt.rstrip("/")
+
+     stripped = normalized.rstrip("/")
+     if stripped.endswith("/v1") or "/v1/" in stripped:
+         return stripped
+     return f"{stripped}/v1"
+
+
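For orientation, here is how the branches above behave on a few representative inputs (a quick sketch; the example URLs are invented):

```python
from fusesell_local.utils.llm_client import normalize_llm_base_url

# Bare host: the OpenAI-compatible "/v1" suffix is appended.
assert normalize_llm_base_url("https://api.example.com") == "https://api.example.com/v1"

# Already-versioned paths pass through (trailing slash trimmed).
assert normalize_llm_base_url("https://api.example.com/v1/") == "https://api.example.com/v1"

# Azure endpoints keep deployment-style paths without the suffix.
assert normalize_llm_base_url(
    "https://myres.openai.azure.com/", provider="azure-openai"
) == "https://myres.openai.azure.com"

# Empty or blank input normalizes to None.
assert normalize_llm_base_url("   ") is None
```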
+ class LLMClient:
+     """
+     Client for interacting with OpenAI's API.
+     Handles authentication, rate limiting, and error handling.
+     """
+
+     def __init__(self, api_key: str, model: str = "gpt-4.1-mini", base_url: Optional[str] = None):
+         """
+         Initialize LLM client.
+
+         Args:
+             api_key: OpenAI API key
+             model: Model to use for completions
+             base_url: Optional base URL for API (for custom endpoints)
+         """
+         if not OPENAI_AVAILABLE:
+             raise ImportError("OpenAI package not installed. Run: pip install openai")
+
+         self.api_key = api_key
+         self.model = model
+         self.logger = logging.getLogger("fusesell.llm_client")
+
+         normalized_base_url = normalize_llm_base_url(base_url)
+
+         # Initialize OpenAI client
+         if normalized_base_url:
+             self.client = openai.OpenAI(api_key=api_key, base_url=normalized_base_url)
+         else:
+             self.client = openai.OpenAI(api_key=api_key)
+
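Constructing the client against a self-hosted OpenAI-compatible server might look like this (the endpoint and key are placeholders, not values shipped with the package):

```python
from fusesell_local.utils.llm_client import LLMClient

# normalize_llm_base_url() rewrites this to "http://llm.internal:8000/v1"
# before it reaches the OpenAI SDK.
client = LLMClient(
    api_key="sk-placeholder",
    model="gpt-4.1-mini",
    base_url="http://llm.internal:8000",
)
```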
+     def chat_completion(
+         self,
+         messages: List[Dict[str, str]],
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         response_format: Optional[Dict[str, str]] = None,
+         **kwargs
+     ) -> str:
+         """
+         Create a chat completion.
+
+         Args:
+             messages: List of message dictionaries with 'role' and 'content'
+             temperature: Sampling temperature (0-2)
+             max_tokens: Maximum tokens in response
+             response_format: Optional response format specification
+             **kwargs: Additional parameters for the API call
+
+         Returns:
+             Response content as string
+
+         Raises:
+             Exception: If API call fails after retries
+         """
+         try:
+             # Prepare API call parameters
+             api_params = {
+                 "model": self.model,
+                 "messages": messages,
+                 "temperature": temperature,
+                 **kwargs
+             }
+
+             if max_tokens:
+                 api_params["max_tokens"] = max_tokens
+
+             if response_format:
+                 api_params["response_format"] = response_format
+
+             self.logger.debug(f"Making API call with {len(messages)} messages")
+
+             # Make API call with retry logic
+             response = self._make_api_call_with_retry(api_params)
+
+             # Extract content from response
+             # Handle both OpenAI format and direct string responses
+             if isinstance(response, str):
+                 # Check if response is HTML (indicates error)
+                 if response.strip().startswith('<!doctype html') or response.strip().startswith('<html'):
+                     raise ValueError("Received HTML response instead of JSON from LLM endpoint. This usually indicates an authentication or endpoint configuration issue.")
+                 content = response
+             elif hasattr(response, 'choices') and len(response.choices) > 0:
+                 content = response.choices[0].message.content
+                 # Check if content is HTML
+                 if content and (content.strip().startswith('<!doctype html') or content.strip().startswith('<html')):
+                     raise ValueError("Received HTML response instead of text from LLM endpoint. This usually indicates an authentication or endpoint configuration issue.")
+             else:
+                 # Fallback: try to extract content from response
+                 content = str(response)
+                 if content.strip().startswith('<!doctype html') or content.strip().startswith('<html'):
+                     raise ValueError("Received HTML response instead of JSON from LLM endpoint. This usually indicates an authentication or endpoint configuration issue.")
+
+             # Log token usage if available
+             if hasattr(response, 'usage'):
+                 self.logger.debug(f"Token usage - Prompt: {response.usage.prompt_tokens}, "
+                                   f"Completion: {response.usage.completion_tokens}, "
+                                   f"Total: {response.usage.total_tokens}")
+
+             return content
+
+         except Exception as e:
+             self.logger.error(f"Chat completion failed: {str(e)}")
+             raise
+
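A minimal call sketch, reusing the `client` from the earlier constructor example (the messages are illustrative):

```python
reply = client.chat_completion(
    messages=[
        {"role": "system", "content": "You are a concise sales assistant."},
        {"role": "user", "content": "Draft a one-line follow-up email subject."},
    ],
    temperature=0.4,
    max_tokens=64,
)
print(reply)  # plain string; HTML error pages raise ValueError instead
```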
+     def _make_api_call_with_retry(self, api_params: Dict[str, Any], max_retries: int = 3) -> Any:
+         """
+         Make API call with exponential backoff retry logic.
+
+         Args:
+             api_params: Parameters for the API call
+             max_retries: Maximum number of retry attempts
+
+         Returns:
+             API response object
+
+         Raises:
+             Exception: If all retry attempts fail
+         """
+         last_exception = None
+
+         for attempt in range(max_retries + 1):
+             try:
+                 response = self.client.chat.completions.create(**api_params)
+                 return response
+
+             except openai.RateLimitError as e:
+                 last_exception = e
+                 if attempt < max_retries:
+                     wait_time = (2 ** attempt) + 1  # Exponential backoff
+                     self.logger.warning(f"Rate limit hit, waiting {wait_time}s before retry {attempt + 1}")
+                     time.sleep(wait_time)
+                     continue
+                 else:
+                     self.logger.error("Rate limit exceeded, max retries reached")
+                     raise
+
+             except openai.APIError as e:
+                 last_exception = e
+                 # Not every APIError carries a status_code (connection errors,
+                 # for example), so read it defensively; only 5xx are retried.
+                 status_code = getattr(e, "status_code", None)
+                 if attempt < max_retries and status_code is not None and status_code >= 500:
+                     wait_time = (2 ** attempt) + 1
+                     self.logger.warning(f"API error {status_code}, retrying in {wait_time}s")
+                     time.sleep(wait_time)
+                     continue
+                 else:
+                     self.logger.error(f"API error: {str(e)}")
+                     raise
+
+             except Exception as e:
+                 last_exception = e
+                 self.logger.error(f"Unexpected error in API call: {str(e)}")
+                 raise
+
+         # If we get here, all retries failed
+         raise last_exception
+
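The backoff schedule is `(2 ** attempt) + 1` seconds, so with the default `max_retries=3` a persistently rate-limited call sleeps 2s, 3s, then 5s before the fourth and final attempt:

```python
max_retries = 3
waits = [(2 ** attempt) + 1 for attempt in range(max_retries)]
print(waits)  # [2, 3, 5] -> 10s of total sleep before the last try
```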
+     def structured_completion(
+         self,
+         prompt: str,
+         schema: Dict[str, Any],
+         temperature: float = 0.3
+     ) -> Dict[str, Any]:
+         """
+         Get structured JSON response from LLM.
+
+         Args:
+             prompt: The prompt to send
+             schema: JSON schema for the expected response
+             temperature: Sampling temperature
+
+         Returns:
+             Parsed JSON response
+
+         Raises:
+             ValueError: If response doesn't match schema or isn't valid JSON
+         """
+         # Add JSON formatting instruction to prompt
+         json_prompt = f"""{prompt}
+
+ Please respond with valid JSON that matches this schema:
+ {json.dumps(schema, indent=2)}
+
+ Response:"""
+
+         messages = [{"role": "user", "content": json_prompt}]
+
+         try:
+             # Try with JSON response format if supported
+             response = self.chat_completion(
+                 messages=messages,
+                 temperature=temperature,
+                 response_format={"type": "json_object"}
+             )
+         except Exception:
+             # Fallback to regular completion
+             response = self.chat_completion(
+                 messages=messages,
+                 temperature=temperature
+             )
+
+         # Parse JSON response
+         try:
+             return json.loads(response)
+         except json.JSONDecodeError:
+             # Try to extract JSON from response
+             return self._extract_json_from_response(response)
+
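For example, a lead-qualification prompt might pass a schema like the following (the field names are invented for illustration; `client` is from the earlier sketch):

```python
schema = {
    "type": "object",
    "properties": {
        "fit_score": {"type": "integer", "minimum": 0, "maximum": 100},
        "rationale": {"type": "string"},
    },
    "required": ["fit_score", "rationale"],
}

result = client.structured_completion(
    prompt="Rate how well ACME Corp fits our ideal customer profile.",
    schema=schema,
)
print(result["fit_score"], result["rationale"])
```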
+     def _extract_json_from_response(self, response: str) -> Any:
+         """
+         Extract JSON from LLM response that may contain additional text.
+
+         Args:
+             response: Raw LLM response
+
+         Returns:
+             Extracted JSON value (a dict, or a list when the response
+             contains a top-level JSON array)
+
+         Raises:
+             ValueError: If no valid JSON found
+         """
+         # Try to find JSON in code blocks
+         if "```json" in response:
+             start = response.find("```json") + 7
+             end = response.find("```", start)
+             if end != -1:
+                 json_str = response[start:end].strip()
+                 try:
+                     return json.loads(json_str)
+                 except json.JSONDecodeError:
+                     pass
+
+         # Try to find JSON by braces
+         start = response.find("{")
+         end = response.rfind("}") + 1
+         if start != -1 and end > start:
+             json_str = response[start:end]
+             try:
+                 return json.loads(json_str)
+             except json.JSONDecodeError:
+                 pass
+
+         # Try to find JSON array
+         start = response.find("[")
+         end = response.rfind("]") + 1
+         if start != -1 and end > start:
+             json_str = response[start:end]
+             try:
+                 return json.loads(json_str)
+             except json.JSONDecodeError:
+                 pass
+
+         raise ValueError(f"Could not extract valid JSON from response: {response[:200]}...")
+
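To see the fallback parser at work, here is roughly what it recovers from a chatty reply (the reply text is invented, and the method is private, so calling it directly is for illustration only; the fence is built from parts to keep this block renderable):

```python
fence = "`" * 3  # a literal ```
raw = f"Sure! Here is the data:\n{fence}json\n{{\"fit_score\": 72}}\n{fence}\nAnything else?"
print(client._extract_json_from_response(raw))  # {'fit_score': 72}
```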
+     def validate_api_key(self) -> bool:
+         """
+         Validate that the API key works by making a simple test call.
+
+         Returns:
+             True if API key is valid, False otherwise
+         """
+         try:
+             response = self.chat_completion(
+                 messages=[{"role": "user", "content": "Hello"}],
+                 max_tokens=5
+             )
+             # bool() rather than len() so a None content doesn't raise
+             return bool(response)
+         except Exception as e:
+             self.logger.error(f"API key validation failed: {str(e)}")
+             return False
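A startup-time guard built on this check might look like the following sketch (the exit message is ours, not the package's):

```python
client = LLMClient(api_key="sk-placeholder")
if not client.validate_api_key():
    raise SystemExit("LLM credentials rejected; check the API key and base URL")
```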
fusesell_local/utils/logger.py
@@ -0,0 +1,203 @@
+ """
+ Logging configuration for FuseSell Local
+ """
+
+ import logging
+ import sys
+ from typing import Optional
+ from pathlib import Path
+ from datetime import datetime
+
+
+ # Global flag to prevent multiple logging setups
+ _logging_configured = False
+
+
+ def setup_logging(
+     level: str = "INFO",
+     log_file: Optional[str] = None,
+     verbose: bool = False,
+     force_reconfigure: bool = False
+ ) -> logging.Logger:
+     """
+     Set up logging configuration for FuseSell.
+
+     Args:
+         level: Logging level (DEBUG, INFO, WARNING, ERROR)
+         log_file: Optional log file path
+         verbose: Enable verbose logging
+         force_reconfigure: Force reconfiguration even if already configured
+
+     Returns:
+         Configured logger instance
+     """
+     global _logging_configured
+
+     # Check if logging is already configured
+     if _logging_configured and not force_reconfigure:
+         logger = logging.getLogger("fusesell")
+         logger.debug("Logging already configured, skipping setup")
+         return logger
+
+     # Convert string level to logging constant
+     numeric_level = getattr(logging, level.upper(), logging.INFO)
+
+     # Create formatter
+     if verbose:
+         formatter = logging.Formatter(
+             '%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s'
+         )
+     else:
+         formatter = logging.Formatter(
+             '%(asctime)s - %(levelname)s - %(message)s'
+         )
+
+     # Configure root logger
+     root_logger = logging.getLogger()
+     root_logger.setLevel(numeric_level)
+
+     # Remove existing handlers
+     for handler in root_logger.handlers[:]:
+         root_logger.removeHandler(handler)
+
+     # Console handler
+     console_handler = logging.StreamHandler(sys.stdout)
+     console_handler.setLevel(numeric_level)
+     console_handler.setFormatter(formatter)
+     root_logger.addHandler(console_handler)
+
+     # File handler if specified
+     if log_file:
+         try:
+             # Create log directory if it doesn't exist
+             log_path = Path(log_file)
+             log_path.parent.mkdir(parents=True, exist_ok=True)
+
+             file_handler = logging.FileHandler(log_file, encoding='utf-8')
+             file_handler.setLevel(numeric_level)
+             file_handler.setFormatter(formatter)
+             root_logger.addHandler(file_handler)
+
+         except Exception as e:
+             print(f"Warning: Could not set up file logging: {e}", file=sys.stderr)
+
+     # Get FuseSell logger
+     logger = logging.getLogger("fusesell")
+     logger.info(f"Logging initialized at {level} level")
+
+     if log_file:
+         logger.info(f"Logging to file: {log_file}")
+
+     # Mark logging as configured
+     _logging_configured = True
+
+     return logger
+
+
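Typical initialization, with values chosen for illustration:

```python
from fusesell_local.utils.logger import setup_logging, get_logger

setup_logging(level="DEBUG", log_file="logs/fusesell.log", verbose=True)

log = get_logger("example")  # logger named "fusesell.example"
log.debug("verbose format adds logger name, file name, and line number")
```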
+ def get_logger(name: str) -> logging.Logger:
+     """
+     Get a logger instance for a specific component.
+
+     Args:
+         name: Logger name (will be prefixed with 'fusesell.')
+
+     Returns:
+         Logger instance
+     """
+     return logging.getLogger(f"fusesell.{name}")
+
+
+ class LoggerMixin:
+     """
+     Mixin class to add logging capabilities to other classes.
+     """
+
+     @property
+     def logger(self) -> logging.Logger:
+         """Get logger for this class."""
+         class_name = self.__class__.__name__.lower()
+         return get_logger(class_name)
+
+
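Any pipeline class can pick up a namespaced logger by inheriting the mixin; a toy example:

```python
from fusesell_local.utils.logger import LoggerMixin

class QuoteBuilder(LoggerMixin):
    def build(self) -> None:
        # Emitted under the "fusesell.quotebuilder" logger
        # (lower-cased class name).
        self.logger.info("building quote")

QuoteBuilder().build()
```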
+ def log_execution_start(execution_id: str, config: dict) -> None:
+     """
+     Log the start of a FuseSell execution.
+
+     Args:
+         execution_id: Unique execution identifier
+         config: Execution configuration
+     """
+     logger = get_logger("execution")
+     logger.info(f"Starting execution {execution_id}")
+     logger.info(f"Organization: {config.get('org_name')} ({config.get('org_id')})")
+     logger.info(f"Customer: {config.get('customer_website')}")
+     logger.info(f"Language: {config.get('language', 'english')}")
+
+
+ def log_execution_complete(execution_id: str, status: str, duration: float) -> None:
+     """
+     Log the completion of a FuseSell execution.
+
+     Args:
+         execution_id: Unique execution identifier
+         status: Execution status (completed, failed, etc.)
+         duration: Execution duration in seconds
+     """
+     logger = get_logger("execution")
+     logger.info(f"Execution {execution_id} {status} in {duration:.2f} seconds")
+
+
+ def log_stage_start(stage_name: str, execution_id: str) -> None:
+     """
+     Log the start of a pipeline stage.
+
+     Args:
+         stage_name: Name of the stage
+         execution_id: Execution identifier
+     """
+     logger = get_logger("stage")
+     logger.info(f"Starting {stage_name} stage for execution {execution_id}")
+
+
+ def log_stage_complete(stage_name: str, execution_id: str, status: str, duration: float) -> None:
+     """
+     Log the completion of a pipeline stage.
+
+     Args:
+         stage_name: Name of the stage
+         execution_id: Execution identifier
+         status: Stage status
+         duration: Stage duration in seconds
+     """
+     logger = get_logger("stage")
+     logger.info(f"Stage {stage_name} {status} for execution {execution_id} in {duration:.2f} seconds")
+
+
+ def log_api_call(service: str, endpoint: str, status_code: int, duration: float) -> None:
+     """
+     Log API call details.
+
+     Args:
+         service: Service name (e.g., 'openai', 'serper')
+         endpoint: API endpoint
+         status_code: HTTP status code
+         duration: Call duration in seconds
+     """
+     logger = get_logger("api")
+     logger.debug(f"{service} API call to {endpoint}: {status_code} in {duration:.3f}s")
+
+
+ def log_error(component: str, error: Exception, context: Optional[dict] = None) -> None:
+     """
+     Log error with context information.
+
+     Args:
+         component: Component where error occurred
+         error: Exception instance
+         context: Optional context information
+     """
+     logger = get_logger("error")
+     logger.error(f"Error in {component}: {str(error)}")
+
+     if context:
+         logger.error(f"Context: {context}")
+
+     logger.debug("Exception details:", exc_info=True)
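Tying the helper functions together around a run might look like this sketch (the execution id, stage name, and timing are illustrative; the config keys mirror the ones `log_execution_start` reads):

```python
import time

from fusesell_local.utils.logger import (
    log_error,
    log_execution_complete,
    log_execution_start,
    log_stage_complete,
    log_stage_start,
)

config = {
    "org_name": "Acme",
    "org_id": "org-123",
    "customer_website": "https://example.com",
    "language": "english",
}

start = time.monotonic()
log_execution_start("exec-001", config)
try:
    log_stage_start("lead_scoring", "exec-001")
    # ... run the stage here ...
    log_stage_complete("lead_scoring", "exec-001", "completed", 1.25)
    log_execution_complete("exec-001", "completed", time.monotonic() - start)
except Exception as exc:
    log_error("pipeline", exc, context={"execution_id": "exec-001"})
```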