michael-agent 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. michael_agent/config/__init__.py +0 -0
  2. michael_agent/config/settings.py +66 -0
  3. michael_agent/dashboard/__init__.py +0 -0
  4. michael_agent/dashboard/app.py +1450 -0
  5. michael_agent/dashboard/static/__init__.py +0 -0
  6. michael_agent/dashboard/templates/__init__.py +0 -0
  7. michael_agent/langgraph_workflow/__init__.py +0 -0
  8. michael_agent/langgraph_workflow/graph_builder.py +358 -0
  9. michael_agent/langgraph_workflow/nodes/__init__.py +0 -0
  10. michael_agent/langgraph_workflow/nodes/assessment_handler.py +177 -0
  11. michael_agent/langgraph_workflow/nodes/jd_generator.py +139 -0
  12. michael_agent/langgraph_workflow/nodes/jd_poster.py +156 -0
  13. michael_agent/langgraph_workflow/nodes/question_generator.py +295 -0
  14. michael_agent/langgraph_workflow/nodes/recruiter_notifier.py +224 -0
  15. michael_agent/langgraph_workflow/nodes/resume_analyzer.py +631 -0
  16. michael_agent/langgraph_workflow/nodes/resume_ingestor.py +225 -0
  17. michael_agent/langgraph_workflow/nodes/sentiment_analysis.py +309 -0
  18. michael_agent/utils/__init__.py +0 -0
  19. michael_agent/utils/email_utils.py +140 -0
  20. michael_agent/utils/id_mapper.py +14 -0
  21. michael_agent/utils/jd_utils.py +34 -0
  22. michael_agent/utils/lms_api.py +226 -0
  23. michael_agent/utils/logging_utils.py +192 -0
  24. michael_agent/utils/monitor_utils.py +289 -0
  25. michael_agent/utils/node_tracer.py +88 -0
  26. {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/METADATA +2 -2
  27. michael_agent-1.0.2.dist-info/RECORD +32 -0
  28. michael_agent-1.0.0.dist-info/RECORD +0 -7
  29. {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/WHEEL +0 -0
  30. {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/top_level.txt +0 -0
michael_agent/langgraph_workflow/nodes/resume_ingestor.py
@@ -0,0 +1,225 @@
+ """
+ Resume Ingestor Node
+ Watches for new resumes from local directory
+ """
+
+ import os
+ import logging
+ import shutil
+ import time
+ import atexit
+ import json
+ from typing import Dict, Any, Optional
+ from watchdog.observers import Observer
+ from watchdog.events import FileSystemEventHandler
+
+ # Import config
+ from config import settings
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Global variable for watchdog observer
+ _global_observer = None
+
+ # Global variable for watchdog observer
+ _global_observer = None
+
+ class ResumeFileHandler(FileSystemEventHandler):
+     """File system event handler for monitoring new resume files"""
+
+     def __init__(self, callback_fn):
+         self.callback_fn = callback_fn
+         self.extensions = ['.pdf', '.docx', '.doc', '.txt']
+
+     def on_created(self, event):
+         if not event.is_directory:
+             file_path = event.src_path
+             if any(file_path.lower().endswith(ext) for ext in self.extensions):
+                 logger.info(f"New resume detected: {file_path}")
+                 self.callback_fn(file_path)
+
+ def setup_watcher(watch_dir: str, callback_fn):
+     """Set up a file system watcher for the specified directory"""
+     observer = Observer()
+     event_handler = ResumeFileHandler(callback_fn)
+     observer.schedule(event_handler, watch_dir, recursive=False)
+     observer.start()
+
+     # Store in global variable for cleanup
+     global _global_observer
+     _global_observer = observer
+
+     return observer
+
+ def copy_resume_to_output(resume_path: str) -> str:
+     """Copy the resume to the output directory"""
+     try:
+         # Ensure output directory exists
+         os.makedirs(settings.OUTPUT_DIR, exist_ok=True)
+
+         # Copy the file to output directory
+         filename = os.path.basename(resume_path)
+         output_path = os.path.join(settings.OUTPUT_DIR, filename)
+         shutil.copy2(resume_path, output_path)
+
+         # Return the path to the copied file
+         return output_path
+     except Exception as e:
+         logger.error(f"Error copying resume to output: {str(e)}")
+         raise
+
+ def ingest_resume(state: Dict[str, Any]) -> Dict[str, Any]:
+     """Ingest new resumes for processing from the local file system"""
+     try:
+         # Check if this is just a setup call without processing existing files
+         setup_only = state.get("setup_only", False)
+
+         # Check if we already have a resume path in the state
+         if state.get("resume_path") and not setup_only:
+             logger.info(f"Processing resume from state: {state.get('resume_path')}")
+             resume_path = state.get("resume_path")
+
+             # Copy to output directory if it exists and is not already there
+             if os.path.exists(resume_path) and not resume_path.startswith(settings.OUTPUT_DIR):
+                 output_path = copy_resume_to_output(resume_path)
+                 # Update the path to point to the copied file
+                 state["resume_path"] = output_path
+                 logger.info(f"Copied resume to output directory: {output_path}")
+
+             # Move on with the workflow
+             return state
+
+         # Ensure output directory exists
+         os.makedirs(settings.OUTPUT_DIR, exist_ok=True)
+
+         # Ensure log directory exists
+         os.makedirs(settings.LOG_DIR, exist_ok=True)
+
+         # Set up local file system watcher if not already running
+         if 'resume_watcher' not in state or not state['resume_watcher']:
+             # Create the watch directory if it doesn't exist
+             os.makedirs(settings.RESUME_WATCH_DIR, exist_ok=True)
+
+             # Process any existing files in the directory (skip if setup_only)
+             if not setup_only:
+                 for filename in os.listdir(settings.RESUME_WATCH_DIR):
+                     file_path = os.path.join(settings.RESUME_WATCH_DIR, filename)
+                     if os.path.isfile(file_path) and any(file_path.lower().endswith(ext) for ext in ['.pdf', '.docx', '.doc', '.txt']) and not filename.endswith('.json'):
+                         logger.info(f"Processing existing resume: {file_path}")
+                         output_path = copy_resume_to_output(file_path)
+                         if 'resumes' not in state:
+                             state['resumes'] = []
+                         state['resumes'].append({
+                             'filename': os.path.basename(file_path),
+                             'path': output_path,
+                             'status': 'received'
+                         })
+
+                         # Automatically trigger workflow for this resume
+                         from langgraph_workflow.graph_builder import process_new_resume
+                         try:
+                             process_new_resume(file_path)
+                             logger.info(f"Triggered workflow for existing resume: {file_path}")
+                         except Exception as e:
+                             logger.error(f"Failed to trigger workflow for existing resume {file_path}: {str(e)}")
+             else:
+                 logger.info("Setup only mode - skipping processing of existing files")
+
+             # Set up the watcher for future files
+             def process_new_file(file_path):
+                 logger.info(f"New resume detected: {file_path}")
+                 output_path = copy_resume_to_output(file_path)
+                 # Update the state (in a real app, you'd need proper concurrency handling)
+                 if 'resumes' not in state:
+                     state['resumes'] = []
+                 state['resumes'].append({
+                     'filename': os.path.basename(file_path),
+                     'path': output_path,
+                     'status': 'received'
+                 })
+
+                 # Automatically trigger workflow for this resume, specifying resume_ingestor as the entry point
+                 from langgraph_workflow.graph_builder import process_new_resume
+                 try:
+                     process_new_resume(file_path)
+                     logger.info(f"Triggered workflow for new resume: {file_path}")
+                 except Exception as e:
+                     logger.error(f"Failed to trigger workflow for new resume {file_path}: {str(e)}")
+
+             # Start the watcher
+             observer = setup_watcher(settings.RESUME_WATCH_DIR, process_new_file)
+             state['resume_watcher'] = observer
+
+         return state
+     except Exception as e:
+         logger.error(f"Error in resume ingestion: {str(e)}")
+         # Add error to state
+         if 'errors' not in state:
+             state['errors'] = []
+         state['errors'].append(f"Resume ingestion error: {str(e)}")
+         return state
+
+ def extract_job_id_from_filename(filename: str) -> str:
+     """Extract job ID from the resume filename"""
+     try:
+         # Assuming the job ID is the first part of the filename, before an underscore
+         job_id = os.path.basename(filename).split('_')[0]
+         logger.info(f"Extracted job ID {job_id} from filename {filename}")
+         return job_id
+     except Exception as e:
+         logger.error(f"Error extracting job ID from filename {filename}: {str(e)}")
+         return ""
+
+ def process_resume(file_path: str, state: Dict[str, Any] = None) -> Dict[str, Any]:
+     """
+     Process a resume file
+     """
+     logger.info(f"Processing resume from state: {file_path}")
+
+     # Extract job ID from filename (e.g., 20250626225207_Michael_Jone.pdf)
+     try:
+         job_id = extract_job_id_from_filename(file_path)
+         logger.info(f"Extracted job ID from filename: {job_id}")
+
+         # Update state with the extracted job ID
+         if state is None:
+             state = {}
+         state["job_id"] = job_id
+         logger.info(f"Updated state with job ID: {job_id}")
+     except Exception as e:
+         logger.error(f"Failed to extract job ID from filename: {e}")
+
+     # Copy resume to processed directory
+     output_dir = "./processed_resumes"
+     os.makedirs(output_dir, exist_ok=True)
+
+     # Get just the filename without path
+     filename = os.path.basename(file_path)
+     output_path = os.path.join(output_dir, filename)
+
+     shutil.copy(file_path, output_path)
+     logger.info(f"Copied resume to output directory: {output_path}")
+
+     # Return updated state with resume path and job_id
+     return {
+         "status": "completed_ingest_resume",
+         "resume_path": output_path,
+         "job_id": job_id
+     }
+
+ def cleanup(state: Dict[str, Any]) -> None:
+     """Clean up resources when shutting down"""
+     try:
+         if 'resume_watcher' in state and state['resume_watcher']:
+             logger.info("Stopping resume watcher...")
+             state['resume_watcher'].stop()
+             state['resume_watcher'].join(timeout=5.0)  # Wait up to 5 seconds for clean shutdown
+             logger.info("Resume watcher stopped")
+     except Exception as e:
+         logger.error(f"Error stopping resume watcher: {str(e)}")
+
+ # Register cleanup function to be called on program exit
+ import atexit
+ atexit.register(lambda: cleanup({'resume_watcher': globals().get('_global_observer')}))
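For orientation, this module exposes two entry points: ingest_resume (called by the LangGraph workflow with a state dict) and the watchdog callback fired for files dropped into RESUME_WATCH_DIR. Below is a minimal sketch of driving the watcher helpers on their own; it is not part of the published package, the directory name and run duration are invented for illustration, and it assumes the package's top-level `config.settings` module is importable (resume_ingestor does `from config import settings` at import time).

    # Illustrative sketch only; names below marked as assumptions are not in the package.
    import os
    import time

    from michael_agent.langgraph_workflow.nodes.resume_ingestor import setup_watcher, cleanup

    def on_new_resume(path: str) -> None:
        # In the package this callback copies the file and triggers the workflow;
        # here it only reports the path.
        print(f"resume arrived: {path}")

    watch_dir = "./incoming_resumes"  # hypothetical directory for this sketch
    os.makedirs(watch_dir, exist_ok=True)

    observer = setup_watcher(watch_dir, on_new_resume)
    try:
        time.sleep(10)  # keep the observer alive briefly while files arrive
    finally:
        cleanup({"resume_watcher": observer})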
michael_agent/langgraph_workflow/nodes/sentiment_analysis.py
@@ -0,0 +1,309 @@
+ """
+ Sentiment Analysis Node
+ Analyzes cover letters for sentiment using Azure OpenAI
+ """
+
+ import os
+ import logging
+ from typing import Dict, Any, List, Optional
+ import json
+
+ # Try to import Azure OpenAI
+ try:
+     from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings
+     from langchain_core.messages import HumanMessage
+     import numpy as np
+     from sklearn.metrics.pairwise import cosine_similarity
+     OPENAI_AVAILABLE = True
+ except ImportError:
+     OPENAI_AVAILABLE = False
+
+ # Import config
+ from config import settings
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ def create_azure_openai_client():
+     """Create and configure the Azure OpenAI client"""
+     if not OPENAI_AVAILABLE:
+         logger.warning("Azure OpenAI not available. Install with 'pip install langchain-openai'")
+         return None
+
+     try:
+         # Use the correct parameter names for Azure OpenAI
+         api_key = settings.AZURE_OPENAI_API_KEY or settings.AZURE_OPENAI_KEY or settings.OPENAI_API_KEY
+         endpoint = settings.AZURE_OPENAI_ENDPOINT or settings.OPENAI_API_BASE
+         api_version = settings.AZURE_OPENAI_API_VERSION or settings.OPENAI_API_VERSION
+         deployment = settings.AZURE_OPENAI_DEPLOYMENT
+
+         return AzureChatOpenAI(
+             azure_deployment=deployment,  # Use azure_deployment parameter
+             api_version=api_version,
+             azure_endpoint=endpoint,  # Use azure_endpoint instead of openai_api_base
+             api_key=api_key
+         )
+     except Exception as e:
+         logger.error(f"Error initializing Azure OpenAI client: {str(e)}")
+         return None
+
+ def create_embeddings_client():
+     """
+     This function is no longer used as we've removed the embedding-based approach.
+     It's kept for backward compatibility but now returns None.
+     """
+     logger.info("Embeddings client creation skipped - using direct LLM approach instead")
+     return None
+
+ def extract_cover_letter(resume_text: str) -> Optional[str]:
+     """Extract cover letter text from resume text"""
+     # This is a very basic implementation - in practice, you'd need more sophisticated logic
+
+     # Look for "cover letter" or "letter of interest" in the text
+     lines = resume_text.lower().split('\n')
+     cover_letter_text = None
+
+     for i, line in enumerate(lines):
+         if "cover letter" in line or "letter of interest" in line or "dear hiring" in line:
+             # Extract next 20 lines as the potential cover letter
+             potential_cover_letter = "\n".join(lines[i:i+20])
+             if len(potential_cover_letter) > 100:  # Only use if it's substantial
+                 cover_letter_text = potential_cover_letter
+                 break
+
+     return cover_letter_text
+
+ def analyze_text_sentiment_openai_chat(client, text: str) -> Dict[str, Any]:
+     """
+     Analyze text sentiment using Azure OpenAI Chat
+     Args:
+         client: Azure OpenAI client
+         text: Text to analyze
+     Returns:
+         Sentiment analysis result
+     """
+     try:
+         # Create the prompt
+         prompt = f"""
+         Analyze the sentiment of the following text. Respond with a JSON object containing:
+         - sentiment: either "positive", "neutral", or "negative"
+         - positive_score: a float between 0 and 1
+         - neutral_score: a float between 0 and 1
+         - negative_score: a float between 0 and 1
+         - sentences: the number of sentences in the text
+
+         Here's the text to analyze:
+
+         {text}
+         """
+
+         # Get the response from Azure OpenAI
+         response = client.invoke([HumanMessage(content=prompt)])
+
+         # Parse the response as JSON
+         try:
+             content = response.content
+             # Extract JSON if it's wrapped in markdown code blocks
+             if "```json" in content and "```" in content:
+                 json_part = content.split("```json")[1].split("```")[0].strip()
+                 sentiment_data = json.loads(json_part)
+             else:
+                 sentiment_data = json.loads(content)
+         except json.JSONDecodeError:
+             # If response is not valid JSON, extract key values using simple parsing
+             content = response.content.lower()
+             sentiment = "positive" if "positive" in content else "negative" if "negative" in content else "neutral"
+             return {
+                 "sentiment": sentiment,
+                 "positive_score": 0.33,
+                 "neutral_score": 0.34,
+                 "negative_score": 0.33,
+                 "sentences": len(text.split('.')),
+                 "parsing_method": "fallback"
+             }
+
+         return {
+             "sentiment": sentiment_data.get("sentiment", "neutral"),
+             "positive_score": sentiment_data.get("positive_score", 0.33),
+             "neutral_score": sentiment_data.get("neutral_score", 0.34),
+             "negative_score": sentiment_data.get("negative_score", 0.33),
+             "sentences": sentiment_data.get("sentences", len(text.split('.'))),
+             "parsing_method": "json"
+         }
+     except Exception as e:
+         logger.error(f"Error analyzing sentiment with Azure OpenAI Chat: {str(e)}")
+         return {
+             "sentiment": "neutral",
+             "positive_score": 0.33,
+             "neutral_score": 0.34,
+             "negative_score": 0.33,
+             "sentences": len(text.split('.')),
+             "error": str(e),
+         }
+
+ def analyze_text_sentiment_embeddings(client, text: str) -> Dict[str, Any]:
+     """
+     This function is kept for backward compatibility but redirects to using the chat-based sentiment analysis
+     as we've removed the embedding approach
+     """
+     logger.info("Embeddings-based sentiment analysis has been removed, using chat-based analysis instead")
+
+     # Try to get a chat client and use that instead
+     chat_client = create_azure_openai_client()
+     if chat_client:
+         return analyze_text_sentiment_openai_chat(chat_client, text)
+     else:
+         # Fall back to basic analysis if chat client creation fails
+         return analyze_text_sentiment_basic(text)
+
+ def analyze_text_sentiment_basic(text: str) -> Dict[str, Any]:
+     """Basic sentiment analysis without Azure services"""
+     # This is a very simplified sentiment analysis - in practice, use a proper NLP library
+     positive_words = ['excellent', 'good', 'great', 'best', 'outstanding', 'impressive',
+                       'excited', 'passion', 'enthusiastic', 'enjoy', 'love', 'interested',
+                       'committed', 'dedicated', 'eager']
+
+     negative_words = ['unfortunately', 'bad', 'worst', 'poor', 'difficult', 'challenge',
+                       'issue', 'problem', 'concern', 'disappointed', 'regret', 'sorry']
+
+     text_lower = text.lower()
+
+     # Count occurrences of positive and negative words
+     positive_count = sum(text_lower.count(word) for word in positive_words)
+     negative_count = sum(text_lower.count(word) for word in negative_words)
+     total = positive_count + negative_count
+
+     # Calculate sentiment scores
+     if total == 0:
+         return {
+             "sentiment": "neutral",
+             "positive_score": 0.33,
+             "neutral_score": 0.34,
+             "negative_score": 0.33,
+             "sentences": len(text.split('.')),
+             "method": "basic"
+         }
+
+     positive_score = positive_count / total if total > 0 else 0.33
+     negative_score = negative_count / total if total > 0 else 0.33
+     neutral_score = 1 - (positive_score + negative_score)
+
+     # Determine sentiment
+     if positive_score > 0.6:
+         sentiment = "positive"
+     elif negative_score > 0.6:
+         sentiment = "negative"
+     else:
+         sentiment = "neutral"
+
+     return {
+         "sentiment": sentiment,
+         "positive_score": positive_score,
+         "neutral_score": neutral_score,
+         "negative_score": negative_score,
+         "sentences": len(text.split('.')),
+         "method": "basic"
+     }
+
+ def analyze_sentiment(state: Dict[str, Any]) -> Dict[str, Any]:
+     """
+     LangGraph node to analyze sentiment of cover letters using Azure OpenAI
+
+     Args:
+         state: The current workflow state
+
+     Returns:
+         Updated workflow state with sentiment analysis
+     """
+     logger.info("Starting sentiment analysis with Azure OpenAI")
+
+     # Initialize errors list if not present
+     if "errors" not in state:
+         state["errors"] = []
+
+     # Check if sentiment score already exists in state
+     if state.get("sentiment_score") is not None:
+         logger.info("Sentiment already analyzed, skipping analysis")
+         return state
+
+     # Check if resume text exists
+     resume_text = state.get("resume_text")
+     if not resume_text:
+         error_message = "Missing resume text for sentiment analysis"
+         logger.error(error_message)
+
+         # Add error to state
+         state["errors"].append({
+             "step": "sentiment_analysis",
+             "error": error_message
+         })
+
+         # Set default sentiment
+         state["sentiment_score"] = {
+             "sentiment": "neutral",
+             "positive_score": 0.33,
+             "neutral_score": 0.34,
+             "negative_score": 0.33,
+             "available": False
+         }
+         return state
+
+     try:
+         # Extract cover letter
+         cover_letter = extract_cover_letter(resume_text)
+
+         # If no cover letter found, use the first 1000 characters of resume as a sample
+         sample_text = cover_letter if cover_letter else resume_text[:1000]
+
+         # Initialize clients and results
+         sentiment_result = None
+         chat_client = None
+
+         # Try to create Azure OpenAI client for chat if available
+         if OPENAI_AVAILABLE and (settings.AZURE_OPENAI_API_KEY or settings.AZURE_OPENAI_KEY):
+             chat_client = create_azure_openai_client()
+             # We no longer use embeddings, so don't initialize that client
+
+         # Only use chat-based or basic sentiment analysis
+         if chat_client:
+             # Method 1: Use Azure OpenAI Chat for sentiment analysis
+             sentiment_result = analyze_text_sentiment_openai_chat(chat_client, sample_text)
+             logger.info("Used Azure OpenAI Chat for sentiment analysis")
+         else:
+             # Fallback: Basic sentiment analysis
+             sentiment_result = analyze_text_sentiment_basic(sample_text)
+             logger.info("Used basic sentiment analysis (Azure services unavailable)")
+
+         # Add additional context
+         sentiment_result["cover_letter_available"] = True if cover_letter else False
+         sentiment_result["text_used"] = "cover_letter" if cover_letter else "resume_sample"
+         sentiment_result["available"] = True
+
+         # Update state
+         state["sentiment_score"] = sentiment_result
+         state["status"] = "sentiment_analyzed"
+
+         logger.info(f"Sentiment analyzed successfully: {sentiment_result['sentiment']}")
+
+     except Exception as e:
+         error_message = f"Error analyzing sentiment: {str(e)}"
+         logger.error(error_message)
+
+         # Add error to state
+         state["errors"].append({
+             "step": "sentiment_analysis",
+             "error": error_message
+         })
+
+         # Set default sentiment
+         state["sentiment_score"] = {
+             "sentiment": "neutral",
+             "positive_score": 0.33,
+             "neutral_score": 0.34,
+             "negative_score": 0.33,
+             "available": False,
+             "error": str(e)
+         }
+
+     return state
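The node reads `resume_text` from the workflow state and writes a `sentiment_score` dict back; without Azure OpenAI credentials it drops to the keyword-based fallback. A minimal sketch of calling it standalone follows; the sample text is invented, the import path assumes the installed wheel layout, and `config.settings` must be importable for the module to load.

    # Illustrative sketch only; with no Azure credentials the basic keyword analyzer runs.
    from michael_agent.langgraph_workflow.nodes.sentiment_analysis import analyze_sentiment

    state = {
        "resume_text": (
            "Dear Hiring Manager,\n"
            "I am excited to apply. I am passionate about this role and eager to contribute."
        )
    }

    state = analyze_sentiment(state)
    print(state["sentiment_score"]["sentiment"])        # e.g. "positive" via the basic analyzer
    print(state["sentiment_score"].get("text_used"))    # "cover_letter" or "resume_sample"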
michael_agent/utils/__init__.py — file without changes
michael_agent/utils/email_utils.py
@@ -0,0 +1,140 @@
+ """
+ Email utilities for sending notifications using SMTP or Azure Communication Services
+ """
+
+ import os
+ import smtplib
+ import logging
+ from email.mime.text import MIMEText
+ from email.mime.multipart import MIMEMultipart
+ from email.mime.application import MIMEApplication
+
+ # Try to import Azure Communication Services
+ try:
+     from azure.communication.email import EmailClient
+     AZURE_EMAIL_AVAILABLE = True
+ except ImportError:
+     AZURE_EMAIL_AVAILABLE = False
+
+ from config import settings
+
+ logger = logging.getLogger(__name__)
+
+ def send_email_smtp(recipient_email, subject, body, html_content=None, attachments=None):
+     """
+     Send an email using SMTP protocol
+
+     Args:
+         recipient_email: The recipient's email address
+         subject: Email subject
+         body: Plain text email body
+         html_content: Optional HTML content
+         attachments: List of file paths to attach
+
+     Returns:
+         True if sent successfully, False otherwise
+     """
+     try:
+         # Create a multipart message
+         msg = MIMEMultipart('alternative')
+         msg['Subject'] = subject
+         msg['From'] = settings.EMAIL_SENDER
+         msg['To'] = recipient_email
+
+         # Add plain text content
+         msg.attach(MIMEText(body, 'plain'))
+
+         # Add HTML content if provided
+         if html_content:
+             msg.attach(MIMEText(html_content, 'html'))
+
+         # Add attachments if provided
+         if attachments:
+             for file_path in attachments:
+                 with open(file_path, 'rb') as file:
+                     part = MIMEApplication(file.read(), Name=os.path.basename(file_path))
+                     part['Content-Disposition'] = f'attachment; filename="{os.path.basename(file_path)}"'
+                     msg.attach(part)
+
+         # Connect to SMTP server and send email
+         with smtplib.SMTP(settings.SMTP_SERVER, settings.SMTP_PORT) as server:
+             server.ehlo()
+             server.starttls()
+             server.login(settings.SMTP_USERNAME, settings.SMTP_PASSWORD)
+             server.send_message(msg)
+
+         logger.info(f"Email sent to {recipient_email} with subject '{subject}'")
+         return True
+     except Exception as e:
+         logger.error(f"Error sending email via SMTP: {str(e)}")
+         return False
+
+ def send_email_azure(recipient_email, subject, body, html_content=None):
+     """
+     Send an email using Azure Communication Services
+
+     Args:
+         recipient_email: The recipient's email address
+         subject: Email subject
+         body: Plain text email body
+         html_content: Optional HTML content
+
+     Returns:
+         True if sent successfully, False otherwise
+     """
+     if not AZURE_EMAIL_AVAILABLE:
+         logger.error("Azure Communication Services not available. Install with 'pip install azure-communication-email'")
+         return False
+
+     try:
+         # Create the email client
+         email_client = EmailClient.from_connection_string(settings.AZURE_COMMUNICATION_CONNECTION_STRING)
+
+         # Use HTML content if provided, otherwise use plain text
+         content = html_content if html_content else body
+         content_type = "html" if html_content else "plainText"
+
+         # Create the email message
+         message = {
+             "senderAddress": settings.AZURE_COMMUNICATION_SENDER_EMAIL,
+             "recipients": {
+                 "to": [{"address": recipient_email}]
+             },
+             "content": {
+                 "subject": subject,
+                 "plainText": body,
+                 "html": content if content_type == "html" else None
+             }
+         }
+
+         # Send the email
+         poller = email_client.begin_send(message)
+         result = poller.result()
+
+         logger.info(f"Email sent to {recipient_email} with subject '{subject}' via Azure Communication Services")
+         return True
+     except Exception as e:
+         logger.error(f"Error sending email via Azure Communication Services: {str(e)}")
+         return False
+
+ def send_email(recipient_email, subject, body, html_content=None, attachments=None, use_azure=False):
+     """
+     Send an email using the preferred method (SMTP or Azure Communication Services)
+
+     Args:
+         recipient_email: The recipient's email address
+         subject: Email subject
+         body: Plain text email body
+         html_content: Optional HTML content
+         attachments: List of file paths to attach (only used with SMTP)
+         use_azure: Force using Azure Communication Services if available
+
+     Returns:
+         True if sent successfully, False otherwise
+     """
+     # Use Azure if requested and available
+     if use_azure and AZURE_EMAIL_AVAILABLE and settings.AZURE_COMMUNICATION_CONNECTION_STRING:
+         return send_email_azure(recipient_email, subject, body, html_content)
+
+     # Fall back to SMTP
+     return send_email_smtp(recipient_email, subject, body, html_content, attachments)
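`send_email` is the single entry point: it routes to Azure Communication Services only when `use_azure=True` and a connection string is configured, and otherwise falls back to SMTP. A minimal usage sketch follows; the recipient, subject, body and attachment path are placeholders, and the SMTP/sender values are assumed to be set in `config.settings`.

    # Illustrative sketch only; addresses and paths below are placeholders.
    from michael_agent.utils.email_utils import send_email

    sent = send_email(
        recipient_email="recruiter@example.com",
        subject="New candidate submission",
        body="A new resume has been processed by the workflow.",
        html_content="<p>A new resume has been processed by the workflow.</p>",
        attachments=["./processed_resumes/candidate.pdf"],  # optional, SMTP path only
        use_azure=False,  # set True to prefer Azure Communication Services when configured
    )
    print("sent" if sent else "failed")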
michael_agent/utils/id_mapper.py
@@ -0,0 +1,14 @@
+ import os
+ import json
+ import logging
+ from typing import Optional, List, Dict
+
+ logger = logging.getLogger(__name__)
+
+ def get_timestamp_id(job_id: str) -> str:
+     """For backward compatibility: return the same job_id"""
+     return job_id
+
+ def get_all_ids_for_timestamp(timestamp_id: str) -> List[str]:
+     """For backward compatibility: return list with same job_id"""
+     return [timestamp_id]