michael-agent 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- michael_agent/dashboard/static/__init__.py +0 -0
- michael_agent/dashboard/templates/__init__.py +0 -0
- michael_agent/langgraph_workflow/nodes/__init__.py +0 -0
- michael_agent/langgraph_workflow/nodes/assessment_handler.py +177 -0
- michael_agent/langgraph_workflow/nodes/jd_generator.py +139 -0
- michael_agent/langgraph_workflow/nodes/jd_poster.py +156 -0
- michael_agent/langgraph_workflow/nodes/question_generator.py +295 -0
- michael_agent/langgraph_workflow/nodes/recruiter_notifier.py +224 -0
- michael_agent/langgraph_workflow/nodes/resume_analyzer.py +631 -0
- michael_agent/langgraph_workflow/nodes/resume_ingestor.py +225 -0
- michael_agent/langgraph_workflow/nodes/sentiment_analysis.py +309 -0
- {michael_agent-1.0.1.dist-info → michael_agent-1.0.2.dist-info}/METADATA +1 -1
- {michael_agent-1.0.1.dist-info → michael_agent-1.0.2.dist-info}/RECORD +15 -4
- {michael_agent-1.0.1.dist-info → michael_agent-1.0.2.dist-info}/WHEEL +0 -0
- {michael_agent-1.0.1.dist-info → michael_agent-1.0.2.dist-info}/top_level.txt +0 -0
michael_agent/langgraph_workflow/nodes/question_generator.py
@@ -0,0 +1,295 @@
"""
Question Generator Node
Generates custom interview questions based on resume and job description
"""

import logging
from typing import Dict, Any, List
from langchain_openai import AzureChatOpenAI

from config import settings

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def create_llm():
    """Create and configure the LLM with Azure OpenAI"""
    try:
        return AzureChatOpenAI(
            temperature=0.7,  # Higher temperature for more diverse questions
            deployment_name=settings.AZURE_OPENAI_DEPLOYMENT,
            azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,
            api_key=settings.AZURE_OPENAI_KEY,
            api_version=settings.AZURE_OPENAI_API_VERSION,
        )
    except Exception as e:
        logger.error(f"Error initializing Azure OpenAI: {str(e)}")
        return None

def get_question_generation_prompt(resume_text: str, jd_text: str, relevance_score: float) -> str:
    """Generate the prompt for interview question generation"""
    return f"""
You are an expert technical interviewer and recruiter.

Generate a set of interview questions for a candidate based on their resume and the job description.
The questions should help assess the candidate's fit for the role.

Resume:
{resume_text[:2000]} # Limit to avoid token limits

Job Description:
{jd_text[:1000]}

The candidate's resume relevance score is {relevance_score:.2f} out of 1.0.

Generate 10 questions in the following JSON format:
```json
{{
  "technical_questions": [
    {{
      "question": "string",
      "difficulty": "easy|medium|hard",
      "category": "technical_skill|domain_knowledge|problem_solving",
      "purpose": "brief explanation of what this question assesses"
    }},
    // 4 more technical questions...
  ],
  "behavioral_questions": [
    {{
      "question": "string",
      "category": "teamwork|leadership|conflict_resolution|problem_solving|adaptability",
      "purpose": "brief explanation of what this question assesses"
    }},
    // 4 more behavioral questions...
  ],
  "follow_up_areas": ["Area 1", "Area 2", "Area 3"] // Important areas to explore further based on resume gaps
}}
```

Focus on questions that will reveal the candidate's true abilities related to the job requirements.
Include questions that address any potential gaps between the resume and job description.
"""

def generate_interview_questions(state: Dict[str, Any]) -> Dict[str, Any]:
    """Generate interview questions based on resume and job description."""
    logger = logging.getLogger(__name__)
    logger.info("Starting interview question generation")

    resume_text = state.get("resume_text")
    job_description_text = state.get("job_description_text")

    # Add print statements to verify the input data
    print(f"[QUESTION_GENERATOR] Resume text available: {resume_text is not None}, length: {len(resume_text) if resume_text else 0}")
    print(f"[QUESTION_GENERATOR] Job description text available: {job_description_text is not None}, length: {len(job_description_text) if job_description_text else 0}")

    if job_description_text:
        print(f"[QUESTION_GENERATOR] JD First 200 chars: {job_description_text[:200]}...")

    # Generate some basic questions even if we're missing job description
    if not resume_text:
        logger.error("Missing resume text or job description text for question generation")
        # Generate generic questions when no data is available
        questions = {
            "technical_questions": [
                {
                    "question": "Can you describe your experience with the technologies mentioned in your resume?",
                    "difficulty": "medium",
                    "category": "technical_skill",
                    "purpose": "Assess general technical experience and honesty"
                },
                {
                    "question": "How do you approach problem-solving in your work?",
                    "difficulty": "medium",
                    "category": "problem_solving",
                    "purpose": "Evaluate general problem-solving approach"
                }
            ],
            "behavioral_questions": [
                {
                    "question": "Can you tell me about a time when you had to work with a difficult team member?",
                    "category": "teamwork",
                    "purpose": "Assess interpersonal skills and conflict resolution"
                },
                {
                    "question": "Describe a situation where you had to learn something new quickly.",
                    "category": "adaptability",
                    "purpose": "Evaluate learning agility and adaptability"
                }
            ],
            "follow_up_areas": [
                "Technical skills verification",
                "Past project details",
                "Team collaboration"
            ]
        }

        return {
            "status": "completed_generate_interview_questions",
            "interview_questions": questions,
            "errors": [{"step": "question_generator", "error": "Missing resume text or job description text for question generation"}]
        }

    # Check if questions already exist in state
    if state.get("interview_questions"):
        logger.info("Interview questions already exist, skipping generation")
        return state

    # Check if required data exists
    resume_text = state.get("resume_text")
    jd_text = state.get("job_description_text")

    if not resume_text or not jd_text:
        error_message = "Missing resume text or job description text for question generation"
        logger.error(error_message)

        # Add error to state
        state["errors"].append({
            "step": "question_generator",
            "error": error_message
        })

        # Set default questions
        state["interview_questions"] = generate_default_questions()
        return state

    try:
        # Create the language model
        llm = create_llm()
        if not llm:
            raise ValueError("Failed to initialize Azure OpenAI client")

        # Get resume score from state (default to 0.5 if missing)
        relevance_score = state.get("relevance_score", 0.5)

        # Generate the prompt
        prompt = get_question_generation_prompt(resume_text, jd_text, relevance_score)
        print(f"[QUESTION_GENERATOR] Created prompt with resume length {len(resume_text)} and JD length {len(jd_text)}")

        # Invoke the language model
        response = llm.invoke(prompt)
        generated_text = response.content

        # Extract JSON from response (in case there's surrounding text)
        import re
        import json

        json_match = re.search(r'```json\s*(.*?)\s*```', generated_text, re.DOTALL)
        if json_match:
            json_str = json_match.group(1)
        else:
            # If no markdown code block, try to find JSON directly
            json_match = re.search(r'({[\s\S]*})', generated_text)
            if json_match:
                json_str = json_match.group(1)
            else:
                raise ValueError("Could not extract JSON from LLM response")

        # Parse the JSON
        questions = json.loads(json_str)

        # Update the state with the generated questions
        state["interview_questions"] = questions
        state["status"] = "questions_generated"

        # Save a snapshot of the state with the questions
        save_snapshot(state)

        logger.info("Interview questions generated successfully")

    except Exception as e:
        error_message = f"Error generating interview questions: {str(e)}"
        logger.error(error_message)

        # Add error to state
        state["errors"].append({
            "step": "question_generator",
            "error": error_message
        })

        # Use default questions as fallback
        state["interview_questions"] = generate_default_questions()

    return state

def generate_default_questions():
    """Generate default interview questions as fallback"""
    return {
        "technical_questions": [
            {
                "question": "Can you describe your experience with the technologies mentioned in your resume?",
                "difficulty": "medium",
                "category": "technical_skill",
                "purpose": "Assess general technical experience and honesty"
            },
            {
                "question": "How do you approach problem-solving in your work?",
                "difficulty": "medium",
                "category": "problem_solving",
                "purpose": "Evaluate general problem-solving approach"
            }
        ],
        "behavioral_questions": [
            {
                "question": "Can you tell me about a time when you had to work with a difficult team member?",
                "category": "teamwork",
                "purpose": "Assess interpersonal skills and conflict resolution"
            },
            {
                "question": "Describe a situation where you had to learn something new quickly.",
                "category": "adaptability",
                "purpose": "Evaluate learning agility and adaptability"
            }
        ],
        "follow_up_areas": ["Technical skills verification", "Past project details", "Team collaboration"]
    }

def save_snapshot(state: Dict[str, Any]) -> None:
    """Save a snapshot of the state to a JSON file for dashboard access"""
    try:
        import os
        import json
        import time

        job_id = state.get("job_id")
        if not job_id:
            logger.warning("No job ID found in state, cannot save snapshot")
            return

        # Create a timestamped snapshot filename
        timestamp = time.strftime("%Y%m%d%H%M%S")
        snapshot_dir = os.path.join("./logs", "snapshots")
        os.makedirs(snapshot_dir, exist_ok=True)

        snapshot_path = os.path.join(snapshot_dir, f"{timestamp}_{job_id}_after_question_generator.json")

        # Create a clean version of state for saving
        save_state = {
            "job_id": job_id,
            # time.strftime has no %f (microseconds) directive, so record whole seconds
            "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S"),
            "status": state.get("status", "unknown"),
            "resume_path": state.get("resume_path", ""),
            "errors": state.get("errors", []),
        }

        # Add candidate name and email if available
        if "candidate_name" in state:
            save_state["candidate_name"] = state["candidate_name"]

        if "candidate_email" in state:
            save_state["candidate_email"] = state["candidate_email"]

        # Add interview questions if available
        if "interview_questions" in state:
            save_state["interview_questions"] = state["interview_questions"]

        # Save the snapshot
        with open(snapshot_path, 'w') as f:
            json.dump(save_state, f, indent=2)

        logger.info(f"Saved question generator snapshot to {snapshot_path}")

    except Exception as e:
        logger.error(f"Error saving state snapshot: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
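
The node above operates on a plain state dictionary, so it can be exercised outside the workflow graph. The snippet below is an editor-added sketch, not part of the package: it calls generate_interview_questions directly with a minimal, made-up state, and it assumes that config.settings resolves to valid Azure OpenAI credentials when the module is imported.

# Hypothetical smoke test for the question generator node.
# The keys mirror what generate_interview_questions reads and writes.
from michael_agent.langgraph_workflow.nodes.question_generator import generate_interview_questions

state = {
    "job_id": "demo-001",  # used by save_snapshot to name the snapshot file
    "resume_text": "Senior Python developer, 6 years of Flask and Azure experience...",
    "job_description_text": "Backend engineer to build LangGraph-based hiring workflows...",
    "relevance_score": 0.72,  # defaults to 0.5 inside the node when absent
    "errors": [],  # the node appends error dicts here on failure
}

result = generate_interview_questions(state)
print(result.get("status"))  # "questions_generated" on success
print(len(result.get("interview_questions", {}).get("technical_questions", [])))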

michael_agent/langgraph_workflow/nodes/recruiter_notifier.py
@@ -0,0 +1,224 @@
"""
Recruiter Notifier Node
Composes and sends emails to recruiters with candidate summary
"""

import logging
from typing import Dict, Any, List

from utils.email_utils import send_email
from config import settings

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def create_recruiter_email_content(state: Dict[str, Any]) -> Dict[str, str]:
    """Create email content for recruiter notification"""
    # Get data from state
    candidate_name = state.get("candidate_name", "Unknown Candidate")
    job_data = state.get("job_description", {})
    position_name = job_data.get("position", "Unspecified Position")
    relevance_score = state.get("relevance_score", 0)
    relevance_percentage = int(relevance_score * 100)
    sentiment_data = state.get("sentiment_score", {})
    sentiment = sentiment_data.get("sentiment", "neutral")
    resume_data = state.get("resume_data", {})

    # Format interview questions
    interview_questions = state.get("interview_questions", {})
    tech_questions = interview_questions.get("technical_questions", [])
    behavioral_questions = interview_questions.get("behavioral_questions", [])

    # Create email subject
    subject = f"Candidate Assessment: {candidate_name} for {position_name} ({relevance_percentage}% Match)"

    # Create plain text email
    plain_text = f"""
Candidate Assessment Report

Candidate: {candidate_name}
Position: {position_name}
Match Score: {relevance_percentage}%
Sentiment Analysis: {sentiment.capitalize()}

Resume Summary:
- Email: {resume_data.get('email', 'Not provided')}
- Phone: {resume_data.get('phone', 'Not provided')}

Assessment Status: {state.get('assessment', {}).get('status', 'Not sent')}

Recommended Technical Interview Questions:
{_format_questions_text(tech_questions[:3])}

Recommended Behavioral Questions:
{_format_questions_text(behavioral_questions[:3])}

View the full candidate profile in the dashboard.
"""

    # Create HTML email content
    sentiment_color = {
        "positive": "green",
        "neutral": "gray",
        "negative": "orange"
    }.get(sentiment, "gray")

    html_content = f"""
    <html>
    <body style="font-family: Arial, sans-serif; line-height: 1.6; color: #333;">
      <div style="max-width: 600px; margin: 0 auto; padding: 20px;">
        <h1 style="color: #2c3e50; border-bottom: 2px solid #3498db; padding-bottom: 10px;">Candidate Assessment Report</h1>

        <table style="width: 100%; border-collapse: collapse; margin-bottom: 20px;">
          <tr>
            <td style="padding: 8px; width: 30%;"><strong>Candidate:</strong></td>
            <td style="padding: 8px;">{candidate_name}</td>
          </tr>
          <tr>
            <td style="padding: 8px;"><strong>Position:</strong></td>
            <td style="padding: 8px;">{position_name}</td>
          </tr>
          <tr>
            <td style="padding: 8px;"><strong>Match Score:</strong></td>
            <td style="padding: 8px;">
              <div style="background-color: #f0f0f0; border-radius: 10px; height: 20px; width: 200px;">
                <div style="background-color: #3498db; border-radius: 10px; height: 20px; width: {relevance_percentage*2}px;"></div>
              </div>
              <span style="margin-left: 10px;">{relevance_percentage}%</span>
            </td>
          </tr>
          <tr>
            <td style="padding: 8px;"><strong>Sentiment:</strong></td>
            <td style="padding: 8px; color: {sentiment_color};">{sentiment.capitalize()}</td>
          </tr>
        </table>

        <h2 style="color: #2c3e50;">Contact Information</h2>
        <p>
          <strong>Email:</strong> {resume_data.get('email', 'Not provided')}<br>
          <strong>Phone:</strong> {resume_data.get('phone', 'Not provided')}
        </p>

        <h2 style="color: #2c3e50;">Assessment Status</h2>
        <p>{state.get('assessment', {}).get('status', 'Not sent').capitalize()}</p>

        <h2 style="color: #2c3e50;">Recommended Interview Questions</h2>

        <h3>Technical Questions</h3>
        {_format_questions_html(tech_questions[:3])}

        <h3>Behavioral Questions</h3>
        {_format_questions_html(behavioral_questions[:3])}

        <p style="text-align: center; margin-top: 30px;">
          <a href="http://localhost:5000/dashboard"
             style="background-color: #3498db; color: white; padding: 10px 20px; text-decoration: none; border-radius: 5px;">
            View Full Profile in Dashboard
          </a>
        </p>
      </div>
    </body>
    </html>
    """

    return {
        "subject": subject,
        "plain_text": plain_text,
        "html_content": html_content
    }

def _format_questions_text(questions: List[Dict[str, str]]) -> str:
    """Format questions for plain text email"""
    result = ""
    for i, q in enumerate(questions, 1):
        result += f"{i}. {q.get('question', '')}\n"
    return result

def _format_questions_html(questions: List[Dict[str, str]]) -> str:
    """Format questions for HTML email"""
    result = "<ol>"
    for q in questions:
        purpose = q.get('purpose', '')
        difficulty = q.get('difficulty', '')
        difficulty_span = f"<span style='color: {'green' if difficulty == 'easy' else 'orange' if difficulty == 'medium' else 'red'}'>({difficulty})</span>" if difficulty else ""

        result += f"<li><strong>{q.get('question', '')}</strong> {difficulty_span}<br>"
        if purpose:
            result += f"<em>Purpose: {purpose}</em>"
        result += "</li>"  # close the item even when no purpose is provided
    result += "</ol>"
    return result

def notify_recruiter(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    LangGraph node to notify recruiters about candidates

    Args:
        state: The current workflow state

    Returns:
        Updated workflow state with notification status
    """
    logger.info("Starting recruiter notification")

    # Check if notification already exists in state
    if state.get("notification_status"):
        logger.info("Notification already sent, skipping")
        return state

    # Get recruiter email from settings (or state if available)
    recruiter_email = state.get("recruiter_email", settings.DEFAULT_RECRUITER_EMAIL)

    if not recruiter_email:
        error_message = "Missing recruiter email for notification"
        logger.error(error_message)
        state["errors"].append({
            "step": "recruiter_notifier",
            "error": error_message
        })
        state["notification_status"] = {"status": "failed", "reason": "missing_email"}
        return state

    try:
        # Create email content
        email_content = create_recruiter_email_content(state)

        # Send email
        email_sent = send_email(
            recipient_email=recruiter_email,
            subject=email_content["subject"],
            body=email_content["plain_text"],
            html_content=email_content["html_content"]
        )

        if email_sent:
            state["notification_status"] = {
                "status": "sent",
                "recipient": recruiter_email,
                "timestamp": state["timestamp"]
            }
            state["status"] = "notification_sent"
            logger.info(f"Recruiter notification sent to {recruiter_email}")
        else:
            state["notification_status"] = {"status": "failed", "reason": "email_error"}
            state["errors"].append({
                "step": "recruiter_notifier",
                "error": "Failed to send email to recruiter"
            })
            logger.error(f"Failed to send notification to {recruiter_email}")

    except Exception as e:
        error_message = f"Error sending recruiter notification: {str(e)}"
        logger.error(error_message)

        state["errors"].append({
            "step": "recruiter_notifier",
            "error": error_message
        })

        state["notification_status"] = {
            "status": "failed",
            "reason": str(e)
        }

    return state
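
Neither hunk includes the graph assembly itself (that lives elsewhere under michael_agent/langgraph_workflow), but both functions follow the usual LangGraph node contract: accept the state dict, return the updated state. The following is an editor-added sketch of how such nodes could be wired with langgraph's StateGraph; the state schema, node names, and edges are assumptions, and actually sending mail also requires whatever SMTP settings utils.email_utils and config.settings expect.

# Hypothetical wiring of the two new nodes; the real workflow in this package may differ.
from typing import Any, Dict, List
from typing_extensions import TypedDict

from langgraph.graph import StateGraph, END

from michael_agent.langgraph_workflow.nodes.question_generator import generate_interview_questions
from michael_agent.langgraph_workflow.nodes.recruiter_notifier import notify_recruiter

class WorkflowState(TypedDict, total=False):
    # Only the keys these two nodes read or write; the real schema is larger.
    job_id: str
    timestamp: str
    resume_text: str
    job_description_text: str
    relevance_score: float
    interview_questions: Dict[str, Any]
    notification_status: Dict[str, Any]
    status: str
    errors: List[Dict[str, Any]]

workflow = StateGraph(WorkflowState)
workflow.add_node("question_generator", generate_interview_questions)
workflow.add_node("recruiter_notifier", notify_recruiter)
workflow.set_entry_point("question_generator")
workflow.add_edge("question_generator", "recruiter_notifier")
workflow.add_edge("recruiter_notifier", END)

app = workflow.compile()
final_state = app.invoke({
    "job_id": "demo-001",
    "timestamp": "2025-01-01T00:00:00",  # notify_recruiter copies this into notification_status
    "resume_text": "Senior Python developer...",
    "job_description_text": "Backend engineer...",
    "errors": [],
})
print(final_state.get("notification_status"))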