michael-agent 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- michael_agent/config/__init__.py +0 -0
- michael_agent/config/settings.py +66 -0
- michael_agent/dashboard/__init__.py +0 -0
- michael_agent/dashboard/app.py +1450 -0
- michael_agent/dashboard/static/__init__.py +0 -0
- michael_agent/dashboard/templates/__init__.py +0 -0
- michael_agent/langgraph_workflow/__init__.py +0 -0
- michael_agent/langgraph_workflow/graph_builder.py +358 -0
- michael_agent/langgraph_workflow/nodes/__init__.py +0 -0
- michael_agent/langgraph_workflow/nodes/assessment_handler.py +177 -0
- michael_agent/langgraph_workflow/nodes/jd_generator.py +139 -0
- michael_agent/langgraph_workflow/nodes/jd_poster.py +156 -0
- michael_agent/langgraph_workflow/nodes/question_generator.py +295 -0
- michael_agent/langgraph_workflow/nodes/recruiter_notifier.py +224 -0
- michael_agent/langgraph_workflow/nodes/resume_analyzer.py +631 -0
- michael_agent/langgraph_workflow/nodes/resume_ingestor.py +225 -0
- michael_agent/langgraph_workflow/nodes/sentiment_analysis.py +309 -0
- michael_agent/utils/__init__.py +0 -0
- michael_agent/utils/email_utils.py +140 -0
- michael_agent/utils/id_mapper.py +14 -0
- michael_agent/utils/jd_utils.py +34 -0
- michael_agent/utils/lms_api.py +226 -0
- michael_agent/utils/logging_utils.py +192 -0
- michael_agent/utils/monitor_utils.py +289 -0
- michael_agent/utils/node_tracer.py +88 -0
- {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/METADATA +2 -2
- michael_agent-1.0.2.dist-info/RECORD +32 -0
- michael_agent-1.0.0.dist-info/RECORD +0 -7
- {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/WHEEL +0 -0
- {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/top_level.txt +0 -0
File without changes
|
File without changes
|
File without changes
|
@@ -0,0 +1,358 @@
|
|
1
|
+
"""
|
2
|
+
LangGraph workflow builder and orchestrator
|
3
|
+
"""
|
4
|
+
|
5
|
+
import os
|
6
|
+
import json
|
7
|
+
import logging
|
8
|
+
import sys
|
9
|
+
import traceback
|
10
|
+
from typing import Dict, List, Any, Optional, TypedDict, Literal
|
11
|
+
from datetime import datetime
|
12
|
+
|
13
|
+
|
14
|
+
# LangGraph and LangChain imports
|
15
|
+
from langgraph.graph import StateGraph, END, MessagesState
|
16
|
+
from langgraph.prebuilt.tool_node import ToolNode
|
17
|
+
from langgraph.checkpoint.memory import InMemorySaver
|
18
|
+
|
19
|
+
# Import nodes (workflow steps)
|
20
|
+
from .nodes.jd_generator import generate_job_description
|
21
|
+
from .nodes.jd_poster import post_job_description
|
22
|
+
from .nodes.resume_ingestor import ingest_resume
|
23
|
+
from .nodes.resume_analyzer import analyze_resume
|
24
|
+
from .nodes.sentiment_analysis import analyze_sentiment
|
25
|
+
from .nodes.assessment_handler import handle_assessment
|
26
|
+
from .nodes.question_generator import generate_interview_questions
|
27
|
+
from .nodes.recruiter_notifier import notify_recruiter
|
28
|
+
|
29
|
+
# Import config
|
30
|
+
from config import settings
|
31
|
+
|
32
|
+
# Import custom logging utilities
|
33
|
+
from utils.logging_utils import workflow_logger as logger
|
34
|
+
from utils.logging_utils import save_state_snapshot
|
35
|
+
|
36
|
+
# Define helper function to get project root
|
37
|
+
def get_project_root():
    """Return the absolute path of the project root directory.

    This module is assumed to live two levels below the root
    (project_root/smart_recruit_agent/langgraph_workflow/), so the root
    is reached by going up two directories from this file.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.abspath(os.path.join(here, os.pardir, os.pardir))
|
42
|
+
|
43
|
+
# Define the workflow state
|
44
|
+
class WorkflowState(TypedDict):
    """Type definition for the workflow state that gets passed between nodes.

    Every node receives this mapping, mutates/extends it, and returns it.
    Optional fields are filled in progressively as the workflow advances.
    """
    # Identifier for this workflow run (timestamp-based when auto-generated)
    job_id: str
    # ISO-8601 creation time of the state
    timestamp: str
    # Coarse progress marker updated by each node (e.g. "initialized", "jd_generated")
    status: str
    # Candidate identity extracted from the resume (None until parsed)
    candidate_name: Optional[str]
    candidate_email: Optional[str]
    # Filesystem path of the incoming resume file
    resume_path: Optional[str]
    # Raw text extracted from the resume
    resume_text: Optional[str]
    # Structured data parsed out of the resume text
    resume_data: Optional[Dict[str, Any]]
    # Structured job posting data (position, location, skills, ...)
    job_description: Optional[Dict[str, Any]]
    # Generated prose form of the job description
    job_description_text: Optional[str]
    # Resume-vs-JD match score used for assessment gating
    relevance_score: Optional[float]
    # Output of the sentiment analysis node
    sentiment_score: Optional[Dict[str, Any]]
    # Assessment delivery record ({"status": ..., "method": ...})
    assessment: Optional[Dict[str, Any]]
    # Interview questions generated for the recruiter
    interview_questions: Optional[List[Dict[str, Any]]]
    # Result of the recruiter notification step
    notification_status: Optional[Dict[str, Any]]
    # Accumulated error records ({"step", "error", optionally "timestamp"})
    errors: List[Dict[str, Any]]
|
62
|
+
|
63
|
+
def create_initial_state(resume_path: str = None, job_id: str = None) -> Dict[str, Any]:
    """Create initial state for the workflow.

    Args:
        resume_path: Optional path to a resume file to attach to the state.
        job_id: Optional run identifier; a timestamp-based one is generated
            when omitted.

    Returns:
        A fresh state dict with job_id, status, timestamp and an empty
        errors list (plus resume_path when given).
    """
    # Fall back to a timestamp-derived identifier when none is supplied
    job_id = job_id or datetime.now().strftime('%Y%m%d%H%M%S')

    logger.info(f"Creating initial state with job ID: {job_id}")

    state: Dict[str, Any] = {
        "job_id": job_id,
        "status": "initialized",
        "timestamp": datetime.now().isoformat(),
        "errors": [],
    }

    # Attach the resume path only when one was provided
    if resume_path:
        state["resume_path"] = resume_path

    return state
|
86
|
+
|
87
|
+
def should_send_assessment(state: WorkflowState) -> Literal["send_assessment", "skip_assessment"]:
    """Conditional router: decide whether the candidate gets an assessment.

    Skips when automatic sending is disabled, the candidate email is
    missing, or the relevance score is below the configured threshold.
    """
    job_id = state.get('job_id')
    logger.info(f"[Job {job_id}] Evaluating whether to send assessment")

    # Feature flag: automatic sending can be globally disabled
    if not settings.AUTOMATIC_TEST_SENDING:
        logger.info(f"[Job {job_id}] Automatic assessment sending is disabled")
        return "skip_assessment"

    # An email address is required to deliver the assessment
    if not state.get("candidate_email"):
        logger.warning(f"[Job {job_id}] No candidate email available for sending assessment")
        return "skip_assessment"

    # Only candidates at or above the score threshold receive one
    score = state.get("relevance_score", 0)
    if score is None or score < settings.MINIMAL_SCORE_THRESHOLD:
        logger.info(f"[Job {job_id}] Score {score} below threshold {settings.MINIMAL_SCORE_THRESHOLD}, skipping assessment")
        return "skip_assessment"

    logger.info(f"[Job {job_id}] Assessment will be sent to {state.get('candidate_email')}")
    return "send_assessment"
|
110
|
+
|
111
|
+
def should_notify_recruiter(state: WorkflowState) -> Literal["notify", "end"]:
    """Conditional router: decide whether to notify the recruiter.

    Ends the workflow when automatic notification is disabled or no parsed
    resume data is available; records the latter case in state["errors"].
    """
    job_id = state.get('job_id')
    logger.info(f"[Job {job_id}] Evaluating whether to notify recruiter")

    # Feature flag: automatic notification can be globally disabled
    if not settings.AUTOMATIC_RECRUITER_NOTIFICATION:
        logger.info(f"[Job {job_id}] Automatic recruiter notification is disabled")
        return "end"

    # Without parsed resume data there is nothing useful to send
    if not state.get("resume_data"):
        error_msg = "No resume data available for notification"
        logger.warning(f"[Job {job_id}] {error_msg}")
        state["errors"].append({
            "step": "notification_routing",
            "error": error_msg,
            "timestamp": datetime.now().isoformat()
        })
        return "end"

    logger.info(f"[Job {job_id}] Recruiter will be notified about candidate {state.get('candidate_name')}")
    return "notify"
|
134
|
+
|
135
|
+
def build_workflow() -> StateGraph:
    """Build the LangGraph workflow.

    Assembles the full recruitment pipeline as a StateGraph over
    WorkflowState: an entry router picks the starting node from the data
    present in the initial state, then the graph runs
    jd_generator -> jd_poster -> resume_ingestor -> resume_analyzer ->
    sentiment_analysis, with conditional hops into assessment_handler and
    recruiter_notifier.

    Returns:
        The uncompiled StateGraph; callers compile it with a checkpointer.
    """
    # Create a new workflow graph
    logger.info("Building LangGraph workflow")
    workflow = StateGraph(WorkflowState)

    # Add nodes (workflow steps)
    workflow.add_node("jd_generator", generate_job_description)
    workflow.add_node("jd_poster", post_job_description)
    workflow.add_node("resume_ingestor", ingest_resume)
    workflow.add_node("resume_analyzer", analyze_resume)
    workflow.add_node("sentiment_analysis", analyze_sentiment)
    workflow.add_node("assessment_handler", handle_assessment)
    workflow.add_node("question_generator", generate_interview_questions)
    workflow.add_node("recruiter_notifier", notify_recruiter)

    # Define the linear portion of the pipeline
    logger.debug("Defining workflow edges")
    workflow.add_edge("jd_generator", "jd_poster")
    workflow.add_edge("jd_poster", "resume_ingestor")
    workflow.add_edge("resume_ingestor", "resume_analyzer")
    workflow.add_edge("resume_analyzer", "sentiment_analysis")

    # After sentiment analysis, decide whether to send assessment;
    # either way the flow converges on question_generator
    workflow.add_conditional_edges(
        "sentiment_analysis",
        should_send_assessment,
        {
            "send_assessment": "assessment_handler",
            "skip_assessment": "question_generator"
        }
    )

    workflow.add_edge("assessment_handler", "question_generator")

    # After question generation, decide whether to notify recruiter
    workflow.add_conditional_edges(
        "question_generator",
        should_notify_recruiter,
        {
            "notify": "recruiter_notifier",
            "end": END
        }
    )

    workflow.add_edge("recruiter_notifier", END)

    # Replace the multiple entry points with a conditional router

    def workflow_entry_router(state: WorkflowState) -> Dict[str, Any]:
        """Route to the appropriate entry point based on available data.

        Writes the chosen node name into state["entry_router"]; the
        conditional edge below reads it back to dispatch.
        """
        logger.info(f"[Job {state.get('job_id')}] Routing workflow to appropriate entry point")

        # Create a copy of the state to avoid mutating the input
        new_state = state.copy()

        # Resume file but no JD yet -> start by ingesting the resume
        if state.get("resume_path") and not state.get("job_description"):
            logger.info(f"[Job {state.get('job_id')}] Starting with resume processing")
            new_state["entry_router"] = "resume_ingestor"
        # JD data but no resume -> start by generating the JD text
        elif state.get("job_description") and not state.get("resume_path"):
            logger.info(f"[Job {state.get('job_id')}] Starting with job description")
            new_state["entry_router"] = "jd_generator"
        # Raw resume text already present but not yet parsed -> analyze it
        elif state.get("resume_text") and not state.get("resume_data"):
            logger.info(f"[Job {state.get('job_id')}] Starting with resume analysis")
            new_state["entry_router"] = "resume_analyzer"
        else:
            # Default path
            logger.info(f"[Job {state.get('job_id')}] Using default entry point (job description generator)")
            new_state["entry_router"] = "jd_generator"

        return new_state

    # Replace the multiple START edges with a single entry and conditional router
    workflow.add_node("entry_router", workflow_entry_router)
    from langgraph.graph import START
    workflow.add_edge(START, "entry_router")
    workflow.add_conditional_edges(
        "entry_router",
        lambda x: x["entry_router"],
        {
            "jd_generator": "jd_generator",
            "resume_ingestor": "resume_ingestor",
            "resume_analyzer": "resume_analyzer"
        }
    )

    logger.info("Workflow graph built successfully")
    return workflow
|
223
|
+
|
224
|
+
def start_workflow():
    """Start the LangGraph workflow engine.

    Creates the required runtime directories, builds and compiles the
    workflow graph with an in-memory checkpointer, and primes the resume
    file watcher so new resumes are processed automatically.

    Returns:
        The compiled LangGraph application.

    Raises:
        Exception: re-raises any failure during setup after logging it
            (a watcher-setup failure alone is logged and tolerated).
    """
    try:
        # Create all required directories up front so nodes can write freely
        for directory in [
            settings.RESUME_WATCH_DIR,
            settings.OUTPUT_DIR,
            settings.LOG_DIR,
            settings.LANGGRAPH_CHECKPOINT_DIR,
            os.path.join(settings.LOG_DIR, "snapshots")
        ]:
            os.makedirs(directory, exist_ok=True)
            logger.info(f"Ensured directory exists: {directory}")

        # Build the workflow graph
        workflow = build_workflow()

        # Setup in-memory checkpointer (state is lost on process restart)
        checkpointer = InMemorySaver()

        # Compile the workflow
        logger.info("Compiling workflow graph")
        app = workflow.compile(checkpointer=checkpointer)

        # Setup the file watcher to automatically process new resumes
        logger.info("Setting up resume watcher...")
        state = create_initial_state()  # Use proper initial state type

        # Set up the resume watcher without running the job description generator
        # We're just initializing the watcher functionality
        try:
            # Call ingest_resume with a special flag to indicate it's just for setup
            # NOTE(review): "setup_only" is not declared in WorkflowState —
            # presumably ingest_resume checks it; confirm against that node.
            state["setup_only"] = True
            ingest_resume(state)  # This sets up the watcher
            logger.info("Resume watcher set up successfully")
        except Exception as e:
            logger.error(f"Error setting up resume watcher: {str(e)}")
            # Continue anyway - we still want to return the app

        logger.info("Workflow engine successfully started")
        logger.info("Listening for new resumes...")

        return app
    except Exception as e:
        logger.error(f"Failed to start workflow engine: {str(e)}")
        logger.error(traceback.format_exc())
        raise
|
271
|
+
|
272
|
+
def process_new_resume(resume_path: str, job_description: Dict[str, Any] = None):
    """Process a new resume through the workflow.

    Builds and compiles a fresh workflow, derives the job ID from the
    resume filename (expected form: <job_id>_<name>.pdf), streams the
    graph execution, and snapshots state after each step.

    Args:
        resume_path: Path to the resume file to process.
        job_description: Optional job description data.
            NOTE(review): currently unused by the body — the initial state
            is built from resume_path/job_id only; confirm whether it
            should be injected into the state.

    Raises:
        Exception: re-raises failures outside the streaming loop after
            logging (in-stream errors are captured into an error snapshot
            instead of propagating).
    """
    try:
        # Extract job ID from filename (e.g., 20250626225207_Michael_Jone.pdf)
        filename = os.path.basename(resume_path)
        job_id = filename.split('_')[0]
        logger.info(f"Extracted job ID from resume filename: {job_id}")

        # Create initial state with extracted job ID
        initial_state = create_initial_state(resume_path=resume_path, job_id=job_id)

        # Start workflow with initial state
        workflow = build_workflow()
        logger.info(f"[Job {job_id}] Starting workflow processing with job ID {job_id}")
        save_state_snapshot(initial_state, "initial_state")

        # Configure the workflow with in-memory checkpoints
        checkpointer = InMemorySaver()

        # Compile the workflow with checkpoints
        logger.info(f"[Job {job_id}] Compiling workflow graph")
        app = workflow.compile(checkpointer=checkpointer)

        # Execute the workflow
        try:
            logger.info(f"[Job {job_id}] Beginning workflow execution")
            event_count = 0

            # Add thread_id for proper checkpointing
            config = {"configurable": {"thread_id": job_id}}

            # Correctly process the stream events
            for event in app.stream(initial_state, config=config):
                event_count += 1

                # Extract the current step from the event
                # The event format depends on LangGraph version, but typically
                # contains the node name as a key
                if isinstance(event, dict) and len(event) > 0:
                    current_step = list(event.keys())[0]
                else:
                    current_step = "unknown"

                # Get the current state if available in the event
                current_state = None
                if current_step in event and isinstance(event[current_step], dict):
                    current_state = event[current_step]

                # Save state snapshot after each significant step
                if current_state:
                    save_state_snapshot(current_state, f"after_{current_step}")

                    # Update status in state if possible
                    # NOTE(review): this mutates the yielded event payload;
                    # it likely does not feed back into the running graph —
                    # confirm whether the status write has any effect.
                    if "status" in current_state:
                        current_state["status"] = f"completed_{current_step}"

                # Log progress with job id for tracking
                logger.info(f"[Job {job_id}] Step {event_count} completed: {current_step}")

        except Exception as e:
            logger.error(f"[Job {job_id}] Error in workflow execution: {str(e)}")
            logger.error(traceback.format_exc())

            # Create error state for logging (shallow copy of the initial state)
            error_state = {**initial_state}
            if "errors" not in error_state:
                error_state["errors"] = []

            error_state["errors"].append({
                "step": "workflow_execution",
                "timestamp": datetime.now().isoformat(),
                "error": str(e),
                "traceback": traceback.format_exc()
            })

            error_state["status"] = "workflow_execution_error"
            save_state_snapshot(error_state, "workflow_error")

        logger.info(f"[Job {job_id}] Workflow completed for resume {resume_path}")
    except Exception as e:
        logger.error(f"Failed to process new resume {resume_path}: {str(e)}")
        logger.error(traceback.format_exc())
        raise
|
355
|
+
|
356
|
+
if __name__ == "__main__":
    # For testing the workflow directly: build, compile and start listening
    start_workflow()
|
File without changes
|
@@ -0,0 +1,177 @@
|
|
1
|
+
"""
|
2
|
+
Assessment Handler Node
|
3
|
+
Sends assessments to candidates via email or LMS integration
|
4
|
+
"""
|
5
|
+
|
6
|
+
import logging
|
7
|
+
from typing import Dict, Any, List
|
8
|
+
|
9
|
+
from utils.email_utils import send_email
|
10
|
+
from utils.lms_api import get_lms_client
|
11
|
+
|
12
|
+
from config import settings
|
13
|
+
|
14
|
+
# Configure logging
|
15
|
+
logging.basicConfig(level=logging.INFO)
|
16
|
+
logger = logging.getLogger(__name__)
|
17
|
+
|
18
|
+
def create_assessment_email(candidate_name: str, position_name: str, test_link: str) -> Dict[str, str]:
    """Create email content for assessment.

    Args:
        candidate_name: Name used in the greeting.
        position_name: Title of the role the assessment is for.
        test_link: URL the candidate follows to start the assessment.

    Returns:
        Dict with "subject", "plain_text" and "html_content" keys, ready
        to be passed to send_email().
    """
    subject = f"Assessment for {position_name} Position"

    # Plain-text fallback body
    plain_text = f"""
    Hello {candidate_name},

    Thank you for your interest in the {position_name} position. As part of our evaluation process,
    we'd like you to complete an assessment.

    Please click the link below to start the assessment:
    {test_link}

    The assessment should take approximately 60 minutes to complete.

    Best regards,
    Recruitment Team
    """

    # HTML body with a clickable link
    html_content = f"""
    <html>
    <body>
    <p>Hello {candidate_name},</p>
    <p>Thank you for your interest in the <strong>{position_name}</strong> position. As part of our evaluation process,
    we'd like you to complete an assessment.</p>
    <p>Please click the link below to start the assessment:</p>
    <p><a href="{test_link}">{test_link}</a></p>
    <p>The assessment should take approximately 60 minutes to complete.</p>
    <p>Best regards,<br>Recruitment Team</p>
    </body>
    </html>
    """

    return {
        "subject": subject,
        "plain_text": plain_text,
        "html_content": html_content
    }
|
56
|
+
|
57
|
+
def _send_assessment_by_email(state: Dict[str, Any], candidate_email: str,
                              candidate_name: str, position_name: str) -> None:
    """Send the assessment link by email and record the outcome in state.

    Shared by the no-LMS path and the LMS-failure fallback path (the
    original duplicated this block verbatim in both branches).
    """
    assessment_link = f"https://example.com/assessment?id={state['job_id']}"
    email_content = create_assessment_email(candidate_name, position_name, assessment_link)

    email_sent = send_email(
        recipient_email=candidate_email,
        subject=email_content["subject"],
        body=email_content["plain_text"],
        html_content=email_content["html_content"]
    )

    state["assessment"] = {
        "status": "sent" if email_sent else "failed",
        "method": "email",
        "assessment_link": assessment_link
    }

    if email_sent:
        logger.info(f"Assessment email sent to {candidate_email}")
    else:
        logger.error(f"Failed to send assessment email to {candidate_email}")
        # setdefault guards against callers that omit the errors list
        state.setdefault("errors", []).append({
            "step": "assessment_handler",
            "error": "Failed to send assessment email"
        })

def handle_assessment(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    LangGraph node to handle sending assessments to candidates.

    Prefers LMS delivery when both LMS_API_URL and LMS_API_KEY are
    configured, falling back to email when the LMS call fails; otherwise
    sends by email directly.

    Args:
        state: The current workflow state

    Returns:
        Updated workflow state with assessment status (state["assessment"]
        records status/method; failures are appended to state["errors"])
    """
    logger.info("Starting assessment handler")

    # Check if candidate email exists in state
    candidate_email = state.get("candidate_email")
    candidate_name = state.get("candidate_name", "Candidate")

    if not candidate_email:
        error_message = "Missing candidate email for assessment"
        logger.error(error_message)
        state.setdefault("errors", []).append({
            "step": "assessment_handler",
            "error": error_message
        })
        state["assessment"] = {"status": "failed", "reason": "missing_email"}
        return state

    # Get job position name
    job_data = state.get("job_description", {})
    position_name = job_data.get("position", "open position")

    try:
        # Check if we should use LMS integration
        if settings.LMS_API_URL and settings.LMS_API_KEY:
            # Send assessment via LMS
            lms_client = get_lms_client()
            lms_result = lms_client.send_assessment(
                candidate_email=candidate_email,
                candidate_name=candidate_name,
                position_name=position_name
            )

            if lms_result.get("success"):
                state["assessment"] = {
                    "status": "sent",
                    "method": "lms",
                    "lms_type": settings.LMS_TYPE,
                    "assessment_id": lms_result.get("assessment_id"),
                    "invitation_id": lms_result.get("invitation_id")
                }
                logger.info(f"Assessment sent to {candidate_email} via LMS")
            else:
                # LMS failed, fall back to email
                logger.warning(f"LMS assessment failed: {lms_result.get('error')}, falling back to email")
                _send_assessment_by_email(state, candidate_email, candidate_name, position_name)
        else:
            # No LMS configured, send via email
            _send_assessment_by_email(state, candidate_email, candidate_name, position_name)

        state["status"] = "assessment_handled"

    except Exception as e:
        error_message = f"Error sending assessment: {str(e)}"
        logger.error(error_message)

        state.setdefault("errors", []).append({
            "step": "assessment_handler",
            "error": error_message
        })

        state["assessment"] = {
            "status": "failed",
            "reason": str(e)
        }

    return state
|
@@ -0,0 +1,139 @@
|
|
1
|
+
"""
|
2
|
+
Job Description Generator Node
|
3
|
+
Generates detailed job descriptions using Azure OpenAI
|
4
|
+
"""
|
5
|
+
|
6
|
+
import os
|
7
|
+
import logging
|
8
|
+
from typing import Dict, Any
|
9
|
+
from langchain_openai import AzureChatOpenAI
|
10
|
+
from dotenv import load_dotenv
|
11
|
+
|
12
|
+
# Import the JD creation utility (from your existing code)
|
13
|
+
import sys
|
14
|
+
import os
|
15
|
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../")))
|
16
|
+
|
17
|
+
# Import config
|
18
|
+
from config import settings
|
19
|
+
|
20
|
+
# Configure logging
|
21
|
+
logging.basicConfig(level=logging.INFO)
|
22
|
+
logger = logging.getLogger(__name__)
|
23
|
+
|
24
|
+
def create_llm():
    """Build an AzureChatOpenAI client from the project settings.

    Credentials fall back from Azure-specific to generic OpenAI settings.
    Returns None (after logging) when initialization fails.
    """
    try:
        # Prefer Azure-specific settings, falling back to generic OpenAI ones
        key = settings.AZURE_OPENAI_KEY or settings.AZURE_OPENAI_API_KEY or settings.OPENAI_API_KEY
        base_endpoint = settings.AZURE_OPENAI_ENDPOINT or settings.OPENAI_API_BASE
        version = settings.AZURE_OPENAI_API_VERSION or settings.OPENAI_API_VERSION

        return AzureChatOpenAI(
            temperature=0.3,
            # deployment_name (not "deployment") is the accepted kwarg here
            deployment_name=settings.AZURE_OPENAI_DEPLOYMENT,
            azure_endpoint=base_endpoint,
            api_key=key,
            api_version=version,
        )
    except Exception as e:
        logger.error(f"Error initializing Azure OpenAI: {str(e)}")
        return None
|
41
|
+
|
42
|
+
def get_jd_prompt(job_data: Dict[str, Any]) -> str:
    """Generate the prompt for job description creation.

    Args:
        job_data: Job posting fields; missing keys fall back to the
            defaults shown below ("Not specified", "Full-time", etc.).

    Returns:
        The complete prompt string to send to the LLM.
    """
    position = job_data.get('position', 'Software Engineer')  # Default to Software Engineer if position not provided
    return f"""
    You are an expert HR content writer specializing in job descriptions.
    Create a comprehensive job description for a {position} role.

    Include the following sections:

    - Introduction to the role and company
    - Work tasks and responsibilities
    - Required qualifications and skills
    - Preferred skills and experience
    - Compensation and benefits information
    - About the organization
    - Application process information
    - include only the required skills and preferred skills in the job description

    Location: {job_data.get('location', 'Not specified')}
    Business area: {job_data.get('business_area', 'Not specified')}
    Employment type: {job_data.get('employment_type', 'Full-time')}
    Experience level: {job_data.get('experience_level', 'Not specified')}
    Work arrangement: {job_data.get('work_arrangement', 'Not specified')}

    Required skills: {', '.join(job_data.get('required_skills', []))}
    Preferred skills: {', '.join(job_data.get('preferred_skills', []))}

    Write in professional English, be concise yet comprehensive, and highlight the value
    proposition for potential candidates.
    """
|
72
|
+
|
73
|
+
def generate_job_description(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    LangGraph node to generate a job description.

    Skips generation when both job_description and job_description_text
    are already present. On failure, appends to state["errors"] and
    installs a fallback job_description_text so downstream nodes can
    continue.

    Args:
        state: The current workflow state

    Returns:
        Updated workflow state with job description
    """
    logger.info("Starting job description generation")

    # Check if job description already exists in state (idempotency guard)
    if state.get("job_description") and state.get("job_description_text"):
        logger.info("Job description already exists, skipping generation")
        return state

    try:
        # Create the language model (create_llm returns None on failure)
        llm = create_llm()
        if not llm:
            raise ValueError("Failed to initialize Azure OpenAI client")

        # Prepare job data (use sample data if not provided)
        job_data = state.get("job_description", {})
        if not job_data:
            # Use default job data if none provided
            job_data = {
                "position": "Software Engineer",
                "location": "Remote",
                "business_area": "Engineering",
                "employment_type": "Full-time",
                "experience_level": "Mid-level",
                "work_arrangement": "Remote",
                "required_skills": ["Python", "JavaScript", "API Development"],
                "preferred_skills": ["Azure", "CI/CD", "TypeScript"]
            }

        # Generate the prompt
        prompt = get_jd_prompt(job_data)

        # Invoke the language model
        response = llm.invoke(prompt)
        generated_text = response.content

        # Update the state with the generated job description
        state["job_description"] = job_data
        state["job_description_text"] = generated_text
        state["status"] = "jd_generated"

        logger.info("Job description generated successfully")

    except Exception as e:
        error_message = f"Error generating job description: {str(e)}"
        logger.error(error_message)

        # Add error to state
        # NOTE(review): assumes state["errors"] already exists — true for
        # states built by create_initial_state; confirm for other callers.
        state["errors"].append({
            "step": "jd_generator",
            "error": error_message
        })

        # Set fallback job description text so downstream nodes can proceed
        if not state.get("job_description_text"):
            state["job_description_text"] = "Default job description text for fallback purposes."

    return state
|