michael-agent 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- michael_agent/dashboard/static/__init__.py +0 -0
- michael_agent/dashboard/static/styles.css +311 -0
- michael_agent/dashboard/templates/__init__.py +0 -0
- michael_agent/dashboard/templates/career_portal.html +482 -0
- michael_agent/dashboard/templates/dashboard.html +807 -0
- michael_agent/dashboard/templates/jd_creation.html +318 -0
- michael_agent/dashboard/templates/resume_scoring.html +1032 -0
- michael_agent/dashboard/templates/upload_resume.html +411 -0
- michael_agent/langgraph_workflow/nodes/__init__.py +0 -0
- michael_agent/langgraph_workflow/nodes/assessment_handler.py +177 -0
- michael_agent/langgraph_workflow/nodes/jd_generator.py +139 -0
- michael_agent/langgraph_workflow/nodes/jd_poster.py +156 -0
- michael_agent/langgraph_workflow/nodes/question_generator.py +295 -0
- michael_agent/langgraph_workflow/nodes/recruiter_notifier.py +224 -0
- michael_agent/langgraph_workflow/nodes/resume_analyzer.py +631 -0
- michael_agent/langgraph_workflow/nodes/resume_ingestor.py +225 -0
- michael_agent/langgraph_workflow/nodes/sentiment_analysis.py +309 -0
- {michael_agent-1.0.1.dist-info → michael_agent-1.0.3.dist-info}/METADATA +1 -1
- michael_agent-1.0.3.dist-info/RECORD +38 -0
- michael_agent-1.0.1.dist-info/RECORD +0 -21
- {michael_agent-1.0.1.dist-info → michael_agent-1.0.3.dist-info}/WHEEL +0 -0
- {michael_agent-1.0.1.dist-info → michael_agent-1.0.3.dist-info}/top_level.txt +0 -0
michael_agent/langgraph_workflow/nodes/jd_generator.py
@@ -0,0 +1,139 @@
+"""
+Job Description Generator Node
+Generates detailed job descriptions using Azure OpenAI
+"""
+
+import os
+import logging
+from typing import Dict, Any
+from langchain_openai import AzureChatOpenAI
+from dotenv import load_dotenv
+
+# Import the JD creation utility (from your existing code)
+import sys
+import os
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../")))
+
+# Import config
+from config import settings
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def create_llm():
+    """Create and configure the LLM with Azure OpenAI"""
+    try:
+        api_key = settings.AZURE_OPENAI_KEY or settings.AZURE_OPENAI_API_KEY or settings.OPENAI_API_KEY
+        endpoint = settings.AZURE_OPENAI_ENDPOINT or settings.OPENAI_API_BASE
+        api_version = settings.AZURE_OPENAI_API_VERSION or settings.OPENAI_API_VERSION
+
+        return AzureChatOpenAI(
+            temperature=0.3,
+            deployment_name=settings.AZURE_OPENAI_DEPLOYMENT,  # Using deployment_name instead of deployment
+            azure_endpoint=endpoint,
+            api_key=api_key,
+            api_version=api_version,
+        )
+    except Exception as e:
+        logger.error(f"Error initializing Azure OpenAI: {str(e)}")
+        return None
+
+def get_jd_prompt(job_data: Dict[str, Any]) -> str:
+    """Generate the prompt for job description creation"""
+    position = job_data.get('position', 'Software Engineer')  # Default to Software Engineer if position not provided
+    return f"""
+    You are an expert HR content writer specializing in job descriptions.
+    Create a comprehensive job description for a {position} role.
+
+    Include the following sections:
+
+    - Introduction to the role and company
+    - Work tasks and responsibilities
+    - Required qualifications and skills
+    - Preferred skills and experience
+    - Compensation and benefits information
+    - About the organization
+    - Application process information
+    - include only the required skills and preferred skills in the job description
+
+    Location: {job_data.get('location', 'Not specified')}
+    Business area: {job_data.get('business_area', 'Not specified')}
+    Employment type: {job_data.get('employment_type', 'Full-time')}
+    Experience level: {job_data.get('experience_level', 'Not specified')}
+    Work arrangement: {job_data.get('work_arrangement', 'Not specified')}
+
+    Required skills: {', '.join(job_data.get('required_skills', []))}
+    Preferred skills: {', '.join(job_data.get('preferred_skills', []))}
+
+    Write in professional English, be concise yet comprehensive, and highlight the value
+    proposition for potential candidates.
+    """
+
+def generate_job_description(state: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    LangGraph node to generate a job description
+
+    Args:
+        state: The current workflow state
+
+    Returns:
+        Updated workflow state with job description
+    """
+    logger.info("Starting job description generation")
+
+    # Check if job description already exists in state
+    if state.get("job_description") and state.get("job_description_text"):
+        logger.info("Job description already exists, skipping generation")
+        return state
+
+    try:
+        # Create the language model
+        llm = create_llm()
+        if not llm:
+            raise ValueError("Failed to initialize Azure OpenAI client")
+
+        # Prepare job data (use sample data if not provided)
+        job_data = state.get("job_description", {})
+        if not job_data:
+            # Use default job data if none provided
+            job_data = {
+                "position": "Software Engineer",
+                "location": "Remote",
+                "business_area": "Engineering",
+                "employment_type": "Full-time",
+                "experience_level": "Mid-level",
+                "work_arrangement": "Remote",
+                "required_skills": ["Python", "JavaScript", "API Development"],
+                "preferred_skills": ["Azure", "CI/CD", "TypeScript"]
+            }
+
+        # Generate the prompt
+        prompt = get_jd_prompt(job_data)
+
+        # Invoke the language model
+        response = llm.invoke(prompt)
+        generated_text = response.content
+
+        # Update the state with the generated job description
+        state["job_description"] = job_data
+        state["job_description_text"] = generated_text
+        state["status"] = "jd_generated"
+
+        logger.info("Job description generated successfully")
+
+    except Exception as e:
+        error_message = f"Error generating job description: {str(e)}"
+        logger.error(error_message)
+
+        # Add error to state
+        state["errors"].append({
+            "step": "jd_generator",
+            "error": error_message
+        })
+
+        # Set fallback job description text if needed
+        if not state.get("job_description_text"):
+            state["job_description_text"] = "Default job description text for fallback purposes."
+
+    return state
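As a rough usage sketch (not part of the released code), the new `generate_job_description` node can be exercised on its own with a plain state dict. This assumes the module's `from config import settings` resolves to valid Azure OpenAI credentials on the caller's `sys.path`, and that the caller pre-seeds the `errors` list the node appends to on failure; the `job_id` and job fields below are invented for illustration.

# Illustrative only: drive the jd_generator node outside the LangGraph workflow.
# Assumes a `config` package with Azure OpenAI settings is importable,
# as required by `from config import settings` in the module above.
from michael_agent.langgraph_workflow.nodes.jd_generator import generate_job_description

state = {
    "job_id": "20250101120000",   # hypothetical identifier
    "job_description": {
        "position": "Backend Engineer",
        "location": "Remote",
        "required_skills": ["Python", "FastAPI"],
        "preferred_skills": ["Azure"],
    },
    "errors": [],                 # required: the error path calls state["errors"].append(...)
}

state = generate_job_description(state)
print(state["status"])                      # "jd_generated" on success
print(state["job_description_text"][:200])  # first part of the generated JD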
michael_agent/langgraph_workflow/nodes/jd_poster.py
@@ -0,0 +1,156 @@
+"""
+JD Poster Node
+Mock posts job descriptions to external platforms
+"""
+
+import os
+import json
+import logging
+import requests
+import time
+import uuid
+from typing import Dict, Any, List
+from datetime import datetime
+
+# Import config
+from config import settings
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def mock_post_to_linkedin(job_data: Dict[str, Any]) -> Dict[str, Any]:
+    """Mock posting a job to LinkedIn"""
+    logger.info("Mocking job post to LinkedIn")
+    # Simulate API call delay
+    time.sleep(0.5)
+
+    return {
+        "platform": "LinkedIn",
+        "status": "success",
+        "post_id": f"li-{uuid.uuid4()}",
+        "timestamp": datetime.now().isoformat()
+    }
+
+def mock_post_to_indeed(job_data: Dict[str, Any]) -> Dict[str, Any]:
+    """Mock posting a job to Indeed"""
+    logger.info("Mocking job post to Indeed")
+    # Simulate API call delay
+    time.sleep(0.5)
+
+    return {
+        "platform": "Indeed",
+        "status": "success",
+        "post_id": f"ind-{uuid.uuid4()}",
+        "timestamp": datetime.now().isoformat()
+    }
+
+def mock_post_to_glassdoor(job_data: Dict[str, Any]) -> Dict[str, Any]:
+    """Mock posting a job to Glassdoor"""
+    logger.info("Mocking job post to Glassdoor")
+    # Simulate API call delay
+    time.sleep(0.5)
+
+    return {
+        "platform": "Glassdoor",
+        "status": "success",
+        "post_id": f"gd-{uuid.uuid4()}",
+        "timestamp": datetime.now().isoformat()
+    }
+
+def save_job_description(job_id: str, job_data: Dict[str, Any], job_text: str) -> str:
+    """Save the job description to a file"""
+    # Ensure log directory exists
+    job_logs_dir = settings.JOB_DESCRIPTIONS_DIR
+    os.makedirs(job_logs_dir, exist_ok=True)
+
+    # Generate timestamp for filename if no job_id provided
+    if not job_id:
+        job_id = datetime.now().strftime("%Y%m%d%H%M%S")
+
+    # Create the job description file
+    file_path = os.path.join(job_logs_dir, f"{job_id}.json")
+
+    # Prepare data to save
+    data_to_save = {
+        "job_id": job_id,
+        "timestamp": datetime.now().isoformat(),
+        "job_data": job_data,
+        "job_description": job_text
+    }
+
+    # Save to file
+    with open(file_path, "w") as f:
+        json.dump(data_to_save, f, indent=2)
+
+    logger.info(f"Job description saved to {file_path}")
+    return file_path
+
+def post_job_description(state: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    LangGraph node to post job descriptions to job boards
+
+    Args:
+        state: The current workflow state
+
+    Returns:
+        Updated workflow state with posting results
+    """
+    logger.info("Starting job posting")
+
+    # Check if job description already exists in state
+    if not state.get("job_description") or not state.get("job_description_text"):
+        error_message = "No job description available for posting"
+        logger.error(error_message)
+
+        # Add error to state
+        state["errors"].append({
+            "step": "jd_poster",
+            "error": error_message
+        })
+
+        return state
+
+    try:
+        job_id = state.get("job_id", "")
+        job_data = state.get("job_description", {})
+        job_text = state.get("job_description_text", "")
+
+        # Save job description to file
+        job_file_path = save_job_description(job_id, job_data, job_text)
+
+        # Ensure the file path is stored in state
+        state["job_file_path"] = job_file_path
+
+        # Mock post to job platforms
+        posting_results = []
+
+        # LinkedIn
+        linkedin_result = mock_post_to_linkedin(job_data)
+        posting_results.append(linkedin_result)
+
+        # Indeed
+        indeed_result = mock_post_to_indeed(job_data)
+        posting_results.append(indeed_result)
+
+        # Glassdoor
+        glassdoor_result = mock_post_to_glassdoor(job_data)
+        posting_results.append(glassdoor_result)
+
+        # Update state with results
+        state["job_posting_results"] = posting_results
+        state["status"] = "job_posted"
+
+        logger.info(f"Job posted successfully to {len(posting_results)} platforms")
+
+    except Exception as e:
+        error_message = f"Error posting job description: {str(e)}"
+        logger.error(error_message)
+
+        # Add error to state
+        state["errors"].append({
+            "step": "jd_poster",
+            "error": error_message
+        })
+
+    return state
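A similarly hedged sketch of chaining the two nodes by hand: despite the `requests` import, `post_job_description` only mock-posts, so the observable side effects are the JSON file written under `settings.JOB_DESCRIPTIONS_DIR` and the `job_posting_results` entries added to the state. The `job_id` value is invented, and a working Azure OpenAI configuration is assumed for the generation step.

# Illustrative only: run the generator node, then the mock poster node.
from michael_agent.langgraph_workflow.nodes.jd_generator import generate_job_description
from michael_agent.langgraph_workflow.nodes.jd_poster import post_job_description

state = {"job_id": "demo-001", "job_description": {}, "errors": []}  # hypothetical state

state = generate_job_description(state)  # fills job_description / job_description_text
state = post_job_description(state)      # writes <JOB_DESCRIPTIONS_DIR>/demo-001.json, then mock-posts

for result in state.get("job_posting_results", []):
    print(result["platform"], result["status"], result["post_id"])
print(state.get("job_file_path"))        # path of the saved job description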
michael_agent/langgraph_workflow/nodes/question_generator.py
@@ -0,0 +1,295 @@
+"""
+Question Generator Node
+Generates custom interview questions based on resume and job description
+"""
+
+import logging
+from typing import Dict, Any, List
+from langchain_openai import AzureChatOpenAI
+
+from config import settings
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def create_llm():
+    """Create and configure the LLM with Azure OpenAI"""
+    try:
+        return AzureChatOpenAI(
+            temperature=0.7,  # Higher temperature for more diverse questions
+            deployment_name=settings.AZURE_OPENAI_DEPLOYMENT,
+            azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,
+            api_key=settings.AZURE_OPENAI_KEY,
+            api_version=settings.AZURE_OPENAI_API_VERSION,
+        )
+    except Exception as e:
+        logger.error(f"Error initializing Azure OpenAI: {str(e)}")
+        return None
+
+def get_question_generation_prompt(resume_text: str, jd_text: str, relevance_score: float) -> str:
+    """Generate the prompt for interview question generation"""
+    return f"""
+    You are an expert technical interviewer and recruiter.
+
+    Generate a set of interview questions for a candidate based on their resume and the job description.
+    The questions should help assess the candidate's fit for the role.
+
+    Resume:
+    {resume_text[:2000]} # Limit to avoid token limits
+
+    Job Description:
+    {jd_text[:1000]}
+
+    The candidate's resume relevance score is {relevance_score:.2f} out of 1.0.
+
+    Generate 10 questions in the following JSON format:
+    ```json
+    {{
+      "technical_questions": [
+        {{
+          "question": "string",
+          "difficulty": "easy|medium|hard",
+          "category": "technical_skill|domain_knowledge|problem_solving",
+          "purpose": "brief explanation of what this question assesses"
+        }},
+        // 4 more technical questions...
+      ],
+      "behavioral_questions": [
+        {{
+          "question": "string",
+          "category": "teamwork|leadership|conflict_resolution|problem_solving|adaptability",
+          "purpose": "brief explanation of what this question assesses"
+        }},
+        // 4 more behavioral questions...
+      ],
+      "follow_up_areas": ["Area 1", "Area 2", "Area 3"] // Important areas to explore further based on resume gaps
+    }}
+    ```
+
+    Focus on questions that will reveal the candidate's true abilities related to the job requirements.
+    Include questions that address any potential gaps between the resume and job description.
+    """
+
+def generate_interview_questions(state: Dict[str, Any]) -> Dict[str, Any]:
+    """Generate interview questions based on resume and job description."""
+    logger = logging.getLogger(__name__)
+    logger.info("Starting interview question generation")
+
+    resume_text = state.get("resume_text")
+    job_description_text = state.get("job_description_text")
+
+    # Add print statements to verify the input data
+    print(f"[QUESTION_GENERATOR] Resume text available: {resume_text is not None}, length: {len(resume_text) if resume_text else 0}")
+    print(f"[QUESTION_GENERATOR] Job description text available: {job_description_text is not None}, length: {len(job_description_text) if job_description_text else 0}")
+
+    if job_description_text:
+        print(f"[QUESTION_GENERATOR] JD First 200 chars: {job_description_text[:200]}...")
+
+    # Generate some basic questions even if we're missing job description
+    if not resume_text:
+        logger.error("Missing resume text or job description text for question generation")
+        # Generate generic questions when no data is available
+        questions = {
+            "technical_questions": [
+                {
+                    "question": "Can you describe your experience with the technologies mentioned in your resume?",
+                    "difficulty": "medium",
+                    "category": "technical_skill",
+                    "purpose": "Assess general technical experience and honesty"
+                },
+                {
+                    "question": "How do you approach problem-solving in your work?",
+                    "difficulty": "medium",
+                    "category": "problem_solving",
+                    "purpose": "Evaluate general problem-solving approach"
+                }
+            ],
+            "behavioral_questions": [
+                {
+                    "question": "Can you tell me about a time when you had to work with a difficult team member?",
+                    "category": "teamwork",
+                    "purpose": "Assess interpersonal skills and conflict resolution"
+                },
+                {
+                    "question": "Describe a situation where you had to learn something new quickly.",
+                    "category": "adaptability",
+                    "purpose": "Evaluate learning agility and adaptability"
+                }
+            ],
+            "follow_up_areas": [
+                "Technical skills verification",
+                "Past project details",
+                "Team collaboration"
+            ]
+        }
+
+        return {
+            "status": "completed_generate_interview_questions",
+            "interview_questions": questions,
+            "errors": [{"step": "question_generator", "error": "Missing resume text or job description text for question generation"}]
+        }
+
+    # Check if questions already exist in state
+    if state.get("interview_questions"):
+        logger.info("Interview questions already exist, skipping generation")
+        return state
+
+    # Check if required data exists
+    resume_text = state.get("resume_text")
+    jd_text = state.get("job_description_text")
+
+    if not resume_text or not jd_text:
+        error_message = "Missing resume text or job description text for question generation"
+        logger.error(error_message)
+
+        # Add error to state
+        state["errors"].append({
+            "step": "question_generator",
+            "error": error_message
+        })
+
+        # Set default questions
+        state["interview_questions"] = generate_default_questions()
+        return state
+
+    try:
+        # Create the language model
+        llm = create_llm()
+        if not llm:
+            raise ValueError("Failed to initialize Azure OpenAI client")
+
+        # Get resume score from state (default to 0.5 if missing)
+        relevance_score = state.get("relevance_score", 0.5)
+
+        # Generate the prompt
+        prompt = get_question_generation_prompt(resume_text, jd_text, relevance_score)
+        print(f"[QUESTION_GENERATOR] Created prompt with resume length {len(resume_text)} and JD length {len(jd_text)}")
+
+        # Invoke the language model
+        response = llm.invoke(prompt)
+        generated_text = response.content
+
+        # Extract JSON from response (in case there's surrounding text)
+        import re
+        import json
+
+        json_match = re.search(r'```json\s*(.*?)\s*```', generated_text, re.DOTALL)
+        if json_match:
+            json_str = json_match.group(1)
+        else:
+            # If no markdown code block, try to find JSON directly
+            json_match = re.search(r'({[\s\S]*})', generated_text)
+            if json_match:
+                json_str = json_match.group(1)
+            else:
+                raise ValueError("Could not extract JSON from LLM response")
+
+        # Parse the JSON
+        questions = json.loads(json_str)
+
+        # Update the state with the generated questions
+        state["interview_questions"] = questions
+        state["status"] = "questions_generated"
+
+        # Save a snapshot of the state with the questions
+        save_snapshot(state)
+
+        logger.info("Interview questions generated successfully")
+
+    except Exception as e:
+        error_message = f"Error generating interview questions: {str(e)}"
+        logger.error(error_message)
+
+        # Add error to state
+        state["errors"].append({
+            "step": "question_generator",
+            "error": error_message
+        })
+
+        # Use default questions as fallback
+        state["interview_questions"] = generate_default_questions()
+
+    return state
+
+def generate_default_questions():
+    """Generate default interview questions as fallback"""
+    return {
+        "technical_questions": [
+            {
+                "question": "Can you describe your experience with the technologies mentioned in your resume?",
+                "difficulty": "medium",
+                "category": "technical_skill",
+                "purpose": "Assess general technical experience and honesty"
+            },
+            {
+                "question": "How do you approach problem-solving in your work?",
+                "difficulty": "medium",
+                "category": "problem_solving",
+                "purpose": "Evaluate general problem-solving approach"
+            }
+        ],
+        "behavioral_questions": [
+            {
+                "question": "Can you tell me about a time when you had to work with a difficult team member?",
+                "category": "teamwork",
+                "purpose": "Assess interpersonal skills and conflict resolution"
+            },
+            {
+                "question": "Describe a situation where you had to learn something new quickly.",
+                "category": "adaptability",
+                "purpose": "Evaluate learning agility and adaptability"
+            }
+        ],
+        "follow_up_areas": ["Technical skills verification", "Past project details", "Team collaboration"]
+    }
+
+def save_snapshot(state: Dict[str, Any]) -> None:
+    """Save a snapshot of the state to a JSON file for dashboard access"""
+    try:
+        import os
+        import json
+        import time
+
+        job_id = state.get("job_id")
+        if not job_id:
+            logger.warning("No job ID found in state, cannot save snapshot")
+            return
+
+        # Create a timestamped snapshot filename
+        timestamp = time.strftime("%Y%m%d%H%M%S")
+        snapshot_dir = os.path.join("./logs", "snapshots")
+        os.makedirs(snapshot_dir, exist_ok=True)
+
+        snapshot_path = os.path.join(snapshot_dir, f"{timestamp}_{job_id}_after_question_generator.json")
+
+        # Create a clean version of state for saving
+        save_state = {
+            "job_id": job_id,
+            "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S.%f"),
+            "status": state.get("status", "unknown"),
+            "resume_path": state.get("resume_path", ""),
+            "errors": state.get("errors", []),
+        }
+
+        # Add candidate name and email if available
+        if "candidate_name" in state:
+            save_state["candidate_name"] = state["candidate_name"]
+
+        if "candidate_email" in state:
+            save_state["candidate_email"] = state["candidate_email"]
+
+        # Add interview questions if available
+        if "interview_questions" in state:
+            save_state["interview_questions"] = state["interview_questions"]
+
+        # Save the snapshot
+        with open(snapshot_path, 'w') as f:
+            json.dump(save_state, f, indent=2)
+
+        logger.info(f"Saved question generator snapshot to {snapshot_path}")
+
+    except Exception as e:
+        logger.error(f"Error saving state snapshot: {str(e)}")
+        import traceback
+        logger.error(traceback.format_exc())
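The JSON-extraction fallback in `generate_interview_questions` (try a ```json ... ``` fence first, then the outermost brace pair) can be reproduced without calling Azure at all. The helper below is an illustrative re-implementation of that inline logic, not a function exported by the package; the sample reply string is made up.

# Illustrative re-implementation of the node's JSON-extraction fallback.
import json
import re

def extract_questions(generated_text: str) -> dict:
    # Prefer a ```json ... ``` fenced block in the LLM reply.
    match = re.search(r'```json\s*(.*?)\s*```', generated_text, re.DOTALL)
    if not match:
        # Otherwise fall back to the outermost brace pair.
        match = re.search(r'({[\s\S]*})', generated_text)
    if not match:
        raise ValueError("Could not extract JSON from LLM response")
    return json.loads(match.group(1))

reply = 'Sure, here you go:\n```json\n{"technical_questions": [], "behavioral_questions": [], "follow_up_areas": ["APIs"]}\n```'
print(extract_questions(reply)["follow_up_areas"])   # ['APIs']

Note that the brace fallback is greedy (first `{` to last `}`), which mirrors the node's behavior but can swallow trailing prose if the reply contains a stray closing brace.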