michael-agent 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- michael_agent/config/__init__.py +0 -0
- michael_agent/config/settings.py +66 -0
- michael_agent/dashboard/__init__.py +0 -0
- michael_agent/dashboard/app.py +1450 -0
- michael_agent/dashboard/static/__init__.py +0 -0
- michael_agent/dashboard/templates/__init__.py +0 -0
- michael_agent/langgraph_workflow/__init__.py +0 -0
- michael_agent/langgraph_workflow/graph_builder.py +358 -0
- michael_agent/langgraph_workflow/nodes/__init__.py +0 -0
- michael_agent/langgraph_workflow/nodes/assessment_handler.py +177 -0
- michael_agent/langgraph_workflow/nodes/jd_generator.py +139 -0
- michael_agent/langgraph_workflow/nodes/jd_poster.py +156 -0
- michael_agent/langgraph_workflow/nodes/question_generator.py +295 -0
- michael_agent/langgraph_workflow/nodes/recruiter_notifier.py +224 -0
- michael_agent/langgraph_workflow/nodes/resume_analyzer.py +631 -0
- michael_agent/langgraph_workflow/nodes/resume_ingestor.py +225 -0
- michael_agent/langgraph_workflow/nodes/sentiment_analysis.py +309 -0
- michael_agent/utils/__init__.py +0 -0
- michael_agent/utils/email_utils.py +140 -0
- michael_agent/utils/id_mapper.py +14 -0
- michael_agent/utils/jd_utils.py +34 -0
- michael_agent/utils/lms_api.py +226 -0
- michael_agent/utils/logging_utils.py +192 -0
- michael_agent/utils/monitor_utils.py +289 -0
- michael_agent/utils/node_tracer.py +88 -0
- {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/METADATA +2 -2
- michael_agent-1.0.2.dist-info/RECORD +32 -0
- michael_agent-1.0.0.dist-info/RECORD +0 -7
- {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/WHEEL +0 -0
- {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1450 @@
"""
Flask dashboard for monitoring the SmartRecruitAgent workflow
"""

import os
from os.path import join
import json
import time
from datetime import datetime
from flask import Flask, render_template, jsonify, request
from flask_socketio import SocketIO
import threading
import logging
import random  # Make sure this is here at the top level
import traceback

from config import settings

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize Flask app
app = Flask(__name__)
app.config['SECRET_KEY'] = 'smartrecruit2025'

# Initialize Socket.IO for real-time updates
# Configure for async mode with eventlet for production use
try:
    import eventlet
    async_mode = 'eventlet'
    logger.info("Using eventlet for Socket.IO")
except ImportError:
    try:
        import gevent
        async_mode = 'gevent'
        logger.info("Using gevent for Socket.IO")
    except ImportError:
        async_mode = 'threading'
        logger.warning("Using threading mode for Socket.IO - this is not recommended for production use")

socketio = SocketIO(app, async_mode=async_mode, cors_allowed_origins="*")

# In-memory store for workflow status (in production, use a database)
workflow_logs = []
node_statuses = {
    'jd_generator': {'status': 'idle', 'last_run': None},
    'jd_poster': {'status': 'idle', 'last_run': None},
    'resume_ingestor': {'status': 'idle', 'last_run': None},
    'resume_analyzer': {'status': 'idle', 'last_run': None},
    'sentiment_analysis': {'status': 'idle', 'last_run': None},
    'assessment_handler': {'status': 'idle', 'last_run': None},
    'question_generator': {'status': 'idle', 'last_run': None},
    'recruiter_notifier': {'status': 'idle', 'last_run': None}
}

# Check if a JSON log file exists and load it
LOG_FILE = os.path.join(settings.LOG_DIR, 'workflow_logs.json')
if os.path.exists(LOG_FILE):
    try:
        with open(LOG_FILE, 'r') as f:
            workflow_logs = json.load(f)
    except Exception as e:
        logger.error(f"Error loading log file: {e}")

# Setup thread variable at module level
background_thread = None

@app.route('/')
def index():
    """Main dashboard route"""
    global background_thread

    # Start background thread if it's not already running
    if background_thread is None or not background_thread.is_alive():
        background_thread = threading.Thread(target=background_updater)
        background_thread.daemon = True
        background_thread.start()

    return render_template('dashboard.html')

@app.route('/dashboard')
def dashboard():
    """Main dashboard route"""
    return render_template('dashboard.html')

@app.route('/jd-creation')
def jd_creation():
    """Job Description creation page"""
    return render_template('jd_creation.html')

@app.route('/career-portal')
def career_portal():
    """Career portal page for job listings and applications"""
    return render_template('career_portal.html')

@app.route('/resume-scoring')
def resume_scoring():
    """Resume scoring analysis page"""
    return render_template('resume_scoring.html')

@app.route('/upload-resume')
def upload_resume():
    """Resume upload page"""
    return render_template('upload_resume.html')

@app.route('/api/logs')
def get_logs():
    """API endpoint to get workflow logs"""
    return jsonify({
        'logs': workflow_logs,
        'node_statuses': node_statuses
    })

@app.route('/api/status')
def get_status():
    """API endpoint to get current system status"""
    return jsonify({
        'node_statuses': node_statuses,
        'stats': {
            'total_resumes_processed': len(workflow_logs),
            'resumes_today': len([log for log in workflow_logs if
                                  log.get('timestamp', '').startswith(datetime.now().strftime('%Y-%m-%d'))])
        }
    })

@app.route('/api/retry', methods=['POST'])
def retry_job():
    """API endpoint to retry a failed job"""
    data = request.json
    job_id = data.get('job_id')

    # In a real implementation, this would trigger the LangGraph workflow to retry
    # For now, just update the status in the logs
    for log in workflow_logs:
        if log.get('job_id') == job_id:
            log['status'] = 'retrying'

    # Emit update to clients
    socketio.emit('log_update', {'logs': workflow_logs})

    return jsonify({'success': True})

def update_node_status(node_name, status, details=None):
    """Update the status of a workflow node"""
    if node_name in node_statuses:
        node_statuses[node_name]['status'] = status
        node_statuses[node_name]['last_run'] = datetime.now().isoformat()
        if details:
            node_statuses[node_name]['details'] = details

        # Emit update to clients
        socketio.emit('status_update', {'node_statuses': node_statuses})

        # Save to disk
        save_logs_to_file()
    else:
        logger.error(f"Unknown node name: {node_name}")

def add_log_entry(log_data):
    """Add a new log entry to the workflow logs"""
    if 'timestamp' not in log_data:
        log_data['timestamp'] = datetime.now().isoformat()

    workflow_logs.append(log_data)

    # Emit update to clients
    socketio.emit('log_update', {'logs': workflow_logs})

    # Save to disk
    save_logs_to_file()

def save_logs_to_file():
    """Save logs to disk"""
    try:
        os.makedirs(settings.LOG_DIR, exist_ok=True)
        with open(LOG_FILE, 'w') as f:
            json.dump(workflow_logs, f, indent=2, default=str)

        # Also write node statuses to separate file
        node_status_file = os.path.join(settings.LOG_DIR, 'node_statuses.json')
        with open(node_status_file, 'w') as f:
            json.dump(node_statuses, f, indent=2, default=str)

        logger.debug(f"Log files saved successfully")
    except Exception as e:
        logger.error(f"Error saving log file: {str(e)}")
        logger.error(traceback.format_exc())

# Background thread for dashboard updates
def background_updater():
    """Background thread for periodic dashboard updates"""
    try:
        logger.info("Background updater thread started")
        while True:
            try:
                # Emit heartbeat for client connectivity check
                current_time = datetime.now().isoformat()
                socketio.emit('heartbeat', {'time': current_time})

                # Check for any file system changes if needed
                check_for_workflow_updates()

                # Sleep for the configured interval
                time.sleep(settings.DASHBOARD_UPDATE_INTERVAL)
            except Exception as e:
                logger.error(f"Error in background updater: {str(e)}")
                # Sleep a bit to avoid thrashing in case of persistent errors
                time.sleep(max(5, settings.DASHBOARD_UPDATE_INTERVAL))
    except Exception as e:
        logger.error(f"Fatal error in background updater: {str(e)}")
        logger.error(traceback.format_exc())

def check_for_workflow_updates():
    """Check for updates to workflow status from filesystem"""
    # This function can be expanded to check for new log files, etc.
    pass

with app.app_context():
    def start_background_thread():
        """Start the background thread before the first request"""
        thread = threading.Thread(target=background_updater)
        thread.daemon = True
        thread.start()

    # Start the thread immediately
    start_background_thread()

# API for LangGraph nodes to post updates
@app.route('/api/update', methods=['POST'])
def update_workflow():
    """API endpoint for LangGraph nodes to post updates"""
    data = request.json

    if 'node_name' in data and 'status' in data:
        update_node_status(data['node_name'], data['status'], data.get('details'))

    if 'log_entry' in data:
        add_log_entry(data['log_entry'])

    return jsonify({'success': True})

# API endpoints for Job Description creation
@app.route('/api/generate-jd', methods=['POST'])
def generate_jd():
    """API endpoint to generate a job description"""
    data = request.json

    try:
        # In a real implementation, this would use the JD generator from LangGraph
        # For demo purposes, we'll simulate a response

        from sys import path
        from os.path import dirname, abspath, join
        import importlib.util

        # Import the JD generator module
        jd_generator_path = join(dirname(dirname(abspath(__file__))),
                                 'langgraph_workflow', 'nodes', 'jd_generator.py')
        spec = importlib.util.spec_from_file_location("jd_generator", jd_generator_path)
        jd_generator = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(jd_generator)

        # Create initial state with job data
        state = {
            "job_description": data,
            "errors": []
        }

        # Use the JD generator node to create the job description
        result_state = jd_generator.generate_job_description(state)

        return jsonify({
            'success': True,
            'job_description_text': result_state.get('job_description_text', 'Error generating job description')
        })
    except Exception as e:
        logger.error(f"Error generating job description: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

@app.route('/api/save-jd', methods=['POST'])
def save_jd():
    try:
        data = request.json

        # Create a timestamp-based job ID
        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        job_id = timestamp

        # Job data structure
        jd_data = {
            "job_id": job_id,
            "timestamp": datetime.now().isoformat(),
            "job_data": data.get("metadata", {}),
            "job_description": data.get("content", "")
        }

        # Save to job_descriptions directory
        os.makedirs("job_descriptions", exist_ok=True)
        filepath = os.path.join("job_descriptions", f"{job_id}.json")

        with open(filepath, 'w') as f:
            json.dump(jd_data, f, indent=2)

        return jsonify({"success": True, "job_id": job_id})
    except Exception as e:
        return jsonify({"success": False, "error": str(e)})

# API endpoints for Resume Scoring and Candidate Management
@app.route('/api/jobs')
def get_jobs():
    """API endpoint to get list of jobs"""
    try:
        # In a real implementation, this would fetch from a database
        # For demo purposes, we'll read from saved JSON files

        import json
        import os
        import re
        from glob import glob
        from os.path import join

        jd_dir = join(settings.JOB_DESCRIPTIONS_DIR)
        if not os.path.exists(jd_dir):
            return jsonify({'jobs': []})

        jobs = []
        for jd_file in glob(join(jd_dir, '*.json')):
            try:
                with open(jd_file, 'r') as f:
                    jd_data = json.load(f)

                # Extract title from job description using regex
                title_match = re.search(r'\*\*Job Title:\s*(.*?)\*\*', jd_data.get('job_description', ''))
                title = title_match.group(1) if title_match else "Untitled Job"

                # Get job ID from filename or job_id field
                job_id = jd_data.get('job_id', os.path.basename(jd_file).split('.')[0])

                # Create timestamp if not available
                created_at = jd_data.get('timestamp', jd_data.get('created_at', ''))

                jobs.append({
                    'id': job_id,
                    'title': title,
                    'created_at': created_at
                })
            except Exception as e:
                logger.error(f"Error processing job file {jd_file}: {str(e)}")

        # Sort by created_at (newest first) with improved safety checks for None values
        def sort_key(x):
            created_at = x.get('created_at')
            # Return a default datetime string for None values to avoid comparison issues
            return created_at if created_at is not None else '1970-01-01T00:00:00'

        # Try to sort, but if that fails, return unsorted list rather than failing
        try:
            jobs.sort(key=sort_key, reverse=True)
        except Exception as sort_error:
            logger.warning(f"Error sorting jobs: {str(sort_error)}. Returning unsorted list.")

        return jsonify({'jobs': jobs})
    except Exception as e:
        logger.error(f"Error getting jobs: {str(e)}")
        return jsonify({'jobs': [], 'error': str(e)}), 500

@app.route('/api/career-jobs')
def get_career_jobs():
    """API endpoint to get list of jobs for the career portal"""
    try:
        # In a real implementation, this would fetch from a database
        # For demo purposes, we'll read from saved JSON files

        import json
        import os
        from glob import glob
        from os.path import join
        import re

        jd_dir = join(settings.JOB_DESCRIPTIONS_DIR)
        logger.info(f"Looking for job descriptions in: {jd_dir}")

        if not os.path.exists(jd_dir):
            logger.warning(f"Job descriptions directory does not exist: {jd_dir}")
            return jsonify({'jobs': []})

        job_files = glob(join(jd_dir, '*.json'))
        logger.info(f"Found {len(job_files)} job description files")

        jobs = []
        for jd_file in job_files:
            try:
                logger.info(f"Processing job file: {jd_file}")
                with open(jd_file, 'r') as f:
                    jd_data = json.load(f)

                # Extract title from job description using regex
                title_match = re.search(r'\*\*Job Title:\s*(.*?)\*\*', jd_data.get('job_description', ''))
                title = title_match.group(1) if title_match else "Untitled Job"

                # Extract location from job data or job description
                location = "Not specified"
                if 'job_data' in jd_data and 'location' in jd_data['job_data']:
                    location = jd_data['job_data']['location']
                else:
                    location_match = re.search(r'\*\*Location:\*\*\s*(.*?)(?:\n|$)', jd_data.get('job_description', ''))
                    if location_match:
                        location = location_match.group(1)

                # Extract employment type
                employment_type = "Full-time"
                emp_type_match = re.search(r'\*\*Employment Type:\*\*\s*(.*?)(?:\n|$)', jd_data.get('job_description', ''))
                if emp_type_match:
                    employment_type = emp_type_match.group(1)

                # Extract experience level
                experience_level = "Not specified"
                exp_level_match = re.search(r'\*\*Experience Level:\*\*\s*(.*?)(?:\n|$)', jd_data.get('job_description', ''))
                if exp_level_match:
                    experience_level = exp_level_match.group(1)

                # Extract skills from job data
                required_skills = []
                preferred_skills = []

                if 'job_data' in jd_data:
                    required_skills = jd_data['job_data'].get('required_skills', [])
                    preferred_skills = jd_data['job_data'].get('preferred_skills', [])

                # Get job ID from filename or job_id field
                job_id = jd_data.get('job_id', os.path.basename(jd_file).split('.')[0])

                # Create timestamp if not available
                created_at = jd_data.get('timestamp', jd_data.get('created_at', ''))

                logger.info(f"Extracted job data: id={job_id}, title={title}, location={location}")

                jobs.append({
                    'id': job_id,
                    'title': title,
                    'content': jd_data.get('job_description', ''),
                    'created_at': created_at,
                    'location': location,
                    'employment_type': employment_type,
                    'experience_level': experience_level,
                    'required_skills': required_skills,
                    'preferred_skills': preferred_skills
                })
            except Exception as file_error:
                logger.error(f"Error processing job file {jd_file}: {str(file_error)}")

        # Sort by created_at (newest first) with improved safety checks for None values
        def sort_key(x):
            created_at = x.get('created_at')
            # Return a default datetime string for None values to avoid comparison issues
            return created_at if created_at is not None else '1970-01-01T00:00:00'

        # Try to sort, but if that fails, return unsorted list rather than failing
        try:
            jobs.sort(key=sort_key, reverse=True)
        except Exception as sort_error:
            logger.warning(f"Error sorting jobs: {str(sort_error)}. Returning unsorted list.")

        return jsonify({'jobs': jobs})
    except Exception as e:
        logger.error(f"Error getting jobs for career portal: {str(e)}")
        return jsonify({'jobs': [], 'error': str(e)}), 500

@app.route('/api/jobs/<job_id>')
def get_job(job_id):
    """API endpoint to get job details"""
    try:
        # In a real implementation, this would fetch from a database
        # For demo purposes, we'll read from saved JSON file

        import json
        import os
        import re

        jd_file = join(settings.JOB_DESCRIPTIONS_DIR, f'{job_id}.json')
        if not os.path.exists(jd_file):
            return jsonify({'error': 'Job not found'}), 404

        with open(jd_file, 'r') as f:
            jd_data = json.load(f)

        # Extract title from job description using regex
        title_match = re.search(r'\*\*Job Title:\s*(.*?)\*\*', jd_data.get('job_description', ''))
        title = title_match.group(1) if title_match else "Untitled Job"

        # Extract location from job data or job description
        location = "Not specified"
        if 'job_data' in jd_data and 'location' in jd_data['job_data']:
            location = jd_data['job_data']['location']
        else:
            location_match = re.search(r'\*\*Location:\*\*\s*(.*?)(?:\n|$)', jd_data.get('job_description', ''))
            if location_match:
                location = location_match.group(1)

        # Extract employment type
        employment_type = "Full-time"
        emp_type_match = re.search(r'\*\*Employment Type:\*\*\s*(.*?)(?:\n|$)', jd_data.get('job_description', ''))
        if emp_type_match:
            employment_type = emp_type_match.group(1)

        # Extract experience level
        experience_level = "Not specified"
        exp_level_match = re.search(r'\*\*Experience Level:\*\*\s*(.*?)(?:\n|$)', jd_data.get('job_description', ''))
        if exp_level_match:
            experience_level = exp_level_match.group(1)

        # Extract skills from job data
        required_skills = []
        preferred_skills = []

        if 'job_data' in jd_data:
            required_skills = jd_data['job_data'].get('required_skills', [])
            preferred_skills = jd_data['job_data'].get('preferred_skills', [])

        return jsonify({
            'id': job_id,
            'title': title,
            'content': jd_data.get('job_description', ''),
            'created_at': jd_data.get('timestamp', jd_data.get('created_at', '')),
            'location': location,
            'employment_type': employment_type,
            'experience_level': experience_level,
            'required_skills': required_skills,
            'preferred_skills': preferred_skills
        })
    except Exception as e:
        logger.error(f"Error getting job details: {str(e)}")
        return jsonify({'error': str(e)}), 500

@app.route('/api/candidates')
def get_candidates():
    """API endpoint to get candidates for a job"""
    logger.info(f"Request arguments: {request.args}")
    job_id = request.args.get('job_id')
    logger.info(f"Processing candidates request for job_id: {job_id}")

    # Debug logging for settings
    logger.info(f"RESUME_WATCH_DIR setting: {settings.RESUME_WATCH_DIR}")
    logger.info(f"LOG_DIR setting: {settings.LOG_DIR}")

    # Import needed modules at the top of the function
    from datetime import datetime, timedelta
    import traceback

    # If no job ID is specified, return an error
    if not job_id:
        return jsonify({'error': 'Job ID is required'}), 400

    # Filter parameters
    min_score = request.args.get('min_score', type=int, default=0)
    status_filter = request.args.get('status', '').split(',') if request.args.get('status') else []
    logger.info(f"Filters - min_score: {min_score}, status_filter: {status_filter}")

    try:
        # Get job details
        import json
        import os

        jd_file = join(settings.JOB_DESCRIPTIONS_DIR, f'{job_id}.json')
        if not os.path.exists(jd_file):
            return jsonify({'error': 'Job not found'}), 404

        with open(jd_file, 'r') as f:
            jd_data = json.load(f)

        metadata = jd_data.get('job_data', {})
        required_skills = metadata.get('required_skills', [])

        # Data collections for results and stats
        candidates = []
        score_ranges = {'0-50': 0, '50-70': 0, '70-85': 0, '85-100': 0}
        qualified = 0
        unqualified = 0

        # Track processed candidates by email to avoid duplicates
        # We'll prefer data from snapshot files over application metadata
        processed_candidate_emails = {}
        application_candidates = {}

        # Define search paths
        current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        project_root = os.path.dirname(current_dir)

        # Look for snapshot files first (these contain the most complete data)
        snapshot_locations = [
            os.path.join(current_dir, 'logs', 'snapshots'),
            os.path.join(project_root, 'logs', 'snapshots'),
            os.path.join(settings.LOG_DIR, 'snapshots'),
            './logs/snapshots'
        ]

        # Process snapshot files first (from resume analyzer and question generator)
        snapshot_files = []
        for snapshots_dir in snapshot_locations:
            if os.path.exists(snapshots_dir):
                logger.info(f"Snapshots directory exists at {snapshots_dir}, checking for processed resume files")
                for filename in os.listdir(snapshots_dir):
                    if filename.endswith('.json') and ('after_question_generator' in filename):
                        if job_id in filename:
                            snapshot_path = os.path.join(snapshots_dir, filename)
                            logger.info(f"Adding snapshot file: {snapshot_path}")
                            snapshot_files.append(snapshot_path)

        # Process all snapshot files
        for snapshot_file in snapshot_files:
            try:
                with open(snapshot_file, 'r') as f:
                    data = json.load(f)

                if data.get('job_id') != job_id:
                    continue

                # Extract candidate data from snapshot
                candidate_name = data.get('candidate_name')
                candidate_email = data.get('candidate_email')
                resume_path = data.get('resume_path')
                score = int(float(data.get('relevance_score', 0)) * 100)

                # If we have resume_data, use that
                if 'resume_data' in data:
                    resume_data = data['resume_data']
                    if not candidate_name:
                        candidate_name = resume_data.get('name', '')
                    if not candidate_email:
                        candidate_email = resume_data.get('email', '')

                    skills = resume_data.get('skills', [])
                    experience = resume_data.get('experience', [])
                    education = resume_data.get('education', [])
                else:
                    skills = []
                    experience = []
                    education = []

                # Skip if no email found
                if not candidate_email:
                    continue

                # Create unique key for this candidate
                candidate_key = candidate_email.lower()

                # Calculate skills match
                skills_match = {"matched": 0, "total": len(required_skills)}
                matched_skills = []
                if required_skills and skills:
                    for req_skill in required_skills:
                        found = False
                        for skill in skills:
                            if req_skill.lower() in skill.lower() or skill.lower() in req_skill.lower():
                                found = True
                                break
                        matched_skills.append({"name": req_skill, "found": found})
                        if found:
                            skills_match["matched"] += 1

                # Add additional skills not in required
                additional_skills = []
                if skills and required_skills:
                    for skill in skills:
                        is_required = False
                        for req_skill in required_skills:
                            if req_skill.lower() in skill.lower() or skill.lower() in req_skill.lower():
                                is_required = True
                                break
                        if not is_required:
                            additional_skills.append(skill)

                # Create candidate data
                candidate_data = {
                    'id': os.path.basename(snapshot_file).split('.')[0],
                    'name': candidate_name,
                    'email': candidate_email,
                    'score': score,
                    'status': data.get('status', 'new'),
                    'resume_path': resume_path,
                    'skills': skills,
                    'experience': experience,
                    'education': education,
                    'skills_match': {
                        'matched': skills_match["matched"],
                        'total': skills_match["total"],
                        'required': matched_skills,
                        'additional': additional_skills,
                        'percentage': int(skills_match["matched"] / skills_match["total"] * 100) if skills_match["total"] > 0 else 0
                    },
                    'date_processed': data.get('timestamp', datetime.now().isoformat()),
                    'source': 'snapshot'
                }

                # Add or update the candidate
                processed_candidate_emails[candidate_key] = candidate_data

                # Update statistics
                if score < 50:
                    score_ranges['0-50'] += 1
                    unqualified += 1
                elif score < 70:
                    score_ranges['50-70'] += 1
                    unqualified += 1
                elif score < 85:
                    score_ranges['70-85'] += 1
                    qualified += 1
                else:
                    score_ranges['85-100'] += 1
                    qualified += 1

            except Exception as e:
                logger.error(f"Error processing snapshot file {snapshot_file}: {str(e)}")
                logger.error(traceback.format_exc())

        # Now process application files, but only if we don't have snapshot data for them
        resume_locations = [
            os.path.join(current_dir, 'incoming_resumes'),
            os.path.join(project_root, 'incoming_resumes'),
            settings.RESUME_WATCH_DIR,
            './incoming_resumes'
        ]

        for resume_dir in resume_locations:
            if os.path.exists(resume_dir) and os.path.isdir(resume_dir):
                for filename in os.listdir(resume_dir):
                    if filename.endswith('.json') and job_id in filename:
                        try:
                            resume_path = os.path.join(resume_dir, filename)
                            with open(resume_path, 'r') as f:
                                data = json.load(f)

                            if data.get('job_id') != job_id:
                                continue

                            candidate_name = data.get('name', '')
                            candidate_email = data.get('email', '')

                            if not candidate_email:
                                continue

                            candidate_key = candidate_email.lower()

                            # Only use application data if we don't have snapshot data
                            if candidate_key not in processed_candidate_emails:
                                application_candidates[candidate_key] = {
                                    'id': os.path.basename(resume_path).split('.')[0],
                                    'name': candidate_name,
                                    'email': candidate_email,
                                    'score': 0,  # No score for application only data
                                    'status': data.get('status', 'new'),
                                    'resume_path': data.get('resume_path', ''),
                                    'skills': [],
                                    'experience': [],
                                    'education': [],
                                    'skills_match': {
                                        'matched': 0,
                                        'total': len(required_skills),
                                        'required': [],
                                        'additional': [],
                                        'percentage': 0
                                    },
                                    'date_processed': data.get('application_date', datetime.now().isoformat()),
                                    'source': 'application'
                                }
                        except Exception as e:
                            logger.error(f"Error processing application file: {str(e)}")

        # First add all snapshot candidates to the final list
        candidates = list(processed_candidate_emails.values())

        # Then add application candidates only if they aren't already present
        # Commented out to exclude application data completely
        # candidates.extend(list(application_candidates.values()))

        # Sort candidates by score (highest first)
        candidates.sort(key=lambda x: x.get('score', 0), reverse=True)

        logger.info(f"Returning {len(candidates)} candidates")

        return jsonify({
            'candidates': candidates,
            'statistics': {
                'total': len(candidates),
                'qualified': qualified,
                'unqualified': unqualified,
                'score_ranges': score_ranges
            }
        })
    except Exception as e:
        logger.error(f"Error getting candidates: {str(e)}")
        logger.error(traceback.format_exc())
        return jsonify({'error': str(e)}), 500

@app.route('/api/candidates/<candidate_id>')
def get_candidate(candidate_id):
    """API endpoint to get candidate details"""
    try:
        # Try to locate this candidate in our resume files
        logger.info(f"Looking for candidate details with ID: {candidate_id}")

        # Define possible locations for the candidate file
        current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        project_root = os.path.dirname(current_dir)

        # Define locations to search for candidate data
        search_locations = [
            os.path.join(current_dir, 'logs', 'snapshots'),  # Look in snapshots first for most detailed data
            os.path.join(project_root, 'logs', 'snapshots'),
            os.path.join(settings.LOG_DIR, 'snapshots'),
            './logs/snapshots',
            os.path.join(current_dir, 'incoming_resumes'),
            os.path.join(project_root, 'incoming_resumes'),
            os.path.abspath(settings.RESUME_WATCH_DIR),
            './incoming_resumes'
        ]

        # Candidate data we'll populate
        candidate_data = None

        # Search all locations for candidate files
        for location in search_locations:
            if not os.path.exists(location) or not os.path.isdir(location):
                continue

            # First try exact match with candidate_id
            for filename in os.listdir(location):
                if filename.endswith('.json') and candidate_id in filename:
                    file_path = os.path.join(location, filename)
                    logger.info(f"Found candidate file: {file_path}")

                    try:
                        with open(file_path, 'r') as f:
                            candidate_data = json.load(f)
                        break
                    except Exception as e:
                        logger.error(f"Error reading candidate file: {str(e)}")

            if candidate_data:
                break

            # Try finding snapshot files (as backup)
            for filename in os.listdir(location):
                if filename.endswith('.json') and ('after_question_generator' in filename):
                    file_path = os.path.join(location, filename)

                    # Check if this snapshot matches our job ID
                    try:
                        with open(file_path, 'r') as f:
                            data = json.load(f)
                        job_id_match = False

                        # Extract job_id from candidate_id if possible
                        if '_' in candidate_id:
                            job_id_from_candidate = candidate_id.split('_')[0]
                            if data.get('job_id') == job_id_from_candidate:
                                job_id_match = True

                        # If we found a match or if this is the exact candidate
                        if job_id_match or candidate_id in filename:
                            candidate_data = data
                            logger.info(f"Found candidate data in snapshot: {file_path}")
                            break
                    except Exception as e:
                        logger.error(f"Error reading snapshot file: {str(e)}")

            if candidate_data:
                break

        if not candidate_data:
            return jsonify({'error': 'Candidate not found'}), 404

        # Extract fields from candidate data
        name = candidate_data.get('candidate_name', candidate_data.get('name', ''))
        email = candidate_data.get('candidate_email', candidate_data.get('email', ''))
        phone = ''
        resume_path = candidate_data.get('resume_path', '')

        # Get data from resume_data if available
        if 'resume_data' in candidate_data:
            resume_data = candidate_data['resume_data']
            if not name:
                name = resume_data.get('name', '')
            if not email:
                email = resume_data.get('email', '')
            phone = resume_data.get('phone', '')

            # Get skills, experience, and education
            skills = resume_data.get('skills', [])
            experience = resume_data.get('experience', [])
            education = resume_data.get('education', [])
        else:
            # Default empty values if resume_data isn't available
            skills = []
            experience = []
            education = []

        # Get relevance score
        score = int(float(candidate_data.get('relevance_score', 0)) * 100)

        # Get job ID from candidate data or parse from candidate_id
        job_id = candidate_data.get('job_id', '')
        if not job_id and '_' in candidate_id:
            job_id = candidate_id.split('_')[0]

        # Get job details to extract required skills
        required_skills = []
        job_file = os.path.join(settings.JOB_DESCRIPTIONS_DIR, f'{job_id}.json')
        if os.path.exists(job_file):
            try:
                with open(job_file, 'r') as f:
                    job_data = json.load(f)
                    if 'job_data' in job_data:
                        required_skills = job_data['job_data'].get('required_skills', [])
            except Exception as e:
                logger.error(f"Error reading job file: {str(e)}")

        # Calculate skills analysis
        skills_analysis = {
            'required': [],
            'additional': []
        }

        # Make sure skills is a list
        if not isinstance(skills, list):
            skills = []

        # Make sure required_skills is a list
        if not isinstance(required_skills, list):
            required_skills = []

        # Analyze required skills
        for req_skill in required_skills:
            found = False
            for skill in skills:
                if isinstance(skill, str) and isinstance(req_skill, str):
                    if req_skill.lower() in skill.lower() or skill.lower() in req_skill.lower():
                        found = True
                        break
            skills_analysis['required'].append({
                'name': req_skill,
                'found': found
            })

        # Find additional skills
        for skill in skills:
            if not isinstance(skill, str):
                continue

            is_required = False
            for req_skill in required_skills:
                if isinstance(req_skill, str):
                    if req_skill.lower() in skill.lower() or skill.lower() in req_skill.lower():
                        is_required = True
                        break
            if not is_required:
                skills_analysis['additional'].append(skill)

        # Get sentiment analysis if available
        ai_analysis = {
            'strengths': [],
            'weaknesses': [],
            'overall': "No detailed analysis available."
        }

        # Try to extract sentiment data, which might be in different formats
        if 'sentiment_score' in candidate_data:
            sentiment_data = candidate_data['sentiment_score']

            # Ensure sentiment_data is properly formatted to avoid errors
            if isinstance(sentiment_data, dict) and sentiment_data.get('sentiment'):
                # Generate a basic analysis based on sentiment score
                sentiment = sentiment_data.get('sentiment')
                positive_score = sentiment_data.get('positive_score', 0)
                negative_score = sentiment_data.get('negative_score', 0)

                if sentiment == 'positive':
                    ai_analysis['overall'] = f"The candidate's resume shows a positive outlook with a score of {int(positive_score*100)}%. This indicates good potential for the role."
                    ai_analysis['strengths'] = ["Communication skills reflected in resume",
                                                "Relevant experience for the position"]
                elif sentiment == 'neutral':
                    ai_analysis['overall'] = f"The candidate's resume shows a neutral outlook with a balanced sentiment profile. Additional screening recommended."
                elif sentiment == 'negative':
                    ai_analysis['overall'] = f"The candidate's resume shows some concerns with a negative score of {int(negative_score*100)}%. Further evaluation recommended."
                    ai_analysis['weaknesses'] = ["Resume may lack enthusiasm",
                                                 "May need additional screening"]

        # Check if there are resume strengths and weaknesses in resume_data
        if 'resume_data' in candidate_data:
            resume_data = candidate_data['resume_data']

            # Get skills from resume as strengths if no specific strengths exist
            if not ai_analysis['strengths'] and 'skills' in resume_data and resume_data['skills']:
                relevant_skills = [skill for skill in resume_data['skills']
                                   if any(req.lower() in skill.lower() for req in required_skills)] if required_skills else []

                if relevant_skills:
                    ai_analysis['strengths'] = [f"Demonstrated expertise in {skill}" for skill in relevant_skills[:3]]
                    ai_analysis['overall'] = "The candidate shows relevant skills and experience for the position."

            # Look for areas that could be improved based on missing required skills
            if not ai_analysis['weaknesses'] and required_skills:
                missing_skills = []
                for req_skill in required_skills:
                    if not any(req_skill.lower() in skill.lower() for skill in resume_data.get('skills', [])):
                        missing_skills.append(req_skill)

                if missing_skills:
                    ai_analysis['weaknesses'] = [f"No demonstrated experience with {skill}" for skill in missing_skills[:3]]

                    if ai_analysis['overall'] == "No detailed analysis available.":
                        ai_analysis['overall'] = "The candidate is missing some key skills but might be trainable."

        # If we have relevant experience, mention it as a strength
        if not ai_analysis['strengths'] and 'experience' in candidate_data.get('resume_data', {}):
            experiences = candidate_data['resume_data']['experience']
            if experiences and len(experiences) > 0:
                ai_analysis['strengths'] = ["Has relevant work experience",
                                            f"Experience at {experiences[0].get('company', 'previous company')}"]

                if ai_analysis['overall'] == "No detailed analysis available.":
                    ai_analysis['overall'] = "The candidate has valuable work experience that may be relevant to the role."

        # Add more detailed debug logging
        logger.info(f"Returning candidate data for {candidate_id}, name={name}, has_experience={len(experience)}, has_education={len(education)}")

        # Return data with sensible defaults for all fields to prevent frontend errors
        response_data = {
            'id': candidate_id,
            'name': name or 'Unknown Candidate',
            'email': email or 'No email provided',
            'phone': phone or 'No phone provided',
            'score': score or 0,
            'skills_analysis': skills_analysis or {
                'required': [],
                'additional': []
            },
            'experience': experience or [],
            'education': education or [],
            'ai_analysis': ai_analysis or {
                'strengths': [],
                'weaknesses': [],
                'overall': 'No analysis available'
            },
            'resume_path': resume_path or '',
            'status': candidate_data.get('status', 'new') or 'new'
        }

        # Log the actual data being sent back
        logger.info(f"Response data structure: {list(response_data.keys())}")

        return jsonify(response_data)

    except Exception as e:
        logger.error(f"Error getting candidate details: {str(e)}")
        logger.error(traceback.format_exc())
        return jsonify({'error': str(e)}), 500

@app.route('/api/generate-questions/<candidate_id>', methods=['GET'])
def generate_questions(candidate_id):
    """API endpoint to generate interview questions for a candidate"""
    try:
        logger.info(f"Looking for interview questions for candidate {candidate_id}")

        # Define search directories for snapshots
        current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        project_root = os.path.dirname(current_dir)

        log_locations = [
            os.path.join(current_dir, 'logs', 'snapshots'),
            os.path.join(project_root, 'logs', 'snapshots'),
            os.path.join(settings.LOG_DIR, 'snapshots'),
            './logs/snapshots'
        ]

        # Extract job ID from candidate ID if possible
        job_id = None
        if '_' in candidate_id:
            job_id = candidate_id.split('_')[0]

        # Look for snapshot files with interview questions
        question_data = None

        for location in log_locations:
            if not os.path.exists(location) or not os.path.isdir(location):
                continue

            # First, look for files specifically matching this candidate
            for filename in os.listdir(location):
                if filename.endswith('.json') and 'after_question_generator' in filename:
                    file_path = os.path.join(location, filename)

                    try:
                        with open(file_path, 'r') as f:
                            data = json.load(f)

                        # Check if this is for the right job or candidate
                        if (job_id and data.get('job_id') == job_id) or candidate_id in filename:
                            # If it has interview questions data
                            if 'interview_questions' in data:
                                question_data = data
                                logger.info(f"Found interview questions in snapshot: {file_path}")
                                break
                    except Exception as e:
                        logger.error(f"Error reading question file: {str(e)}")

            if question_data:
                break

        # If we found interview questions, use them
        if question_data and 'interview_questions' in question_data:
            logger.info("Using existing questions from snapshot")

            # Return the complete interview_questions structure as it is
            return jsonify({
                'success': True,
                'candidate_id': candidate_id,
                'questions': question_data['interview_questions']
            })
        else:
            # If no questions found
            logger.warning("No interview questions found for candidate %s", candidate_id)
            return jsonify({
                'success': False,
                'error': 'Interview questions not found for this candidate'
            }), 404

    except Exception as e:
        logger.error(f"Error generating interview questions: {str(e)}")
        logger.error(traceback.format_exc())
        return jsonify({'error': str(e)}), 500

@app.route('/api/candidates/<candidate_id>/status', methods=['PUT'])
def update_candidate_status(candidate_id):
    """API endpoint to update candidate status"""
    data = request.json

    try:
        # In a real implementation, this would update a database
        # For demo purposes, we'll just return success
        return jsonify({
            'success': True,
            'candidate_id': candidate_id,
            'new_status': data.get('status')
        })
    except Exception as e:
        logger.error(f"Error updating candidate status: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

@app.route('/api/export-candidates')
def export_candidates():
    """API endpoint to export candidates to CSV"""
    # In a real implementation, this would generate a CSV file
    # For demo purposes, we'll just return a success message
    return jsonify({
        'success': True,
        'message': 'Export functionality would generate a CSV file in a real implementation'
    })

@app.route('/api/apply', methods=['POST'])
def apply_for_job():
    """API endpoint to handle job applications from the career portal"""
    try:
        # Check if all required fields are present
        if 'resume_file' not in request.files:
            return jsonify({'success': False, 'error': 'No resume file provided'}), 400

        resume_file = request.files['resume_file']
        job_id = request.form.get('job_id')
        name = request.form.get('name')
        email = request.form.get('email')

        if not resume_file or not job_id or not name or not email:
            return jsonify({'success': False, 'error': 'Missing required fields'}), 400

        # Check if directory exists and create if not
        import os
        from datetime import datetime

        # Create directory structure if not exists
        incoming_dir = os.path.join(settings.RESUME_WATCH_DIR)
        os.makedirs(incoming_dir, exist_ok=True)

        # Generate unique filename with timestamp
        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filename = f"{job_id}_{request.form.get('name').replace(' ', '_')}.{resume_file.filename.split('.')[-1]}"
        filepath = os.path.join(incoming_dir, filename)

        # Save the file
        resume_file.save(filepath)
        logger.info(f"Saved resume file to {filepath}")

        # Create metadata file with applicant details
        metadata = {
            'job_id': job_id,
            'name': name,
            'email': email,
            'phone': request.form.get('phone', ''),
            'cover_letter': request.form.get('cover_letter', ''),
            'application_date': datetime.now().isoformat(),
            'status': 'new',
            'resume_path': filepath
        }

        metadata_path = f"{filepath}.json"
        with open(metadata_path, 'w') as f:
            json.dump(metadata, f, indent=2)

        # In a real implementation, trigger the workflow to process the resume
        from langgraph_workflow.graph_builder import process_new_resume

        # Get job description
        job_file = os.path.join(settings.JOB_DESCRIPTIONS_DIR, f'{job_id}.json')
        job_description = None

        if os.path.exists(job_file):
            with open(job_file, 'r') as f:
                job_data = json.load(f)

            # Extract title from job description using regex
            import re
            title_match = re.search(r'\*\*Job Title:\s*(.*?)\*\*', job_data.get('job_description', ''))
            title = title_match.group(1) if title_match else "Untitled Job"

            job_description = {
                'id': job_id,
                'title': title,
                'content': job_data.get('job_description', ''),
                'metadata': job_data.get('job_data', {})
            }

        # We only need to save the file and metadata - the file system watcher will handle the processing
        # This prevents duplicate workflow execution
        logger.info(f"Resume uploaded through career portal - file system watcher will process it automatically")

        return jsonify({
            'success': True,
            'message': 'Application submitted successfully',
            'job_id': job_id,
            'resume_path': filepath
        })

    except Exception as e:
        error_msg = f"Error processing application: {str(e)}"
        logger.error(error_msg)
        logger.error(f"Traceback: {traceback.format_exc()}")
        return jsonify({'success': False, 'error': error_msg}), 500

def process_candidate_data(data, job_id=None, min_score=None, status_filter=None):
    """Process candidate data from various sources"""
    # If relevance_score is not in the data, add a default value
    if 'relevance_score' not in data:
        data['relevance_score'] = 0.0  # Default score

    # Rest of the function remains the same...

def extract_candidate_info(data, job_id=None):
    """Extract candidate information from file data"""
    candidate = {}

    # Try to get candidate name and email
    if 'name' in data:
        logger.info("Found name directly in JSON data, assuming it's applicant metadata")
        candidate['name'] = data.get('name', 'Unknown')
        candidate['email'] = data.get('email', '')
        candidate['status'] = data.get('status', 'new')
        candidate['id'] = f"{job_id}_{data['name'].replace(' ', '_')}"
        candidate['resume_path'] = data.get('resume_path', '')

        # Set default relevance score if not available
        candidate['relevance_score'] = 0.0

        # Add application date if available
        if 'application_date' in data:
            candidate['application_date'] = data.get('application_date', '')

    elif 'resume_data' in data:
        logger.info("Found resume_data key, using that for candidate data")
        candidate['name'] = data.get('candidate_name', 'Unknown')
        candidate['email'] = data.get('candidate_email', '')
        candidate['status'] = data.get('status', 'new')
        candidate['id'] = data.get('timestamp', '').split('_')[0]  # Use timestamp as ID
        candidate['resume_path'] = data.get('resume_path', '')

        # Add relevance score if available, otherwise default to 0
        candidate['relevance_score'] = 0.0
        if 'relevance_score' in data:
            candidate['relevance_score'] = data['relevance_score']

    elif 'candidate_name' in data:
        logger.info("Found candidate_name key directly in JSON data")
        candidate['name'] = data.get('candidate_name', 'Unknown')
        candidate['email'] = data.get('candidate_email', '')
        candidate['status'] = data.get('status', 'new')
        candidate['id'] = f"{job_id}_{data['candidate_name'].replace(' ', '_')}"
        candidate['resume_path'] = data.get('resume_path', '')

        # Add relevance score if available
        candidate['relevance_score'] = 0.0
        if 'relevance_score' in data:
            candidate['relevance_score'] = data['relevance_score']

    return candidate

@app.route('/resumes/<candidate_id>')
def serve_resume(candidate_id):
    """Serve the resume PDF file for a candidate"""
    try:
        # Import necessary modules
        import os
        from flask import send_file, abort
        import glob

        logger.info(f"Looking for resume file for candidate: {candidate_id}")

        # Extract job ID from candidate ID
        job_id = None
        if '_' in candidate_id:
            job_id = candidate_id.split('_')[0]
            logger.info(f"Extracted job ID: {job_id}")

        # Define locations to search for resume files
        current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        project_root = os.path.dirname(current_dir)

        search_locations = [
            os.path.join(current_dir, 'incoming_resumes'),
            os.path.join(project_root, 'incoming_resumes'),
            os.path.abspath(settings.RESUME_WATCH_DIR),
            os.path.join(current_dir, 'processed_resumes'),
            os.path.join(project_root, 'processed_resumes'),
            './incoming_resumes',
            './processed_resumes'
        ]

        # Search for resume path in snapshot files first
        snapshot_locations = [
            os.path.join(current_dir, 'logs', 'snapshots'),
            os.path.join(project_root, 'logs', 'snapshots'),
            os.path.join(settings.LOG_DIR, 'snapshots'),
            './logs/snapshots'
        ]

        # First check for resume path in snapshot files
        resume_path = None
        for snapshot_dir in snapshot_locations:
            if not os.path.exists(snapshot_dir) or not os.path.isdir(snapshot_dir):
                continue

            # Look for snapshot files for this candidate/job
            for filename in os.listdir(snapshot_dir):
                if filename.endswith('.json') and ('after_question_generator' in filename):
                    # Check if this snapshot file is for our candidate
                    if job_id and job_id in filename:
                        snapshot_path = os.path.join(snapshot_dir, filename)
                        logger.info(f"Checking snapshot file: {snapshot_path}")

                        try:
                            with open(snapshot_path, 'r') as f:
                                data = json.load(f)
                                if 'resume_path' in data:
                                    resume_path = data['resume_path']
                                    logger.info(f"Found resume path in snapshot: {resume_path}")
                                    break
                        except Exception as e:
                            logger.error(f"Error reading snapshot file: {str(e)}")

            if resume_path:
                break

        # If we found a resume path in the snapshot, check if it exists
        if resume_path:
            # Handle relative paths
            if resume_path.startswith('./'):
                absolute_paths_to_try = [
                    os.path.join(current_dir, resume_path[2:]),
                    os.path.join(project_root, resume_path[2:]),
                    os.path.abspath(resume_path),
                    resume_path
                ]

                for path in absolute_paths_to_try:
                    if os.path.exists(path):
                        logger.info(f"Found resume at absolute path: {path}")
                        return send_file(path, mimetype='application/pdf')

        # If no resume found from snapshot path, search directly for PDF files
        for location in search_locations:
            if not os.path.exists(location) or not os.path.isdir(location):
                continue

            logger.info(f"Searching directory: {location}")
            # First try exact match
            for filename in os.listdir(location):
                logger.info(f"Found file: {filename}")
                if filename.endswith('.pdf') and candidate_id in filename:
                    pdf_path = os.path.join(location, filename)
                    logger.info(f"Found exact match resume: {pdf_path}")
                    return send_file(pdf_path, mimetype='application/pdf')

            # If no exact match, try job ID match
            if job_id:
                for filename in os.listdir(location):
                    if filename.endswith('.pdf') and job_id in filename:
                        pdf_path = os.path.join(location, filename)
                        logger.info(f"Found resume by job ID: {pdf_path}")
                        return send_file(pdf_path, mimetype='application/pdf')

        # As a last resort, try using glob to find files matching the pattern
        if job_id:
            for location in search_locations:
                if not os.path.exists(location):
                    continue

                # Look for PDFs containing the job ID
                pattern = os.path.join(location, f"*{job_id}*.pdf")
                logger.info(f"Trying glob pattern: {pattern}")
                matching_files = glob.glob(pattern)

                if matching_files:
                    logger.info(f"Found resume via glob: {matching_files[0]}")
                    return send_file(matching_files[0], mimetype='application/pdf')

        # If nothing found, log error and return 404
        logger.error(f"Resume not found for candidate {candidate_id}")
        return jsonify({'error': 'Resume file not found'}), 404

    except Exception as e:
        logger.error(f"Error serving resume: {str(e)}")
        logger.error(traceback.format_exc())
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    # Only run directly if this file is executed directly,
    # not when imported as a module
    try:
        import eventlet
        eventlet.monkey_patch()
        logger.info("Eventlet monkey patching applied")
    except ImportError:
        logger.warning("Eventlet not available - WebSocket functionality may be limited")

    socketio.run(app, host='0.0.0.0', port=5000, debug=True, allow_unsafe_werkzeug=True)
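Reviewer's note: the new dashboard exposes an `/api/update` endpoint (see `update_workflow` above) that the LangGraph workflow nodes are expected to call to report progress. A minimal sketch of such a call is shown below; it is illustrative only and not part of the package — the use of `requests` and the localhost URL are assumptions.

import requests

# Hypothetical example: a workflow node reporting status to the dashboard.
payload = {
    "node_name": "resume_analyzer",   # must match a key in node_statuses
    "status": "running",
    "details": {"resume": "example.pdf"},
    "log_entry": {"job_id": "20250101120000", "status": "processing"},
}
requests.post("http://localhost:5000/api/update", json=payload, timeout=5)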