michael-agent 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
michael_agent/langgraph_workflow/nodes/resume_ingestor.py (new file)
@@ -0,0 +1,225 @@
+ """
+ Resume Ingestor Node
+ Watches for new resumes in a local directory
+ """
+
+ import os
+ import logging
+ import shutil
+ import atexit
+ from typing import Dict, Any, Optional
+ from watchdog.observers import Observer
+ from watchdog.events import FileSystemEventHandler
+
+ # Import config
+ from config import settings
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Global handle to the watchdog observer, used for cleanup at exit
+ _global_observer = None
+
+ class ResumeFileHandler(FileSystemEventHandler):
+     """File system event handler for monitoring new resume files"""
+
+     def __init__(self, callback_fn):
+         self.callback_fn = callback_fn
+         self.extensions = ['.pdf', '.docx', '.doc', '.txt']
+
+     def on_created(self, event):
+         if not event.is_directory:
+             file_path = event.src_path
+             if any(file_path.lower().endswith(ext) for ext in self.extensions):
+                 logger.info(f"New resume detected: {file_path}")
+                 self.callback_fn(file_path)
+
+ def setup_watcher(watch_dir: str, callback_fn):
+     """Set up a file system watcher for the specified directory"""
+     observer = Observer()
+     event_handler = ResumeFileHandler(callback_fn)
+     observer.schedule(event_handler, watch_dir, recursive=False)
+     observer.start()
+
+     # Store in the global variable for cleanup at exit
+     global _global_observer
+     _global_observer = observer
+
+     return observer
+
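setup_watcher returns a standard watchdog Observer, so it follows the usual start/stop/join lifecycle. A minimal standalone sketch (the directory path and callback here are illustrative, not part of the package):

    obs = setup_watcher("/tmp/resumes", lambda path: print(f"New resume: {path}"))
    # ... later, on shutdown:
    obs.stop()
    obs.join()  # wait for the observer thread to exit
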
+ def copy_resume_to_output(resume_path: str) -> str:
+     """Copy the resume to the output directory"""
+     try:
+         # Ensure the output directory exists
+         os.makedirs(settings.OUTPUT_DIR, exist_ok=True)
+
+         # Copy the file to the output directory
+         filename = os.path.basename(resume_path)
+         output_path = os.path.join(settings.OUTPUT_DIR, filename)
+         shutil.copy2(resume_path, output_path)
+
+         # Return the path to the copied file
+         return output_path
+     except Exception as e:
+         logger.error(f"Error copying resume to output: {str(e)}")
+         raise
+
+ def ingest_resume(state: Dict[str, Any]) -> Dict[str, Any]:
+     """Ingest new resumes for processing from the local file system"""
+     try:
+         # Check if this is just a setup call without processing existing files
+         setup_only = state.get("setup_only", False)
+
+         # Check if we already have a resume path in the state
+         if state.get("resume_path") and not setup_only:
+             logger.info(f"Processing resume from state: {state.get('resume_path')}")
+             resume_path = state.get("resume_path")
+
+             # Copy to the output directory if the file exists and is not already there
+             if os.path.exists(resume_path) and not resume_path.startswith(settings.OUTPUT_DIR):
+                 output_path = copy_resume_to_output(resume_path)
+                 # Update the path to point to the copied file
+                 state["resume_path"] = output_path
+                 logger.info(f"Copied resume to output directory: {output_path}")
+
+             # Move on with the workflow
+             return state
+
+         # Ensure the output directory exists
+         os.makedirs(settings.OUTPUT_DIR, exist_ok=True)
+
+         # Ensure the log directory exists
+         os.makedirs(settings.LOG_DIR, exist_ok=True)
+
+         # Set up the local file system watcher if it is not already running
+         if 'resume_watcher' not in state or not state['resume_watcher']:
+             # Create the watch directory if it doesn't exist
+             os.makedirs(settings.RESUME_WATCH_DIR, exist_ok=True)
+
+             # Process any existing files in the directory (skip if setup_only)
+             if not setup_only:
+                 for filename in os.listdir(settings.RESUME_WATCH_DIR):
+                     file_path = os.path.join(settings.RESUME_WATCH_DIR, filename)
+                     if os.path.isfile(file_path) and any(file_path.lower().endswith(ext) for ext in ['.pdf', '.docx', '.doc', '.txt']):
+                         logger.info(f"Processing existing resume: {file_path}")
+                         output_path = copy_resume_to_output(file_path)
+                         if 'resumes' not in state:
+                             state['resumes'] = []
+                         state['resumes'].append({
+                             'filename': os.path.basename(file_path),
+                             'path': output_path,
+                             'status': 'received'
+                         })
+
+                         # Automatically trigger the workflow for this resume
+                         from langgraph_workflow.graph_builder import process_new_resume
+                         try:
+                             process_new_resume(file_path)
+                             logger.info(f"Triggered workflow for existing resume: {file_path}")
+                         except Exception as e:
+                             logger.error(f"Failed to trigger workflow for existing resume {file_path}: {str(e)}")
+             else:
+                 logger.info("Setup-only mode - skipping processing of existing files")
+
+             # Set up the watcher for future files
+             def process_new_file(file_path):
+                 output_path = copy_resume_to_output(file_path)
+                 # Update the state (in a real app, you'd need proper concurrency handling)
+                 if 'resumes' not in state:
+                     state['resumes'] = []
+                 state['resumes'].append({
+                     'filename': os.path.basename(file_path),
+                     'path': output_path,
+                     'status': 'received'
+                 })
+
+                 # Automatically trigger the workflow for this resume
+                 from langgraph_workflow.graph_builder import process_new_resume
+                 try:
+                     process_new_resume(file_path)
+                     logger.info(f"Triggered workflow for new resume: {file_path}")
+                 except Exception as e:
+                     logger.error(f"Failed to trigger workflow for new resume {file_path}: {str(e)}")
+
+             # Start the watcher
+             observer = setup_watcher(settings.RESUME_WATCH_DIR, process_new_file)
+             state['resume_watcher'] = observer
+
+         return state
+     except Exception as e:
+         logger.error(f"Error in resume ingestion: {str(e)}")
+         # Add the error to the state
+         if 'errors' not in state:
+             state['errors'] = []
+         state['errors'].append(f"Resume ingestion error: {str(e)}")
+         return state
+
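Since ingest_resume only registers the watcher, a host process has to stay alive for the callbacks to fire. A hedged sketch of a standalone driver (assumes the module is importable; the setup_only flag and the resume_watcher state key are taken from the code above, the loop is illustrative):

    import time

    state = ingest_resume({"setup_only": True})  # start watching without processing the backlog
    try:
        while True:
            time.sleep(1)  # watchdog delivers events on its own thread
    except KeyboardInterrupt:
        cleanup(state)  # stops the observer stored in state['resume_watcher']
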
+ def extract_job_id_from_filename(filename: str) -> str:
+     """Extract the job ID from the resume filename"""
+     try:
+         # Assume the job ID is the first part of the filename, before an underscore
+         job_id = os.path.basename(filename).split('_')[0]
+         logger.info(f"Extracted job ID {job_id} from filename {filename}")
+         return job_id
+     except Exception as e:
+         logger.error(f"Error extracting job ID from filename {filename}: {str(e)}")
+         return ""
+
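Given the timestamp-prefixed naming convention used in process_resume below (e.g. 20250626225207_Michael_Jone.pdf), the expected behavior is:

    extract_job_id_from_filename("20250626225207_Michael_Jone.pdf")  # -> "20250626225207"
    extract_job_id_from_filename("/tmp/no-underscore.pdf")           # -> "no-underscore.pdf" (split('_') never raises on a str)
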
+ def process_resume(file_path: str, state: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+     """Process a resume file"""
+     logger.info(f"Processing resume: {file_path}")
+
+     # Extract the job ID from the filename (e.g. 20250626225207_Michael_Jone.pdf)
+     job_id = ""  # default so the return below is safe even if extraction fails
+     try:
+         job_id = extract_job_id_from_filename(file_path)
+         logger.info(f"Extracted job ID from filename: {job_id}")
+
+         # Update the state with the extracted job ID
+         if state is None:
+             state = {}
+         state["job_id"] = job_id
+         logger.info(f"Updated state with job ID: {job_id}")
+     except Exception as e:
+         logger.error(f"Failed to extract job ID from filename: {e}")
+
+     # Copy the resume to the processed directory
+     output_dir = "./processed_resumes"
+     os.makedirs(output_dir, exist_ok=True)
+
+     # Get just the filename without the path
+     filename = os.path.basename(file_path)
+     output_path = os.path.join(output_dir, filename)
+
+     shutil.copy(file_path, output_path)
+     logger.info(f"Copied resume to output directory: {output_path}")
+
+     # Return the updated state with the resume path and job_id
+     return {
+         "status": "completed_ingest_resume",
+         "resume_path": output_path,
+         "job_id": job_id
+     }
+
+ def cleanup(state: Dict[str, Any]) -> None:
+     """Clean up resources when shutting down"""
+     try:
+         if 'resume_watcher' in state and state['resume_watcher']:
+             logger.info("Stopping resume watcher...")
+             state['resume_watcher'].stop()
+             state['resume_watcher'].join(timeout=5.0)  # wait up to 5 seconds for a clean shutdown
+             logger.info("Resume watcher stopped")
+     except Exception as e:
+         logger.error(f"Error stopping resume watcher: {str(e)}")
+
+ # Register the cleanup function to be called on program exit
+ atexit.register(lambda: cleanup({'resume_watcher': _global_observer}))
michael_agent/langgraph_workflow/nodes/sentiment_analysis.py (new file)
@@ -0,0 +1,309 @@
+ """
+ Sentiment Analysis Node
+ Analyzes cover letters for sentiment using Azure OpenAI
+ """
+
+ import logging
+ import json
+ from typing import Dict, Any, Optional
+
+ # Try to import Azure OpenAI (the embeddings-based path has been removed,
+ # so only the chat client is needed)
+ try:
+     from langchain_openai import AzureChatOpenAI
+     from langchain_core.messages import HumanMessage
+     OPENAI_AVAILABLE = True
+ except ImportError:
+     OPENAI_AVAILABLE = False
+
+ # Import config
+ from config import settings
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ def create_azure_openai_client():
+     """Create and configure the Azure OpenAI client"""
+     if not OPENAI_AVAILABLE:
+         logger.warning("Azure OpenAI not available. Install with 'pip install langchain-openai'")
+         return None
+
+     try:
+         # Use the correct parameter names for Azure OpenAI
+         api_key = settings.AZURE_OPENAI_API_KEY or settings.AZURE_OPENAI_KEY or settings.OPENAI_API_KEY
+         endpoint = settings.AZURE_OPENAI_ENDPOINT or settings.OPENAI_API_BASE
+         api_version = settings.AZURE_OPENAI_API_VERSION or settings.OPENAI_API_VERSION
+         deployment = settings.AZURE_OPENAI_DEPLOYMENT
+
+         return AzureChatOpenAI(
+             azure_deployment=deployment,  # use azure_deployment, not deployment_name
+             api_version=api_version,
+             azure_endpoint=endpoint,      # use azure_endpoint, not openai_api_base
+             api_key=api_key
+         )
+     except Exception as e:
+         logger.error(f"Error initializing Azure OpenAI client: {str(e)}")
+         return None
+
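When creation succeeds the return value is a regular LangChain chat model, so a quick smoke test might look like this (a sketch, assuming valid Azure OpenAI credentials in config.settings):

    client = create_azure_openai_client()
    if client:
        reply = client.invoke([HumanMessage(content="Say hello")])
        print(reply.content)
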
+ def create_embeddings_client():
+     """
+     This function is no longer used as we've removed the embedding-based approach.
+     It's kept for backward compatibility but now returns None.
+     """
+     logger.info("Embeddings client creation skipped - using direct LLM approach instead")
+     return None
+
+ def extract_cover_letter(resume_text: str) -> Optional[str]:
+     """Extract cover letter text from resume text"""
+     # This is a very basic implementation - in practice, you'd need more sophisticated logic
+
+     # Look for "cover letter" or "letter of interest" in the text
+     lines = resume_text.lower().split('\n')
+     cover_letter_text = None
+
+     for i, line in enumerate(lines):
+         if "cover letter" in line or "letter of interest" in line or "dear hiring" in line:
+             # Take the next 20 lines as the potential cover letter
+             potential_cover_letter = "\n".join(lines[i:i+20])
+             if len(potential_cover_letter) > 100:  # only use it if it's substantial
+                 cover_letter_text = potential_cover_letter
+                 break
+
+     return cover_letter_text
+
+ def analyze_text_sentiment_openai_chat(client, text: str) -> Dict[str, Any]:
+     """
+     Analyze text sentiment using Azure OpenAI Chat
+
+     Args:
+         client: Azure OpenAI client
+         text: Text to analyze
+
+     Returns:
+         Sentiment analysis result
+     """
+     try:
+         # Create the prompt
+         prompt = f"""
+         Analyze the sentiment of the following text. Respond with a JSON object containing:
+         - sentiment: either "positive", "neutral", or "negative"
+         - positive_score: a float between 0 and 1
+         - neutral_score: a float between 0 and 1
+         - negative_score: a float between 0 and 1
+         - sentences: the number of sentences in the text
+
+         Here's the text to analyze:
+
+         {text}
+         """
+
+         # Get the response from Azure OpenAI
+         response = client.invoke([HumanMessage(content=prompt)])
+
+         # Parse the response as JSON
+         try:
+             content = response.content
+             # Extract the JSON if it's wrapped in a markdown code block
+             if "```json" in content:
+                 json_part = content.split("```json")[1].split("```")[0].strip()
+                 sentiment_data = json.loads(json_part)
+             else:
+                 sentiment_data = json.loads(content)
+         except json.JSONDecodeError:
+             # If the response is not valid JSON, fall back to simple keyword matching
+             content = response.content.lower()
+             sentiment = "positive" if "positive" in content else "negative" if "negative" in content else "neutral"
+             return {
+                 "sentiment": sentiment,
+                 "positive_score": 0.33,
+                 "neutral_score": 0.34,
+                 "negative_score": 0.33,
+                 "sentences": len(text.split('.')),
+                 "parsing_method": "fallback"
+             }
+
+         return {
+             "sentiment": sentiment_data.get("sentiment", "neutral"),
+             "positive_score": sentiment_data.get("positive_score", 0.33),
+             "neutral_score": sentiment_data.get("neutral_score", 0.34),
+             "negative_score": sentiment_data.get("negative_score", 0.33),
+             "sentences": sentiment_data.get("sentences", len(text.split('.'))),
+             "parsing_method": "json"
+         }
+     except Exception as e:
+         logger.error(f"Error analyzing sentiment with Azure OpenAI Chat: {str(e)}")
+         return {
+             "sentiment": "neutral",
+             "positive_score": 0.33,
+             "neutral_score": 0.34,
+             "negative_score": 0.33,
+             "sentences": len(text.split('.')),
+             "error": str(e),
+         }
+
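The prompt pins down the response shape, so a well-behaved reply should parse into JSON like the following (values illustrative):

    {
        "sentiment": "positive",
        "positive_score": 0.72,
        "neutral_score": 0.21,
        "negative_score": 0.07,
        "sentences": 6
    }
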
+ def analyze_text_sentiment_embeddings(client, text: str) -> Dict[str, Any]:
+     """
+     Kept for backward compatibility; the embedding approach has been removed,
+     so this now redirects to the chat-based sentiment analysis.
+     """
+     logger.info("Embeddings-based sentiment analysis has been removed, using chat-based analysis instead")
+
+     # Try to get a chat client and use that instead
+     chat_client = create_azure_openai_client()
+     if chat_client:
+         return analyze_text_sentiment_openai_chat(chat_client, text)
+     else:
+         # Fall back to basic analysis if chat client creation fails
+         return analyze_text_sentiment_basic(text)
+
+ def analyze_text_sentiment_basic(text: str) -> Dict[str, Any]:
+     """Basic sentiment analysis without Azure services"""
+     # This is a very simplified sentiment analysis - in practice, use a proper NLP library
+     positive_words = ['excellent', 'good', 'great', 'best', 'outstanding', 'impressive',
+                       'excited', 'passion', 'enthusiastic', 'enjoy', 'love', 'interested',
+                       'committed', 'dedicated', 'eager']
+
+     negative_words = ['unfortunately', 'bad', 'worst', 'poor', 'difficult', 'challenge',
+                       'issue', 'problem', 'concern', 'disappointed', 'regret', 'sorry']
+
+     text_lower = text.lower()
+
+     # Count occurrences of positive and negative words
+     positive_count = sum(text_lower.count(word) for word in positive_words)
+     negative_count = sum(text_lower.count(word) for word in negative_words)
+     total = positive_count + negative_count
+
+     # With no sentiment words at all, report a neutral result
+     if total == 0:
+         return {
+             "sentiment": "neutral",
+             "positive_score": 0.33,
+             "neutral_score": 0.34,
+             "negative_score": 0.33,
+             "sentences": len(text.split('.')),
+             "method": "basic"
+         }
+
+     # Calculate sentiment scores (total > 0 is guaranteed here)
+     positive_score = positive_count / total
+     negative_score = negative_count / total
+     neutral_score = 1 - (positive_score + negative_score)
+
+     # Determine sentiment
+     if positive_score > 0.6:
+         sentiment = "positive"
+     elif negative_score > 0.6:
+         sentiment = "negative"
+     else:
+         sentiment = "neutral"
+
+     return {
+         "sentiment": sentiment,
+         "positive_score": positive_score,
+         "neutral_score": neutral_score,
+         "negative_score": negative_score,
+         "sentences": len(text.split('.')),
+         "method": "basic"
+     }
+
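A worked example of the scoring, using the word lists above: a text containing "excited", "passion", and "enjoy" once each plus one "concern" gives positive_count = 3 and negative_count = 1, so positive_score = 3/4 = 0.75 > 0.6 and the sentiment is "positive". Note that whenever any sentiment word is found, positive_score + negative_score = 1, so neutral_score is 0 on this path.

    result = analyze_text_sentiment_basic(
        "Excited about the role, a real passion for data; I enjoy teamwork, despite one concern."
    )
    # result["sentiment"] == "positive", result["positive_score"] == 0.75, result["neutral_score"] == 0.0
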
+ def analyze_sentiment(state: Dict[str, Any]) -> Dict[str, Any]:
+     """
+     LangGraph node to analyze the sentiment of cover letters using Azure OpenAI
+
+     Args:
+         state: The current workflow state
+
+     Returns:
+         Updated workflow state with sentiment analysis
+     """
+     logger.info("Starting sentiment analysis with Azure OpenAI")
+
+     # Initialize the errors list if not present
+     if "errors" not in state:
+         state["errors"] = []
+
+     # Skip the analysis if a sentiment score already exists in the state
+     if state.get("sentiment_score") is not None:
+         logger.info("Sentiment already analyzed, skipping analysis")
+         return state
+
+     # Check that resume text exists
+     resume_text = state.get("resume_text")
+     if not resume_text:
+         error_message = "Missing resume text for sentiment analysis"
+         logger.error(error_message)
+
+         # Add the error to the state
+         state["errors"].append({
+             "step": "sentiment_analysis",
+             "error": error_message
+         })
+
+         # Set a default sentiment
+         state["sentiment_score"] = {
+             "sentiment": "neutral",
+             "positive_score": 0.33,
+             "neutral_score": 0.34,
+             "negative_score": 0.33,
+             "available": False
+         }
+         return state
+
+     try:
+         # Extract the cover letter
+         cover_letter = extract_cover_letter(resume_text)
+
+         # If no cover letter is found, use the first 1000 characters of the resume as a sample
+         sample_text = cover_letter if cover_letter else resume_text[:1000]
+
+         # Initialize the client and result
+         sentiment_result = None
+         chat_client = None
+
+         # Try to create an Azure OpenAI chat client if available
+         # (embeddings are no longer used, so no embeddings client is initialized)
+         if OPENAI_AVAILABLE and (settings.AZURE_OPENAI_API_KEY or settings.AZURE_OPENAI_KEY):
+             chat_client = create_azure_openai_client()
+
+         # Use chat-based sentiment analysis when possible, otherwise the basic fallback
+         if chat_client:
+             sentiment_result = analyze_text_sentiment_openai_chat(chat_client, sample_text)
+             logger.info("Used Azure OpenAI Chat for sentiment analysis")
+         else:
+             sentiment_result = analyze_text_sentiment_basic(sample_text)
+             logger.info("Used basic sentiment analysis (Azure services unavailable)")
+
+         # Add additional context
+         sentiment_result["cover_letter_available"] = bool(cover_letter)
+         sentiment_result["text_used"] = "cover_letter" if cover_letter else "resume_sample"
+         sentiment_result["available"] = True
+
+         # Update the state
+         state["sentiment_score"] = sentiment_result
+         state["status"] = "sentiment_analyzed"
+
+         logger.info(f"Sentiment analyzed successfully: {sentiment_result['sentiment']}")
+
+     except Exception as e:
+         error_message = f"Error analyzing sentiment: {str(e)}"
+         logger.error(error_message)
+
+         # Add the error to the state
+         state["errors"].append({
+             "step": "sentiment_analysis",
+             "error": error_message
+         })
+
+         # Set a default sentiment
+         state["sentiment_score"] = {
+             "sentiment": "neutral",
+             "positive_score": 0.33,
+             "neutral_score": 0.34,
+             "negative_score": 0.33,
+             "available": False,
+             "error": str(e)
+         }
+
+     return state
michael_agent-1.0.2.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: michael_agent
- Version: 1.0.1
+ Version: 1.0.2
  Summary: SmartRecruitAgent - A recruitment automation library
  Home-page: https://github.com/yourusername/agent
  Author: Michael Jone
michael_agent-1.0.2.dist-info/RECORD
@@ -5,8 +5,19 @@ michael_agent/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  michael_agent/config/settings.py,sha256=_4uvWQnMscK01Sd0zT5wesVW5uN0njtKYRMsjMQXEOY,3180
  michael_agent/dashboard/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  michael_agent/dashboard/app.py,sha256=UtMswD7TGGJBY9cMeuFQPJAtgaRiXFso6PsTHtvPGN8,61963
+ michael_agent/dashboard/static/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ michael_agent/dashboard/templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  michael_agent/langgraph_workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  michael_agent/langgraph_workflow/graph_builder.py,sha256=xdsZ_lVWFn5B8xNXg_L49H-Jwfj-p7nxPVOwtc9Rf2U,14487
+ michael_agent/langgraph_workflow/nodes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ michael_agent/langgraph_workflow/nodes/assessment_handler.py,sha256=qgMB8fJdyA2CP8VWY2m-7-418LcY302S-SR_JHamSTE,6401
+ michael_agent/langgraph_workflow/nodes/jd_generator.py,sha256=G8cM3NkGqd44iUAbJJDed9kFjJ-F02_FXB6I_7AE_kA,5105
+ michael_agent/langgraph_workflow/nodes/jd_poster.py,sha256=6F1jQRG_IoiopIOpIDjSpuCE3I6_A7-ZEMkV8FtKXQs,4550
+ michael_agent/langgraph_workflow/nodes/question_generator.py,sha256=XgDc5f7-ifsJ3UdzB22NjKMqjUcG2_elTZ5LOPGVkt8,11670
+ michael_agent/langgraph_workflow/nodes/recruiter_notifier.py,sha256=xLVhRP1I-QIcO_b0lYLuMnMTpGHAFakG-luPJrhkN6Y,8522
+ michael_agent/langgraph_workflow/nodes/resume_analyzer.py,sha256=XG4MksqSqhhNwGSfDauIbEpPmxogDJ6skJgR-xpeY0g,24027
+ michael_agent/langgraph_workflow/nodes/resume_ingestor.py,sha256=h14J4AcFk22BWoFHCPRkK3HpzY8RvwGW6_jjqBxLXNU,9279
+ michael_agent/langgraph_workflow/nodes/sentiment_analysis.py,sha256=H-geV4AbFbt1EpiLKnpaXdvrrjjXMN-Dzzg4sZOjhdM,11657
  michael_agent/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  michael_agent/utils/email_utils.py,sha256=PsL3QTQuV_iVou_2Y3o_Dohz7tN9YNp9FPxsTKkDRv0,4989
  michael_agent/utils/id_mapper.py,sha256=GzYRuAhGWf2BUAb9hVMS3KR8bmYExnmXRWkQ_j-kWaw,397
@@ -15,7 +26,7 @@ michael_agent/utils/lms_api.py,sha256=tmntU6tjyAdMLak_vfoxBkWNIPUKvejeEwb2t6yQBU
  michael_agent/utils/logging_utils.py,sha256=Ld7fs2uuCOM0bx-totxHzKzKHl5lfAe3TXeH1QYJBjw,7179
  michael_agent/utils/monitor_utils.py,sha256=1Ig6C79bQ_OOLKhgFNmm0ybntQavqzyJ3zsxD0iZxxw,11069
  michael_agent/utils/node_tracer.py,sha256=N1MWly4qfzh87Fo1xRS5hpefoAvfSyZIPvMOegPrtBY,3411
- michael_agent-1.0.1.dist-info/METADATA,sha256=i5HPTugPpkcmGxMHOfAnUHCjrOTrH-QzdKJjJFTze1M,1340
- michael_agent-1.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- michael_agent-1.0.1.dist-info/top_level.txt,sha256=-r35JOIHnK3RsMhJ77tDKfWtmfGDr_iT2642k-suUDo,14
- michael_agent-1.0.1.dist-info/RECORD,,
+ michael_agent-1.0.2.dist-info/METADATA,sha256=4mAZuyRUC_RoCuQdFpaJnXK1kVSsTnqN36dFwRyPYE8,1340
+ michael_agent-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ michael_agent-1.0.2.dist-info/top_level.txt,sha256=-r35JOIHnK3RsMhJ77tDKfWtmfGDr_iT2642k-suUDo,14
+ michael_agent-1.0.2.dist-info/RECORD,,