michael-agent 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. michael_agent/config/__init__.py +0 -0
  2. michael_agent/config/settings.py +66 -0
  3. michael_agent/dashboard/__init__.py +0 -0
  4. michael_agent/dashboard/app.py +1450 -0
  5. michael_agent/dashboard/static/__init__.py +0 -0
  6. michael_agent/dashboard/templates/__init__.py +0 -0
  7. michael_agent/langgraph_workflow/__init__.py +0 -0
  8. michael_agent/langgraph_workflow/graph_builder.py +358 -0
  9. michael_agent/langgraph_workflow/nodes/__init__.py +0 -0
  10. michael_agent/langgraph_workflow/nodes/assessment_handler.py +177 -0
  11. michael_agent/langgraph_workflow/nodes/jd_generator.py +139 -0
  12. michael_agent/langgraph_workflow/nodes/jd_poster.py +156 -0
  13. michael_agent/langgraph_workflow/nodes/question_generator.py +295 -0
  14. michael_agent/langgraph_workflow/nodes/recruiter_notifier.py +224 -0
  15. michael_agent/langgraph_workflow/nodes/resume_analyzer.py +631 -0
  16. michael_agent/langgraph_workflow/nodes/resume_ingestor.py +225 -0
  17. michael_agent/langgraph_workflow/nodes/sentiment_analysis.py +309 -0
  18. michael_agent/utils/__init__.py +0 -0
  19. michael_agent/utils/email_utils.py +140 -0
  20. michael_agent/utils/id_mapper.py +14 -0
  21. michael_agent/utils/jd_utils.py +34 -0
  22. michael_agent/utils/lms_api.py +226 -0
  23. michael_agent/utils/logging_utils.py +192 -0
  24. michael_agent/utils/monitor_utils.py +289 -0
  25. michael_agent/utils/node_tracer.py +88 -0
  26. {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/METADATA +2 -2
  27. michael_agent-1.0.2.dist-info/RECORD +32 -0
  28. michael_agent-1.0.0.dist-info/RECORD +0 -7
  29. {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/WHEEL +0 -0
  30. {michael_agent-1.0.0.dist-info → michael_agent-1.0.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,289 @@
1
+ """
2
+ Workflow monitoring utility for SmartRecruitAgent
3
+ Provides tools to monitor and track workflow progress
4
+ """
5
+
6
+ import os
7
+ import json
8
+ import time
9
+ import logging
10
+ import pandas as pd
11
+ from typing import Dict, List, Optional
12
+ from datetime import datetime, timedelta
13
+ import matplotlib.pyplot as plt
14
+ from tabulate import tabulate
15
+
16
+ # Import config
17
+ from config import settings
18
+
19
+ # Configure logging
20
+ logging.basicConfig(level=logging.INFO)
21
+ logger = logging.getLogger(__name__)
22
+
23
def load_log_entries(log_file_path: str, hours: int = 24) -> List[Dict]:
    """Read a log file and return entries newer than the given time window.

    Each line is expected in the pipe-delimited form
    ``timestamp | level | module | message``. For files whose path contains
    ``state_transitions`` the message field is decoded as JSON and the
    timestamp/level are merged into that record; for all other logs a plain
    dict of the four fields is returned. Malformed lines are logged at DEBUG
    level and skipped; a missing file yields an empty list.
    """
    if not os.path.exists(log_file_path):
        logger.warning(f"Log file not found: {log_file_path}")
        return []

    oldest_allowed = datetime.now() - timedelta(hours=hours)
    results: List[Dict] = []
    is_transition_log = 'state_transitions' in log_file_path

    try:
        with open(log_file_path, 'r') as handle:
            for raw_line in handle:
                try:
                    fields = raw_line.strip().split(' | ', 3)
                    if len(fields) < 4:
                        # Not a structured log line; ignore silently.
                        continue
                    ts_text, level, module_name, payload = fields

                    # assumes timestamps like '2024-01-01 12:00:00' (no
                    # milliseconds) — TODO confirm against the log formatter
                    ts = datetime.strptime(ts_text, '%Y-%m-%d %H:%M:%S')
                    if ts < oldest_allowed:
                        continue

                    if is_transition_log:
                        try:
                            record = json.loads(payload)
                        except json.JSONDecodeError:
                            logger.debug(f"Failed to parse JSON: {payload}")
                        else:
                            record['timestamp'] = ts_text
                            record['log_level'] = level
                            results.append(record)
                    else:
                        results.append({
                            'timestamp': ts_text,
                            'log_level': level,
                            'module': module_name,
                            'message': payload,
                        })
                except Exception as exc:
                    logger.debug(f"Error parsing log line: {exc}")
                    continue
    except Exception as exc:
        logger.error(f"Error reading log file {log_file_path}: {exc}")

    return results
75
+
76
def load_state_snapshots(hours: int = 24) -> Dict[str, Dict]:
    """Load workflow state snapshot JSON files modified within the last *hours*.

    Scans ``<LOG_DIR>/snapshots`` for ``*.json`` files whose mtime is newer
    than the cutoff and returns a mapping of filename -> parsed JSON content.
    A missing directory or unreadable file is logged and skipped, never raised.
    """
    snapshots_dir = os.path.join(settings.LOG_DIR, "snapshots")
    if not os.path.exists(snapshots_dir):
        logger.warning(f"Snapshots directory not found: {snapshots_dir}")
        return {}

    cutoff_time = datetime.now() - timedelta(hours=hours)
    snapshots: Dict[str, Dict] = {}

    try:
        for filename in os.listdir(snapshots_dir):
            if not filename.endswith('.json'):
                continue

            filepath = os.path.join(snapshots_dir, filename)
            file_mtime = datetime.fromtimestamp(os.path.getmtime(filepath))

            # Skip files older than the cutoff time
            if file_mtime < cutoff_time:
                continue

            try:
                with open(filepath, 'r') as f:
                    snapshots[filename] = json.load(f)
            except Exception as e:
                # Fix: include the offending filename so failures are
                # traceable (previously logged a fixed "(unknown)" string).
                logger.debug(f"Error reading snapshot {filename}: {e}")
    except Exception as e:
        logger.error(f"Error accessing snapshots directory: {e}")

    return snapshots
108
+
109
def generate_workflow_summary(hours: int = 24) -> pd.DataFrame:
    """Summarize per-job workflow activity from workflow.log.

    Extracts ``[Job <id>]``-tagged log messages and aggregates, per job id:
    first/last timestamp seen, number of "Step " messages, the most recent
    "completed:" status, and a count of messages containing Error/error.
    Returns an (possibly empty) DataFrame indexed by ``job_id``.
    """
    workflow_logs = load_log_entries(os.path.join(settings.LOG_DIR, 'workflow.log'), hours)

    summary: Dict[str, Dict] = {}
    for entry in workflow_logs:
        message = entry.get('message', '')

        # Only messages tagged with a job id contribute to the summary.
        if '[Job ' not in message:
            continue
        pieces = message.split('[Job ', 1)[1].split(']', 1)
        if len(pieces) < 2:
            continue

        job_id = pieces[0].strip()
        msg_content = pieces[1].strip()

        record = summary.setdefault(job_id, {
            'start_time': entry.get('timestamp'),
            'last_update': entry.get('timestamp'),
            'steps_completed': 0,
            'current_status': 'Unknown',
            'errors': 0,
        })

        record['last_update'] = entry.get('timestamp')

        if 'Step ' in msg_content:
            record['steps_completed'] += 1

        if 'completed:' in msg_content:
            record['current_status'] = msg_content.split('completed:', 1)[1].strip()

        if 'Error' in message or 'error' in message:
            record['errors'] += 1

    if not summary:
        return pd.DataFrame(columns=['start_time', 'last_update', 'steps_completed', 'current_status', 'errors'])

    frame = pd.DataFrame.from_dict(summary, orient='index')
    frame.index.name = 'job_id'
    return frame
154
+
155
def display_errors(hours: int = 24) -> pd.DataFrame:
    """Collect error entries from workflow.log into a DataFrame.

    An entry counts as an error when its level is ERROR or its message
    contains "Error"/"error". The job id is extracted from a ``[Job <id>]``
    tag when present, otherwise left as 'Unknown'. Returns a DataFrame with
    columns timestamp/job_id/error (empty if no errors were found).
    """
    workflow_logs = load_log_entries(os.path.join(settings.LOG_DIR, 'workflow.log'), hours)

    error_entries = []
    for entry in workflow_logs:
        message = entry.get('message', '')
        if entry.get('log_level') == 'ERROR' or 'Error' in message or 'error' in message:
            # Try to extract the job ID from the message
            job_id = 'Unknown'
            if '[Job ' in message:
                try:
                    job_id = message.split('[Job ', 1)[1].split(']', 1)[0].strip()
                except IndexError:
                    # Fix: narrowed from a bare `except:` (which also swallowed
                    # KeyboardInterrupt/SystemExit); a malformed tag simply
                    # keeps job_id as 'Unknown'.
                    pass

            error_entries.append({
                'timestamp': entry.get('timestamp'),
                'job_id': job_id,
                'error': message
            })

    # Convert to DataFrame for easier display
    if error_entries:
        return pd.DataFrame(error_entries)
    return pd.DataFrame(columns=['timestamp', 'job_id', 'error'])
183
+
184
def generate_job_timeline(job_id: str) -> List[Dict]:
    """Build an ordered list of completed steps for a single job.

    Scans workflow.log for messages tagged ``[Job <job_id>]`` that match the
    "Step N ... completed: <name>" pattern and returns records sorted by step
    number. (Fix: the return annotation was ``Dict`` but the function has
    always returned a list.)
    """
    workflow_logs = load_log_entries(os.path.join(settings.LOG_DIR, 'workflow.log'))

    timeline = []
    for entry in workflow_logs:
        message = entry.get('message', '')
        if f'[Job {job_id}]' not in message:
            continue

        # Extract step information if available
        if 'Step ' in message and 'completed:' in message:
            try:
                step_num = int(message.split('Step ', 1)[1].split(' ', 1)[0])
                step_name = message.split('completed:', 1)[1].strip()
            except (ValueError, IndexError):
                # Fix: narrowed from a bare `except: pass` — only a
                # non-numeric step number or truncated message is expected.
                continue

            timeline.append({
                'timestamp': entry.get('timestamp'),
                'step_number': step_num,
                'step_name': step_name
            })

    return sorted(timeline, key=lambda x: x.get('step_number', 0))
209
+
210
def monitor_active_jobs():
    """Print a terminal dashboard of recent workflow activity.

    Shows a tabulated summary of jobs seen in the last 24 hours (with a
    computed duration column) followed by any errors from the last 6 hours.
    Output goes to stdout; nothing is returned.
    """
    banner = "=" * 80
    print("\n" + banner)
    print(" SmartRecruitAgent Workflow Monitor ".center(80, "="))
    print(banner)

    # Summarize jobs from the last day
    summary = generate_workflow_summary(hours=24)

    if summary.empty:
        print("\nNo active jobs found in the last 24 hours.")
    else:
        # Derive a duration column, trimming sub-second precision for display
        summary['duration'] = pd.to_datetime(summary['last_update']) - pd.to_datetime(summary['start_time'])
        summary['duration'] = summary['duration'].astype(str).str.split('.').str[0]

        display_cols = summary[['start_time', 'duration', 'steps_completed', 'current_status', 'errors']].copy()
        print(f"\nActive Jobs (Last 24 Hours): {len(display_cols)}")
        print(tabulate(display_cols, headers='keys', tablefmt='grid'))

    # Show errors from a shorter, more recent window
    recent_errors = display_errors(hours=6)
    if not recent_errors.empty:
        print("\nRecent Errors (Last 6 Hours):")
        print(tabulate(recent_errors, headers='keys', tablefmt='grid', maxcolwidths=[20, 10, 50]))

    print("\n" + banner)
238
+
239
def show_job_details(job_id: str):
    """Print the execution timeline and latest state snapshot for one job.

    Renders the step timeline from the logs, lists any snapshot files whose
    name contains the job id, and prints key fields (candidate, email,
    resume, relevance score, errors) from the most recent snapshot.
    Output goes to stdout; nothing is returned.
    """
    timeline = generate_job_timeline(job_id)

    print("\n" + "="*80)
    print(f" Job Details: {job_id} ".center(80, "="))
    print("="*80)

    if not timeline:
        print(f"\nNo data found for job ID: {job_id}")
        return

    # Print timeline
    print("\nExecution Timeline:")
    for step in timeline:
        print(f"[{step['timestamp']}] Step {step['step_number']}: {step['step_name']}")

    # Look for snapshots for this job
    snapshots = load_state_snapshots()
    job_snapshots = {k: v for k, v in snapshots.items() if job_id in k}

    if job_snapshots:
        print("\nState Snapshots Available:")
        for filename in job_snapshots:
            # Fix: previously printed a literal "(unknown)" placeholder
            # instead of the snapshot filename.
            print(f" - {filename}")

        # Latest snapshot by filename; assumes timestamped names sort
        # lexicographically in chronological order — TODO confirm naming scheme
        latest = max(job_snapshots.keys())
        print(f"\nLatest State ({latest}):")
        snapshot = job_snapshots[latest]

        # Print key information when present in the snapshot
        if 'candidate_name' in snapshot:
            print(f"Candidate: {snapshot.get('candidate_name', 'Unknown')}")
        if 'candidate_email' in snapshot:
            print(f"Email: {snapshot.get('candidate_email', 'Unknown')}")
        if 'resume_path' in snapshot:
            print(f"Resume: {snapshot.get('resume_path', 'Unknown')}")
        if 'relevance_score' in snapshot and snapshot['relevance_score'] is not None:
            print(f"Relevance Score: {snapshot.get('relevance_score', 'N/A')}")

        # Print any errors recorded in the snapshot
        if 'errors' in snapshot and snapshot['errors']:
            print("\nErrors:")
            for error in snapshot['errors']:
                print(f" - [{error.get('step', 'unknown')}] {error.get('error', 'Unknown error')}")

    print("\n" + "="*80)
287
+
288
# Script entry point: render the live monitoring summary when run directly.
if __name__ == "__main__":
    monitor_active_jobs()
@@ -0,0 +1,88 @@
1
+ """
2
+ Utility to apply logging decorators to workflow nodes
3
+ Ensures consistent logging across all workflow steps
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import inspect
9
+ import logging
10
+ import importlib
11
+ from types import ModuleType
12
+ from typing import Dict, Any, Callable
13
+ from functools import wraps
14
+
15
+ # Import our logging decorator
16
+ from utils.logging_utils import log_step, workflow_logger as logger
17
+
18
def trace_node(node_function: Callable) -> Callable:
    """Wrap a workflow node with entry/exception tracing.

    The node is first decorated with the project's standard ``log_step``
    decorator, then wrapped again so that entering the node is logged at
    DEBUG and any raised exception is logged at ERROR before re-raising.
    The wrapper preserves the original function's metadata via ``wraps``.
    """
    # Standard logging decorator is applied first; our wrapper calls it.
    logged = log_step(node_function)

    @wraps(node_function)
    def traced(state: Dict[str, Any], *args, **kwargs) -> Dict[str, Any]:
        job_id = state.get("job_id", "unknown")
        logger.debug(f"[Job {job_id}] Entering node {node_function.__name__}")

        try:
            return logged(state, *args, **kwargs)
        except Exception as e:
            # Log and re-raise; the log_step decorator handles the rest.
            logger.error(f"[Job {job_id}] Exception in {node_function.__name__}: {str(e)}")
            raise

    return traced
39
+
40
def trace_workflow_nodes():
    """Apply the tracing decorator to every workflow node function.

    Discovers ``*.py`` modules under ``langgraph_workflow/nodes``, imports
    each one, and replaces every public function that takes a ``state``
    parameter with its traced equivalent. Import or decoration failures for
    one module are logged and do not stop the others.
    """
    # Resolve the nodes directory relative to this file
    nodes_dir = os.path.join(os.path.dirname(__file__), '..', 'langgraph_workflow', 'nodes')
    parent_dir = os.path.abspath(os.path.join(nodes_dir, '..'))
    # Fix: guard against appending the same path on every call, which
    # previously grew sys.path with duplicates.
    if parent_dir not in sys.path:
        sys.path.append(parent_dir)

    nodes_path = 'langgraph_workflow.nodes'
    logger.info(f"Applying tracing to workflow nodes in {nodes_path}")

    # Candidate modules: .py files that are not dunder files. (The previous
    # explicit '__init__.py' test was redundant with startswith('__').)
    node_modules = [
        filename[:-3]  # strip the .py extension
        for filename in os.listdir(nodes_dir)
        if filename.endswith('.py') and not filename.startswith('__')
    ]

    # Apply tracing to node functions in each module
    for module_name in node_modules:
        full_module_name = f"{nodes_path}.{module_name}"
        try:
            module = importlib.import_module(full_module_name)

            # Heuristic: a public function with a 'state' parameter is a node
            traced_count = 0
            for name, obj in inspect.getmembers(module):
                if (inspect.isfunction(obj) and
                        not name.startswith('_') and
                        'state' in inspect.signature(obj).parameters):
                    # Replace the original function with the traced version
                    setattr(module, name, trace_node(obj))
                    traced_count += 1

            logger.info(f"Applied tracing to {traced_count} functions in {full_module_name}")

        except Exception as e:
            logger.error(f"Error applying tracing to {full_module_name}: {str(e)}")

    logger.info("Tracing applied to all workflow nodes")
86
+
87
# Apply tracing automatically when this module is imported.
# NOTE(review): this is an import-time side effect — importing node_tracer
# eagerly imports and patches every node module; confirm callers expect that.
trace_workflow_nodes()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: michael_agent
3
- Version: 1.0.0
3
+ Version: 1.0.2
4
4
  Summary: SmartRecruitAgent - A recruitment automation library
5
5
  Home-page: https://github.com/yourusername/agent
6
6
  Author: Michael Jone
@@ -8,7 +8,7 @@ Author-email: your_email@example.com
8
8
  Classifier: Programming Language :: Python :: 3
9
9
  Classifier: License :: OSI Approved :: MIT License
10
10
  Classifier: Operating System :: OS Independent
11
- Requires-Python: >=3.7
11
+ Requires-Python: >=3.10
12
12
  Description-Content-Type: text/markdown
13
13
  Requires-Dist: python-dotenv
14
14
  Requires-Dist: langchain
@@ -0,0 +1,32 @@
1
+ michael_agent/__init__.py,sha256=prEeI3mdpO8R5QTpniRR_Tl21uqF7pGJHwBidQ9JIKQ,179
2
+ michael_agent/main.py,sha256=j5BXOxg_8YE-bnu3cKrylQCHCssZ6PP4UbGorRVJMd4,3385
3
+ michael_agent/monitor.py,sha256=RThpdPW7lf5zI3ilMShVeDf4Vao5Yq0E4Rao9uuS9XY,2473
4
+ michael_agent/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ michael_agent/config/settings.py,sha256=_4uvWQnMscK01Sd0zT5wesVW5uN0njtKYRMsjMQXEOY,3180
6
+ michael_agent/dashboard/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ michael_agent/dashboard/app.py,sha256=UtMswD7TGGJBY9cMeuFQPJAtgaRiXFso6PsTHtvPGN8,61963
8
+ michael_agent/dashboard/static/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
+ michael_agent/dashboard/templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
+ michael_agent/langgraph_workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
+ michael_agent/langgraph_workflow/graph_builder.py,sha256=xdsZ_lVWFn5B8xNXg_L49H-Jwfj-p7nxPVOwtc9Rf2U,14487
12
+ michael_agent/langgraph_workflow/nodes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
+ michael_agent/langgraph_workflow/nodes/assessment_handler.py,sha256=qgMB8fJdyA2CP8VWY2m-7-418LcY302S-SR_JHamSTE,6401
14
+ michael_agent/langgraph_workflow/nodes/jd_generator.py,sha256=G8cM3NkGqd44iUAbJJDed9kFjJ-F02_FXB6I_7AE_kA,5105
15
+ michael_agent/langgraph_workflow/nodes/jd_poster.py,sha256=6F1jQRG_IoiopIOpIDjSpuCE3I6_A7-ZEMkV8FtKXQs,4550
16
+ michael_agent/langgraph_workflow/nodes/question_generator.py,sha256=XgDc5f7-ifsJ3UdzB22NjKMqjUcG2_elTZ5LOPGVkt8,11670
17
+ michael_agent/langgraph_workflow/nodes/recruiter_notifier.py,sha256=xLVhRP1I-QIcO_b0lYLuMnMTpGHAFakG-luPJrhkN6Y,8522
18
+ michael_agent/langgraph_workflow/nodes/resume_analyzer.py,sha256=XG4MksqSqhhNwGSfDauIbEpPmxogDJ6skJgR-xpeY0g,24027
19
+ michael_agent/langgraph_workflow/nodes/resume_ingestor.py,sha256=h14J4AcFk22BWoFHCPRkK3HpzY8RvwGW6_jjqBxLXNU,9279
20
+ michael_agent/langgraph_workflow/nodes/sentiment_analysis.py,sha256=H-geV4AbFbt1EpiLKnpaXdvrrjjXMN-Dzzg4sZOjhdM,11657
21
+ michael_agent/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
+ michael_agent/utils/email_utils.py,sha256=PsL3QTQuV_iVou_2Y3o_Dohz7tN9YNp9FPxsTKkDRv0,4989
23
+ michael_agent/utils/id_mapper.py,sha256=GzYRuAhGWf2BUAb9hVMS3KR8bmYExnmXRWkQ_j-kWaw,397
24
+ michael_agent/utils/jd_utils.py,sha256=OqzI0os3sIssa-L_-SrMe901yzjE0Z_zllztYNKReZ4,1234
25
+ michael_agent/utils/lms_api.py,sha256=tmntU6tjyAdMLak_vfoxBkWNIPUKvejeEwb2t6yQBUM,8436
26
+ michael_agent/utils/logging_utils.py,sha256=Ld7fs2uuCOM0bx-totxHzKzKHl5lfAe3TXeH1QYJBjw,7179
27
+ michael_agent/utils/monitor_utils.py,sha256=1Ig6C79bQ_OOLKhgFNmm0ybntQavqzyJ3zsxD0iZxxw,11069
28
+ michael_agent/utils/node_tracer.py,sha256=N1MWly4qfzh87Fo1xRS5hpefoAvfSyZIPvMOegPrtBY,3411
29
+ michael_agent-1.0.2.dist-info/METADATA,sha256=4mAZuyRUC_RoCuQdFpaJnXK1kVSsTnqN36dFwRyPYE8,1340
30
+ michael_agent-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
31
+ michael_agent-1.0.2.dist-info/top_level.txt,sha256=-r35JOIHnK3RsMhJ77tDKfWtmfGDr_iT2642k-suUDo,14
32
+ michael_agent-1.0.2.dist-info/RECORD,,
@@ -1,7 +0,0 @@
1
- michael_agent/__init__.py,sha256=prEeI3mdpO8R5QTpniRR_Tl21uqF7pGJHwBidQ9JIKQ,179
2
- michael_agent/main.py,sha256=j5BXOxg_8YE-bnu3cKrylQCHCssZ6PP4UbGorRVJMd4,3385
3
- michael_agent/monitor.py,sha256=RThpdPW7lf5zI3ilMShVeDf4Vao5Yq0E4Rao9uuS9XY,2473
4
- michael_agent-1.0.0.dist-info/METADATA,sha256=KRL2sOmwfc4WDlyLbDWRZX3UMtWSLHoJ20LBofSHsLY,1339
5
- michael_agent-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
6
- michael_agent-1.0.0.dist-info/top_level.txt,sha256=-r35JOIHnK3RsMhJ77tDKfWtmfGDr_iT2642k-suUDo,14
7
- michael_agent-1.0.0.dist-info/RECORD,,