tooluniverse 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tooluniverse might be problematic. Click here for more details.

@@ -1,1611 +0,0 @@
1
- # =============================================================================
2
- # 🧑‍⚕️ HUMAN EXPERT MCP INTEGRATION TOOL
3
- # =============================================================================
4
- """
5
- This script creates an MCP server that allows human experts to receive and respond to
6
- requests from ToolUniverse. The expert can monitor incoming questions in real-time
7
- and provide expert responses through an interactive interface.
8
-
9
- The tool is designed for scenarios where human expertise is needed for:
10
- - Complex clinical decisions requiring medical judgment
11
- - Review and validation of AI recommendations
12
- - Providing expert opinions on specialized topics
13
- - Quality assurance and oversight of automated responses
14
-
15
- Usage:
16
- python human_expert_mcp_server.py # Start MCP server only
17
- python human_expert_mcp_server.py --web-only # Start web interface only
18
- python human_expert_mcp_server.py --interface-only # Start terminal interface only
19
-
20
-
21
- """
22
-
23
- # =============================================================================
24
- # ⚙️ MCP SERVER CONFIGURATION
25
- # =============================================================================
26
- from fastmcp import FastMCP
27
- import asyncio
28
- from concurrent.futures import ThreadPoolExecutor
29
- import threading
30
- import uuid
31
- import queue
32
- import time
33
- from datetime import datetime
34
- import argparse
35
- import requests
36
- from typing import Dict, Optional, TYPE_CHECKING
37
-
38
- if TYPE_CHECKING:
39
- pass
40
- import webbrowser
41
- from threading import Timer
42
- import sys
43
-
44
- # Try to import Flask for web interface
45
- try:
46
- from flask import Flask, render_template_string, request, jsonify, redirect, url_for
47
-
48
- FLASK_AVAILABLE = True
49
- except ImportError:
50
- FLASK_AVAILABLE = False
51
- print("⚠️ Flask not available. Web interface will be disabled.")
52
- print(" Install with: pip install flask")
53
-
54
# Server configuration
# FastMCP server instance; stateless_http=True means each HTTP call is
# handled without per-session transport state.
server = FastMCP("Human Expert MCP Server", stateless_http=True)
# Shared pool used to run blocking waits (expert-response polling) off the
# asyncio event loop.
executor = ThreadPoolExecutor(max_workers=3)

# Flask web app for expert interface (if available)
web_app: Optional["Flask"]
if FLASK_AVAILABLE:
    web_app = Flask(__name__)
    # NOTE(review): hard-coded secret key — acceptable for a localhost-only
    # tool, but should come from configuration if ever exposed externally.
    web_app.secret_key = "human_expert_interface_secret_key"
else:
    # Web routes are only registered when Flask imported successfully.
    web_app = None
65
-
66
- # =============================================================================
67
- # 🔧 HUMAN EXPERT SYSTEM CONFIGURATION
68
- # =============================================================================
69
-
70
-
71
class HumanExpertSystem:
    """Coordinate consultation requests between ToolUniverse and a human expert.

    Requests wait in a FIFO queue until the expert answers; answers are stored
    by request id so pollers can retrieve them later.  All shared state — the
    queue snapshot/rebuild, ``responses`` and ``request_status`` — is guarded
    by ``self.lock`` because the MCP tools, the web interface and the
    monitoring thread all call in concurrently.
    """

    def __init__(self):
        # FIFO of pending request dicts awaiting an expert response.
        self.request_queue = queue.Queue()
        # request_id -> {"response", "timestamp", "expert"} for answered requests.
        self.responses = {}
        # request_id -> "pending" | "completed".
        self.request_status = {}
        # Guards the dicts above and every drain/refill of request_queue.
        self.lock = threading.Lock()
        # Static expert profile shown in console banners and the web UI.
        self.expert_info = {
            "name": "Medical Expert",
            "specialties": [
                "Clinical Medicine",
                "Pharmacology",
                "Drug Interactions",
                "Oncology",
                "Cardiology",
            ],
            "availability": True,
        }
        # Console notification switches.
        self.notification_enabled = True
        self.audio_alerts = True

    def _drain_queue(self) -> list:
        """Pop every queued item into a list.  Caller must hold ``self.lock``."""
        items: list = []
        while True:
            try:
                items.append(self.request_queue.get_nowait())
            except queue.Empty:
                return items

    def submit_request(
        self, request_id: str, question: str, context: Optional[Dict] = None
    ) -> str:
        """Submit a new request for expert review.

        Args:
            request_id: Caller-chosen identifier for the request.
            question: The question needing expert judgment.
            context: Optional metadata ("specialty", "priority", "context").

        Returns:
            The same ``request_id``, for caller convenience.
        """
        with self.lock:
            request_data = {
                "id": request_id,
                "question": question,
                "context": context or {},
                "timestamp": datetime.now().isoformat(),
                "status": "pending",
            }

            self.request_queue.put(request_data)
            self.request_status[request_id] = "pending"

            # Enhanced console notification
            print(f"\n{'='*80}")
            print(f"🔔 NEW EXPERT CONSULTATION REQUEST [{request_id}]")
            print(f"{'='*80}")
            print(f"📝 Question: {question}")
            print(
                f"🎯 Specialty: {context.get('specialty', 'general') if context else 'general'}"
            )
            print(
                f"⚡ Priority: {context.get('priority', 'normal') if context else 'normal'}"
            )
            if context and context.get("context"):
                print(f"📋 Context: {context.get('context')}")
            print(f"⏰ Time: {request_data['timestamp']}")
            print("🌐 View in web interface: http://localhost:8080")
            print(f"{'='*80}")

            # Audio alert (system beep) — best effort only.
            if self.audio_alerts:
                try:
                    print("\a")  # ASCII bell character
                except Exception:
                    pass

            return request_id

    def get_pending_requests(self) -> list:
        """Return a non-destructive snapshot of all pending requests.

        Bug fix: the original drained and refilled the queue WITHOUT holding
        ``self.lock``, so a concurrent ``submit_response`` (which also
        rebuilds the queue) could lose or duplicate queued requests.  The
        snapshot now happens under the lock.
        """
        with self.lock:
            pending = self._drain_queue()
            # Push the items straight back so the queue is unchanged.
            for item in pending:
                self.request_queue.put(item)
            return pending

    def submit_response(self, request_id: str, response: str) -> bool:
        """Record the expert's *response* for a known request.

        Returns:
            True when the request id was known (response stored and the
            request removed from the pending queue); False otherwise.
        """
        with self.lock:
            if request_id not in self.request_status:
                return False

            self.responses[request_id] = {
                "response": response,
                "timestamp": datetime.now().isoformat(),
                "expert": self.expert_info["name"],
            }
            self.request_status[request_id] = "completed"

            # Rebuild the queue without the answered request.
            for item in self._drain_queue():
                if item["id"] != request_id:
                    self.request_queue.put(item)

            print(f"\n✅ RESPONSE SUBMITTED [{request_id}]")
            print(f"👨‍⚕️ Expert: {self.expert_info['name']}")
            print(f"📝 Response: {response}")
            print("=" * 80)

            return True

    def get_response(self, request_id: str, timeout: int = 300) -> Optional[Dict]:
        """Poll for the expert's response, waiting up to *timeout* seconds.

        Returns:
            The stored response dict, or None when the timeout elapses.
        """
        start_time = time.time()

        while time.time() - start_time < timeout:
            with self.lock:
                if request_id in self.responses:
                    return self.responses[request_id]

            time.sleep(1)  # Check every second

        return None
204
-
205
-
206
# Global expert system instance
# Shared by the MCP tools, the Flask routes and the monitoring thread.
expert_system = HumanExpertSystem()
208
-
209
- # =============================================================================
210
- # 🌐 WEB-BASED EXPERT INTERFACE
211
- # =============================================================================
212
-
213
- # HTML template for the web interface
214
- WEB_INTERFACE_TEMPLATE = """
215
- <!DOCTYPE html>
216
- <html lang="en">
217
- <head>
218
- <meta charset="UTF-8">
219
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
220
- <title>🧑‍⚕️ Human Expert Interface</title>
221
- <style>
222
- * { margin: 0; padding: 0; box-sizing: border-box; }
223
- body {
224
- font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
225
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
226
- min-height: 100vh;
227
- padding: 20px;
228
- }
229
- .container {
230
- max-width: 1200px;
231
- margin: 0 auto;
232
- background: white;
233
- border-radius: 15px;
234
- box-shadow: 0 20px 40px rgba(0,0,0,0.1);
235
- overflow: hidden;
236
- }
237
- .header {
238
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
239
- color: white;
240
- padding: 30px;
241
- text-align: center;
242
- }
243
- .header h1 { font-size: 2.5em; margin-bottom: 10px; }
244
- .header p { font-size: 1.2em; opacity: 0.9; }
245
- .content { padding: 30px; }
246
- .stats-grid {
247
- display: grid;
248
- grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
249
- gap: 20px;
250
- margin-bottom: 30px;
251
- }
252
- .stat-card {
253
- background: #f8f9fa;
254
- padding: 20px;
255
- border-radius: 10px;
256
- text-align: center;
257
- border-left: 4px solid #667eea;
258
- }
259
- .stat-card h3 { color: #333; font-size: 2em; margin-bottom: 5px; }
260
- .stat-card p { color: #666; }
261
- .requests-section { margin-bottom: 30px; }
262
- .request-card {
263
- border: 1px solid #e0e0e0;
264
- border-radius: 10px;
265
- margin-bottom: 20px;
266
- overflow: hidden;
267
- transition: all 0.3s ease;
268
- }
269
- .request-card:hover {
270
- box-shadow: 0 5px 15px rgba(0,0,0,0.1);
271
- transform: translateY(-2px);
272
- }
273
- .request-header {
274
- background: #f8f9fa;
275
- padding: 15px 20px;
276
- border-bottom: 1px solid #e0e0e0;
277
- display: flex;
278
- justify-content: between;
279
- align-items: center;
280
- }
281
- .request-content { padding: 20px; }
282
- .request-question {
283
- background: #fff8dc;
284
- padding: 15px;
285
- border-radius: 8px;
286
- margin-bottom: 15px;
287
- border-left: 4px solid #ffd700;
288
- }
289
- .badge {
290
- padding: 4px 12px;
291
- border-radius: 20px;
292
- font-size: 0.8em;
293
- font-weight: bold;
294
- text-transform: uppercase;
295
- }
296
- .badge.high { background: #ffebee; color: #c62828; }
297
- .badge.normal { background: #e8f5e8; color: #2e7d32; }
298
- .badge.urgent { background: #ffcdd2; color: #d32f2f; }
299
- .response-form { margin-top: 15px; }
300
- .response-textarea {
301
- width: 100%;
302
- min-height: 120px;
303
- padding: 15px;
304
- border: 1px solid #ddd;
305
- border-radius: 8px;
306
- font-family: inherit;
307
- resize: vertical;
308
- }
309
- .btn {
310
- padding: 12px 24px;
311
- border: none;
312
- border-radius: 8px;
313
- cursor: pointer;
314
- font-weight: bold;
315
- transition: all 0.3s ease;
316
- }
317
- .btn-primary {
318
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
319
- color: white;
320
- }
321
- .btn-primary:hover {
322
- transform: translateY(-2px);
323
- box-shadow: 0 5px 15px rgba(102, 126, 234, 0.4);
324
- }
325
- .btn-secondary { background: #6c757d; color: white; }
326
- .btn-secondary:hover { background: #5a6268; }
327
- .alert {
328
- padding: 15px;
329
- border-radius: 8px;
330
- margin-bottom: 20px;
331
- }
332
- .alert-success { background: #d4edda; color: #155724; border: 1px solid #c3e6cb; }
333
- .alert-info { background: #cce7ff; color: #004085; border: 1px solid #b3d7ff; }
334
- .alert-warning { background: #fff3cd; color: #856404; border: 1px solid #ffeaa7; }
335
- .meta-info {
336
- display: flex;
337
- gap: 20px;
338
- margin-bottom: 15px;
339
- font-size: 0.9em;
340
- color: #666;
341
- }
342
- .meta-item { display: flex; align-items: center; gap: 5px; }
343
- .auto-refresh {
344
- position: fixed;
345
- top: 20px;
346
- right: 20px;
347
- background: rgba(255,255,255,0.9);
348
- padding: 10px 15px;
349
- border-radius: 25px;
350
- box-shadow: 0 5px 15px rgba(0,0,0,0.1);
351
- }
352
- .loading {
353
- display: inline-block;
354
- width: 20px;
355
- height: 20px;
356
- border: 3px solid #f3f3f3;
357
- border-top: 3px solid #667eea;
358
- border-radius: 50%;
359
- animation: spin 1s linear infinite;
360
- }
361
- @keyframes spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } }
362
- .no-requests {
363
- text-align: center;
364
- padding: 60px 20px;
365
- color: #666;
366
- }
367
- .no-requests h3 { margin-bottom: 15px; }
368
- .footer {
369
- background: #f8f9fa;
370
- padding: 20px;
371
- text-align: center;
372
- border-top: 1px solid #e0e0e0;
373
- color: #666;
374
- }
375
- </style>
376
- </head>
377
- <body>
378
- <div class="auto-refresh">
379
- <label>
380
- <input type="checkbox" id="autoRefresh" checked> Auto-refresh (10s)
381
- </label>
382
- <span id="refreshStatus"></span>
383
- </div>
384
-
385
- <div class="container">
386
- <div class="header">
387
- <h1>🧑‍⚕️ Human Expert Interface</h1>
388
- <p>ToolUniverse Expert Consultation System</p>
389
- </div>
390
-
391
- <div class="content">
392
- <!-- Status Messages -->
393
- {% if message %}
394
- <div class="alert alert-{{ message_type }}">{{ message }}</div>
395
- {% endif %}
396
-
397
- <!-- Statistics Dashboard -->
398
- <div class="stats-grid">
399
- <div class="stat-card">
400
- <h3>{{ stats.pending_requests }}</h3>
401
- <p>Pending Requests</p>
402
- </div>
403
- <div class="stat-card">
404
- <h3>{{ stats.total_requests }}</h3>
405
- <p>Total Requests</p>
406
- </div>
407
- <div class="stat-card">
408
- <h3>{{ stats.completed_responses }}</h3>
409
- <p>Completed</p>
410
- </div>
411
- <div class="stat-card">
412
- <h3>{{ stats.response_rate }}%</h3>
413
- <p>Response Rate</p>
414
- </div>
415
- </div>
416
-
417
- <!-- Expert Info -->
418
- <div class="alert alert-info">
419
- <strong>👨‍⚕️ Expert:</strong> {{ expert_info.name }} |
420
- <strong>🎯 Specialties:</strong> {{ expert_info.specialties | join(', ') }} |
421
- <strong>🟢 Status:</strong> {{ 'Available' if expert_info.availability else 'Unavailable' }}
422
- </div>
423
-
424
- <!-- Pending Requests -->
425
- <div class="requests-section">
426
- <h2>📋 Pending Consultation Requests</h2>
427
-
428
- {% if pending_requests %}
429
- {% for req in pending_requests %}
430
- <div class="request-card">
431
- <div class="request-header">
432
- <div>
433
- <strong>Request #{{ req.request_id }}</strong>
434
- <span class="badge {{ req.priority }}">{{ req.priority }}</span>
435
- </div>
436
- <div>{{ req.specialty | title }}</div>
437
- </div>
438
-
439
- <div class="request-content">
440
- <div class="meta-info">
441
- <div class="meta-item">⏰ {{ req.age_minutes }} minutes ago</div>
442
- <div class="meta-item">📅 {{ req.timestamp }}</div>
443
- <div class="meta-item">🎯 {{ req.specialty }}</div>
444
- </div>
445
-
446
- <div class="request-question">
447
- <strong>❓ Question:</strong><br>
448
- {{ req.question }}
449
- </div>
450
-
451
- {% if req.context %}
452
- <div style="background: #f0f8ff; padding: 15px; border-radius: 8px; margin-bottom: 15px; border-left: 4px solid #4dabf7;">
453
- <strong>📋 Context:</strong><br>
454
- {{ req.context }}
455
- </div>
456
- {% endif %}
457
-
458
- <form class="response-form" method="POST" action="/submit_response">
459
- <input type="hidden" name="request_id" value="{{ req.request_id }}">
460
- <textarea name="response" class="response-textarea"
461
- placeholder="Enter your expert response and recommendations..." required></textarea>
462
- <div style="margin-top: 15px;">
463
- <button type="submit" class="btn btn-primary">✅ Submit Expert Response</button>
464
- <button type="button" class="btn btn-secondary" onclick="markAsReviewed('{{ req.request_id }}')">
465
- 👁️ Mark as Reviewed
466
- </button>
467
- </div>
468
- </form>
469
- </div>
470
- </div>
471
- {% endfor %}
472
- {% else %}
473
- <div class="no-requests">
474
- <h3>🎉 No Pending Requests</h3>
475
- <p>All consultation requests have been handled. New requests will appear here automatically.</p>
476
- </div>
477
- {% endif %}
478
- </div>
479
- </div>
480
-
481
- <div class="footer">
482
- <p>🧑‍⚕️ Human Expert Medical Consultation System | Last updated: <span id="lastUpdate">{{ current_time }}</span></p>
483
- </div>
484
- </div>
485
-
486
- <script>
487
- let autoRefreshInterval;
488
- const autoRefreshCheckbox = document.getElementById('autoRefresh');
489
- const refreshStatus = document.getElementById('refreshStatus');
490
- const lastUpdateSpan = document.getElementById('lastUpdate');
491
-
492
- function updateLastUpdate() {
493
- lastUpdateSpan.textContent = new Date().toLocaleString();
494
- }
495
-
496
- function refreshPage() {
497
- refreshStatus.innerHTML = '<span class="loading"></span>';
498
- setTimeout(() => {
499
- window.location.reload();
500
- }, 500);
501
- }
502
-
503
- function startAutoRefresh() {
504
- if (autoRefreshInterval) clearInterval(autoRefreshInterval);
505
- autoRefreshInterval = setInterval(refreshPage, 10000); // 10 seconds
506
- }
507
-
508
- function stopAutoRefresh() {
509
- if (autoRefreshInterval) {
510
- clearInterval(autoRefreshInterval);
511
- autoRefreshInterval = null;
512
- }
513
- refreshStatus.textContent = '';
514
- }
515
-
516
- autoRefreshCheckbox.addEventListener('change', function() {
517
- if (this.checked) {
518
- startAutoRefresh();
519
- } else {
520
- stopAutoRefresh();
521
- }
522
- });
523
-
524
- function markAsReviewed(requestId) {
525
- if (confirm('Mark this request as reviewed? This will not submit a response.')) {
526
- // Could implement a "reviewed but not responded" status
527
- console.log('Marked as reviewed:', requestId);
528
- }
529
- }
530
-
531
- // Start auto-refresh by default
532
- startAutoRefresh();
533
-
534
- // Update timestamp periodically
535
- setInterval(updateLastUpdate, 1000);
536
- </script>
537
- </body>
538
- </html>
539
- """
540
-
541
- # Flask web routes
542
- if FLASK_AVAILABLE and web_app is not None:
543
-
544
- @web_app.route("/")
545
- def expert_dashboard():
546
- """Main expert dashboard"""
547
- try:
548
- # Try to get data from running MCP server first
549
- pending = []
550
- stats = {
551
- "pending_requests": 0,
552
- "total_requests": 0,
553
- "completed_responses": 0,
554
- "response_rate": 0.0,
555
- }
556
-
557
- try:
558
- import requests as http_requests
559
-
560
- # Get status from MCP server
561
- payload = {
562
- "jsonrpc": "2.0",
563
- "id": "web-status",
564
- "method": "tools/call",
565
- "params": {"name": "get_expert_status", "arguments": {}},
566
- }
567
-
568
- headers = {
569
- "Content-Type": "application/json",
570
- "Accept": "application/json, text/event-stream",
571
- }
572
-
573
- response = http_requests.post(
574
- "http://localhost:7002/mcp",
575
- json=payload,
576
- headers=headers,
577
- timeout=5,
578
- )
579
-
580
- if response.status_code == 200:
581
- # Parse SSE response
582
- response_text = response.text
583
- if "data: " in response_text:
584
- # Extract JSON from SSE format
585
- json_part = response_text.split("data: ")[1].split("\n")[0]
586
- import json
587
-
588
- mcp_response = json.loads(json_part)
589
-
590
- if (
591
- "result" in mcp_response
592
- and "content" in mcp_response["result"]
593
- ):
594
- content = mcp_response["result"]["content"]
595
- if content and len(content) > 0 and "text" in content[0]:
596
- status_data = json.loads(content[0]["text"])
597
- if "statistics" in status_data:
598
- stats = status_data["statistics"]
599
-
600
- # Get pending requests from MCP server
601
- payload["params"]["name"] = "list_pending_expert_requests"
602
- response = http_requests.post(
603
- "http://localhost:7002/mcp",
604
- json=payload,
605
- headers=headers,
606
- timeout=5,
607
- )
608
-
609
- if response.status_code == 200:
610
- response_text = response.text
611
- if "data: " in response_text:
612
- json_part = response_text.split("data: ")[1].split("\n")[0]
613
- mcp_response = json.loads(json_part)
614
-
615
- if (
616
- "result" in mcp_response
617
- and "content" in mcp_response["result"]
618
- ):
619
- content = mcp_response["result"]["content"]
620
- if content and len(content) > 0 and "text" in content[0]:
621
- result_data = json.loads(content[0]["text"])
622
- if "pending_requests" in result_data:
623
- pending = result_data["pending_requests"]
624
-
625
- except Exception as e:
626
- print(f"Warning: Could not connect to MCP server: {e}")
627
- # Fallback to local data
628
- pending = expert_system.get_pending_requests()
629
-
630
- with expert_system.lock:
631
- total_responses = len(expert_system.responses)
632
- total_requests = len(expert_system.request_status)
633
-
634
- stats = {
635
- "pending_requests": len(pending),
636
- "total_requests": total_requests,
637
- "completed_responses": total_responses,
638
- "response_rate": round(
639
- total_responses / max(total_requests, 1) * 100, 1
640
- ),
641
- }
642
-
643
- # Format requests for display
644
- formatted_requests = []
645
- for req in pending:
646
- if isinstance(req, dict):
647
- # Handle both MCP server format and local format
648
- if "request_id" in req:
649
- # MCP server format
650
- formatted_req = req.copy()
651
- formatted_req["context"] = req.get("context", "")
652
- else:
653
- # Local format
654
- age_seconds = (
655
- datetime.now() - datetime.fromisoformat(req["timestamp"])
656
- ).total_seconds()
657
- formatted_req = {
658
- "request_id": req["id"],
659
- "question": req["question"],
660
- "specialty": req.get("context", {}).get(
661
- "specialty", "general"
662
- ),
663
- "priority": req.get("context", {}).get(
664
- "priority", "normal"
665
- ),
666
- "age_minutes": round(age_seconds / 60, 1),
667
- "timestamp": req["timestamp"],
668
- "context": req.get("context", {}).get("context", ""),
669
- }
670
- formatted_requests.append(formatted_req)
671
-
672
- return render_template_string(
673
- WEB_INTERFACE_TEMPLATE,
674
- pending_requests=formatted_requests,
675
- stats=stats,
676
- expert_info=expert_system.expert_info,
677
- current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
678
- message=request.args.get("message"),
679
- message_type=request.args.get("message_type", "info"),
680
- )
681
-
682
- except Exception as e:
683
- error_msg = f"Error loading dashboard: {str(e)}"
684
- return render_template_string(
685
- WEB_INTERFACE_TEMPLATE,
686
- pending_requests=[],
687
- stats={
688
- "pending_requests": 0,
689
- "total_requests": 0,
690
- "completed_responses": 0,
691
- "response_rate": 0,
692
- },
693
- expert_info=expert_system.expert_info,
694
- current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
695
- message=error_msg,
696
- message_type="warning",
697
- )
698
-
699
- @web_app.route("/submit_response", methods=["POST"])
700
- def submit_expert_response_web():
701
- """Handle expert response submission from web interface"""
702
- try:
703
- request_id = request.form.get("request_id")
704
- response_text = request.form.get("response")
705
-
706
- if not request_id or not response_text:
707
- return redirect(
708
- url_for(
709
- "expert_dashboard",
710
- message="Missing request ID or response text",
711
- message_type="warning",
712
- )
713
- )
714
-
715
- # Try to submit response through MCP server first
716
- success = False
717
- try:
718
- import requests as http_requests
719
-
720
- payload = {
721
- "jsonrpc": "2.0",
722
- "id": "web-submit-response",
723
- "method": "tools/call",
724
- "params": {
725
- "name": "submit_expert_response",
726
- "arguments": {
727
- "request_id": request_id,
728
- "response": response_text.strip(),
729
- },
730
- },
731
- }
732
-
733
- headers = {
734
- "Content-Type": "application/json",
735
- "Accept": "application/json, text/event-stream",
736
- }
737
-
738
- response = http_requests.post(
739
- "http://localhost:7002/mcp",
740
- json=payload,
741
- headers=headers,
742
- timeout=10,
743
- )
744
-
745
- if response.status_code == 200:
746
- # Parse SSE response
747
- response_text_response = response.text
748
- if "data: " in response_text_response:
749
- # Extract JSON from SSE format
750
- json_part = response_text_response.split("data: ")[1].split(
751
- "\n"
752
- )[0]
753
- import json
754
-
755
- mcp_response = json.loads(json_part)
756
-
757
- if (
758
- "result" in mcp_response
759
- and "content" in mcp_response["result"]
760
- ):
761
- content = mcp_response["result"]["content"]
762
- if content and len(content) > 0 and "text" in content[0]:
763
- result_data = json.loads(content[0]["text"])
764
- if result_data.get("status") == "success":
765
- success = True
766
-
767
- except Exception as e:
768
- print(f"Warning: Could not submit response through MCP server: {e}")
769
- # Fallback to local submission
770
- success = expert_system.submit_response(
771
- request_id, response_text.strip()
772
- )
773
-
774
- if success:
775
- return redirect(
776
- url_for(
777
- "expert_dashboard",
778
- message=f"Expert response submitted successfully for request {request_id}",
779
- message_type="success",
780
- )
781
- )
782
- else:
783
- return redirect(
784
- url_for(
785
- "expert_dashboard",
786
- message=f"Failed to submit response. Request {request_id} may not exist.",
787
- message_type="warning",
788
- )
789
- )
790
-
791
- except Exception as e:
792
- return redirect(
793
- url_for(
794
- "expert_dashboard",
795
- message=f"Error submitting response: {str(e)}",
796
- message_type="warning",
797
- )
798
- )
799
-
800
    @web_app.route("/api/status")
    def api_status():
        """JSON status endpoint: expert profile plus request statistics.

        Returns 200 with the statistics payload, or 500 with an ``error``
        key if anything goes wrong while computing it.
        """
        try:
            # Non-destructive snapshot of the pending queue.
            pending = expert_system.get_pending_requests()

            # Read both counters under the lock for a consistent view.
            with expert_system.lock:
                total_responses = len(expert_system.responses)
                total_requests = len(expert_system.request_status)

            return jsonify(
                {
                    "status": "active",
                    "expert_info": expert_system.expert_info,
                    "statistics": {
                        "pending_requests": len(pending),
                        "total_requests": total_requests,
                        "completed_responses": total_responses,
                        # Percentage; max(..., 1) avoids division by zero.
                        "response_rate": round(
                            total_responses / max(total_requests, 1) * 100, 1
                        ),
                    },
                    "system_time": datetime.now().isoformat(),
                }
            )

        except Exception as e:
            return jsonify({"error": str(e)}), 500
828
-
829
- @web_app.route("/api/requests")
830
- def api_requests():
831
- """API endpoint for pending requests"""
832
- try:
833
- # Check if we're running in web-only mode (need to call MCP server)
834
- if (
835
- not hasattr(expert_system, "request_queue")
836
- or expert_system.request_queue.empty()
837
- ):
838
- # Try to get data from running MCP server
839
- try:
840
- import requests as http_requests
841
-
842
- payload = {
843
- "jsonrpc": "2.0",
844
- "id": "web-api-requests",
845
- "method": "tools/call",
846
- "params": {
847
- "name": "list_pending_expert_requests",
848
- "arguments": {},
849
- },
850
- }
851
-
852
- headers = {
853
- "Content-Type": "application/json",
854
- "Accept": "application/json, text/event-stream",
855
- }
856
-
857
- response = http_requests.post(
858
- "http://localhost:7002/mcp",
859
- json=payload,
860
- headers=headers,
861
- timeout=5,
862
- )
863
-
864
- if response.status_code == 200:
865
- # Parse SSE response
866
- response_text = response.text
867
- if "data: " in response_text:
868
- # Extract JSON from SSE format
869
- json_part = response_text.split("data: ")[1].split("\n")[0]
870
- import json
871
-
872
- mcp_response = json.loads(json_part)
873
-
874
- if (
875
- "result" in mcp_response
876
- and "content" in mcp_response["result"]
877
- ):
878
- content = mcp_response["result"]["content"]
879
- if (
880
- content
881
- and len(content) > 0
882
- and "text" in content[0]
883
- ):
884
- result_data = json.loads(content[0]["text"])
885
- if "pending_requests" in result_data:
886
- return {
887
- "requests": result_data["pending_requests"]
888
- }
889
-
890
- except Exception as e:
891
- print(f"Warning: Could not connect to MCP server: {e}")
892
-
893
- # Fallback to local data (for integrated mode)
894
- pending = expert_system.get_pending_requests()
895
-
896
- formatted_requests = []
897
- for req in pending:
898
- age_seconds = (
899
- datetime.now() - datetime.fromisoformat(req["timestamp"])
900
- ).total_seconds()
901
- formatted_req = {
902
- "request_id": req["id"],
903
- "question": req["question"],
904
- "specialty": req.get("context", {}).get("specialty", "general"),
905
- "priority": req.get("context", {}).get("priority", "normal"),
906
- "age_minutes": round(age_seconds / 60, 1),
907
- "timestamp": req["timestamp"],
908
- "context": req.get("context", {}),
909
- }
910
- formatted_requests.append(formatted_req)
911
-
912
- return jsonify({"requests": formatted_requests})
913
-
914
- except Exception as e:
915
- return jsonify({"error": str(e)}), 500
916
-
917
-
918
- # =============================================================================
919
- # 🔧 BACKGROUND MONITORING THREAD
920
- # =============================================================================
921
def start_monitoring_thread():
    """Start a daemon thread that periodically prints pending-request status.

    The thread wakes every 5 seconds but only reports every 30 seconds;
    it beeps and highlights when the pending count grew since the last
    report, and shows details for up to the last three queued requests.
    """

    def monitor():
        last_check = time.time()
        # Pending count at the previous 30-second report; used to detect
        # newly arrived requests.
        last_count = 0

        while True:
            try:
                current_time = time.time()
                if current_time - last_check >= 30:  # Check every 30 seconds
                    pending = expert_system.get_pending_requests()
                    current_count = len(pending)

                    if pending:
                        print(f"\n{'='*80}")
                        print(
                            f"⏰ PENDING REQUESTS CHECK ({datetime.now().strftime('%H:%M:%S')})"
                        )
                        print(
                            f"📊 {current_count} request(s) waiting for expert response"
                        )

                        # Show alert if new requests arrived since last report.
                        if current_count > last_count:
                            new_requests = current_count - last_count
                            print(f"🔔 {new_requests} NEW REQUEST(S) ARRIVED!")
                            print("🌐 Web Interface: http://localhost:8080")
                            # Audio alert for new requests — best effort.
                            try:
                                print("\a")  # System beep
                            except Exception:
                                pass

                        # Show details for the most recent (last 3) requests.
                        for i, req in enumerate(
                            pending[-3:], 1
                        ):  # Show last 3 requests
                            age = (
                                datetime.now()
                                - datetime.fromisoformat(req["timestamp"])
                            ).total_seconds()
                            priority = req.get("context", {}).get("priority", "normal")
                            specialty = req.get("context", {}).get(
                                "specialty", "general"
                            )

                            print(f" {i}. [{req['id']}] 🎯{specialty} ⚡{priority}")
                            print(
                                f" 📝 {req['question'][:80]}{'...' if len(req['question']) > 80 else ''}"
                            )
                            print(f" ⏰ Waiting {age:.0f}s")

                        print(f"{'='*80}")

                    # Updated even when nothing is pending, so a later arrival
                    # is correctly flagged as new.
                    last_check = current_time
                    last_count = current_count

                time.sleep(5)
            except Exception as e:
                # Keep the monitor alive on transient errors; back off a bit.
                print(f"Monitoring error: {e}")
                time.sleep(10)

    # Daemon thread: does not block interpreter shutdown.
    monitor_thread = threading.Thread(target=monitor, daemon=True)
    monitor_thread.start()
986
-
987
-
988
- # =============================================================================
989
- # 🔧 EXPERT TOOLS - FOR HUMAN INTERACTION
990
- # =============================================================================
991
-
992
-
993
@server.tool()
async def consult_human_expert(
    question: str,
    specialty: str = "general",
    priority: str = "normal",
    context: str = "",
    timeout_minutes: int = 5,
):
    """
    Consult a human expert for complex medical questions requiring human judgment.

    This tool submits questions to human medical experts who can provide:
    - Clinical decision support
    - Drug interaction analysis validation
    - Treatment recommendation review
    - Complex case interpretation
    - Quality assurance for AI recommendations

    Args:
        question: The medical question or case requiring expert consultation
        specialty: Area of expertise needed (e.g., "cardiology", "oncology", "pharmacology")
        priority: Request priority ("low", "normal", "high", "urgent")
        context: Additional context or background information
        timeout_minutes: How long to wait for expert response (default: 5 minutes)

    Returns:
        Expert response with clinical recommendations and professional judgment
    """

    # Short random id is enough here; full UUID would clutter the console.
    request_id = str(uuid.uuid4())[:8]
    timeout_seconds = timeout_minutes * 60

    print(f"\n🔔 EXPERT CONSULTATION REQUEST [{request_id}]")
    print(f"🎯 Specialty: {specialty}")
    print(f"⚡ Priority: {priority}")
    print(f"⏱️ Timeout: {timeout_minutes} minutes")

    try:
        # Submit request to expert system
        context_data = {
            "specialty": specialty,
            "priority": priority,
            "context": context,
        }

        expert_system.submit_request(request_id, question, context_data)

        # Wait for expert response
        print(f"⏳ Waiting for expert response (max {timeout_minutes} minutes)...")

        # Use asyncio-compatible waiting: the blocking poll runs in the shared
        # thread pool so the event loop stays free for other requests.
        loop = asyncio.get_running_loop()

        def wait_for_response():
            return expert_system.get_response(request_id, timeout_seconds)

        response_data = await loop.run_in_executor(executor, wait_for_response)

        if response_data:
            # Expert answered within the window.
            return {
                "status": "completed",
                "expert_response": response_data["response"],
                "expert_name": response_data["expert"],
                "response_time": response_data["timestamp"],
                "request_id": request_id,
                "specialty": specialty,
                "priority": priority,
            }
        else:
            # Timed out; the request stays queued and may still be answered.
            return {
                "status": "timeout",
                "message": f"No expert response received within {timeout_minutes} minutes",
                "request_id": request_id,
                "note": "Request may still be processed. Check with get_expert_response tool later.",
            }

    except Exception as e:
        # Errors are reported in-band so the MCP caller gets a structured result.
        print(f"❌ Expert consultation failed: {str(e)}")
        return {
            "status": "error",
            "error": f"Expert consultation failed: {str(e)}",
            "request_id": request_id,
        }
1075
- }
1076
-
1077
-
1078
@server.tool()
async def get_expert_response(request_id: str):
    """
    Check if an expert response is available for a previous request.

    Args:
        request_id: The ID of the expert consultation request

    Returns:
        Expert response if available, or status update
    """
    try:
        with expert_system.lock:
            # Completed consultations live in the responses map.
            record = expert_system.responses.get(request_id)
            if record is not None:
                return {
                    "status": "completed",
                    "expert_response": record["response"],
                    "expert_name": record["expert"],
                    "response_time": record["timestamp"],
                    "request_id": request_id,
                }

            # Known but unanswered: report its tracked state.
            state = expert_system.request_status.get(request_id)
            if state is not None:
                return {
                    "status": state,
                    "message": f"Request {request_id} is {state}",
                    "request_id": request_id,
                }

            # Id was never seen by the expert system.
            return {
                "status": "not_found",
                "message": f"Request {request_id} not found",
                "request_id": request_id,
            }

    except Exception as e:
        return {
            "status": "error",
            "error": f"Failed to check expert response: {str(e)}",
            "request_id": request_id,
        }
1121
-
1122
-
1123
@server.tool()
async def list_pending_expert_requests():
    """
    List all pending expert consultation requests (for expert use).

    Returns:
        List of all pending requests waiting for expert response
    """
    try:
        pending = expert_system.get_pending_requests()

        if not pending:
            return {
                "status": "no_requests",
                "message": "No pending expert requests",
                "count": 0,
            }

        summaries = []
        for item in pending:
            meta = item.get("context", {})
            # How long this request has been sitting in the queue.
            waited = (
                datetime.now() - datetime.fromisoformat(item["timestamp"])
            ).total_seconds()
            summaries.append(
                {
                    "request_id": item["id"],
                    "question": item["question"],
                    "specialty": meta.get("specialty", "general"),
                    "priority": meta.get("priority", "normal"),
                    "age_minutes": round(waited / 60, 1),
                    "timestamp": item["timestamp"],
                }
            )

        return {
            "status": "success",
            "pending_requests": summaries,
            "count": len(summaries),
            "expert_info": expert_system.expert_info,
        }

    except Exception as e:
        return {
            "status": "error",
            "error": f"Failed to list pending requests: {str(e)}",
        }
1170
-
1171
-
1172
@server.tool()
async def submit_expert_response(request_id: str, response: str):
    """
    Submit expert response to a consultation request (for expert use).

    Args:
        request_id: The ID of the request to respond to
        response: The expert's response and recommendations

    Returns:
        Confirmation of response submission
    """
    try:
        # submit_response returns False when the id is unknown or the
        # request was already answered.
        if not expert_system.submit_response(request_id, response):
            return {
                "status": "failed",
                "message": f"Request {request_id} not found or already completed",
                "request_id": request_id,
            }

        return {
            "status": "success",
            "message": f"Expert response submitted for request {request_id}",
            "request_id": request_id,
            "expert": expert_system.expert_info["name"],
            "timestamp": datetime.now().isoformat(),
        }

    except Exception as e:
        return {
            "status": "error",
            "error": f"Failed to submit expert response: {str(e)}",
            "request_id": request_id,
        }
1209
-
1210
-
1211
@server.tool()
async def get_expert_status():
    """
    Get current expert system status and statistics.

    Returns:
        Current status of the expert system including pending requests and expert info
    """
    try:
        pending = expert_system.get_pending_requests()

        # Snapshot the counters under the lock so they are consistent.
        with expert_system.lock:
            completed = len(expert_system.responses)
            tracked = len(expert_system.request_status)

        # max(..., 1) avoids division by zero before any request arrives.
        rate = round(completed / max(tracked, 1) * 100, 1)

        return {
            "status": "active",
            "expert_info": expert_system.expert_info,
            "statistics": {
                "pending_requests": len(pending),
                "total_requests": tracked,
                "completed_responses": completed,
                "response_rate": rate,
            },
            "system_time": datetime.now().isoformat(),
        }

    except Exception as e:
        return {"status": "error", "error": f"Failed to get expert status: {str(e)}"}
1243
-
1244
-
1245
- # =============================================================================
1246
- # 🧑‍⚕️ EXPERT INTERFACE CLASS
1247
- # =============================================================================
1248
-
1249
-
1250
class ExpertInterface:
    """Interactive terminal client for human experts.

    Talks to the expert MCP server over plain HTTP and lets an expert list
    pending consultation requests, submit responses, view system status, and
    watch for new requests in a polling loop.
    """

    def __init__(self, server_url="http://localhost:7002"):
        # Base URL of the running expert MCP server.
        self.server_url = server_url
        # Display name for this session (not sent with requests here).
        self.expert_name = "Medical Expert"

    def call_tool(self, tool_name, **kwargs):
        """Call MCP tool via HTTP.

        Returns the decoded JSON reply, or an ``{"error": ...}`` dict when the
        request fails (connection refused, timeout, bad JSON, ...). Callers
        check for the "error" key instead of catching exceptions.
        """
        try:
            response = requests.post(
                f"{self.server_url}/tools/{tool_name}", json=kwargs, timeout=10
            )
            return response.json()
        except Exception as e:
            return {"error": f"Failed to call tool: {str(e)}"}

    def list_pending_requests(self):
        """List all pending expert requests.

        Prints a numbered summary of each pending request and returns the
        list of request dicts (empty list on no requests or on error).
        """
        print("\n🔍 Checking for pending requests...")
        result = self.call_tool("list_pending_expert_requests")

        if result.get("status") == "no_requests":
            print("✅ No pending requests")
            return []
        elif result.get("status") == "success":
            requests_list = result.get("pending_requests", [])
            print(f"\n📋 Found {len(requests_list)} pending request(s):")
            print("=" * 80)

            # 1-based numbering so the menu selection matches what is shown.
            for i, req in enumerate(requests_list, 1):
                print(f"\n{i}. REQUEST ID: {req['request_id']}")
                print(f"   🎯 Specialty: {req['specialty']}")
                print(f"   ⚡ Priority: {req['priority']}")
                print(f"   ⏱️ Age: {req['age_minutes']} minutes")
                print(f"   📝 Question: {req['question']}")
                print("-" * 60)

            return requests_list
        else:
            print(f"❌ Error: {result.get('error', 'Unknown error')}")
            return []

    def submit_response(self, request_id, response):
        """Submit expert response for the given request id and report the outcome."""
        print(f"\n📤 Submitting response for request {request_id}...")

        result = self.call_tool(
            "submit_expert_response", request_id=request_id, response=response
        )

        if result.get("status") == "success":
            print("✅ Response submitted successfully!")
            print(f"   📝 Request ID: {request_id}")
            print(f"   👨‍⚕️ Expert: {result.get('expert')}")
            print(f"   ⏰ Time: {result.get('timestamp')}")
        else:
            print(
                f"❌ Failed to submit response: {result.get('message', 'Unknown error')}"
            )

    def get_status(self):
        """Fetch and print expert system status and statistics."""
        result = self.call_tool("get_expert_status")

        if result.get("status") == "active":
            stats = result.get("statistics", {})
            expert_info = result.get("expert_info", {})

            print("\n📊 EXPERT SYSTEM STATUS")
            print("=" * 50)
            print(f"👨‍⚕️ Expert: {expert_info.get('name', 'Unknown')}")
            print(f"🎯 Specialties: {', '.join(expert_info.get('specialties', []))}")
            print(
                f"🟢 Status: {'Available' if expert_info.get('availability') else 'Unavailable'}"
            )
            print("\n📈 STATISTICS")
            print(f"⏳ Pending requests: {stats.get('pending_requests', 0)}")
            print(f"📊 Total requests: {stats.get('total_requests', 0)}")
            print(f"✅ Completed responses: {stats.get('completed_responses', 0)}")
            print(f"📈 Response rate: {stats.get('response_rate', 0)}%")
            print(f"⏰ System time: {result.get('system_time')}")
        else:
            print(f"❌ System error: {result.get('error', 'Unknown error')}")

    def interactive_mode(self):
        """Run interactive expert interface.

        Simple REPL: dispatches on single-character commands until the
        expert quits with 'q' or Ctrl+C.
        """
        print("🧑‍⚕️ HUMAN EXPERT INTERFACE")
        print("=" * 50)
        print("Commands:")
        print("  1 - List pending requests")
        print("  2 - Submit response")
        print("  3 - Get system status")
        print("  4 - Auto-monitor mode")
        print("  q - Quit")
        print("=" * 50)

        while True:
            try:
                command = input("\n💬 Enter command (1-4, q): ").strip().lower()

                if command == "q":
                    print("👋 Goodbye!")
                    break
                elif command == "1":
                    self.list_pending_requests()
                elif command == "2":
                    self.handle_response_submission()
                elif command == "3":
                    self.get_status()
                elif command == "4":
                    self.auto_monitor_mode()
                else:
                    print("❌ Invalid command. Please enter 1-4 or q.")

            except KeyboardInterrupt:
                # Ctrl+C at the prompt exits cleanly instead of crashing.
                print("\n👋 Goodbye!")
                break
            except Exception as e:
                print(f"❌ Error: {str(e)}")

    def handle_response_submission(self):
        """Guide the expert through picking a pending request and answering it."""
        # First, list pending requests
        requests_list = self.list_pending_requests()

        if not requests_list:
            return

        try:
            # Get request selection; loop until a valid index or 'c' to cancel.
            while True:
                selection = input(
                    f"\n🎯 Select request number (1-{len(requests_list)}) or 'c' to cancel: "
                ).strip()

                if selection.lower() == "c":
                    return

                try:
                    index = int(selection) - 1
                    if 0 <= index < len(requests_list):
                        selected_request = requests_list[index]
                        break
                    else:
                        print(
                            f"❌ Please enter a number between 1 and {len(requests_list)}"
                        )
                except ValueError:
                    print("❌ Please enter a valid number")

            # Display selected request details
            print(f"\n📝 RESPONDING TO REQUEST: {selected_request['request_id']}")
            print(f"🎯 Specialty: {selected_request['specialty']}")
            print(f"⚡ Priority: {selected_request['priority']}")
            print(f"❓ Question: {selected_request['question']}")

            # Get expert response; two consecutive blank lines end input.
            print("\n✍️ Enter your expert response (press Enter twice to finish):")
            response_lines = []
            empty_lines = 0

            while empty_lines < 2:
                line = input()
                if line.strip() == "":
                    empty_lines += 1
                else:
                    empty_lines = 0
                response_lines.append(line)

            # Remove trailing empty lines (the terminator blanks were captured too)
            while response_lines and response_lines[-1].strip() == "":
                response_lines.pop()

            response = "\n".join(response_lines)

            if response.strip():
                # Confirm submission before sending anything to the server.
                print("\n📋 RESPONSE PREVIEW:")
                print("-" * 40)
                print(response)
                print("-" * 40)

                confirm = input("\n🤔 Submit this response? (y/n): ").strip().lower()

                if confirm == "y":
                    self.submit_response(selected_request["request_id"], response)
                else:
                    print("❌ Response cancelled")
            else:
                print("❌ Empty response cancelled")

        except Exception as e:
            print(f"❌ Error handling response: {str(e)}")

    def auto_monitor_mode(self):
        """Poll for new requests every 10 seconds until Ctrl+C.

        Prints a notification (and the full list) when the pending count
        rises, a completion note when it falls, and a progress dot when the
        server reply is not a success payload.
        """
        print("\n🔄 AUTO-MONITOR MODE")
        print("Checking for new requests every 10 seconds...")
        print("Press Ctrl+C to return to main menu")

        last_count = 0

        try:
            while True:
                result = self.call_tool("list_pending_expert_requests")

                if result.get("status") == "success":
                    current_count = result.get("count", 0)

                    if current_count != last_count:
                        if current_count > last_count:
                            print(
                                f"\n🔔 NEW REQUEST(S) DETECTED! Total pending: {current_count}"
                            )
                            self.list_pending_requests()
                        else:
                            print(
                                f"\n✅ Request(s) completed. Remaining: {current_count}"
                            )

                        last_count = current_count
                else:
                    # Includes "no_requests" and transport errors — just tick.
                    print(".", end="", flush=True)

                time.sleep(10)

        except KeyboardInterrupt:
            print("\n🔄 Returning to main menu...")
1477
-
1478
-
1479
def run_expert_interface():
    """Run the expert interface"""
    print("🧑‍⚕️ Human Expert MCP Interface")
    print("Connecting to server...")

    try:
        ui = ExpertInterface()
        # A status probe doubles as a connectivity check before entering
        # the interactive loop.
        probe = ui.call_tool("get_expert_status")

        if "error" in probe:
            print("❌ Cannot connect to MCP server!")
            print("Please make sure the MCP server is running.")
            print("Start with: python human_expert_mcp_server.py")
            return

        print("✅ Connected to expert MCP server")
        ui.interactive_mode()

    except Exception as e:
        print(f"❌ Error: {str(e)}")
        print("Please make sure the MCP server is running on http://localhost:7002")
1501
-
1502
-
1503
- # =============================================================================
1504
- # 🌐 WEB SERVER FUNCTIONS
1505
- # =============================================================================
1506
-
1507
-
1508
def start_web_server():
    """Start the Flask web server for expert interface"""
    if not FLASK_AVAILABLE:
        print("❌ Flask not available. Cannot start web interface.")
        print("   Install with: pip install flask")
        return

    try:
        print("🌐 Starting web interface on http://localhost:8080")
        print("📱 Web interface will be accessible in your browser...")

        # Silence werkzeug's per-request access log; only real errors surface.
        import logging

        logging.getLogger("werkzeug").setLevel(logging.ERROR)

        # NOTE(review): binds on all interfaces (0.0.0.0) — confirm this is
        # intended for anything beyond a trusted local network.
        web_app.run(host="0.0.0.0", port=8080, debug=False, use_reloader=False)
    except Exception as e:
        print(f"❌ Web server error: {str(e)}")
        if "Address already in use" in str(e):
            print("   Port 8080 is already in use. Try:")
            print("   - Stop other services using port 8080")
            print("   - Or run: lsof -ti:8080 | xargs kill -9")
1532
-
1533
-
1534
def open_web_interface():
    """Open web interface in default browser"""
    if not FLASK_AVAILABLE:
        print("⚠️ Web interface not available (Flask not installed)")
        return

    def _launch():
        # Runs on a timer thread; a failure just means the user opens the
        # URL by hand.
        try:
            webbrowser.open("http://localhost:8080")
        except Exception as e:
            print(f"Could not open browser automatically: {str(e)}")
            print("Please manually open: http://localhost:8080")

    # Delay browser opening to allow server to start
    Timer(2.0, _launch).start()
1549
-
1550
-
1551
- # =============================================================================
1552
- # ⚙️ SERVER STARTUP
1553
- # =============================================================================
1554
- if __name__ == "__main__":
1555
- parser = argparse.ArgumentParser(description="Human Expert MCP Server")
1556
- parser.add_argument(
1557
- "--interface-only",
1558
- action="store_true",
1559
- help="Start only the expert terminal interface (server must be running)",
1560
- )
1561
- parser.add_argument(
1562
- "--web-only",
1563
- action="store_true",
1564
- help="Start only the web interface (server must be running)",
1565
- )
1566
- parser.add_argument(
1567
- "--no-browser",
1568
- action="store_true",
1569
- help="Do not automatically open browser for web interface",
1570
- )
1571
- args = parser.parse_args()
1572
-
1573
- if args.interface_only:
1574
- # Run only the terminal expert interface
1575
- run_expert_interface()
1576
- elif args.web_only:
1577
- # Run only the web interface
1578
- if not FLASK_AVAILABLE:
1579
- print("❌ Cannot start web interface: Flask not installed")
1580
- print(" Install with: pip install flask")
1581
- sys.exit(1)
1582
-
1583
- print("🌐 Starting Human Expert Web Interface...")
1584
- if not args.no_browser:
1585
- open_web_interface()
1586
- start_web_server()
1587
- else:
1588
- # Start only the MCP server (default)
1589
- print("🧑‍⚕️ Starting Human Expert MCP Server...")
1590
- print("📋 Available tools:")
1591
- print(" - consult_human_expert: Submit questions to human experts")
1592
- print(" - get_expert_response: Check for expert responses")
1593
- print(" - list_pending_expert_requests: View pending requests (for experts)")
1594
- print(" - submit_expert_response: Submit expert responses (for experts)")
1595
- print(" - get_expert_status: Get system status")
1596
- print("\n🔄 Starting background monitoring...")
1597
-
1598
- # Start monitoring thread
1599
- start_monitoring_thread()
1600
-
1601
- print("\n🎯 Expert Interface Options:")
1602
- print(" 🌐 Web Interface: python start_web_interface.py")
1603
- print(" 💻 Terminal Interface: python start_terminal_interface.py")
1604
-
1605
- print("\n🚀 MCP Server running on http://0.0.0.0:7002")
1606
- print(
1607
- "💡 Tip: Use ToolUniverse in another terminal to send expert consultation requests"
1608
- )
1609
-
1610
- # Start server
1611
- server.run(transport="streamable-http", host="0.0.0.0", port=7002)