computer-use-ootb-internal 0.0.173__py3-none-any.whl → 0.0.175__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- computer_use_ootb_internal/app_teachmode.py +641 -636
- computer_use_ootb_internal/computer_use_demo/animation/test_animation.py +39 -39
- computer_use_ootb_internal/computer_use_demo/executor/teachmode_executor.py +6 -0
- computer_use_ootb_internal/computer_use_demo/tools/computer.py +8 -3
- computer_use_ootb_internal/guard_service.py +950 -950
- computer_use_ootb_internal/preparation/powerpoint_prepare.py +73 -90
- computer_use_ootb_internal/preparation/star_rail_prepare.py +99 -99
- computer_use_ootb_internal/run_teachmode_ootb_args.py +235 -227
- computer_use_ootb_internal/service_manager.py +194 -194
- computer_use_ootb_internal/signal_connection.py +47 -47
- computer_use_ootb_internal/test_click_0425.py +57 -57
- {computer_use_ootb_internal-0.0.173.dist-info → computer_use_ootb_internal-0.0.175.dist-info}/METADATA +8 -9
- {computer_use_ootb_internal-0.0.173.dist-info → computer_use_ootb_internal-0.0.175.dist-info}/RECORD +15 -15
- computer_use_ootb_internal-0.0.175.dist-info/entry_points.txt +2 -0
- computer_use_ootb_internal-0.0.173.dist-info/entry_points.txt +0 -4
- {computer_use_ootb_internal-0.0.173.dist-info → computer_use_ootb_internal-0.0.175.dist-info}/WHEEL +0 -0
@@ -1,637 +1,642 @@
|
|
1
|
-
import argparse
|
2
|
-
import time
|
3
|
-
import json
|
4
|
-
from datetime import datetime
|
5
|
-
import threading
|
6
|
-
import requests
|
7
|
-
import platform # Add platform import
|
8
|
-
import pyautogui # Add pyautogui import
|
9
|
-
import webbrowser # Add webbrowser import
|
10
|
-
import os # Import os for path joining
|
11
|
-
import logging # Import logging
|
12
|
-
import importlib # For dynamic imports
|
13
|
-
import pkgutil # To find modules
|
14
|
-
import sys # For logging setup
|
15
|
-
import traceback # For logging setup
|
16
|
-
from logging.handlers import RotatingFileHandler # For logging setup
|
17
|
-
from fastapi import FastAPI, Request
|
18
|
-
from fastapi.responses import JSONResponse
|
19
|
-
from fastapi.middleware.cors import CORSMiddleware
|
20
|
-
from computer_use_ootb_internal.computer_use_demo.tools.computer import get_screen_details
|
21
|
-
from computer_use_ootb_internal.run_teachmode_ootb_args import simple_teachmode_sampling_loop
|
22
|
-
from computer_use_ootb_internal.computer_use_demo.executor.teachmode_executor import TeachmodeExecutor
|
23
|
-
import uvicorn # Assuming uvicorn is used to run FastAPI
|
24
|
-
import concurrent.futures
|
25
|
-
import asyncio
|
26
|
-
|
27
|
-
# --- App Logging Setup ---
|
28
|
-
try:
|
29
|
-
# Log to user's AppData directory for better accessibility
|
30
|
-
log_dir_base = os.environ.get('APPDATA', os.path.expanduser('~'))
|
31
|
-
log_dir = os.path.join(log_dir_base, 'OOTBAppLogs')
|
32
|
-
os.makedirs(log_dir, exist_ok=True)
|
33
|
-
log_file = os.path.join(log_dir, 'ootb_app.log')
|
34
|
-
|
35
|
-
log_format = '%(asctime)s - %(levelname)s - %(process)d - %(threadName)s - %(message)s'
|
36
|
-
log_level = logging.INFO # Or logging.DEBUG for more detail
|
37
|
-
|
38
|
-
# Use rotating file handler
|
39
|
-
handler = RotatingFileHandler(log_file, maxBytes=5*1024*1024, backupCount=2, encoding='utf-8')
|
40
|
-
handler.setFormatter(logging.Formatter(log_format))
|
41
|
-
|
42
|
-
# Configure root logger
|
43
|
-
logging.basicConfig(level=log_level, handlers=[handler])
|
44
|
-
|
45
|
-
# Add stream handler to see logs if running interactively (optional)
|
46
|
-
# logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
|
47
|
-
|
48
|
-
logging.info("="*20 + " OOTB App Starting " + "="*20)
|
49
|
-
logging.info(f"Running with args: {sys.argv}")
|
50
|
-
logging.info(f"Python Executable: {sys.executable}")
|
51
|
-
logging.info(f"Working Directory: {os.getcwd()}")
|
52
|
-
logging.info(f"User: {os.getenv('USERNAME')}")
|
53
|
-
|
54
|
-
except Exception as log_setup_e:
|
55
|
-
print(f"FATAL: Failed to set up logging: {log_setup_e}")
|
56
|
-
# Fallback logging might be needed here if file logging fails
|
57
|
-
|
58
|
-
# --- End App Logging Setup ---
|
59
|
-
|
60
|
-
app = FastAPI()
|
61
|
-
|
62
|
-
# Add CORS middleware to allow requests from the frontend
|
63
|
-
app.add_middleware(
|
64
|
-
CORSMiddleware,
|
65
|
-
allow_origins=["*"],
|
66
|
-
allow_credentials=True,
|
67
|
-
allow_methods=["*"],
|
68
|
-
allow_headers=["*"],
|
69
|
-
)
|
70
|
-
|
71
|
-
# Rate limiter for API endpoints
|
72
|
-
class RateLimiter:
|
73
|
-
def __init__(self, interval_seconds=2):
|
74
|
-
self.interval = interval_seconds
|
75
|
-
self.last_request_time = {}
|
76
|
-
self.lock = threading.Lock()
|
77
|
-
|
78
|
-
def allow_request(self, endpoint):
|
79
|
-
with self.lock:
|
80
|
-
current_time = time.time()
|
81
|
-
# Priority endpoints always allowed
|
82
|
-
if endpoint in ["/update_params", "/update_message"]:
|
83
|
-
return True
|
84
|
-
|
85
|
-
# For other endpoints, apply rate limiting
|
86
|
-
if endpoint not in self.last_request_time:
|
87
|
-
self.last_request_time[endpoint] = current_time
|
88
|
-
return True
|
89
|
-
|
90
|
-
elapsed = current_time - self.last_request_time[endpoint]
|
91
|
-
if elapsed < self.interval:
|
92
|
-
return False
|
93
|
-
|
94
|
-
self.last_request_time[endpoint] = current_time
|
95
|
-
return True
|
96
|
-
|
97
|
-
|
98
|
-
def log_ootb_request(server_url, ootb_request_type, data):
|
99
|
-
logging.info(f"OOTB Request: Type={ootb_request_type}, Data={data}")
|
100
|
-
# Keep the requests post for now if it serves a specific purpose
|
101
|
-
logging_data = {
|
102
|
-
"type": ootb_request_type,
|
103
|
-
"data": data,
|
104
|
-
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
105
|
-
}
|
106
|
-
if not server_url.endswith("/update_ootb_logging"):
|
107
|
-
server_logging_url = server_url + "/update_ootb_logging"
|
108
|
-
else:
|
109
|
-
server_logging_url = server_url
|
110
|
-
try:
|
111
|
-
requests.post(server_logging_url, json=logging_data, timeout=5)
|
112
|
-
except Exception as req_log_e:
|
113
|
-
logging.warning(f"Could not log ootb request to server {server_logging_url}: {req_log_e}")
|
114
|
-
|
115
|
-
|
116
|
-
class SharedState:
|
117
|
-
def __init__(self, args):
|
118
|
-
self.args = args
|
119
|
-
self.task_updated = False
|
120
|
-
self.chatbot_messages = []
|
121
|
-
# Store all state-related data here
|
122
|
-
self.model = args.model
|
123
|
-
self.task = getattr(args, 'task', "")
|
124
|
-
self.selected_screen = args.selected_screen
|
125
|
-
self.user_id = args.user_id
|
126
|
-
self.trace_id = args.trace_id
|
127
|
-
self.api_keys = args.api_keys
|
128
|
-
self.server_url = args.server_url
|
129
|
-
self.message_queue = []
|
130
|
-
self.is_processing = False
|
131
|
-
self.should_stop = False
|
132
|
-
self.is_paused = False
|
133
|
-
|
134
|
-
|
135
|
-
|
136
|
-
|
137
|
-
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
#
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
#
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
if
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
shared_state.
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
shared_state.
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
shared_state.
|
233
|
-
shared_state.
|
234
|
-
shared_state.
|
235
|
-
shared_state.
|
236
|
-
shared_state.
|
237
|
-
shared_state.
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
|
250
|
-
|
251
|
-
|
252
|
-
|
253
|
-
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
|
262
|
-
|
263
|
-
|
264
|
-
|
265
|
-
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
|
270
|
-
|
271
|
-
shared_state.
|
272
|
-
|
273
|
-
|
274
|
-
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
|
284
|
-
|
285
|
-
|
286
|
-
|
287
|
-
|
288
|
-
|
289
|
-
|
290
|
-
|
291
|
-
|
292
|
-
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
shared_state.
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
|
308
|
-
|
309
|
-
|
310
|
-
|
311
|
-
|
312
|
-
|
313
|
-
|
314
|
-
|
315
|
-
|
316
|
-
|
317
|
-
|
318
|
-
|
319
|
-
|
320
|
-
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
|
332
|
-
|
333
|
-
|
334
|
-
|
335
|
-
|
336
|
-
|
337
|
-
|
338
|
-
#
|
339
|
-
shared_state.
|
340
|
-
shared_state.
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
|
345
|
-
|
346
|
-
|
347
|
-
|
348
|
-
|
349
|
-
|
350
|
-
|
351
|
-
|
352
|
-
|
353
|
-
|
354
|
-
|
355
|
-
|
356
|
-
|
357
|
-
|
358
|
-
|
359
|
-
|
360
|
-
|
361
|
-
)
|
362
|
-
|
363
|
-
|
364
|
-
|
365
|
-
|
366
|
-
|
367
|
-
|
368
|
-
|
369
|
-
|
370
|
-
|
371
|
-
|
372
|
-
|
373
|
-
|
374
|
-
|
375
|
-
|
376
|
-
|
377
|
-
|
378
|
-
|
379
|
-
|
380
|
-
|
381
|
-
|
382
|
-
|
383
|
-
|
384
|
-
|
385
|
-
|
386
|
-
|
387
|
-
|
388
|
-
|
389
|
-
|
390
|
-
|
391
|
-
|
392
|
-
|
393
|
-
|
394
|
-
|
395
|
-
|
396
|
-
|
397
|
-
|
398
|
-
|
399
|
-
|
400
|
-
|
401
|
-
|
402
|
-
|
403
|
-
|
404
|
-
|
405
|
-
|
406
|
-
|
407
|
-
|
408
|
-
|
409
|
-
|
410
|
-
|
411
|
-
|
412
|
-
|
413
|
-
|
414
|
-
|
415
|
-
|
416
|
-
|
417
|
-
|
418
|
-
|
419
|
-
|
420
|
-
|
421
|
-
|
422
|
-
|
423
|
-
|
424
|
-
|
425
|
-
|
426
|
-
|
427
|
-
|
428
|
-
|
429
|
-
|
430
|
-
|
431
|
-
|
432
|
-
|
433
|
-
|
434
|
-
|
435
|
-
|
436
|
-
|
437
|
-
|
438
|
-
|
439
|
-
|
440
|
-
|
441
|
-
|
442
|
-
|
443
|
-
|
444
|
-
|
445
|
-
|
446
|
-
|
447
|
-
|
448
|
-
|
449
|
-
|
450
|
-
|
451
|
-
|
452
|
-
|
453
|
-
|
454
|
-
|
455
|
-
|
456
|
-
|
457
|
-
|
458
|
-
|
459
|
-
|
460
|
-
|
461
|
-
|
462
|
-
results
|
463
|
-
|
464
|
-
|
465
|
-
|
466
|
-
|
467
|
-
|
468
|
-
|
469
|
-
|
470
|
-
|
471
|
-
|
472
|
-
|
473
|
-
|
474
|
-
|
475
|
-
|
476
|
-
|
477
|
-
|
478
|
-
|
479
|
-
|
480
|
-
|
481
|
-
)
|
482
|
-
|
483
|
-
|
484
|
-
|
485
|
-
|
486
|
-
|
487
|
-
|
488
|
-
shared_state
|
489
|
-
|
490
|
-
|
491
|
-
|
492
|
-
|
493
|
-
|
494
|
-
|
495
|
-
|
496
|
-
|
497
|
-
|
498
|
-
|
499
|
-
|
500
|
-
|
501
|
-
|
502
|
-
|
503
|
-
|
504
|
-
|
505
|
-
|
506
|
-
|
507
|
-
|
508
|
-
|
509
|
-
|
510
|
-
|
511
|
-
|
512
|
-
|
513
|
-
|
514
|
-
# Check
|
515
|
-
|
516
|
-
print(
|
517
|
-
|
518
|
-
|
519
|
-
|
520
|
-
|
521
|
-
|
522
|
-
|
523
|
-
|
524
|
-
|
525
|
-
|
526
|
-
|
527
|
-
|
528
|
-
|
529
|
-
shared_state.
|
530
|
-
|
531
|
-
|
532
|
-
|
533
|
-
|
534
|
-
|
535
|
-
|
536
|
-
|
537
|
-
|
538
|
-
|
539
|
-
|
540
|
-
|
541
|
-
|
542
|
-
|
543
|
-
|
544
|
-
|
545
|
-
|
546
|
-
|
547
|
-
|
548
|
-
|
549
|
-
|
550
|
-
|
551
|
-
|
552
|
-
|
553
|
-
|
554
|
-
|
555
|
-
|
556
|
-
|
557
|
-
|
558
|
-
|
559
|
-
|
560
|
-
|
561
|
-
|
562
|
-
shared_state.
|
563
|
-
shared_state.
|
564
|
-
|
565
|
-
|
566
|
-
|
567
|
-
|
568
|
-
|
569
|
-
|
570
|
-
|
571
|
-
|
572
|
-
|
573
|
-
|
574
|
-
|
575
|
-
|
576
|
-
|
577
|
-
parser
|
578
|
-
|
579
|
-
parser.add_argument("--
|
580
|
-
parser.add_argument("--
|
581
|
-
|
582
|
-
|
583
|
-
|
584
|
-
|
585
|
-
|
586
|
-
|
587
|
-
|
588
|
-
|
589
|
-
|
590
|
-
if not hasattr(args, '
|
591
|
-
if not hasattr(args, '
|
592
|
-
|
593
|
-
|
594
|
-
|
595
|
-
|
596
|
-
|
597
|
-
|
598
|
-
|
599
|
-
|
600
|
-
|
601
|
-
|
602
|
-
|
603
|
-
|
604
|
-
|
605
|
-
|
606
|
-
|
607
|
-
|
608
|
-
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
|
613
|
-
|
614
|
-
|
615
|
-
|
616
|
-
|
617
|
-
|
618
|
-
|
619
|
-
|
620
|
-
|
621
|
-
|
622
|
-
|
623
|
-
|
624
|
-
|
625
|
-
|
626
|
-
|
627
|
-
|
628
|
-
|
629
|
-
logging.
|
630
|
-
|
631
|
-
|
632
|
-
|
633
|
-
|
634
|
-
|
635
|
-
|
636
|
-
|
1
|
+
import argparse
|
2
|
+
import time
|
3
|
+
import json
|
4
|
+
from datetime import datetime
|
5
|
+
import threading
|
6
|
+
import requests
|
7
|
+
import platform # Add platform import
|
8
|
+
import pyautogui # Add pyautogui import
|
9
|
+
import webbrowser # Add webbrowser import
|
10
|
+
import os # Import os for path joining
|
11
|
+
import logging # Import logging
|
12
|
+
import importlib # For dynamic imports
|
13
|
+
import pkgutil # To find modules
|
14
|
+
import sys # For logging setup
|
15
|
+
import traceback # For logging setup
|
16
|
+
from logging.handlers import RotatingFileHandler # For logging setup
|
17
|
+
from fastapi import FastAPI, Request
|
18
|
+
from fastapi.responses import JSONResponse
|
19
|
+
from fastapi.middleware.cors import CORSMiddleware
|
20
|
+
from computer_use_ootb_internal.computer_use_demo.tools.computer import get_screen_details
|
21
|
+
from computer_use_ootb_internal.run_teachmode_ootb_args import simple_teachmode_sampling_loop
|
22
|
+
from computer_use_ootb_internal.computer_use_demo.executor.teachmode_executor import TeachmodeExecutor
|
23
|
+
import uvicorn # Assuming uvicorn is used to run FastAPI
|
24
|
+
import concurrent.futures
|
25
|
+
import asyncio
|
26
|
+
|
27
|
+
# --- App Logging Setup ---
|
28
|
+
try:
|
29
|
+
# Log to user's AppData directory for better accessibility
|
30
|
+
log_dir_base = os.environ.get('APPDATA', os.path.expanduser('~'))
|
31
|
+
log_dir = os.path.join(log_dir_base, 'OOTBAppLogs')
|
32
|
+
os.makedirs(log_dir, exist_ok=True)
|
33
|
+
log_file = os.path.join(log_dir, 'ootb_app.log')
|
34
|
+
|
35
|
+
log_format = '%(asctime)s - %(levelname)s - %(process)d - %(threadName)s - %(message)s'
|
36
|
+
log_level = logging.INFO # Or logging.DEBUG for more detail
|
37
|
+
|
38
|
+
# Use rotating file handler
|
39
|
+
handler = RotatingFileHandler(log_file, maxBytes=5*1024*1024, backupCount=2, encoding='utf-8')
|
40
|
+
handler.setFormatter(logging.Formatter(log_format))
|
41
|
+
|
42
|
+
# Configure root logger
|
43
|
+
logging.basicConfig(level=log_level, handlers=[handler])
|
44
|
+
|
45
|
+
# Add stream handler to see logs if running interactively (optional)
|
46
|
+
# logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
|
47
|
+
|
48
|
+
logging.info("="*20 + " OOTB App Starting " + "="*20)
|
49
|
+
logging.info(f"Running with args: {sys.argv}")
|
50
|
+
logging.info(f"Python Executable: {sys.executable}")
|
51
|
+
logging.info(f"Working Directory: {os.getcwd()}")
|
52
|
+
logging.info(f"User: {os.getenv('USERNAME')}")
|
53
|
+
|
54
|
+
except Exception as log_setup_e:
|
55
|
+
print(f"FATAL: Failed to set up logging: {log_setup_e}")
|
56
|
+
# Fallback logging might be needed here if file logging fails
|
57
|
+
|
58
|
+
# --- End App Logging Setup ---
|
59
|
+
|
60
|
+
app = FastAPI()
|
61
|
+
|
62
|
+
# Add CORS middleware to allow requests from the frontend
|
63
|
+
app.add_middleware(
|
64
|
+
CORSMiddleware,
|
65
|
+
allow_origins=["*"],
|
66
|
+
allow_credentials=True,
|
67
|
+
allow_methods=["*"],
|
68
|
+
allow_headers=["*"],
|
69
|
+
)
|
70
|
+
|
71
|
+
# Rate limiter for API endpoints
|
72
|
+
class RateLimiter:
|
73
|
+
def __init__(self, interval_seconds=2):
|
74
|
+
self.interval = interval_seconds
|
75
|
+
self.last_request_time = {}
|
76
|
+
self.lock = threading.Lock()
|
77
|
+
|
78
|
+
def allow_request(self, endpoint):
|
79
|
+
with self.lock:
|
80
|
+
current_time = time.time()
|
81
|
+
# Priority endpoints always allowed
|
82
|
+
if endpoint in ["/update_params", "/update_message"]:
|
83
|
+
return True
|
84
|
+
|
85
|
+
# For other endpoints, apply rate limiting
|
86
|
+
if endpoint not in self.last_request_time:
|
87
|
+
self.last_request_time[endpoint] = current_time
|
88
|
+
return True
|
89
|
+
|
90
|
+
elapsed = current_time - self.last_request_time[endpoint]
|
91
|
+
if elapsed < self.interval:
|
92
|
+
return False
|
93
|
+
|
94
|
+
self.last_request_time[endpoint] = current_time
|
95
|
+
return True
|
96
|
+
|
97
|
+
|
98
|
+
def log_ootb_request(server_url, ootb_request_type, data):
|
99
|
+
logging.info(f"OOTB Request: Type={ootb_request_type}, Data={data}")
|
100
|
+
# Keep the requests post for now if it serves a specific purpose
|
101
|
+
logging_data = {
|
102
|
+
"type": ootb_request_type,
|
103
|
+
"data": data,
|
104
|
+
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
105
|
+
}
|
106
|
+
if not server_url.endswith("/update_ootb_logging"):
|
107
|
+
server_logging_url = server_url + "/update_ootb_logging"
|
108
|
+
else:
|
109
|
+
server_logging_url = server_url
|
110
|
+
try:
|
111
|
+
requests.post(server_logging_url, json=logging_data, timeout=5)
|
112
|
+
except Exception as req_log_e:
|
113
|
+
logging.warning(f"Could not log ootb request to server {server_logging_url}: {req_log_e}")
|
114
|
+
|
115
|
+
|
116
|
+
class SharedState:
|
117
|
+
def __init__(self, args):
|
118
|
+
self.args = args
|
119
|
+
self.task_updated = False
|
120
|
+
self.chatbot_messages = []
|
121
|
+
# Store all state-related data here
|
122
|
+
self.model = args.model
|
123
|
+
self.task = getattr(args, 'task', "")
|
124
|
+
self.selected_screen = args.selected_screen
|
125
|
+
self.user_id = args.user_id
|
126
|
+
self.trace_id = args.trace_id
|
127
|
+
self.api_keys = args.api_keys
|
128
|
+
self.server_url = args.server_url
|
129
|
+
self.message_queue = []
|
130
|
+
self.is_processing = False
|
131
|
+
self.should_stop = False
|
132
|
+
self.is_paused = False
|
133
|
+
self.full_screen_game_mode = getattr(args, 'full_screen_game_mode', 0)
|
134
|
+
# Add a new event to better control stopping
|
135
|
+
self.stop_event = threading.Event()
|
136
|
+
# Add a reference to the processing thread
|
137
|
+
self.processing_thread = None
|
138
|
+
|
139
|
+
shared_state = None
|
140
|
+
rate_limiter = RateLimiter(interval_seconds=2)
|
141
|
+
|
142
|
+
# Set up logging for this module
|
143
|
+
log = logging.getLogger(__name__)
|
144
|
+
|
145
|
+
def prepare_environment(state):
|
146
|
+
"""Dynamically loads and runs preparation logic based on software name."""
|
147
|
+
# Determine software name from state (user_id, trace_id, or task)
|
148
|
+
software_name = ""
|
149
|
+
|
150
|
+
# Check user_id first
|
151
|
+
user_id = getattr(state, 'user_id', '').lower()
|
152
|
+
task = getattr(state, 'task', '').lower()
|
153
|
+
trace_id = getattr(state, 'trace_id', '').lower()
|
154
|
+
|
155
|
+
log.info(f"Checking for software in: user_id='{user_id}', trace_id='{trace_id}', task='{task}'")
|
156
|
+
|
157
|
+
# Look for known software indicators
|
158
|
+
if "star rail" in user_id or "star rail" in trace_id:
|
159
|
+
software_name = "star rail"
|
160
|
+
elif "powerpoint" in user_id or "powerpoint" in trace_id or "powerpoint" in task:
|
161
|
+
software_name = "powerpoint"
|
162
|
+
# Add more software checks here as needed
|
163
|
+
|
164
|
+
# If no specific software found, check task for keywords
|
165
|
+
if not software_name:
|
166
|
+
log.info("No specific software detected from IDs, checking task content")
|
167
|
+
|
168
|
+
if not software_name:
|
169
|
+
log.info("No specific software preparation identified. Skipping preparation.")
|
170
|
+
return
|
171
|
+
|
172
|
+
log.info(f"Identified software for preparation: '{software_name}'")
|
173
|
+
|
174
|
+
# Normalize the software name to be a valid Python module name
|
175
|
+
# Replace spaces/hyphens with underscores, convert to lowercase
|
176
|
+
module_name_base = software_name.replace(" ", "_").replace("-", "_").lower()
|
177
|
+
module_to_run = f"{module_name_base}_prepare"
|
178
|
+
|
179
|
+
log.info(f"Attempting preparation for software: '{software_name}' (Module: '{module_to_run}')")
|
180
|
+
|
181
|
+
try:
|
182
|
+
# Construct the full module path within the package
|
183
|
+
prep_package = "computer_use_ootb_internal.preparation"
|
184
|
+
full_module_path = f"{prep_package}.{module_to_run}"
|
185
|
+
|
186
|
+
# Dynamically import the module
|
187
|
+
# Check if module exists first using pkgutil to avoid import errors
|
188
|
+
log.debug(f"Looking for preparation module: {full_module_path}")
|
189
|
+
loader = pkgutil.find_loader(full_module_path)
|
190
|
+
if loader is None:
|
191
|
+
log.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
|
192
|
+
return
|
193
|
+
|
194
|
+
log.debug(f"Importing preparation module: {full_module_path}")
|
195
|
+
prep_module = importlib.import_module(full_module_path)
|
196
|
+
|
197
|
+
# Check if the module has the expected function
|
198
|
+
if hasattr(prep_module, "run_preparation") and callable(prep_module.run_preparation):
|
199
|
+
log.info(f"Running preparation function from {full_module_path}...")
|
200
|
+
prep_module.run_preparation(state)
|
201
|
+
log.info(f"Preparation function from {full_module_path} completed.")
|
202
|
+
else:
|
203
|
+
log.warning(f"Module {full_module_path} found, but does not have a callable 'run_preparation' function. Skipping.")
|
204
|
+
|
205
|
+
except ModuleNotFoundError:
|
206
|
+
log.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
|
207
|
+
except Exception as e:
|
208
|
+
log.error(f"Error during dynamic preparation loading/execution for '{module_to_run}': {e}", exc_info=True)
|
209
|
+
|
210
|
+
|
211
|
+
@app.post("/update_params")
|
212
|
+
async def update_parameters(request: Request):
|
213
|
+
logging.info("Received request to /update_params")
|
214
|
+
try:
|
215
|
+
data = await request.json()
|
216
|
+
|
217
|
+
if 'task' not in data:
|
218
|
+
return JSONResponse(
|
219
|
+
content={"status": "error", "message": "Missing required field: task"},
|
220
|
+
status_code=400
|
221
|
+
)
|
222
|
+
|
223
|
+
# Clear message histories before updating parameters
|
224
|
+
shared_state.message_queue.clear()
|
225
|
+
shared_state.chatbot_messages.clear()
|
226
|
+
logging.info("Cleared message queue and chatbot messages.")
|
227
|
+
|
228
|
+
shared_state.args = argparse.Namespace(**data)
|
229
|
+
shared_state.task_updated = True
|
230
|
+
|
231
|
+
# Update shared state when parameters change
|
232
|
+
shared_state.model = getattr(shared_state.args, 'model', "teach-mode-gpt-4o")
|
233
|
+
shared_state.task = getattr(shared_state.args, 'task', "Following the instructions to complete the task.")
|
234
|
+
shared_state.selected_screen = getattr(shared_state.args, 'selected_screen', 0)
|
235
|
+
shared_state.user_id = getattr(shared_state.args, 'user_id', "hero_cases")
|
236
|
+
shared_state.trace_id = getattr(shared_state.args, 'trace_id', "build_scroll_combat")
|
237
|
+
shared_state.api_keys = getattr(shared_state.args, 'api_keys', "sk-proj-1234567890")
|
238
|
+
shared_state.server_url = getattr(shared_state.args, 'server_url', "http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com")
|
239
|
+
|
240
|
+
log_ootb_request(shared_state.server_url, "update_params", data)
|
241
|
+
|
242
|
+
# Call the (now dynamic) preparation function here, after parameters are updated
|
243
|
+
prepare_environment(shared_state)
|
244
|
+
|
245
|
+
logging.info("Parameters updated successfully.")
|
246
|
+
return JSONResponse(
|
247
|
+
content={"status": "success", "message": "Parameters updated", "new_args": vars(shared_state.args)},
|
248
|
+
status_code=200
|
249
|
+
)
|
250
|
+
except Exception as e:
|
251
|
+
logging.error("Error processing /update_params:", exc_info=True)
|
252
|
+
return JSONResponse(content={"status": "error", "message": "Internal server error"}, status_code=500)
|
253
|
+
|
254
|
+
@app.post("/update_message")
|
255
|
+
async def update_message(request: Request):
|
256
|
+
data = await request.json()
|
257
|
+
|
258
|
+
if 'message' not in data:
|
259
|
+
return JSONResponse(
|
260
|
+
content={"status": "error", "message": "Missing required field: message"},
|
261
|
+
status_code=400
|
262
|
+
)
|
263
|
+
|
264
|
+
log_ootb_request(shared_state.server_url, "update_message", data)
|
265
|
+
|
266
|
+
message = data['message']
|
267
|
+
full_screen_game_mode = data.get('full_screen_game_mode', 0) # Default to 0 if not provided
|
268
|
+
|
269
|
+
# shared_state.chatbot_messages.append({"role": "user", "content": message, "type": "text"})
|
270
|
+
shared_state.task = message
|
271
|
+
shared_state.args.task = message
|
272
|
+
shared_state.full_screen_game_mode = full_screen_game_mode
|
273
|
+
|
274
|
+
# Reset stop event before starting
|
275
|
+
shared_state.stop_event.clear()
|
276
|
+
|
277
|
+
# Start processing if not already running
|
278
|
+
if not shared_state.is_processing:
|
279
|
+
# Create and store the thread
|
280
|
+
shared_state.processing_thread = threading.Thread(target=process_input, daemon=True)
|
281
|
+
shared_state.processing_thread.start()
|
282
|
+
|
283
|
+
return JSONResponse(
|
284
|
+
content={"status": "success", "message": "Message received", "task": shared_state.task},
|
285
|
+
status_code=200
|
286
|
+
)
|
287
|
+
|
288
|
+
@app.get("/get_messages")
|
289
|
+
async def get_messages(request: Request):
|
290
|
+
# Apply rate limiting
|
291
|
+
if not rate_limiter.allow_request(request.url.path):
|
292
|
+
return JSONResponse(
|
293
|
+
content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
|
294
|
+
status_code=429
|
295
|
+
)
|
296
|
+
|
297
|
+
# log_ootb_request(shared_state.server_url, "get_messages", {})
|
298
|
+
|
299
|
+
# Return all messages in the queue and clear it
|
300
|
+
messages = shared_state.message_queue.copy()
|
301
|
+
shared_state.message_queue = []
|
302
|
+
|
303
|
+
return JSONResponse(
|
304
|
+
content={"status": "success", "messages": messages},
|
305
|
+
status_code=200
|
306
|
+
)
|
307
|
+
|
308
|
+
@app.get("/get_screens")
|
309
|
+
async def get_screens(request: Request):
|
310
|
+
# Apply rate limiting
|
311
|
+
if not rate_limiter.allow_request(request.url.path):
|
312
|
+
return JSONResponse(
|
313
|
+
content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
|
314
|
+
status_code=429
|
315
|
+
)
|
316
|
+
|
317
|
+
log_ootb_request(shared_state.server_url, "get_screens", {})
|
318
|
+
|
319
|
+
screen_options, primary_index = get_screen_details()
|
320
|
+
|
321
|
+
return JSONResponse(
|
322
|
+
content={"status": "success", "screens": screen_options, "primary_index": primary_index},
|
323
|
+
status_code=200
|
324
|
+
)
|
325
|
+
|
326
|
+
@app.post("/stop_processing")
|
327
|
+
async def stop_processing(request: Request):
|
328
|
+
# Apply rate limiting
|
329
|
+
if not rate_limiter.allow_request(request.url.path):
|
330
|
+
return JSONResponse(
|
331
|
+
content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
|
332
|
+
status_code=429
|
333
|
+
)
|
334
|
+
|
335
|
+
log_ootb_request(shared_state.server_url, "stop_processing", {})
|
336
|
+
|
337
|
+
if shared_state.is_processing:
|
338
|
+
# Set both flags to ensure stopping the current task
|
339
|
+
shared_state.should_stop = True
|
340
|
+
shared_state.stop_event.set()
|
341
|
+
|
342
|
+
# Clear message histories
|
343
|
+
shared_state.message_queue.clear()
|
344
|
+
shared_state.chatbot_messages.clear()
|
345
|
+
logging.info("Cleared message queue and chatbot messages during stop.")
|
346
|
+
|
347
|
+
# Send an immediate message to the queue to inform the user
|
348
|
+
stop_initiated_msg = {"role": "assistant", "content": f"Stopping task '{shared_state.task}'...", "type": "text", "action_type": ""}
|
349
|
+
# Append the stop message AFTER clearing, so it's the only one left
|
350
|
+
shared_state.message_queue.append(stop_initiated_msg)
|
351
|
+
shared_state.chatbot_messages.append(stop_initiated_msg)
|
352
|
+
|
353
|
+
return JSONResponse(
|
354
|
+
content={"status": "success", "message": "Task is being stopped, server will remain available for new tasks"},
|
355
|
+
status_code=200
|
356
|
+
)
|
357
|
+
else:
|
358
|
+
# Clear message histories even if not processing, to ensure clean state
|
359
|
+
shared_state.message_queue.clear()
|
360
|
+
shared_state.chatbot_messages.clear()
|
361
|
+
logging.info("Cleared message queue and chatbot messages (no active process to stop).")
|
362
|
+
return JSONResponse(
|
363
|
+
content={"status": "error", "message": "No active processing to stop"},
|
364
|
+
status_code=400
|
365
|
+
)
|
366
|
+
|
367
|
+
@app.post("/toggle_pause")
|
368
|
+
async def toggle_pause(request: Request):
|
369
|
+
# Apply rate limiting
|
370
|
+
if not rate_limiter.allow_request(request.url.path):
|
371
|
+
return JSONResponse(
|
372
|
+
content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
|
373
|
+
status_code=429
|
374
|
+
)
|
375
|
+
|
376
|
+
log_ootb_request(shared_state.server_url, "toggle_pause", {})
|
377
|
+
|
378
|
+
if not shared_state.is_processing:
|
379
|
+
return JSONResponse(
|
380
|
+
content={"status": "error", "message": "No active processing to pause/resume"},
|
381
|
+
status_code=400
|
382
|
+
)
|
383
|
+
|
384
|
+
# Toggle the pause state
|
385
|
+
shared_state.is_paused = not shared_state.is_paused
|
386
|
+
current_state = shared_state.is_paused
|
387
|
+
|
388
|
+
print(f"Toggled pause state to: {current_state}")
|
389
|
+
|
390
|
+
status_message = "paused" if current_state else "resumed"
|
391
|
+
|
392
|
+
# Add a message to the queue to inform the user
|
393
|
+
if current_state:
|
394
|
+
message = {"role": "assistant", "content": f"Task '{shared_state.task}' has been paused. Click Continue to resume.", "type": "text", "action_type": ""}
|
395
|
+
else:
|
396
|
+
message = {"role": "assistant", "content": f"Task '{shared_state.task}' has been resumed.", "type": "text", "action_type": ""}
|
397
|
+
|
398
|
+
shared_state.chatbot_messages.append(message)
|
399
|
+
shared_state.message_queue.append(message)
|
400
|
+
|
401
|
+
return JSONResponse(
|
402
|
+
content={
|
403
|
+
"status": "success",
|
404
|
+
"message": f"Processing {status_message}",
|
405
|
+
"is_paused": current_state
|
406
|
+
},
|
407
|
+
status_code=200
|
408
|
+
)
|
409
|
+
|
410
|
+
@app.get("/status")
|
411
|
+
async def get_status(request: Request):
|
412
|
+
# Apply rate limiting
|
413
|
+
if not rate_limiter.allow_request(request.url.path):
|
414
|
+
return JSONResponse(
|
415
|
+
content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
|
416
|
+
status_code=429
|
417
|
+
)
|
418
|
+
|
419
|
+
# log_ootb_request(shared_state.server_url, "get_status", {})
|
420
|
+
|
421
|
+
print(f"Status check - Processing: {shared_state.is_processing}, Paused: {shared_state.is_paused}")
|
422
|
+
return JSONResponse(
|
423
|
+
content={
|
424
|
+
"status": "success",
|
425
|
+
"is_processing": shared_state.is_processing,
|
426
|
+
"is_paused": shared_state.is_paused
|
427
|
+
},
|
428
|
+
status_code=200
|
429
|
+
)
|
430
|
+
|
431
|
+
@app.post("/exec_computer_tool")
|
432
|
+
async def exec_computer_tool(request: Request):
|
433
|
+
logging.info("Received request to /exec_computer_tool")
|
434
|
+
try:
|
435
|
+
data = await request.json()
|
436
|
+
|
437
|
+
# Extract parameters from the request
|
438
|
+
selected_screen = data.get('selected_screen', 0)
|
439
|
+
full_screen_game_mode = data.get('full_screen_game_mode', 0)
|
440
|
+
response = data.get('response', {})
|
441
|
+
|
442
|
+
logging.info(f"Executing TeachmodeExecutor with: screen={selected_screen}, mode={full_screen_game_mode}, response={response}")
|
443
|
+
|
444
|
+
# Create TeachmodeExecutor in a separate process to avoid event loop conflicts
|
445
|
+
# Since TeachmodeExecutor uses asyncio.run() internally, we need to run it in a way
|
446
|
+
# that doesn't conflict with FastAPI's event loop
|
447
|
+
|
448
|
+
def run_executor():
|
449
|
+
executor = TeachmodeExecutor(
|
450
|
+
selected_screen=selected_screen,
|
451
|
+
full_screen_game_mode=full_screen_game_mode
|
452
|
+
)
|
453
|
+
|
454
|
+
results = []
|
455
|
+
try:
|
456
|
+
for action_result in executor(response):
|
457
|
+
results.append(action_result)
|
458
|
+
except Exception as exec_error:
|
459
|
+
logging.error(f"Error executing action: {exec_error}", exc_info=True)
|
460
|
+
return {"error": str(exec_error)}
|
461
|
+
|
462
|
+
return results
|
463
|
+
|
464
|
+
# Execute in a thread pool to avoid blocking the event loop
|
465
|
+
with concurrent.futures.ThreadPoolExecutor() as pool:
|
466
|
+
results = await asyncio.get_event_loop().run_in_executor(pool, run_executor)
|
467
|
+
|
468
|
+
if isinstance(results, dict) and "error" in results:
|
469
|
+
return JSONResponse(
|
470
|
+
content={"status": "error", "message": results["error"]},
|
471
|
+
status_code=500
|
472
|
+
)
|
473
|
+
|
474
|
+
logging.info(f"Action results: {results}")
|
475
|
+
|
476
|
+
return JSONResponse(
|
477
|
+
content={"status": "success", "results": results},
|
478
|
+
status_code=200
|
479
|
+
)
|
480
|
+
except Exception as e:
|
481
|
+
logging.error("Error processing /exec_computer_tool:", exc_info=True)
|
482
|
+
return JSONResponse(
|
483
|
+
content={"status": "error", "message": f"Internal server error: {str(e)}"},
|
484
|
+
status_code=500
|
485
|
+
)
|
486
|
+
|
487
|
+
def process_input() -> None:
    """Run the teach-mode sampling loop for the current task (worker thread).

    Streams each message produced by simple_teachmode_sampling_loop() into
    shared_state.chatbot_messages and shared_state.message_queue, honoring
    pause/stop signals between steps. On exit (normal, stopped, or error) it
    appends a final status message and resets all processing flags so a new
    task can start.
    """
    global shared_state
    logging.info("process_input thread started.")
    # Mark the loop as active and clear any stale stop/pause signals.
    shared_state.is_processing = True
    shared_state.should_stop = False
    shared_state.is_paused = False
    shared_state.stop_event.clear() # Ensure stop event is cleared at the start

    print(f"start sampling loop: {shared_state.chatbot_messages}")
    print(f"shared_state.args before sampling loop: {shared_state.args}")


    try:
        # Get the generator for the sampling loop
        sampling_loop = simple_teachmode_sampling_loop(
            model=shared_state.model,
            task=shared_state.task,
            selected_screen=shared_state.selected_screen,
            user_id=shared_state.user_id,
            trace_id=shared_state.trace_id,
            api_keys=shared_state.api_keys,
            server_url=shared_state.server_url,
            full_screen_game_mode=shared_state.full_screen_game_mode,
        )

        # Process messages from the sampling loop
        for loop_msg in sampling_loop:
            # Check stop condition more frequently
            if shared_state.should_stop or shared_state.stop_event.is_set():
                print("Processing stopped by user")
                break

            # Check if paused and wait while paused
            while shared_state.is_paused and not shared_state.should_stop and not shared_state.stop_event.is_set():
                print(f"Processing paused at: {time.strftime('%H:%M:%S')}")
                # Wait a short time and check stop condition regularly
                for _ in range(5): # Check 5 times per second
                    if shared_state.should_stop or shared_state.stop_event.is_set():
                        break
                    time.sleep(0.2)

            # Check again after pause loop
            if shared_state.should_stop or shared_state.stop_event.is_set():
                print("Processing stopped while paused or resuming")
                break

            # Publish the message both to the chat history and the poll queue.
            shared_state.chatbot_messages.append(loop_msg)
            shared_state.message_queue.append(loop_msg)

            # Short sleep to allow stop signals to be processed
            for _ in range(5): # Check 5 times per second
                if shared_state.should_stop or shared_state.stop_event.is_set():
                    print("Processing stopped during sleep")
                    break
                time.sleep(0.1)

            if shared_state.should_stop or shared_state.stop_event.is_set():
                break

    except Exception as e:
        # Handle any exceptions in the processing loop
        error_msg = f"Error during task processing: {e}"
        print(error_msg)
        error_message = {"role": "assistant", "content": error_msg, "type": "error", "action_type": ""}
        shared_state.message_queue.append(error_message)

    finally:
        # Handle completion or interruption
        if shared_state.should_stop or shared_state.stop_event.is_set():
            stop_msg = f"Task '{shared_state.task}' was stopped. Ready for new tasks."
            final_message = {"role": "assistant", "content": stop_msg, "type": "text", "action_type": ""}
        else:
            complete_msg = f"Task '{shared_state.task}' completed. Thanks for using Teachmode-OOTB."
            final_message = {"role": "assistant", "content": complete_msg, "type": "text", "action_type": ""}

        shared_state.chatbot_messages.append(final_message)
        shared_state.message_queue.append(final_message)

        # Reset all state flags to allow for new tasks
        shared_state.is_processing = False
        shared_state.should_stop = False
        shared_state.is_paused = False
        shared_state.stop_event.clear()
        print("Processing completed, ready for new tasks")
        logging.info("process_input thread finished.")
def main():
    """Parse CLI arguments, initialize shared state, and run the FastAPI app.

    The listening port is derived from the Windows username ("altair" -> 14000,
    "guest1".."guest10" -> 14001..14010); all other cases use the default 7888.
    Host/port are intentionally NOT CLI arguments.
    """
    # Logging is set up at the top level now
    logging.info("App main() function starting setup.")
    global app, shared_state, rate_limiter  # Ensure app is global if needed by uvicorn
    parser = argparse.ArgumentParser()
    # Add arguments, but NOT host and port
    parser.add_argument("--model", type=str, default="teach-mode-gpt-4o", help="Model name")
    parser.add_argument("--task", type=str, default="Following the instructions to complete the task.", help="Initial task description")
    parser.add_argument("--selected_screen", type=int, default=0, help="Selected screen index")
    parser.add_argument("--user_id", type=str, default="hero_cases", help="User ID for the session")
    parser.add_argument("--trace_id", type=str, default="build_scroll_combat", help="Trace ID for the session")
    parser.add_argument("--api_keys", type=str, default="sk-proj-1234567890", help="API keys")
    parser.add_argument("--server_url", type=str, default="http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com", help="Server URL for the session")

    args = parser.parse_args()
    # NOTE: argparse guarantees every declared attribute exists (defaults above),
    # so the previous `if not hasattr(args, ...)` fallback checks were
    # unreachable dead code and have been removed.

    shared_state = SharedState(args)
    rate_limiter = RateLimiter(interval_seconds=2)  # Re-initialize rate limiter
    logging.info(f"Shared state initialized for user: {args.user_id}")

    # --- Port calculation: derive from Windows username, otherwise default ---
    port = 7888  # Default port
    host = "0.0.0.0"  # Listen on all interfaces

    if platform.system() == "Windows":
        try:
            username = os.environ["USERNAME"].lower()
            logging.info(f"Determining port based on Windows username: {username}")
            if username == "altair":
                port = 14000
            elif username.startswith("guest") and username[5:].isdigit():
                num = int(username[5:])
                if 1 <= num <= 10:  # Assuming max 10 guests for this range
                    port = 14000 + num
                else:
                    logging.warning(f"Guest user number {num} out of range (1-10), using default port {port}.")
            else:
                logging.info(f"Username '{username}' doesn't match specific rules, using default port {port}.")
        except Exception as e:
            # Covers a missing USERNAME env var as well as parse errors.
            logging.error(f"Error determining port from username: {e}. Using default port {port}.", exc_info=True)
    else:
        logging.info(f"Not running on Windows, using default port {port}.")
    # --- End of port calculation ---

    logging.info(f"Final Host={host}, Port={port}")

    try:
        logging.info(f"Starting Uvicorn server on {host}:{port}")
        # Use the calculated port and specific host
        uvicorn.run(app, host=host, port=port)
        logging.info("Uvicorn server stopped.")
    except Exception:
        # Previously bound an unused `main_e`; the traceback is captured via exc_info.
        logging.error("Error in main execution:", exc_info=True)
    finally:
        logging.info("App main() function finished.")
if __name__ == "__main__":
|
639
|
+
main()
|
640
|
+
|
641
|
+
# Test log_ootb_request
|
637
642
|
log_ootb_request("http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com", "test_request", {"message": "Test message"})
|