computer-use-ootb-internal 0.0.171__py3-none-any.whl → 0.0.173__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,611 +1,637 @@
1
- import argparse
2
- import time
3
- import json
4
- from datetime import datetime
5
- import threading
6
- import requests
7
- import platform # Add platform import
8
- import pyautogui # Add pyautogui import
9
- import webbrowser # Add webbrowser import
10
- import os # Import os for path joining
11
- import logging # Import logging
12
- import importlib # For dynamic imports
13
- import pkgutil # To find modules
14
- import sys # For logging setup
15
- import traceback # For logging setup
16
- from logging.handlers import RotatingFileHandler # For logging setup
17
- from fastapi import FastAPI, Request
18
- from fastapi.responses import JSONResponse
19
- from fastapi.middleware.cors import CORSMiddleware
20
- from computer_use_ootb_internal.computer_use_demo.tools.computer import get_screen_details
21
- from computer_use_ootb_internal.run_teachmode_ootb_args import simple_teachmode_sampling_loop
22
- from computer_use_ootb_internal.computer_use_demo.executor.teachmode_executor import TeachmodeExecutor
23
- import uvicorn # Assuming uvicorn is used to run FastAPI
24
- import concurrent.futures
25
- import asyncio
26
-
27
- # --- App Logging Setup ---
28
- try:
29
- # Log to user's AppData directory for better accessibility
30
- log_dir_base = os.environ.get('APPDATA', os.path.expanduser('~'))
31
- log_dir = os.path.join(log_dir_base, 'OOTBAppLogs')
32
- os.makedirs(log_dir, exist_ok=True)
33
- log_file = os.path.join(log_dir, 'ootb_app.log')
34
-
35
- log_format = '%(asctime)s - %(levelname)s - %(process)d - %(threadName)s - %(message)s'
36
- log_level = logging.INFO # Or logging.DEBUG for more detail
37
-
38
- # Use rotating file handler
39
- handler = RotatingFileHandler(log_file, maxBytes=5*1024*1024, backupCount=2, encoding='utf-8')
40
- handler.setFormatter(logging.Formatter(log_format))
41
-
42
- # Configure root logger
43
- logging.basicConfig(level=log_level, handlers=[handler])
44
-
45
- # Add stream handler to see logs if running interactively (optional)
46
- # logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
47
-
48
- logging.info("="*20 + " OOTB App Starting " + "="*20)
49
- logging.info(f"Running with args: {sys.argv}")
50
- logging.info(f"Python Executable: {sys.executable}")
51
- logging.info(f"Working Directory: {os.getcwd()}")
52
- logging.info(f"User: {os.getenv('USERNAME')}")
53
-
54
- except Exception as log_setup_e:
55
- print(f"FATAL: Failed to set up logging: {log_setup_e}")
56
- # Fallback logging might be needed here if file logging fails
57
-
58
- # --- End App Logging Setup ---
59
-
60
- app = FastAPI()
61
-
62
- # Add CORS middleware to allow requests from the frontend
63
- app.add_middleware(
64
- CORSMiddleware,
65
- allow_origins=["*"],
66
- allow_credentials=True,
67
- allow_methods=["*"],
68
- allow_headers=["*"],
69
- )
70
-
71
- # Rate limiter for API endpoints
72
- class RateLimiter:
73
- def __init__(self, interval_seconds=2):
74
- self.interval = interval_seconds
75
- self.last_request_time = {}
76
- self.lock = threading.Lock()
77
-
78
- def allow_request(self, endpoint):
79
- with self.lock:
80
- current_time = time.time()
81
- # Priority endpoints always allowed
82
- if endpoint in ["/update_params", "/update_message"]:
83
- return True
84
-
85
- # For other endpoints, apply rate limiting
86
- if endpoint not in self.last_request_time:
87
- self.last_request_time[endpoint] = current_time
88
- return True
89
-
90
- elapsed = current_time - self.last_request_time[endpoint]
91
- if elapsed < self.interval:
92
- return False
93
-
94
- self.last_request_time[endpoint] = current_time
95
- return True
96
-
97
-
98
- def log_ootb_request(server_url, ootb_request_type, data):
99
- logging.info(f"OOTB Request: Type={ootb_request_type}, Data={data}")
100
- # Keep the requests post for now if it serves a specific purpose
101
- logging_data = {
102
- "type": ootb_request_type,
103
- "data": data,
104
- "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
105
- }
106
- if not server_url.endswith("/update_ootb_logging"):
107
- server_logging_url = server_url + "/update_ootb_logging"
108
- else:
109
- server_logging_url = server_url
110
- try:
111
- requests.post(server_logging_url, json=logging_data, timeout=5)
112
- except Exception as req_log_e:
113
- logging.warning(f"Could not log ootb request to server {server_logging_url}: {req_log_e}")
114
-
115
-
116
- class SharedState:
117
- def __init__(self, args):
118
- self.args = args
119
- self.task_updated = False
120
- self.chatbot_messages = []
121
- # Store all state-related data here
122
- self.model = args.model
123
- self.task = getattr(args, 'task', "")
124
- self.selected_screen = args.selected_screen
125
- self.user_id = args.user_id
126
- self.trace_id = args.trace_id
127
- self.api_keys = args.api_keys
128
- self.server_url = args.server_url
129
- self.message_queue = []
130
- self.is_processing = False
131
- self.should_stop = False
132
- self.is_paused = False
133
- # Add a new event to better control stopping
134
- self.stop_event = threading.Event()
135
- # Add a reference to the processing thread
136
- self.processing_thread = None
137
-
138
- shared_state = None
139
- rate_limiter = RateLimiter(interval_seconds=2)
140
-
141
- # Set up logging for this module
142
- log = logging.getLogger(__name__)
143
-
144
- def prepare_environment(state):
145
- """Dynamically loads and runs preparation logic based on software name."""
146
- # TODO: Replace hardcoded software name with value from shared_state when available
147
- software_name = "star rail"
148
- # Normalize the software name to be a valid Python module name
149
- # Replace spaces/hyphens with underscores, convert to lowercase
150
- module_name_base = software_name.replace(" ", "_").replace("-", "_").lower()
151
- module_to_run = f"{module_name_base}_prepare"
152
-
153
- log.info(f"Attempting preparation for software: '{software_name}' (Module: '{module_to_run}')")
154
-
155
- try:
156
- # Construct the full module path within the package
157
- prep_package = "computer_use_ootb_internal.preparation"
158
- full_module_path = f"{prep_package}.{module_to_run}"
159
-
160
- # Dynamically import the module
161
- # Check if module exists first using pkgutil to avoid import errors
162
- # Note: pkgutil.find_loader might be deprecated, consider importlib.util.find_spec
163
- loader = pkgutil.find_loader(full_module_path)
164
- if loader is None:
165
- log.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
166
- return
167
-
168
- prep_module = importlib.import_module(full_module_path)
169
-
170
- # Check if the module has the expected function
171
- if hasattr(prep_module, "run_preparation") and callable(prep_module.run_preparation):
172
- log.info(f"Running preparation function from {full_module_path}...")
173
- prep_module.run_preparation(state)
174
- log.info(f"Preparation function from {full_module_path} completed.")
175
- else:
176
- log.warning(f"Module {full_module_path} found, but does not have a callable 'run_preparation' function. Skipping.")
177
-
178
- except ModuleNotFoundError:
179
- log.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
180
- except Exception as e:
181
- log.error(f"Error during dynamic preparation loading/execution for '{module_to_run}': {e}", exc_info=True)
182
-
183
-
184
- @app.post("/update_params")
185
- async def update_parameters(request: Request):
186
- logging.info("Received request to /update_params")
187
- try:
188
- data = await request.json()
189
-
190
- if 'task' not in data:
191
- return JSONResponse(
192
- content={"status": "error", "message": "Missing required field: task"},
193
- status_code=400
194
- )
195
-
196
- # Clear message histories before updating parameters
197
- shared_state.message_queue.clear()
198
- shared_state.chatbot_messages.clear()
199
- logging.info("Cleared message queue and chatbot messages.")
200
-
201
- shared_state.args = argparse.Namespace(**data)
202
- shared_state.task_updated = True
203
-
204
- # Update shared state when parameters change
205
- shared_state.model = getattr(shared_state.args, 'model', "teach-mode-gpt-4o")
206
- shared_state.task = getattr(shared_state.args, 'task', "Following the instructions to complete the task.")
207
- shared_state.selected_screen = getattr(shared_state.args, 'selected_screen', 0)
208
- shared_state.user_id = getattr(shared_state.args, 'user_id', "hero_cases")
209
- shared_state.trace_id = getattr(shared_state.args, 'trace_id', "build_scroll_combat")
210
- shared_state.api_keys = getattr(shared_state.args, 'api_keys', "sk-proj-1234567890")
211
- shared_state.server_url = getattr(shared_state.args, 'server_url', "http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com")
212
-
213
- log_ootb_request(shared_state.server_url, "update_params", data)
214
-
215
- # Call the (now dynamic) preparation function here, after parameters are updated
216
- prepare_environment(shared_state)
217
-
218
- logging.info("Parameters updated successfully.")
219
- return JSONResponse(
220
- content={"status": "success", "message": "Parameters updated", "new_args": vars(shared_state.args)},
221
- status_code=200
222
- )
223
- except Exception as e:
224
- logging.error("Error processing /update_params:", exc_info=True)
225
- return JSONResponse(content={"status": "error", "message": "Internal server error"}, status_code=500)
226
-
227
- @app.post("/update_message")
228
- async def update_message(request: Request):
229
- data = await request.json()
230
-
231
- if 'message' not in data:
232
- return JSONResponse(
233
- content={"status": "error", "message": "Missing required field: message"},
234
- status_code=400
235
- )
236
-
237
- log_ootb_request(shared_state.server_url, "update_message", data)
238
-
239
- message = data['message']
240
- # shared_state.chatbot_messages.append({"role": "user", "content": message, "type": "text"})
241
- shared_state.task = message
242
- shared_state.args.task = message
243
-
244
- # Reset stop event before starting
245
- shared_state.stop_event.clear()
246
-
247
- # Start processing if not already running
248
- if not shared_state.is_processing:
249
- # Create and store the thread
250
- shared_state.processing_thread = threading.Thread(target=process_input, daemon=True)
251
- shared_state.processing_thread.start()
252
-
253
- return JSONResponse(
254
- content={"status": "success", "message": "Message received", "task": shared_state.task},
255
- status_code=200
256
- )
257
-
258
- @app.get("/get_messages")
259
- async def get_messages(request: Request):
260
- # Apply rate limiting
261
- if not rate_limiter.allow_request(request.url.path):
262
- return JSONResponse(
263
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
264
- status_code=429
265
- )
266
-
267
- # log_ootb_request(shared_state.server_url, "get_messages", {})
268
-
269
- # Return all messages in the queue and clear it
270
- messages = shared_state.message_queue.copy()
271
- shared_state.message_queue = []
272
-
273
- return JSONResponse(
274
- content={"status": "success", "messages": messages},
275
- status_code=200
276
- )
277
-
278
- @app.get("/get_screens")
279
- async def get_screens(request: Request):
280
- # Apply rate limiting
281
- if not rate_limiter.allow_request(request.url.path):
282
- return JSONResponse(
283
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
284
- status_code=429
285
- )
286
-
287
- log_ootb_request(shared_state.server_url, "get_screens", {})
288
-
289
- screen_options, primary_index = get_screen_details()
290
-
291
- return JSONResponse(
292
- content={"status": "success", "screens": screen_options, "primary_index": primary_index},
293
- status_code=200
294
- )
295
-
296
- @app.post("/stop_processing")
297
- async def stop_processing(request: Request):
298
- # Apply rate limiting
299
- if not rate_limiter.allow_request(request.url.path):
300
- return JSONResponse(
301
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
302
- status_code=429
303
- )
304
-
305
- log_ootb_request(shared_state.server_url, "stop_processing", {})
306
-
307
- if shared_state.is_processing:
308
- # Set both flags to ensure stopping the current task
309
- shared_state.should_stop = True
310
- shared_state.stop_event.set()
311
-
312
- # Clear message histories
313
- shared_state.message_queue.clear()
314
- shared_state.chatbot_messages.clear()
315
- logging.info("Cleared message queue and chatbot messages during stop.")
316
-
317
- # Send an immediate message to the queue to inform the user
318
- stop_initiated_msg = {"role": "assistant", "content": f"Stopping task '{shared_state.task}'...", "type": "text", "action_type": ""}
319
- # Append the stop message AFTER clearing, so it's the only one left
320
- shared_state.message_queue.append(stop_initiated_msg)
321
- shared_state.chatbot_messages.append(stop_initiated_msg)
322
-
323
- return JSONResponse(
324
- content={"status": "success", "message": "Task is being stopped, server will remain available for new tasks"},
325
- status_code=200
326
- )
327
- else:
328
- # Clear message histories even if not processing, to ensure clean state
329
- shared_state.message_queue.clear()
330
- shared_state.chatbot_messages.clear()
331
- logging.info("Cleared message queue and chatbot messages (no active process to stop).")
332
- return JSONResponse(
333
- content={"status": "error", "message": "No active processing to stop"},
334
- status_code=400
335
- )
336
-
337
- @app.post("/toggle_pause")
338
- async def toggle_pause(request: Request):
339
- # Apply rate limiting
340
- if not rate_limiter.allow_request(request.url.path):
341
- return JSONResponse(
342
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
343
- status_code=429
344
- )
345
-
346
- log_ootb_request(shared_state.server_url, "toggle_pause", {})
347
-
348
- if not shared_state.is_processing:
349
- return JSONResponse(
350
- content={"status": "error", "message": "No active processing to pause/resume"},
351
- status_code=400
352
- )
353
-
354
- # Toggle the pause state
355
- shared_state.is_paused = not shared_state.is_paused
356
- current_state = shared_state.is_paused
357
-
358
- print(f"Toggled pause state to: {current_state}")
359
-
360
- status_message = "paused" if current_state else "resumed"
361
-
362
- # Add a message to the queue to inform the user
363
- if current_state:
364
- message = {"role": "assistant", "content": f"Task '{shared_state.task}' has been paused. Click Continue to resume.", "type": "text", "action_type": ""}
365
- else:
366
- message = {"role": "assistant", "content": f"Task '{shared_state.task}' has been resumed.", "type": "text", "action_type": ""}
367
-
368
- shared_state.chatbot_messages.append(message)
369
- shared_state.message_queue.append(message)
370
-
371
- return JSONResponse(
372
- content={
373
- "status": "success",
374
- "message": f"Processing {status_message}",
375
- "is_paused": current_state
376
- },
377
- status_code=200
378
- )
379
-
380
- @app.get("/status")
381
- async def get_status(request: Request):
382
- # Apply rate limiting
383
- if not rate_limiter.allow_request(request.url.path):
384
- return JSONResponse(
385
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
386
- status_code=429
387
- )
388
-
389
- # log_ootb_request(shared_state.server_url, "get_status", {})
390
-
391
- print(f"Status check - Processing: {shared_state.is_processing}, Paused: {shared_state.is_paused}")
392
- return JSONResponse(
393
- content={
394
- "status": "success",
395
- "is_processing": shared_state.is_processing,
396
- "is_paused": shared_state.is_paused
397
- },
398
- status_code=200
399
- )
400
-
401
- @app.post("/exec_computer_tool")
402
- async def exec_computer_tool(request: Request):
403
- logging.info("Received request to /exec_computer_tool")
404
- try:
405
- data = await request.json()
406
-
407
- # Extract parameters from the request
408
- selected_screen = data.get('selected_screen', 0)
409
- full_screen_game_mode = data.get('full_screen_game_mode', 0)
410
- response = data.get('response', {})
411
-
412
- logging.info(f"Executing TeachmodeExecutor with: screen={selected_screen}, mode={full_screen_game_mode}, response={response}")
413
-
414
- # Create TeachmodeExecutor in a separate process to avoid event loop conflicts
415
- # Since TeachmodeExecutor uses asyncio.run() internally, we need to run it in a way
416
- # that doesn't conflict with FastAPI's event loop
417
-
418
- def run_executor():
419
- executor = TeachmodeExecutor(
420
- selected_screen=selected_screen,
421
- full_screen_game_mode=full_screen_game_mode
422
- )
423
-
424
- results = []
425
- try:
426
- for action_result in executor(response):
427
- results.append(action_result)
428
- except Exception as exec_error:
429
- logging.error(f"Error executing action: {exec_error}", exc_info=True)
430
- return {"error": str(exec_error)}
431
-
432
- return results
433
-
434
- # Execute in a thread pool to avoid blocking the event loop
435
- with concurrent.futures.ThreadPoolExecutor() as pool:
436
- results = await asyncio.get_event_loop().run_in_executor(pool, run_executor)
437
-
438
- if isinstance(results, dict) and "error" in results:
439
- return JSONResponse(
440
- content={"status": "error", "message": results["error"]},
441
- status_code=500
442
- )
443
-
444
- logging.info(f"Action results: {results}")
445
-
446
- return JSONResponse(
447
- content={"status": "success", "results": results},
448
- status_code=200
449
- )
450
- except Exception as e:
451
- logging.error("Error processing /exec_computer_tool:", exc_info=True)
452
- return JSONResponse(
453
- content={"status": "error", "message": f"Internal server error: {str(e)}"},
454
- status_code=500
455
- )
456
-
457
- def process_input():
458
- global shared_state
459
- logging.info("process_input thread started.")
460
- shared_state.is_processing = True
461
- shared_state.should_stop = False
462
- shared_state.is_paused = False
463
- shared_state.stop_event.clear() # Ensure stop event is cleared at the start
464
-
465
- print(f"start sampling loop: {shared_state.chatbot_messages}")
466
- print(f"shared_state.args before sampling loop: {shared_state.args}")
467
-
468
-
469
- try:
470
- # Get the generator for the sampling loop
471
- sampling_loop = simple_teachmode_sampling_loop(
472
- model=shared_state.model,
473
- task=shared_state.task,
474
- selected_screen=shared_state.selected_screen,
475
- user_id=shared_state.user_id,
476
- trace_id=shared_state.trace_id,
477
- api_keys=shared_state.api_keys,
478
- server_url=shared_state.server_url,
479
- )
480
-
481
- # Process messages from the sampling loop
482
- for loop_msg in sampling_loop:
483
- # Check stop condition more frequently
484
- if shared_state.should_stop or shared_state.stop_event.is_set():
485
- print("Processing stopped by user")
486
- break
487
-
488
- # Check if paused and wait while paused
489
- while shared_state.is_paused and not shared_state.should_stop and not shared_state.stop_event.is_set():
490
- print(f"Processing paused at: {time.strftime('%H:%M:%S')}")
491
- # Wait a short time and check stop condition regularly
492
- for _ in range(5): # Check 5 times per second
493
- if shared_state.should_stop or shared_state.stop_event.is_set():
494
- break
495
- time.sleep(0.2)
496
-
497
- # Check again after pause loop
498
- if shared_state.should_stop or shared_state.stop_event.is_set():
499
- print("Processing stopped while paused or resuming")
500
- break
501
-
502
- shared_state.chatbot_messages.append(loop_msg)
503
- shared_state.message_queue.append(loop_msg)
504
-
505
- # Short sleep to allow stop signals to be processed
506
- for _ in range(5): # Check 5 times per second
507
- if shared_state.should_stop or shared_state.stop_event.is_set():
508
- print("Processing stopped during sleep")
509
- break
510
- time.sleep(0.1)
511
-
512
- if shared_state.should_stop or shared_state.stop_event.is_set():
513
- break
514
-
515
- except Exception as e:
516
- # Handle any exceptions in the processing loop
517
- error_msg = f"Error during task processing: {e}"
518
- print(error_msg)
519
- error_message = {"role": "assistant", "content": error_msg, "type": "error", "action_type": ""}
520
- shared_state.message_queue.append(error_message)
521
-
522
- finally:
523
- # Handle completion or interruption
524
- if shared_state.should_stop or shared_state.stop_event.is_set():
525
- stop_msg = f"Task '{shared_state.task}' was stopped. Ready for new tasks."
526
- final_message = {"role": "assistant", "content": stop_msg, "type": "text", "action_type": ""}
527
- else:
528
- complete_msg = f"Task '{shared_state.task}' completed. Thanks for using Teachmode-OOTB."
529
- final_message = {"role": "assistant", "content": complete_msg, "type": "text", "action_type": ""}
530
-
531
- shared_state.chatbot_messages.append(final_message)
532
- shared_state.message_queue.append(final_message)
533
-
534
- # Reset all state flags to allow for new tasks
535
- shared_state.is_processing = False
536
- shared_state.should_stop = False
537
- shared_state.is_paused = False
538
- shared_state.stop_event.clear()
539
- print("Processing completed, ready for new tasks")
540
- logging.info("process_input thread finished.")
541
-
542
- def main():
543
- # Logging is set up at the top level now
544
- logging.info("App main() function starting setup.")
545
- global app, shared_state, rate_limiter # Ensure app is global if needed by uvicorn
546
- parser = argparse.ArgumentParser()
547
- # Add arguments, but NOT host and port
548
- parser.add_argument("--model", type=str, default="teach-mode-gpt-4o", help="Model name")
549
- parser.add_argument("--task", type=str, default="Following the instructions to complete the task.", help="Initial task description")
550
- parser.add_argument("--selected_screen", type=int, default=0, help="Selected screen index")
551
- parser.add_argument("--user_id", type=str, default="hero_cases", help="User ID for the session")
552
- parser.add_argument("--trace_id", type=str, default="build_scroll_combat", help="Trace ID for the session")
553
- parser.add_argument("--api_keys", type=str, default="sk-proj-1234567890", help="API keys")
554
- parser.add_argument("--server_url", type=str, default="http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com", help="Server URL for the session")
555
-
556
- args = parser.parse_args()
557
-
558
- # Validate args or set defaults if needed (keep these)
559
- if not hasattr(args, 'model'): args.model = "default_model"
560
- if not hasattr(args, 'task'): args.task = "default_task"
561
- if not hasattr(args, 'selected_screen'): args.selected_screen = 0
562
- if not hasattr(args, 'user_id'): args.user_id = "unknown_user"
563
- if not hasattr(args, 'trace_id'): args.trace_id = "unknown_trace"
564
- if not hasattr(args, 'api_keys'): args.api_keys = "none"
565
- if not hasattr(args, 'server_url'): args.server_url = "none"
566
-
567
- shared_state = SharedState(args)
568
- rate_limiter = RateLimiter(interval_seconds=2) # Re-initialize rate limiter
569
- logging.info(f"Shared state initialized for user: {args.user_id}")
570
-
571
- # --- Restore original port calculation logic ---
572
- port = 7888 # Default port
573
- host = "0.0.0.0" # Listen on all interfaces
574
-
575
- if platform.system() == "Windows":
576
- try:
577
- username = os.environ["USERNAME"].lower()
578
- logging.info(f"Determining port based on Windows username: {username}")
579
- if username == "altair":
580
- port = 14000
581
- elif username.startswith("guest") and username[5:].isdigit():
582
- num = int(username[5:])
583
- if 1 <= num <= 10: # Assuming max 10 guests for this range
584
- port = 14000 + num
585
- else:
586
- logging.warning(f"Guest user number {num} out of range (1-10), using default port {port}.")
587
- else:
588
- logging.info(f"Username '{username}' doesn't match specific rules, using default port {port}.")
589
- except Exception as e:
590
- logging.error(f"Error determining port from username: {e}. Using default port {port}.", exc_info=True)
591
- else:
592
- logging.info(f"Not running on Windows, using default port {port}.")
593
- # --- End of restored port calculation ---
594
-
595
- logging.info(f"Final Host={host}, Port={port}")
596
-
597
- try:
598
- logging.info(f"Starting Uvicorn server on {host}:{port}")
599
- # Use the calculated port and specific host
600
- uvicorn.run(app, host=host, port=port)
601
- logging.info("Uvicorn server stopped.")
602
- except Exception as main_e:
603
- logging.error("Error in main execution:", exc_info=True)
604
- finally:
605
- logging.info("App main() function finished.")
606
-
607
- if __name__ == "__main__":
608
- main()
609
-
610
- # Test log_ootb_request
1
+ import argparse
2
+ import time
3
+ import json
4
+ from datetime import datetime
5
+ import threading
6
+ import requests
7
+ import platform # Add platform import
8
+ import pyautogui # Add pyautogui import
9
+ import webbrowser # Add webbrowser import
10
+ import os # Import os for path joining
11
+ import logging # Import logging
12
+ import importlib # For dynamic imports
13
+ import pkgutil # To find modules
14
+ import sys # For logging setup
15
+ import traceback # For logging setup
16
+ from logging.handlers import RotatingFileHandler # For logging setup
17
+ from fastapi import FastAPI, Request
18
+ from fastapi.responses import JSONResponse
19
+ from fastapi.middleware.cors import CORSMiddleware
20
+ from computer_use_ootb_internal.computer_use_demo.tools.computer import get_screen_details
21
+ from computer_use_ootb_internal.run_teachmode_ootb_args import simple_teachmode_sampling_loop
22
+ from computer_use_ootb_internal.computer_use_demo.executor.teachmode_executor import TeachmodeExecutor
23
+ import uvicorn # Assuming uvicorn is used to run FastAPI
24
+ import concurrent.futures
25
+ import asyncio
26
+
27
+ # --- App Logging Setup ---
28
+ try:
29
+ # Log to user's AppData directory for better accessibility
30
+ log_dir_base = os.environ.get('APPDATA', os.path.expanduser('~'))
31
+ log_dir = os.path.join(log_dir_base, 'OOTBAppLogs')
32
+ os.makedirs(log_dir, exist_ok=True)
33
+ log_file = os.path.join(log_dir, 'ootb_app.log')
34
+
35
+ log_format = '%(asctime)s - %(levelname)s - %(process)d - %(threadName)s - %(message)s'
36
+ log_level = logging.INFO # Or logging.DEBUG for more detail
37
+
38
+ # Use rotating file handler
39
+ handler = RotatingFileHandler(log_file, maxBytes=5*1024*1024, backupCount=2, encoding='utf-8')
40
+ handler.setFormatter(logging.Formatter(log_format))
41
+
42
+ # Configure root logger
43
+ logging.basicConfig(level=log_level, handlers=[handler])
44
+
45
+ # Add stream handler to see logs if running interactively (optional)
46
+ # logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
47
+
48
+ logging.info("="*20 + " OOTB App Starting " + "="*20)
49
+ logging.info(f"Running with args: {sys.argv}")
50
+ logging.info(f"Python Executable: {sys.executable}")
51
+ logging.info(f"Working Directory: {os.getcwd()}")
52
+ logging.info(f"User: {os.getenv('USERNAME')}")
53
+
54
+ except Exception as log_setup_e:
55
+ print(f"FATAL: Failed to set up logging: {log_setup_e}")
56
+ # Fallback logging might be needed here if file logging fails
57
+
58
+ # --- End App Logging Setup ---
59
+
60
+ app = FastAPI()
61
+
62
+ # Add CORS middleware to allow requests from the frontend
63
+ app.add_middleware(
64
+ CORSMiddleware,
65
+ allow_origins=["*"],
66
+ allow_credentials=True,
67
+ allow_methods=["*"],
68
+ allow_headers=["*"],
69
+ )
70
+
71
+ # Rate limiter for API endpoints
72
+ class RateLimiter:
73
+ def __init__(self, interval_seconds=2):
74
+ self.interval = interval_seconds
75
+ self.last_request_time = {}
76
+ self.lock = threading.Lock()
77
+
78
+ def allow_request(self, endpoint):
79
+ with self.lock:
80
+ current_time = time.time()
81
+ # Priority endpoints always allowed
82
+ if endpoint in ["/update_params", "/update_message"]:
83
+ return True
84
+
85
+ # For other endpoints, apply rate limiting
86
+ if endpoint not in self.last_request_time:
87
+ self.last_request_time[endpoint] = current_time
88
+ return True
89
+
90
+ elapsed = current_time - self.last_request_time[endpoint]
91
+ if elapsed < self.interval:
92
+ return False
93
+
94
+ self.last_request_time[endpoint] = current_time
95
+ return True
96
+
97
+
98
+ def log_ootb_request(server_url, ootb_request_type, data):
99
+ logging.info(f"OOTB Request: Type={ootb_request_type}, Data={data}")
100
+ # Keep the requests post for now if it serves a specific purpose
101
+ logging_data = {
102
+ "type": ootb_request_type,
103
+ "data": data,
104
+ "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
105
+ }
106
+ if not server_url.endswith("/update_ootb_logging"):
107
+ server_logging_url = server_url + "/update_ootb_logging"
108
+ else:
109
+ server_logging_url = server_url
110
+ try:
111
+ requests.post(server_logging_url, json=logging_data, timeout=5)
112
+ except Exception as req_log_e:
113
+ logging.warning(f"Could not log ootb request to server {server_logging_url}: {req_log_e}")
114
+
115
+
116
+ class SharedState:
117
+ def __init__(self, args):
118
+ self.args = args
119
+ self.task_updated = False
120
+ self.chatbot_messages = []
121
+ # Store all state-related data here
122
+ self.model = args.model
123
+ self.task = getattr(args, 'task', "")
124
+ self.selected_screen = args.selected_screen
125
+ self.user_id = args.user_id
126
+ self.trace_id = args.trace_id
127
+ self.api_keys = args.api_keys
128
+ self.server_url = args.server_url
129
+ self.message_queue = []
130
+ self.is_processing = False
131
+ self.should_stop = False
132
+ self.is_paused = False
133
+ # Add a new event to better control stopping
134
+ self.stop_event = threading.Event()
135
+ # Add a reference to the processing thread
136
+ self.processing_thread = None
137
+
138
+ shared_state = None
139
+ rate_limiter = RateLimiter(interval_seconds=2)
140
+
141
+ # Set up logging for this module
142
+ log = logging.getLogger(__name__)
143
+
144
def prepare_environment(state):
    """Dynamically load and run software-specific preparation logic.

    Inspects the state's ``user_id``, ``trace_id`` and ``task`` for known
    software keywords, maps the detected software name onto a
    ``<name>_prepare`` module inside ``computer_use_ootb_internal.preparation``
    and, if that module exposes a callable ``run_preparation(state)``, runs it.
    Quietly skips preparation when nothing matches or the module is absent.

    Args:
        state: Object carrying ``user_id``, ``trace_id`` and ``task``
            attributes (e.g. the module-level shared state).
    """
    import importlib.util  # local stdlib import for module discovery

    # Normalize all identification fields up front for case-insensitive matching.
    user_id = getattr(state, 'user_id', '').lower()
    task = getattr(state, 'task', '').lower()
    trace_id = getattr(state, 'trace_id', '').lower()

    log.info(f"Checking for software in: user_id='{user_id}', trace_id='{trace_id}', task='{task}'")

    # Look for known software indicators in the IDs and the task text.
    # (Previously "star rail" was never matched against the task, unlike
    # "powerpoint"; both are now checked consistently.)
    software_name = ""
    if "star rail" in user_id or "star rail" in trace_id or "star rail" in task:
        software_name = "star rail"
    elif "powerpoint" in user_id or "powerpoint" in trace_id or "powerpoint" in task:
        software_name = "powerpoint"
    # Add more software checks here as needed

    if not software_name:
        log.info("No specific software preparation identified. Skipping preparation.")
        return

    log.info(f"Identified software for preparation: '{software_name}'")

    # Normalize the software name to a valid Python module name:
    # replace spaces/hyphens with underscores, convert to lowercase.
    module_name_base = software_name.replace(" ", "_").replace("-", "_").lower()
    module_to_run = f"{module_name_base}_prepare"

    log.info(f"Attempting preparation for software: '{software_name}' (Module: '{module_to_run}')")

    # Construct the full module path within the package.
    prep_package = "computer_use_ootb_internal.preparation"
    full_module_path = f"{prep_package}.{module_to_run}"
    try:
        # Probe for the module first so a missing module is a warning rather
        # than an exception. importlib.util.find_spec replaces the
        # pkgutil.find_loader API that is deprecated since Python 3.12.
        log.debug(f"Looking for preparation module: {full_module_path}")
        if importlib.util.find_spec(full_module_path) is None:
            log.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
            return

        log.debug(f"Importing preparation module: {full_module_path}")
        prep_module = importlib.import_module(full_module_path)

        # The module must expose a callable entry point named run_preparation.
        if hasattr(prep_module, "run_preparation") and callable(prep_module.run_preparation):
            log.info(f"Running preparation function from {full_module_path}...")
            prep_module.run_preparation(state)
            log.info(f"Preparation function from {full_module_path} completed.")
        else:
            log.warning(f"Module {full_module_path} found, but does not have a callable 'run_preparation' function. Skipping.")

    except ModuleNotFoundError:
        # find_spec itself raises this when a parent package is missing.
        log.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
    except Exception as e:
        log.error(f"Error during dynamic preparation loading/execution for '{module_to_run}': {e}", exc_info=True)
208
+
209
+
210
@app.post("/update_params")
async def update_parameters(request: Request):
    """Replace the session parameters from a JSON payload and re-run preparation."""
    logging.info("Received request to /update_params")
    try:
        data = await request.json()

        if 'task' not in data:
            return JSONResponse(
                content={"status": "error", "message": "Missing required field: task"},
                status_code=400
            )

        # Drop any stale conversation history before applying the new parameters.
        shared_state.message_queue.clear()
        shared_state.chatbot_messages.clear()
        logging.info("Cleared message queue and chatbot messages.")

        shared_state.args = argparse.Namespace(**data)
        shared_state.task_updated = True

        # Mirror the refreshed args onto the flat shared-state attributes,
        # falling back to the historical defaults when a field is absent.
        fallbacks = {
            'model': "teach-mode-gpt-4o",
            'task': "Following the instructions to complete the task.",
            'selected_screen': 0,
            'user_id': "hero_cases",
            'trace_id': "build_scroll_combat",
            'api_keys': "sk-proj-1234567890",
            'server_url': "http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com",
        }
        for field, default in fallbacks.items():
            setattr(shared_state, field, getattr(shared_state.args, field, default))

        log_ootb_request(shared_state.server_url, "update_params", data)

        # Preparation depends on the just-updated parameters, so run it last.
        prepare_environment(shared_state)

        logging.info("Parameters updated successfully.")
        return JSONResponse(
            content={"status": "success", "message": "Parameters updated", "new_args": vars(shared_state.args)},
            status_code=200
        )
    except Exception as e:
        logging.error("Error processing /update_params:", exc_info=True)
        return JSONResponse(content={"status": "error", "message": "Internal server error"}, status_code=500)
252
+
253
@app.post("/update_message")
async def update_message(request: Request):
    """Accept a new task message and start the worker thread if idle."""
    data = await request.json()

    if 'message' not in data:
        return JSONResponse(
            content={"status": "error", "message": "Missing required field: message"},
            status_code=400
        )

    log_ootb_request(shared_state.server_url, "update_message", data)

    new_task = data['message']
    # shared_state.chatbot_messages.append({"role": "user", "content": message, "type": "text"})
    shared_state.task = new_task
    shared_state.args.task = new_task

    # Reset stop event before starting
    shared_state.stop_event.clear()

    # Spawn the worker only when no task is currently running.
    if not shared_state.is_processing:
        worker = threading.Thread(target=process_input, daemon=True)
        shared_state.processing_thread = worker
        worker.start()

    return JSONResponse(
        content={"status": "success", "message": "Message received", "task": shared_state.task},
        status_code=200
    )
283
+
284
@app.get("/get_messages")
async def get_messages(request: Request):
    """Drain and return every queued outbound message (rate limited)."""
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429
        )

    # log_ootb_request(shared_state.server_url, "get_messages", {})

    # Snapshot the queue, then reset it so each message is delivered exactly once.
    pending = shared_state.message_queue.copy()
    shared_state.message_queue = []

    return JSONResponse(
        content={"status": "success", "messages": pending},
        status_code=200
    )
303
+
304
@app.get("/get_screens")
async def get_screens(request: Request):
    """Report the available screens and the primary screen index (rate limited)."""
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429
        )

    log_ootb_request(shared_state.server_url, "get_screens", {})

    screens, primary = get_screen_details()

    return JSONResponse(
        content={"status": "success", "screens": screens, "primary_index": primary},
        status_code=200
    )
321
+
322
@app.post("/stop_processing")
async def stop_processing(request: Request):
    """Signal the active task to stop and wipe chat history (rate limited)."""
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429
        )

    log_ootb_request(shared_state.server_url, "stop_processing", {})

    if not shared_state.is_processing:
        # Nothing to stop, but still reset histories for a clean slate.
        shared_state.message_queue.clear()
        shared_state.chatbot_messages.clear()
        logging.info("Cleared message queue and chatbot messages (no active process to stop).")
        return JSONResponse(
            content={"status": "error", "message": "No active processing to stop"},
            status_code=400
        )

    # Raise both the flag and the event so every stop check in the worker fires.
    shared_state.should_stop = True
    shared_state.stop_event.set()

    # Clear message histories
    shared_state.message_queue.clear()
    shared_state.chatbot_messages.clear()
    logging.info("Cleared message queue and chatbot messages during stop.")

    # Queue a single notification (after the clear, so it is the only entry)
    # informing the user that the stop was acknowledged.
    stop_initiated_msg = {"role": "assistant", "content": f"Stopping task '{shared_state.task}'...", "type": "text", "action_type": ""}
    shared_state.message_queue.append(stop_initiated_msg)
    shared_state.chatbot_messages.append(stop_initiated_msg)

    return JSONResponse(
        content={"status": "success", "message": "Task is being stopped, server will remain available for new tasks"},
        status_code=200
    )
362
+
363
@app.post("/toggle_pause")
async def toggle_pause(request: Request):
    """Flip the paused/resumed state of the running task (rate limited)."""
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429
        )

    log_ootb_request(shared_state.server_url, "toggle_pause", {})

    if not shared_state.is_processing:
        return JSONResponse(
            content={"status": "error", "message": "No active processing to pause/resume"},
            status_code=400
        )

    # Toggle the pause state
    shared_state.is_paused = not shared_state.is_paused
    now_paused = shared_state.is_paused

    print(f"Toggled pause state to: {now_paused}")

    status_message = "paused" if now_paused else "resumed"

    # Tell the user what happened via the outbound queue.
    if now_paused:
        notice = {"role": "assistant", "content": f"Task '{shared_state.task}' has been paused. Click Continue to resume.", "type": "text", "action_type": ""}
    else:
        notice = {"role": "assistant", "content": f"Task '{shared_state.task}' has been resumed.", "type": "text", "action_type": ""}

    shared_state.chatbot_messages.append(notice)
    shared_state.message_queue.append(notice)

    return JSONResponse(
        content={
            "status": "success",
            "message": f"Processing {status_message}",
            "is_paused": now_paused
        },
        status_code=200
    )
405
+
406
@app.get("/status")
async def get_status(request: Request):
    """Expose the processing/paused flags for client polling (rate limited)."""
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429
        )

    # log_ootb_request(shared_state.server_url, "get_status", {})

    # Build the payload once and reuse it for both the console trace and the reply.
    snapshot = {
        "status": "success",
        "is_processing": shared_state.is_processing,
        "is_paused": shared_state.is_paused
    }
    print(f"Status check - Processing: {snapshot['is_processing']}, Paused: {snapshot['is_paused']}")
    return JSONResponse(content=snapshot, status_code=200)
426
+
427
@app.post("/exec_computer_tool")
async def exec_computer_tool(request: Request):
    """Execute a computer-use action via TeachmodeExecutor.

    Expects a JSON body with optional ``selected_screen`` (int, default 0),
    ``full_screen_game_mode`` (int, default 0) and ``response`` (dict holding
    the action to perform). The executor runs on a worker thread because it
    drives its own asyncio loop internally, which must not run on FastAPI's
    event loop.

    Returns:
        JSONResponse with the collected action results, or an error payload
        (HTTP 500) if execution or request handling fails.
    """
    logging.info("Received request to /exec_computer_tool")
    try:
        data = await request.json()

        # Extract parameters from the request
        selected_screen = data.get('selected_screen', 0)
        full_screen_game_mode = data.get('full_screen_game_mode', 0)
        response = data.get('response', {})

        logging.info(f"Executing TeachmodeExecutor with: screen={selected_screen}, mode={full_screen_game_mode}, response={response}")

        def run_executor():
            # Runs on a worker thread: TeachmodeExecutor uses asyncio.run()
            # internally, which would fail inside the server's running loop.
            executor = TeachmodeExecutor(
                selected_screen=selected_screen,
                full_screen_game_mode=full_screen_game_mode
            )

            results = []
            try:
                for action_result in executor(response):
                    results.append(action_result)
            except Exception as exec_error:
                logging.error(f"Error executing action: {exec_error}", exc_info=True)
                return {"error": str(exec_error)}

            return results

        # Off-load to a thread pool so the event loop stays responsive.
        # asyncio.get_running_loop() replaces asyncio.get_event_loop(),
        # which is deprecated inside coroutines since Python 3.10.
        with concurrent.futures.ThreadPoolExecutor() as pool:
            results = await asyncio.get_running_loop().run_in_executor(pool, run_executor)

        # run_executor signals failure with an {"error": ...} dict.
        if isinstance(results, dict) and "error" in results:
            return JSONResponse(
                content={"status": "error", "message": results["error"]},
                status_code=500
            )

        logging.info(f"Action results: {results}")

        return JSONResponse(
            content={"status": "success", "results": results},
            status_code=200
        )
    except Exception as e:
        logging.error("Error processing /exec_computer_tool:", exc_info=True)
        return JSONResponse(
            content={"status": "error", "message": f"Internal server error: {str(e)}"},
            status_code=500
        )
482
+
483
def process_input():
    """Worker-thread entry point: run the teach-mode sampling loop for the
    current task, relaying each produced message to the chat history and the
    outbound queue while honouring pause/stop signals from the API endpoints.

    Reads model/task/screen/user/trace/api-key/server settings from the
    module-level shared_state; always resets the processing flags on exit so
    a new task can start afterwards.
    """
    global shared_state
    logging.info("process_input thread started.")
    # Mark the session active and reset all control flags for a fresh run.
    shared_state.is_processing = True
    shared_state.should_stop = False
    shared_state.is_paused = False
    shared_state.stop_event.clear() # Ensure stop event is cleared at the start

    print(f"start sampling loop: {shared_state.chatbot_messages}")
    print(f"shared_state.args before sampling loop: {shared_state.args}")


    try:
        # Get the generator for the sampling loop
        sampling_loop = simple_teachmode_sampling_loop(
            model=shared_state.model,
            task=shared_state.task,
            selected_screen=shared_state.selected_screen,
            user_id=shared_state.user_id,
            trace_id=shared_state.trace_id,
            api_keys=shared_state.api_keys,
            server_url=shared_state.server_url,
        )

        # Process messages from the sampling loop
        for loop_msg in sampling_loop:
            # Check stop condition more frequently
            # (both the boolean flag and the Event are honoured — endpoints set both).
            if shared_state.should_stop or shared_state.stop_event.is_set():
                print("Processing stopped by user")
                break

            # Check if paused and wait while paused
            while shared_state.is_paused and not shared_state.should_stop and not shared_state.stop_event.is_set():
                print(f"Processing paused at: {time.strftime('%H:%M:%S')}")
                # Wait a short time and check stop condition regularly
                for _ in range(5): # Check 5 times per second
                    if shared_state.should_stop or shared_state.stop_event.is_set():
                        break
                    time.sleep(0.2)

            # Check again after pause loop
            if shared_state.should_stop or shared_state.stop_event.is_set():
                print("Processing stopped while paused or resuming")
                break

            # Relay the loop message to both the UI history and the outbound queue.
            shared_state.chatbot_messages.append(loop_msg)
            shared_state.message_queue.append(loop_msg)

            # Short sleep to allow stop signals to be processed
            for _ in range(5): # Check 5 times per second
                if shared_state.should_stop or shared_state.stop_event.is_set():
                    print("Processing stopped during sleep")
                    break
                time.sleep(0.1)

            if shared_state.should_stop or shared_state.stop_event.is_set():
                break

    except Exception as e:
        # Handle any exceptions in the processing loop
        error_msg = f"Error during task processing: {e}"
        print(error_msg)
        error_message = {"role": "assistant", "content": error_msg, "type": "error", "action_type": ""}
        shared_state.message_queue.append(error_message)

    finally:
        # Handle completion or interruption
        if shared_state.should_stop or shared_state.stop_event.is_set():
            stop_msg = f"Task '{shared_state.task}' was stopped. Ready for new tasks."
            final_message = {"role": "assistant", "content": stop_msg, "type": "text", "action_type": ""}
        else:
            complete_msg = f"Task '{shared_state.task}' completed. Thanks for using Teachmode-OOTB."
            final_message = {"role": "assistant", "content": complete_msg, "type": "text", "action_type": ""}

        shared_state.chatbot_messages.append(final_message)
        shared_state.message_queue.append(final_message)

        # Reset all state flags to allow for new tasks
        shared_state.is_processing = False
        shared_state.should_stop = False
        shared_state.is_paused = False
        shared_state.stop_event.clear()
        print("Processing completed, ready for new tasks")
        logging.info("process_input thread finished.")
567
+
568
def main():
    """Parse CLI arguments, build the shared state, pick a port and serve the app."""
    # Logging is set up at the top level now
    logging.info("App main() function starting setup.")
    global app, shared_state, rate_limiter  # app is consumed by uvicorn below

    parser = argparse.ArgumentParser()
    # Host and port are computed below, not taken from the command line.
    cli_options = [
        ("--model", str, "teach-mode-gpt-4o", "Model name"),
        ("--task", str, "Following the instructions to complete the task.", "Initial task description"),
        ("--selected_screen", int, 0, "Selected screen index"),
        ("--user_id", str, "hero_cases", "User ID for the session"),
        ("--trace_id", str, "build_scroll_combat", "Trace ID for the session"),
        ("--api_keys", str, "sk-proj-1234567890", "API keys"),
        ("--server_url", str, "http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com", "Server URL for the session"),
    ]
    for flag, value_type, default, help_text in cli_options:
        parser.add_argument(flag, type=value_type, default=default, help=help_text)

    args = parser.parse_args()

    # Defensive fallbacks in case an expected attribute is somehow missing.
    for attr, fallback in [
        ('model', "default_model"),
        ('task', "default_task"),
        ('selected_screen', 0),
        ('user_id', "unknown_user"),
        ('trace_id', "unknown_trace"),
        ('api_keys', "none"),
        ('server_url', "none"),
    ]:
        if not hasattr(args, attr):
            setattr(args, attr, fallback)

    shared_state = SharedState(args)
    rate_limiter = RateLimiter(interval_seconds=2)  # Re-initialize rate limiter
    logging.info(f"Shared state initialized for user: {args.user_id}")

    # --- Port selection: fixed mapping from the Windows username ---
    host = "0.0.0.0"  # Listen on all interfaces
    port = 7888  # Default port

    if platform.system() != "Windows":
        logging.info(f"Not running on Windows, using default port {port}.")
    else:
        try:
            username = os.environ["USERNAME"].lower()
            logging.info(f"Determining port based on Windows username: {username}")
            if username == "altair":
                port = 14000
            elif username.startswith("guest") and username[5:].isdigit():
                num = int(username[5:])
                if 1 <= num <= 10:  # Assuming max 10 guests for this range
                    port = 14000 + num
                else:
                    logging.warning(f"Guest user number {num} out of range (1-10), using default port {port}.")
            else:
                logging.info(f"Username '{username}' doesn't match specific rules, using default port {port}.")
        except Exception as e:
            logging.error(f"Error determining port from username: {e}. Using default port {port}.", exc_info=True)
    # --- End of port selection ---

    logging.info(f"Final Host={host}, Port={port}")

    try:
        logging.info(f"Starting Uvicorn server on {host}:{port}")
        # Use the calculated port and specific host
        uvicorn.run(app, host=host, port=port)
        logging.info("Uvicorn server stopped.")
    except Exception:
        logging.error("Error in main execution:", exc_info=True)
    finally:
        logging.info("App main() function finished.")
632
+
633
if __name__ == "__main__":
    main()

# Test log_ootb_request
# NOTE(review): this call executes at import time (outside the __main__ guard)
# and issues a request to a hard-coded server — confirm it is intentional to keep.
log_ootb_request("http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com", "test_request", {"message": "Test message"})