computer-use-ootb-internal 0.0.173__py3-none-any.whl → 0.0.175__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,637 +1,642 @@
1
- import argparse
2
- import time
3
- import json
4
- from datetime import datetime
5
- import threading
6
- import requests
7
- import platform # Add platform import
8
- import pyautogui # Add pyautogui import
9
- import webbrowser # Add webbrowser import
10
- import os # Import os for path joining
11
- import logging # Import logging
12
- import importlib # For dynamic imports
13
- import pkgutil # To find modules
14
- import sys # For logging setup
15
- import traceback # For logging setup
16
- from logging.handlers import RotatingFileHandler # For logging setup
17
- from fastapi import FastAPI, Request
18
- from fastapi.responses import JSONResponse
19
- from fastapi.middleware.cors import CORSMiddleware
20
- from computer_use_ootb_internal.computer_use_demo.tools.computer import get_screen_details
21
- from computer_use_ootb_internal.run_teachmode_ootb_args import simple_teachmode_sampling_loop
22
- from computer_use_ootb_internal.computer_use_demo.executor.teachmode_executor import TeachmodeExecutor
23
- import uvicorn # Assuming uvicorn is used to run FastAPI
24
- import concurrent.futures
25
- import asyncio
26
-
27
- # --- App Logging Setup ---
28
- try:
29
- # Log to user's AppData directory for better accessibility
30
- log_dir_base = os.environ.get('APPDATA', os.path.expanduser('~'))
31
- log_dir = os.path.join(log_dir_base, 'OOTBAppLogs')
32
- os.makedirs(log_dir, exist_ok=True)
33
- log_file = os.path.join(log_dir, 'ootb_app.log')
34
-
35
- log_format = '%(asctime)s - %(levelname)s - %(process)d - %(threadName)s - %(message)s'
36
- log_level = logging.INFO # Or logging.DEBUG for more detail
37
-
38
- # Use rotating file handler
39
- handler = RotatingFileHandler(log_file, maxBytes=5*1024*1024, backupCount=2, encoding='utf-8')
40
- handler.setFormatter(logging.Formatter(log_format))
41
-
42
- # Configure root logger
43
- logging.basicConfig(level=log_level, handlers=[handler])
44
-
45
- # Add stream handler to see logs if running interactively (optional)
46
- # logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
47
-
48
- logging.info("="*20 + " OOTB App Starting " + "="*20)
49
- logging.info(f"Running with args: {sys.argv}")
50
- logging.info(f"Python Executable: {sys.executable}")
51
- logging.info(f"Working Directory: {os.getcwd()}")
52
- logging.info(f"User: {os.getenv('USERNAME')}")
53
-
54
- except Exception as log_setup_e:
55
- print(f"FATAL: Failed to set up logging: {log_setup_e}")
56
- # Fallback logging might be needed here if file logging fails
57
-
58
- # --- End App Logging Setup ---
59
-
60
- app = FastAPI()
61
-
62
- # Add CORS middleware to allow requests from the frontend
63
- app.add_middleware(
64
- CORSMiddleware,
65
- allow_origins=["*"],
66
- allow_credentials=True,
67
- allow_methods=["*"],
68
- allow_headers=["*"],
69
- )
70
-
71
- # Rate limiter for API endpoints
72
- class RateLimiter:
73
- def __init__(self, interval_seconds=2):
74
- self.interval = interval_seconds
75
- self.last_request_time = {}
76
- self.lock = threading.Lock()
77
-
78
- def allow_request(self, endpoint):
79
- with self.lock:
80
- current_time = time.time()
81
- # Priority endpoints always allowed
82
- if endpoint in ["/update_params", "/update_message"]:
83
- return True
84
-
85
- # For other endpoints, apply rate limiting
86
- if endpoint not in self.last_request_time:
87
- self.last_request_time[endpoint] = current_time
88
- return True
89
-
90
- elapsed = current_time - self.last_request_time[endpoint]
91
- if elapsed < self.interval:
92
- return False
93
-
94
- self.last_request_time[endpoint] = current_time
95
- return True
96
-
97
-
98
- def log_ootb_request(server_url, ootb_request_type, data):
99
- logging.info(f"OOTB Request: Type={ootb_request_type}, Data={data}")
100
- # Keep the requests post for now if it serves a specific purpose
101
- logging_data = {
102
- "type": ootb_request_type,
103
- "data": data,
104
- "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
105
- }
106
- if not server_url.endswith("/update_ootb_logging"):
107
- server_logging_url = server_url + "/update_ootb_logging"
108
- else:
109
- server_logging_url = server_url
110
- try:
111
- requests.post(server_logging_url, json=logging_data, timeout=5)
112
- except Exception as req_log_e:
113
- logging.warning(f"Could not log ootb request to server {server_logging_url}: {req_log_e}")
114
-
115
-
116
- class SharedState:
117
- def __init__(self, args):
118
- self.args = args
119
- self.task_updated = False
120
- self.chatbot_messages = []
121
- # Store all state-related data here
122
- self.model = args.model
123
- self.task = getattr(args, 'task', "")
124
- self.selected_screen = args.selected_screen
125
- self.user_id = args.user_id
126
- self.trace_id = args.trace_id
127
- self.api_keys = args.api_keys
128
- self.server_url = args.server_url
129
- self.message_queue = []
130
- self.is_processing = False
131
- self.should_stop = False
132
- self.is_paused = False
133
- # Add a new event to better control stopping
134
- self.stop_event = threading.Event()
135
- # Add a reference to the processing thread
136
- self.processing_thread = None
137
-
138
- shared_state = None
139
- rate_limiter = RateLimiter(interval_seconds=2)
140
-
141
- # Set up logging for this module
142
- log = logging.getLogger(__name__)
143
-
144
- def prepare_environment(state):
145
- """Dynamically loads and runs preparation logic based on software name."""
146
- # Determine software name from state (user_id, trace_id, or task)
147
- software_name = ""
148
-
149
- # Check user_id first
150
- user_id = getattr(state, 'user_id', '').lower()
151
- task = getattr(state, 'task', '').lower()
152
- trace_id = getattr(state, 'trace_id', '').lower()
153
-
154
- log.info(f"Checking for software in: user_id='{user_id}', trace_id='{trace_id}', task='{task}'")
155
-
156
- # Look for known software indicators
157
- if "star rail" in user_id or "star rail" in trace_id:
158
- software_name = "star rail"
159
- elif "powerpoint" in user_id or "powerpoint" in trace_id or "powerpoint" in task:
160
- software_name = "powerpoint"
161
- # Add more software checks here as needed
162
-
163
- # If no specific software found, check task for keywords
164
- if not software_name:
165
- log.info("No specific software detected from IDs, checking task content")
166
-
167
- if not software_name:
168
- log.info("No specific software preparation identified. Skipping preparation.")
169
- return
170
-
171
- log.info(f"Identified software for preparation: '{software_name}'")
172
-
173
- # Normalize the software name to be a valid Python module name
174
- # Replace spaces/hyphens with underscores, convert to lowercase
175
- module_name_base = software_name.replace(" ", "_").replace("-", "_").lower()
176
- module_to_run = f"{module_name_base}_prepare"
177
-
178
- log.info(f"Attempting preparation for software: '{software_name}' (Module: '{module_to_run}')")
179
-
180
- try:
181
- # Construct the full module path within the package
182
- prep_package = "computer_use_ootb_internal.preparation"
183
- full_module_path = f"{prep_package}.{module_to_run}"
184
-
185
- # Dynamically import the module
186
- # Check if module exists first using pkgutil to avoid import errors
187
- log.debug(f"Looking for preparation module: {full_module_path}")
188
- loader = pkgutil.find_loader(full_module_path)
189
- if loader is None:
190
- log.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
191
- return
192
-
193
- log.debug(f"Importing preparation module: {full_module_path}")
194
- prep_module = importlib.import_module(full_module_path)
195
-
196
- # Check if the module has the expected function
197
- if hasattr(prep_module, "run_preparation") and callable(prep_module.run_preparation):
198
- log.info(f"Running preparation function from {full_module_path}...")
199
- prep_module.run_preparation(state)
200
- log.info(f"Preparation function from {full_module_path} completed.")
201
- else:
202
- log.warning(f"Module {full_module_path} found, but does not have a callable 'run_preparation' function. Skipping.")
203
-
204
- except ModuleNotFoundError:
205
- log.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
206
- except Exception as e:
207
- log.error(f"Error during dynamic preparation loading/execution for '{module_to_run}': {e}", exc_info=True)
208
-
209
-
210
- @app.post("/update_params")
211
- async def update_parameters(request: Request):
212
- logging.info("Received request to /update_params")
213
- try:
214
- data = await request.json()
215
-
216
- if 'task' not in data:
217
- return JSONResponse(
218
- content={"status": "error", "message": "Missing required field: task"},
219
- status_code=400
220
- )
221
-
222
- # Clear message histories before updating parameters
223
- shared_state.message_queue.clear()
224
- shared_state.chatbot_messages.clear()
225
- logging.info("Cleared message queue and chatbot messages.")
226
-
227
- shared_state.args = argparse.Namespace(**data)
228
- shared_state.task_updated = True
229
-
230
- # Update shared state when parameters change
231
- shared_state.model = getattr(shared_state.args, 'model', "teach-mode-gpt-4o")
232
- shared_state.task = getattr(shared_state.args, 'task', "Following the instructions to complete the task.")
233
- shared_state.selected_screen = getattr(shared_state.args, 'selected_screen', 0)
234
- shared_state.user_id = getattr(shared_state.args, 'user_id', "hero_cases")
235
- shared_state.trace_id = getattr(shared_state.args, 'trace_id', "build_scroll_combat")
236
- shared_state.api_keys = getattr(shared_state.args, 'api_keys', "sk-proj-1234567890")
237
- shared_state.server_url = getattr(shared_state.args, 'server_url', "http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com")
238
-
239
- log_ootb_request(shared_state.server_url, "update_params", data)
240
-
241
- # Call the (now dynamic) preparation function here, after parameters are updated
242
- prepare_environment(shared_state)
243
-
244
- logging.info("Parameters updated successfully.")
245
- return JSONResponse(
246
- content={"status": "success", "message": "Parameters updated", "new_args": vars(shared_state.args)},
247
- status_code=200
248
- )
249
- except Exception as e:
250
- logging.error("Error processing /update_params:", exc_info=True)
251
- return JSONResponse(content={"status": "error", "message": "Internal server error"}, status_code=500)
252
-
253
- @app.post("/update_message")
254
- async def update_message(request: Request):
255
- data = await request.json()
256
-
257
- if 'message' not in data:
258
- return JSONResponse(
259
- content={"status": "error", "message": "Missing required field: message"},
260
- status_code=400
261
- )
262
-
263
- log_ootb_request(shared_state.server_url, "update_message", data)
264
-
265
- message = data['message']
266
- # shared_state.chatbot_messages.append({"role": "user", "content": message, "type": "text"})
267
- shared_state.task = message
268
- shared_state.args.task = message
269
-
270
- # Reset stop event before starting
271
- shared_state.stop_event.clear()
272
-
273
- # Start processing if not already running
274
- if not shared_state.is_processing:
275
- # Create and store the thread
276
- shared_state.processing_thread = threading.Thread(target=process_input, daemon=True)
277
- shared_state.processing_thread.start()
278
-
279
- return JSONResponse(
280
- content={"status": "success", "message": "Message received", "task": shared_state.task},
281
- status_code=200
282
- )
283
-
284
- @app.get("/get_messages")
285
- async def get_messages(request: Request):
286
- # Apply rate limiting
287
- if not rate_limiter.allow_request(request.url.path):
288
- return JSONResponse(
289
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
290
- status_code=429
291
- )
292
-
293
- # log_ootb_request(shared_state.server_url, "get_messages", {})
294
-
295
- # Return all messages in the queue and clear it
296
- messages = shared_state.message_queue.copy()
297
- shared_state.message_queue = []
298
-
299
- return JSONResponse(
300
- content={"status": "success", "messages": messages},
301
- status_code=200
302
- )
303
-
304
- @app.get("/get_screens")
305
- async def get_screens(request: Request):
306
- # Apply rate limiting
307
- if not rate_limiter.allow_request(request.url.path):
308
- return JSONResponse(
309
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
310
- status_code=429
311
- )
312
-
313
- log_ootb_request(shared_state.server_url, "get_screens", {})
314
-
315
- screen_options, primary_index = get_screen_details()
316
-
317
- return JSONResponse(
318
- content={"status": "success", "screens": screen_options, "primary_index": primary_index},
319
- status_code=200
320
- )
321
-
322
- @app.post("/stop_processing")
323
- async def stop_processing(request: Request):
324
- # Apply rate limiting
325
- if not rate_limiter.allow_request(request.url.path):
326
- return JSONResponse(
327
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
328
- status_code=429
329
- )
330
-
331
- log_ootb_request(shared_state.server_url, "stop_processing", {})
332
-
333
- if shared_state.is_processing:
334
- # Set both flags to ensure stopping the current task
335
- shared_state.should_stop = True
336
- shared_state.stop_event.set()
337
-
338
- # Clear message histories
339
- shared_state.message_queue.clear()
340
- shared_state.chatbot_messages.clear()
341
- logging.info("Cleared message queue and chatbot messages during stop.")
342
-
343
- # Send an immediate message to the queue to inform the user
344
- stop_initiated_msg = {"role": "assistant", "content": f"Stopping task '{shared_state.task}'...", "type": "text", "action_type": ""}
345
- # Append the stop message AFTER clearing, so it's the only one left
346
- shared_state.message_queue.append(stop_initiated_msg)
347
- shared_state.chatbot_messages.append(stop_initiated_msg)
348
-
349
- return JSONResponse(
350
- content={"status": "success", "message": "Task is being stopped, server will remain available for new tasks"},
351
- status_code=200
352
- )
353
- else:
354
- # Clear message histories even if not processing, to ensure clean state
355
- shared_state.message_queue.clear()
356
- shared_state.chatbot_messages.clear()
357
- logging.info("Cleared message queue and chatbot messages (no active process to stop).")
358
- return JSONResponse(
359
- content={"status": "error", "message": "No active processing to stop"},
360
- status_code=400
361
- )
362
-
363
- @app.post("/toggle_pause")
364
- async def toggle_pause(request: Request):
365
- # Apply rate limiting
366
- if not rate_limiter.allow_request(request.url.path):
367
- return JSONResponse(
368
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
369
- status_code=429
370
- )
371
-
372
- log_ootb_request(shared_state.server_url, "toggle_pause", {})
373
-
374
- if not shared_state.is_processing:
375
- return JSONResponse(
376
- content={"status": "error", "message": "No active processing to pause/resume"},
377
- status_code=400
378
- )
379
-
380
- # Toggle the pause state
381
- shared_state.is_paused = not shared_state.is_paused
382
- current_state = shared_state.is_paused
383
-
384
- print(f"Toggled pause state to: {current_state}")
385
-
386
- status_message = "paused" if current_state else "resumed"
387
-
388
- # Add a message to the queue to inform the user
389
- if current_state:
390
- message = {"role": "assistant", "content": f"Task '{shared_state.task}' has been paused. Click Continue to resume.", "type": "text", "action_type": ""}
391
- else:
392
- message = {"role": "assistant", "content": f"Task '{shared_state.task}' has been resumed.", "type": "text", "action_type": ""}
393
-
394
- shared_state.chatbot_messages.append(message)
395
- shared_state.message_queue.append(message)
396
-
397
- return JSONResponse(
398
- content={
399
- "status": "success",
400
- "message": f"Processing {status_message}",
401
- "is_paused": current_state
402
- },
403
- status_code=200
404
- )
405
-
406
- @app.get("/status")
407
- async def get_status(request: Request):
408
- # Apply rate limiting
409
- if not rate_limiter.allow_request(request.url.path):
410
- return JSONResponse(
411
- content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
412
- status_code=429
413
- )
414
-
415
- # log_ootb_request(shared_state.server_url, "get_status", {})
416
-
417
- print(f"Status check - Processing: {shared_state.is_processing}, Paused: {shared_state.is_paused}")
418
- return JSONResponse(
419
- content={
420
- "status": "success",
421
- "is_processing": shared_state.is_processing,
422
- "is_paused": shared_state.is_paused
423
- },
424
- status_code=200
425
- )
426
-
427
- @app.post("/exec_computer_tool")
428
- async def exec_computer_tool(request: Request):
429
- logging.info("Received request to /exec_computer_tool")
430
- try:
431
- data = await request.json()
432
-
433
- # Extract parameters from the request
434
- selected_screen = data.get('selected_screen', 0)
435
- full_screen_game_mode = data.get('full_screen_game_mode', 0)
436
- response = data.get('response', {})
437
-
438
- logging.info(f"Executing TeachmodeExecutor with: screen={selected_screen}, mode={full_screen_game_mode}, response={response}")
439
-
440
- # Create TeachmodeExecutor in a separate process to avoid event loop conflicts
441
- # Since TeachmodeExecutor uses asyncio.run() internally, we need to run it in a way
442
- # that doesn't conflict with FastAPI's event loop
443
-
444
- def run_executor():
445
- executor = TeachmodeExecutor(
446
- selected_screen=selected_screen,
447
- full_screen_game_mode=full_screen_game_mode
448
- )
449
-
450
- results = []
451
- try:
452
- for action_result in executor(response):
453
- results.append(action_result)
454
- except Exception as exec_error:
455
- logging.error(f"Error executing action: {exec_error}", exc_info=True)
456
- return {"error": str(exec_error)}
457
-
458
- return results
459
-
460
- # Execute in a thread pool to avoid blocking the event loop
461
- with concurrent.futures.ThreadPoolExecutor() as pool:
462
- results = await asyncio.get_event_loop().run_in_executor(pool, run_executor)
463
-
464
- if isinstance(results, dict) and "error" in results:
465
- return JSONResponse(
466
- content={"status": "error", "message": results["error"]},
467
- status_code=500
468
- )
469
-
470
- logging.info(f"Action results: {results}")
471
-
472
- return JSONResponse(
473
- content={"status": "success", "results": results},
474
- status_code=200
475
- )
476
- except Exception as e:
477
- logging.error("Error processing /exec_computer_tool:", exc_info=True)
478
- return JSONResponse(
479
- content={"status": "error", "message": f"Internal server error: {str(e)}"},
480
- status_code=500
481
- )
482
-
483
- def process_input():
484
- global shared_state
485
- logging.info("process_input thread started.")
486
- shared_state.is_processing = True
487
- shared_state.should_stop = False
488
- shared_state.is_paused = False
489
- shared_state.stop_event.clear() # Ensure stop event is cleared at the start
490
-
491
- print(f"start sampling loop: {shared_state.chatbot_messages}")
492
- print(f"shared_state.args before sampling loop: {shared_state.args}")
493
-
494
-
495
- try:
496
- # Get the generator for the sampling loop
497
- sampling_loop = simple_teachmode_sampling_loop(
498
- model=shared_state.model,
499
- task=shared_state.task,
500
- selected_screen=shared_state.selected_screen,
501
- user_id=shared_state.user_id,
502
- trace_id=shared_state.trace_id,
503
- api_keys=shared_state.api_keys,
504
- server_url=shared_state.server_url,
505
- )
506
-
507
- # Process messages from the sampling loop
508
- for loop_msg in sampling_loop:
509
- # Check stop condition more frequently
510
- if shared_state.should_stop or shared_state.stop_event.is_set():
511
- print("Processing stopped by user")
512
- break
513
-
514
- # Check if paused and wait while paused
515
- while shared_state.is_paused and not shared_state.should_stop and not shared_state.stop_event.is_set():
516
- print(f"Processing paused at: {time.strftime('%H:%M:%S')}")
517
- # Wait a short time and check stop condition regularly
518
- for _ in range(5): # Check 5 times per second
519
- if shared_state.should_stop or shared_state.stop_event.is_set():
520
- break
521
- time.sleep(0.2)
522
-
523
- # Check again after pause loop
524
- if shared_state.should_stop or shared_state.stop_event.is_set():
525
- print("Processing stopped while paused or resuming")
526
- break
527
-
528
- shared_state.chatbot_messages.append(loop_msg)
529
- shared_state.message_queue.append(loop_msg)
530
-
531
- # Short sleep to allow stop signals to be processed
532
- for _ in range(5): # Check 5 times per second
533
- if shared_state.should_stop or shared_state.stop_event.is_set():
534
- print("Processing stopped during sleep")
535
- break
536
- time.sleep(0.1)
537
-
538
- if shared_state.should_stop or shared_state.stop_event.is_set():
539
- break
540
-
541
- except Exception as e:
542
- # Handle any exceptions in the processing loop
543
- error_msg = f"Error during task processing: {e}"
544
- print(error_msg)
545
- error_message = {"role": "assistant", "content": error_msg, "type": "error", "action_type": ""}
546
- shared_state.message_queue.append(error_message)
547
-
548
- finally:
549
- # Handle completion or interruption
550
- if shared_state.should_stop or shared_state.stop_event.is_set():
551
- stop_msg = f"Task '{shared_state.task}' was stopped. Ready for new tasks."
552
- final_message = {"role": "assistant", "content": stop_msg, "type": "text", "action_type": ""}
553
- else:
554
- complete_msg = f"Task '{shared_state.task}' completed. Thanks for using Teachmode-OOTB."
555
- final_message = {"role": "assistant", "content": complete_msg, "type": "text", "action_type": ""}
556
-
557
- shared_state.chatbot_messages.append(final_message)
558
- shared_state.message_queue.append(final_message)
559
-
560
- # Reset all state flags to allow for new tasks
561
- shared_state.is_processing = False
562
- shared_state.should_stop = False
563
- shared_state.is_paused = False
564
- shared_state.stop_event.clear()
565
- print("Processing completed, ready for new tasks")
566
- logging.info("process_input thread finished.")
567
-
568
- def main():
569
- # Logging is set up at the top level now
570
- logging.info("App main() function starting setup.")
571
- global app, shared_state, rate_limiter # Ensure app is global if needed by uvicorn
572
- parser = argparse.ArgumentParser()
573
- # Add arguments, but NOT host and port
574
- parser.add_argument("--model", type=str, default="teach-mode-gpt-4o", help="Model name")
575
- parser.add_argument("--task", type=str, default="Following the instructions to complete the task.", help="Initial task description")
576
- parser.add_argument("--selected_screen", type=int, default=0, help="Selected screen index")
577
- parser.add_argument("--user_id", type=str, default="hero_cases", help="User ID for the session")
578
- parser.add_argument("--trace_id", type=str, default="build_scroll_combat", help="Trace ID for the session")
579
- parser.add_argument("--api_keys", type=str, default="sk-proj-1234567890", help="API keys")
580
- parser.add_argument("--server_url", type=str, default="http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com", help="Server URL for the session")
581
-
582
- args = parser.parse_args()
583
-
584
- # Validate args or set defaults if needed (keep these)
585
- if not hasattr(args, 'model'): args.model = "default_model"
586
- if not hasattr(args, 'task'): args.task = "default_task"
587
- if not hasattr(args, 'selected_screen'): args.selected_screen = 0
588
- if not hasattr(args, 'user_id'): args.user_id = "unknown_user"
589
- if not hasattr(args, 'trace_id'): args.trace_id = "unknown_trace"
590
- if not hasattr(args, 'api_keys'): args.api_keys = "none"
591
- if not hasattr(args, 'server_url'): args.server_url = "none"
592
-
593
- shared_state = SharedState(args)
594
- rate_limiter = RateLimiter(interval_seconds=2) # Re-initialize rate limiter
595
- logging.info(f"Shared state initialized for user: {args.user_id}")
596
-
597
- # --- Restore original port calculation logic ---
598
- port = 7888 # Default port
599
- host = "0.0.0.0" # Listen on all interfaces
600
-
601
- if platform.system() == "Windows":
602
- try:
603
- username = os.environ["USERNAME"].lower()
604
- logging.info(f"Determining port based on Windows username: {username}")
605
- if username == "altair":
606
- port = 14000
607
- elif username.startswith("guest") and username[5:].isdigit():
608
- num = int(username[5:])
609
- if 1 <= num <= 10: # Assuming max 10 guests for this range
610
- port = 14000 + num
611
- else:
612
- logging.warning(f"Guest user number {num} out of range (1-10), using default port {port}.")
613
- else:
614
- logging.info(f"Username '{username}' doesn't match specific rules, using default port {port}.")
615
- except Exception as e:
616
- logging.error(f"Error determining port from username: {e}. Using default port {port}.", exc_info=True)
617
- else:
618
- logging.info(f"Not running on Windows, using default port {port}.")
619
- # --- End of restored port calculation ---
620
-
621
- logging.info(f"Final Host={host}, Port={port}")
622
-
623
- try:
624
- logging.info(f"Starting Uvicorn server on {host}:{port}")
625
- # Use the calculated port and specific host
626
- uvicorn.run(app, host=host, port=port)
627
- logging.info("Uvicorn server stopped.")
628
- except Exception as main_e:
629
- logging.error("Error in main execution:", exc_info=True)
630
- finally:
631
- logging.info("App main() function finished.")
632
-
633
- if __name__ == "__main__":
634
- main()
635
-
636
- # Test log_ootb_request
1
+ import argparse
2
+ import time
3
+ import json
4
+ from datetime import datetime
5
+ import threading
6
+ import requests
7
+ import platform # Add platform import
8
+ import pyautogui # Add pyautogui import
9
+ import webbrowser # Add webbrowser import
10
+ import os # Import os for path joining
11
+ import logging # Import logging
12
+ import importlib # For dynamic imports
13
+ import pkgutil # To find modules
14
+ import sys # For logging setup
15
+ import traceback # For logging setup
16
+ from logging.handlers import RotatingFileHandler # For logging setup
17
+ from fastapi import FastAPI, Request
18
+ from fastapi.responses import JSONResponse
19
+ from fastapi.middleware.cors import CORSMiddleware
20
+ from computer_use_ootb_internal.computer_use_demo.tools.computer import get_screen_details
21
+ from computer_use_ootb_internal.run_teachmode_ootb_args import simple_teachmode_sampling_loop
22
+ from computer_use_ootb_internal.computer_use_demo.executor.teachmode_executor import TeachmodeExecutor
23
+ import uvicorn # Assuming uvicorn is used to run FastAPI
24
+ import concurrent.futures
25
+ import asyncio
26
+
27
+ # --- App Logging Setup ---
28
+ try:
29
+ # Log to user's AppData directory for better accessibility
30
+ log_dir_base = os.environ.get('APPDATA', os.path.expanduser('~'))
31
+ log_dir = os.path.join(log_dir_base, 'OOTBAppLogs')
32
+ os.makedirs(log_dir, exist_ok=True)
33
+ log_file = os.path.join(log_dir, 'ootb_app.log')
34
+
35
+ log_format = '%(asctime)s - %(levelname)s - %(process)d - %(threadName)s - %(message)s'
36
+ log_level = logging.INFO # Or logging.DEBUG for more detail
37
+
38
+ # Use rotating file handler
39
+ handler = RotatingFileHandler(log_file, maxBytes=5*1024*1024, backupCount=2, encoding='utf-8')
40
+ handler.setFormatter(logging.Formatter(log_format))
41
+
42
+ # Configure root logger
43
+ logging.basicConfig(level=log_level, handlers=[handler])
44
+
45
+ # Add stream handler to see logs if running interactively (optional)
46
+ # logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
47
+
48
+ logging.info("="*20 + " OOTB App Starting " + "="*20)
49
+ logging.info(f"Running with args: {sys.argv}")
50
+ logging.info(f"Python Executable: {sys.executable}")
51
+ logging.info(f"Working Directory: {os.getcwd()}")
52
+ logging.info(f"User: {os.getenv('USERNAME')}")
53
+
54
+ except Exception as log_setup_e:
55
+ print(f"FATAL: Failed to set up logging: {log_setup_e}")
56
+ # Fallback logging might be needed here if file logging fails
57
+
58
+ # --- End App Logging Setup ---
59
+
60
+ app = FastAPI()
61
+
62
+ # Add CORS middleware to allow requests from the frontend
63
+ app.add_middleware(
64
+ CORSMiddleware,
65
+ allow_origins=["*"],
66
+ allow_credentials=True,
67
+ allow_methods=["*"],
68
+ allow_headers=["*"],
69
+ )
70
+
71
+ # Rate limiter for API endpoints
72
+ class RateLimiter:
73
+ def __init__(self, interval_seconds=2):
74
+ self.interval = interval_seconds
75
+ self.last_request_time = {}
76
+ self.lock = threading.Lock()
77
+
78
+ def allow_request(self, endpoint):
79
+ with self.lock:
80
+ current_time = time.time()
81
+ # Priority endpoints always allowed
82
+ if endpoint in ["/update_params", "/update_message"]:
83
+ return True
84
+
85
+ # For other endpoints, apply rate limiting
86
+ if endpoint not in self.last_request_time:
87
+ self.last_request_time[endpoint] = current_time
88
+ return True
89
+
90
+ elapsed = current_time - self.last_request_time[endpoint]
91
+ if elapsed < self.interval:
92
+ return False
93
+
94
+ self.last_request_time[endpoint] = current_time
95
+ return True
96
+
97
+
98
+ def log_ootb_request(server_url, ootb_request_type, data):
99
+ logging.info(f"OOTB Request: Type={ootb_request_type}, Data={data}")
100
+ # Keep the requests post for now if it serves a specific purpose
101
+ logging_data = {
102
+ "type": ootb_request_type,
103
+ "data": data,
104
+ "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
105
+ }
106
+ if not server_url.endswith("/update_ootb_logging"):
107
+ server_logging_url = server_url + "/update_ootb_logging"
108
+ else:
109
+ server_logging_url = server_url
110
+ try:
111
+ requests.post(server_logging_url, json=logging_data, timeout=5)
112
+ except Exception as req_log_e:
113
+ logging.warning(f"Could not log ootb request to server {server_logging_url}: {req_log_e}")
114
+
115
+
116
+ class SharedState:
117
+ def __init__(self, args):
118
+ self.args = args
119
+ self.task_updated = False
120
+ self.chatbot_messages = []
121
+ # Store all state-related data here
122
+ self.model = args.model
123
+ self.task = getattr(args, 'task', "")
124
+ self.selected_screen = args.selected_screen
125
+ self.user_id = args.user_id
126
+ self.trace_id = args.trace_id
127
+ self.api_keys = args.api_keys
128
+ self.server_url = args.server_url
129
+ self.message_queue = []
130
+ self.is_processing = False
131
+ self.should_stop = False
132
+ self.is_paused = False
133
+ self.full_screen_game_mode = getattr(args, 'full_screen_game_mode', 0)
134
+ # Add a new event to better control stopping
135
+ self.stop_event = threading.Event()
136
+ # Add a reference to the processing thread
137
+ self.processing_thread = None
138
+
139
+ shared_state = None
140
+ rate_limiter = RateLimiter(interval_seconds=2)
141
+
142
+ # Set up logging for this module
143
+ log = logging.getLogger(__name__)
144
+
145
def prepare_environment(state):
    """Dynamically load and run software-specific preparation logic.

    Inspects ``user_id`` / ``trace_id`` / ``task`` on *state* for known
    software names, maps the name to a ``<name>_prepare`` module inside
    ``computer_use_ootb_internal.preparation`` and calls its
    ``run_preparation(state)`` hook. Missing modules or hooks are logged and
    skipped; this function never raises.
    """
    # importlib.util is a submodule and needs an explicit import.
    import importlib.util

    # Same logger instance as the module-level `log`.
    logger = logging.getLogger(__name__)

    software_name = ""

    # Gather the identifying strings (lower-cased for matching).
    user_id = getattr(state, 'user_id', '').lower()
    task = getattr(state, 'task', '').lower()
    trace_id = getattr(state, 'trace_id', '').lower()

    logger.info(f"Checking for software in: user_id='{user_id}', trace_id='{trace_id}', task='{task}'")

    # Look for known software indicators.
    if "star rail" in user_id or "star rail" in trace_id:
        software_name = "star rail"
    elif "powerpoint" in user_id or "powerpoint" in trace_id or "powerpoint" in task:
        software_name = "powerpoint"
    # Add more software checks here as needed.

    if not software_name:
        # Merged the two duplicate `if not software_name` checks; both log
        # lines are preserved.
        logger.info("No specific software detected from IDs, checking task content")
        logger.info("No specific software preparation identified. Skipping preparation.")
        return

    logger.info(f"Identified software for preparation: '{software_name}'")

    # Normalize to a valid Python module name: "star rail" -> "star_rail_prepare".
    module_name_base = software_name.replace(" ", "_").replace("-", "_").lower()
    module_to_run = f"{module_name_base}_prepare"

    logger.info(f"Attempting preparation for software: '{software_name}' (Module: '{module_to_run}')")

    # Construct the full module path within the package.
    prep_package = "computer_use_ootb_internal.preparation"
    full_module_path = f"{prep_package}.{module_to_run}"

    try:
        # Probe for the module first to avoid import errors.
        # importlib.util.find_spec() replaces pkgutil.find_loader(), which is
        # deprecated since Python 3.12.
        logger.debug(f"Looking for preparation module: {full_module_path}")
        if importlib.util.find_spec(full_module_path) is None:
            logger.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
            return

        logger.debug(f"Importing preparation module: {full_module_path}")
        prep_module = importlib.import_module(full_module_path)

        # Check if the module has the expected hook before calling it.
        run_prep = getattr(prep_module, "run_preparation", None)
        if callable(run_prep):
            logger.info(f"Running preparation function from {full_module_path}...")
            run_prep(state)
            logger.info(f"Preparation function from {full_module_path} completed.")
        else:
            logger.warning(f"Module {full_module_path} found, but does not have a callable 'run_preparation' function. Skipping.")

    except ModuleNotFoundError:
        # find_spec()/import_module() raise this when the parent package itself
        # is missing.
        logger.warning(f"Preparation module '{full_module_path}' not found. Skipping preparation.")
    except Exception as e:
        logger.error(f"Error during dynamic preparation loading/execution for '{module_to_run}': {e}", exc_info=True)
209
+
210
+
211
@app.post("/update_params")
async def update_parameters(request: Request):
    """Replace the session parameters from a JSON payload and re-run preparation.

    Requires at least a 'task' field. Clears any queued messages, refreshes
    shared_state from the payload (falling back to the historical defaults),
    logs the request, and runs software-specific preparation.
    """
    logging.info("Received request to /update_params")
    try:
        data = await request.json()

        if 'task' not in data:
            return JSONResponse(
                content={"status": "error", "message": "Missing required field: task"},
                status_code=400,
            )

        # Drop any stale conversation state before applying new parameters.
        shared_state.message_queue.clear()
        shared_state.chatbot_messages.clear()
        logging.info("Cleared message queue and chatbot messages.")

        shared_state.args = argparse.Namespace(**data)
        shared_state.task_updated = True

        # Mirror the refreshed args onto the shared state, using the same
        # defaults the original session would have started with.
        defaults = {
            'model': "teach-mode-gpt-4o",
            'task': "Following the instructions to complete the task.",
            'selected_screen': 0,
            'user_id': "hero_cases",
            'trace_id': "build_scroll_combat",
            'api_keys': "sk-proj-1234567890",
            'server_url': "http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com",
        }
        for field, fallback in defaults.items():
            setattr(shared_state, field, getattr(shared_state.args, field, fallback))

        log_ootb_request(shared_state.server_url, "update_params", data)

        # Run any software-specific preparation now that parameters are fresh.
        prepare_environment(shared_state)

        logging.info("Parameters updated successfully.")
        return JSONResponse(
            content={"status": "success", "message": "Parameters updated", "new_args": vars(shared_state.args)},
            status_code=200,
        )
    except Exception:
        logging.error("Error processing /update_params:", exc_info=True)
        return JSONResponse(content={"status": "error", "message": "Internal server error"}, status_code=500)
253
+
254
@app.post("/update_message")
async def update_message(request: Request):
    """Accept a new task message and kick off the worker thread if idle."""
    data = await request.json()

    if 'message' not in data:
        return JSONResponse(
            content={"status": "error", "message": "Missing required field: message"},
            status_code=400,
        )

    log_ootb_request(shared_state.server_url, "update_message", data)

    new_task = data['message']
    # Default to windowed mode (0) when the client does not specify one.
    game_mode = data.get('full_screen_game_mode', 0)

    # shared_state.chatbot_messages.append({"role": "user", "content": new_task, "type": "text"})
    shared_state.task = new_task
    shared_state.args.task = new_task
    shared_state.full_screen_game_mode = game_mode

    # Make sure a previous stop request does not cancel this run.
    shared_state.stop_event.clear()

    # Spawn the worker only when nothing is currently running.
    if not shared_state.is_processing:
        shared_state.processing_thread = threading.Thread(target=process_input, daemon=True)
        shared_state.processing_thread.start()

    return JSONResponse(
        content={"status": "success", "message": "Message received", "task": shared_state.task},
        status_code=200,
    )
287
+
288
@app.get("/get_messages")
async def get_messages(request: Request):
    """Drain and return all messages queued for the client."""
    # Reject callers that poll faster than the configured interval.
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429,
        )

    # log_ootb_request(shared_state.server_url, "get_messages", {})

    # Hand back the pending messages and leave a fresh empty queue behind.
    pending, shared_state.message_queue = shared_state.message_queue, []

    return JSONResponse(
        content={"status": "success", "messages": pending},
        status_code=200,
    )
307
+
308
@app.get("/get_screens")
async def get_screens(request: Request):
    """Report the available screens and which one is primary."""
    # Reject callers that poll faster than the configured interval.
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429,
        )

    log_ootb_request(shared_state.server_url, "get_screens", {})

    options, primary = get_screen_details()

    return JSONResponse(
        content={"status": "success", "screens": options, "primary_index": primary},
        status_code=200,
    )
325
+
326
@app.post("/stop_processing")
async def stop_processing(request: Request):
    """Request cancellation of the running task and wipe the message history.

    The server stays up and ready for new tasks; history is cleared even when
    nothing is running so the client always starts from a clean slate.
    """
    # Reject callers that poll faster than the configured interval.
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429,
        )

    log_ootb_request(shared_state.server_url, "stop_processing", {})

    if not shared_state.is_processing:
        # Nothing to stop, but still reset history for a clean state.
        shared_state.message_queue.clear()
        shared_state.chatbot_messages.clear()
        logging.info("Cleared message queue and chatbot messages (no active process to stop).")
        return JSONResponse(
            content={"status": "error", "message": "No active processing to stop"},
            status_code=400,
        )

    # Signal the worker through both mechanisms it checks.
    shared_state.should_stop = True
    shared_state.stop_event.set()

    # Wipe history so the stop notice below is the only message left.
    shared_state.message_queue.clear()
    shared_state.chatbot_messages.clear()
    logging.info("Cleared message queue and chatbot messages during stop.")

    # Inform the user immediately that the stop is underway.
    stop_initiated_msg = {"role": "assistant", "content": f"Stopping task '{shared_state.task}'...", "type": "text", "action_type": ""}
    shared_state.message_queue.append(stop_initiated_msg)
    shared_state.chatbot_messages.append(stop_initiated_msg)

    return JSONResponse(
        content={"status": "success", "message": "Task is being stopped, server will remain available for new tasks"},
        status_code=200,
    )
366
+
367
@app.post("/toggle_pause")
async def toggle_pause(request: Request):
    """Flip the paused/running state of the active task and notify the client."""
    # Reject callers that poll faster than the configured interval.
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429,
        )

    log_ootb_request(shared_state.server_url, "toggle_pause", {})

    if not shared_state.is_processing:
        return JSONResponse(
            content={"status": "error", "message": "No active processing to pause/resume"},
            status_code=400,
        )

    # Toggle and capture the new pause state.
    shared_state.is_paused = not shared_state.is_paused
    now_paused = shared_state.is_paused

    print(f"Toggled pause state to: {now_paused}")

    status_message = "paused" if now_paused else "resumed"

    # Tell the user what happened via the normal message channel.
    if now_paused:
        note = {"role": "assistant", "content": f"Task '{shared_state.task}' has been paused. Click Continue to resume.", "type": "text", "action_type": ""}
    else:
        note = {"role": "assistant", "content": f"Task '{shared_state.task}' has been resumed.", "type": "text", "action_type": ""}

    shared_state.chatbot_messages.append(note)
    shared_state.message_queue.append(note)

    return JSONResponse(
        content={
            "status": "success",
            "message": f"Processing {status_message}",
            "is_paused": now_paused,
        },
        status_code=200,
    )
409
+
410
@app.get("/status")
async def get_status(request: Request):
    """Expose whether a task is currently running and whether it is paused."""
    # Reject callers that poll faster than the configured interval.
    if not rate_limiter.allow_request(request.url.path):
        return JSONResponse(
            content={"status": "error", "message": "Rate limit exceeded. Try again after 2 seconds."},
            status_code=429,
        )

    # log_ootb_request(shared_state.server_url, "get_status", {})

    print(f"Status check - Processing: {shared_state.is_processing}, Paused: {shared_state.is_paused}")
    return JSONResponse(
        content={
            "status": "success",
            "is_processing": shared_state.is_processing,
            "is_paused": shared_state.is_paused,
        },
        status_code=200,
    )
430
+
431
@app.post("/exec_computer_tool")
async def exec_computer_tool(request: Request):
    """Execute a computer-use action via TeachmodeExecutor.

    The executor calls asyncio.run() internally, so it must not run on
    FastAPI's event loop; it is dispatched to a worker thread instead.
    Returns the list of action results, or a 500 with the error message.
    """
    logging.info("Received request to /exec_computer_tool")
    try:
        data = await request.json()

        # Extract parameters from the request (defaults mirror the UI).
        selected_screen = data.get('selected_screen', 0)
        full_screen_game_mode = data.get('full_screen_game_mode', 0)
        response = data.get('response', {})

        logging.info(f"Executing TeachmodeExecutor with: screen={selected_screen}, mode={full_screen_game_mode}, response={response}")

        def run_executor():
            """Run the executor synchronously; return its results or an error dict."""
            executor = TeachmodeExecutor(
                selected_screen=selected_screen,
                full_screen_game_mode=full_screen_game_mode
            )

            results = []
            try:
                for action_result in executor(response):
                    results.append(action_result)
            except Exception as exec_error:
                logging.error(f"Error executing action: {exec_error}", exc_info=True)
                return {"error": str(exec_error)}

            return results

        # Run in a thread pool so the executor's internal asyncio.run() does
        # not clash with the already-running FastAPI event loop.
        # get_running_loop() replaces the deprecated get_event_loop(), which
        # is slated to stop working inside coroutines.
        with concurrent.futures.ThreadPoolExecutor() as pool:
            results = await asyncio.get_running_loop().run_in_executor(pool, run_executor)

        if isinstance(results, dict) and "error" in results:
            return JSONResponse(
                content={"status": "error", "message": results["error"]},
                status_code=500
            )

        logging.info(f"Action results: {results}")

        return JSONResponse(
            content={"status": "success", "results": results},
            status_code=200
        )
    except Exception as e:
        logging.error("Error processing /exec_computer_tool:", exc_info=True)
        return JSONResponse(
            content={"status": "error", "message": f"Internal server error: {str(e)}"},
            status_code=500
        )
486
+
487
def process_input() -> None:
    """Worker-thread entry point: drive the teach-mode sampling loop for the
    current task.

    Streams every message produced by the sampling loop into both shared
    buffers (chatbot history and the client-facing queue) while honoring
    pause and stop requests set by the HTTP endpoints. On exit — normal
    completion, stop, or error — appends a final status message and resets
    all processing flags so a new task can start.
    """
    global shared_state
    logging.info("process_input thread started.")
    # Mark the session active and reset any control flags left over from a
    # previous run.
    shared_state.is_processing = True
    shared_state.should_stop = False
    shared_state.is_paused = False
    shared_state.stop_event.clear() # Ensure stop event is cleared at the start

    print(f"start sampling loop: {shared_state.chatbot_messages}")
    print(f"shared_state.args before sampling loop: {shared_state.args}")


    try:
        # Get the generator for the sampling loop
        sampling_loop = simple_teachmode_sampling_loop(
            model=shared_state.model,
            task=shared_state.task,
            selected_screen=shared_state.selected_screen,
            user_id=shared_state.user_id,
            trace_id=shared_state.trace_id,
            api_keys=shared_state.api_keys,
            server_url=shared_state.server_url,
            full_screen_game_mode=shared_state.full_screen_game_mode,
        )

        # Process messages from the sampling loop
        for loop_msg in sampling_loop:
            # Check stop condition more frequently
            if shared_state.should_stop or shared_state.stop_event.is_set():
                print("Processing stopped by user")
                break

            # Check if paused and wait while paused
            while shared_state.is_paused and not shared_state.should_stop and not shared_state.stop_event.is_set():
                print(f"Processing paused at: {time.strftime('%H:%M:%S')}")
                # Wait a short time and check stop condition regularly
                for _ in range(5):  # Check 5 times per second
                    if shared_state.should_stop or shared_state.stop_event.is_set():
                        break
                    time.sleep(0.2)

            # Check again after pause loop
            if shared_state.should_stop or shared_state.stop_event.is_set():
                print("Processing stopped while paused or resuming")
                break

            # Publish the message to both the chatbot history and the
            # client-facing queue drained by /get_messages.
            shared_state.chatbot_messages.append(loop_msg)
            shared_state.message_queue.append(loop_msg)

            # Short sleep to allow stop signals to be processed
            for _ in range(5):  # Check 5 times per second
                if shared_state.should_stop or shared_state.stop_event.is_set():
                    print("Processing stopped during sleep")
                    break
                time.sleep(0.1)

            if shared_state.should_stop or shared_state.stop_event.is_set():
                break

    except Exception as e:
        # Handle any exceptions in the processing loop
        error_msg = f"Error during task processing: {e}"
        print(error_msg)
        error_message = {"role": "assistant", "content": error_msg, "type": "error", "action_type": ""}
        shared_state.message_queue.append(error_message)

    finally:
        # Handle completion or interruption
        if shared_state.should_stop or shared_state.stop_event.is_set():
            stop_msg = f"Task '{shared_state.task}' was stopped. Ready for new tasks."
            final_message = {"role": "assistant", "content": stop_msg, "type": "text", "action_type": ""}
        else:
            complete_msg = f"Task '{shared_state.task}' completed. Thanks for using Teachmode-OOTB."
            final_message = {"role": "assistant", "content": complete_msg, "type": "text", "action_type": ""}

        shared_state.chatbot_messages.append(final_message)
        shared_state.message_queue.append(final_message)

        # Reset all state flags to allow for new tasks
        shared_state.is_processing = False
        shared_state.should_stop = False
        shared_state.is_paused = False
        shared_state.stop_event.clear()
        print("Processing completed, ready for new tasks")
        logging.info("process_input thread finished.")
572
+
573
def main():
    """Parse CLI arguments, initialize shared state, and run the FastAPI app.

    Host and port are deliberately not CLI arguments: the port is derived
    from the Windows username (a deployment convention — 'altair' gets
    14000, 'guestN' gets 14000+N for N in 1..10), defaulting to 7888.
    """
    # Logging is set up at the top level now
    logging.info("App main() function starting setup.")
    global app, shared_state, rate_limiter  # app must be global for uvicorn

    parser = argparse.ArgumentParser()
    # Add arguments, but NOT host and port
    parser.add_argument("--model", type=str, default="teach-mode-gpt-4o", help="Model name")
    parser.add_argument("--task", type=str, default="Following the instructions to complete the task.", help="Initial task description")
    parser.add_argument("--selected_screen", type=int, default=0, help="Selected screen index")
    parser.add_argument("--user_id", type=str, default="hero_cases", help="User ID for the session")
    parser.add_argument("--trace_id", type=str, default="build_scroll_combat", help="Trace ID for the session")
    parser.add_argument("--api_keys", type=str, default="sk-proj-1234567890", help="API keys")
    parser.add_argument("--server_url", type=str, default="http://ec2-44-234-43-86.us-west-2.compute.amazonaws.com", help="Server URL for the session")

    args = parser.parse_args()
    # NOTE: every argument above has a default, so argparse guarantees the
    # attributes exist; the former hasattr() fallback block was dead code
    # and has been removed.

    shared_state = SharedState(args)
    rate_limiter = RateLimiter(interval_seconds=2)  # Re-initialize rate limiter
    logging.info(f"Shared state initialized for user: {args.user_id}")

    # --- Port calculation: per-user ports on Windows deployments ---
    port = 7888  # Default port
    host = "0.0.0.0"  # Listen on all interfaces

    if platform.system() == "Windows":
        try:
            username = os.environ["USERNAME"].lower()
            logging.info(f"Determining port based on Windows username: {username}")
            if username == "altair":
                port = 14000
            elif username.startswith("guest") and username[5:].isdigit():
                num = int(username[5:])
                if 1 <= num <= 10:  # Assuming max 10 guests for this range
                    port = 14000 + num
                else:
                    logging.warning(f"Guest user number {num} out of range (1-10), using default port {port}.")
            else:
                logging.info(f"Username '{username}' doesn't match specific rules, using default port {port}.")
        except Exception as e:
            logging.error(f"Error determining port from username: {e}. Using default port {port}.", exc_info=True)
    else:
        logging.info(f"Not running on Windows, using default port {port}.")
    # --- End of port calculation ---

    logging.info(f"Final Host={host}, Port={port}")

    try:
        logging.info(f"Starting Uvicorn server on {host}:{port}")
        # Use the calculated port and specific host
        uvicorn.run(app, host=host, port=port)
        logging.info("Uvicorn server stopped.")
    except Exception:
        # The unused `main_e` binding was dropped; exc_info captures details.
        logging.error("Error in main execution:", exc_info=True)
    finally:
        logging.info("App main() function finished.")
637
+
638
if __name__ == "__main__":
    main()
    # The former module-level smoke-test call to log_ootb_request() was
    # removed: living outside this guard, it fired a network request as a
    # side effect every time this module was imported.