cua-computer 0.2.11__py3-none-any.whl → 0.2.12__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- computer/__init__.py +2 -2
- computer/computer.py +4 -4
- computer/helpers.py +4 -1
- computer/interface/linux.py +1 -1
- computer/interface/macos.py +1 -1
- computer/interface/windows.py +1 -1
- computer/providers/lume_api.py +0 -13
- computer/providers/lumier/provider.py +57 -64
- computer/telemetry.py +5 -5
- {cua_computer-0.2.11.dist-info → cua_computer-0.2.12.dist-info}/METADATA +1 -1
- {cua_computer-0.2.11.dist-info → cua_computer-0.2.12.dist-info}/RECORD +13 -13
- {cua_computer-0.2.11.dist-info → cua_computer-0.2.12.dist-info}/WHEEL +0 -0
- {cua_computer-0.2.11.dist-info → cua_computer-0.2.12.dist-info}/entry_points.txt +0 -0
computer/__init__.py
CHANGED
@@ -6,14 +6,14 @@ import sys
__version__ = "0.1.0"

# Initialize logging
- logger = logging.getLogger("
+ logger = logging.getLogger("computer")

# Initialize telemetry when the package is imported
try:
# Import from core telemetry
from core.telemetry import (
- is_telemetry_enabled,
flush,
+ is_telemetry_enabled,
record_event,
)
computer/computer.py
CHANGED
@@ -85,7 +85,7 @@ class Computer:
experiments: Optional list of experimental features to enable (e.g. ["app-use"])
"""

- self.logger = Logger("
+ self.logger = Logger("computer", verbosity)
self.logger.info("Initializing Computer...")

# Store original parameters
@@ -132,11 +132,11 @@

# Configure root logger
self.verbosity = verbosity
- self.logger = Logger("
+ self.logger = Logger("computer", verbosity)

# Configure component loggers with proper hierarchy
- self.vm_logger = Logger("
- self.interface_logger = Logger("
+ self.vm_logger = Logger("computer.vm", verbosity)
+ self.interface_logger = Logger("computer.interface", verbosity)

if not use_host_computer_server:
if ":" not in image or len(image.split(":")) != 2:
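The renamed loggers above ("computer", "computer.vm", "computer.interface") form a dotted hierarchy, so downstream code can tune verbosity per component with the standard logging module. A minimal consumer-side sketch (assuming the package's Logger wrapper ultimately registers these names with the stdlib logging module; the handler setup here is illustrative, not part of the package):

```python
import logging

# Illustrative consumer-side configuration (assumption: the Logger wrapper
# delegates to stdlib loggers under these dotted names).
logging.basicConfig(format="%(name)s %(levelname)s %(message)s")

logging.getLogger("computer").setLevel(logging.INFO)                # top-level package logger
logging.getLogger("computer.vm").setLevel(logging.DEBUG)            # verbose VM lifecycle output
logging.getLogger("computer.interface").setLevel(logging.WARNING)   # quieter interface logger
```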
computer/helpers.py
CHANGED
@@ -1,6 +1,7 @@
"""
Helper functions and decorators for the Computer module.
"""
+ import logging
import asyncio
from functools import wraps
from typing import Any, Callable, Optional, TypeVar, cast
@@ -8,6 +9,8 @@ from typing import Any, Callable, Optional, TypeVar, cast
# Global reference to the default computer instance
_default_computer = None

+ logger = logging.getLogger(__name__)
+
def set_default_computer(computer):
"""
Set the default computer instance to be used by the remote decorator.
@@ -41,7 +44,7 @@ def sandboxed(venv_name: str = "default", computer: str = "default", max_retries
try:
return await comp.venv_exec(venv_name, func, *args, **kwargs)
except Exception as e:
-
+ logger.error(f"Attempt {i+1} failed: {e}")
await asyncio.sleep(1)
if i == max_retries - 1:
raise e
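The new module-level logger is used inside the `sandboxed` decorator's retry loop, which now records each failed attempt before sleeping and re-raising on the last try. A standalone sketch of that pattern (the `retry_with_logging` helper and its arguments are hypothetical, shown only to illustrate the loop visible in the hunk above):

```python
import asyncio
import logging

logger = logging.getLogger(__name__)

async def retry_with_logging(attempt_coro, max_retries: int = 3):
    """Illustration of the retry loop: log each failure, back off briefly,
    and re-raise the exception once the final attempt has failed."""
    for i in range(max_retries):
        try:
            return await attempt_coro()
        except Exception as e:
            logger.error(f"Attempt {i+1} failed: {e}")
            await asyncio.sleep(1)
            if i == max_retries - 1:
                raise e
```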
computer/interface/linux.py
CHANGED
@@ -30,7 +30,7 @@ class LinuxComputerInterface(BaseComputerInterface):
self._command_lock = asyncio.Lock() # Lock to ensure only one command at a time

# Set logger name for Linux interface
- self.logger = Logger("
+ self.logger = Logger("computer.interface.linux", LogLevel.NORMAL)

@property
def ws_uri(self) -> str:
computer/interface/macos.py
CHANGED
@@ -29,7 +29,7 @@ class MacOSComputerInterface(BaseComputerInterface):
self._command_lock = asyncio.Lock() # Lock to ensure only one command at a time

# Set logger name for macOS interface
- self.logger = Logger("
+ self.logger = Logger("computer.interface.macos", LogLevel.NORMAL)

@property
def ws_uri(self) -> str:
computer/interface/windows.py
CHANGED
@@ -30,7 +30,7 @@ class WindowsComputerInterface(BaseComputerInterface):
self._command_lock = asyncio.Lock() # Lock to ensure only one command at a time

# Set logger name for Windows interface
- self.logger = Logger("
+ self.logger = Logger("computer.interface.windows", LogLevel.NORMAL)

@property
def ws_uri(self) -> str:
computer/providers/lume_api.py
CHANGED
@@ -66,8 +66,6 @@ def lume_api_get(

# Only print the curl command when debug is enabled
display_curl_string = ' '.join(display_cmd)
- if debug or verbose:
- print(f"DEBUG: Executing curl API call: {display_curl_string}")
logger.debug(f"Executing API request: {display_curl_string}")

# Execute the command - for execution we need to use shell=True to handle URLs with special characters
@@ -172,8 +170,6 @@ def lume_api_run(
payload["sharedDirectories"] = run_opts["shared_directories"]

# Log the payload for debugging
- if debug or verbose:
- print(f"DEBUG: Payload for {vm_name} run request: {json.dumps(payload, indent=2)}")
logger.debug(f"API payload: {json.dumps(payload, indent=2)}")

# Construct the curl command
@@ -184,11 +180,6 @@
api_url
]

- # Always print the command for debugging
- if debug or verbose:
- print(f"DEBUG: Executing curl run API call: {' '.join(cmd)}")
- print(f"Run payload: {json.dumps(payload, indent=2)}")
-
# Execute the command
try:
result = subprocess.run(cmd, capture_output=True, text=True)
@@ -405,8 +396,6 @@ def lume_api_pull(
f"http://{host}:{port}/lume/pull"
])

- if debug or verbose:
- print(f"DEBUG: Executing curl API call: {' '.join(pull_cmd)}")
logger.debug(f"Executing API request: {' '.join(pull_cmd)}")

try:
@@ -474,8 +463,6 @@ def lume_api_delete(

# Only print the curl command when debug is enabled
display_curl_string = ' '.join(display_cmd)
- if debug or verbose:
- print(f"DEBUG: Executing curl API call: {display_curl_string}")
logger.debug(f"Executing API request: {display_curl_string}")

# Execute the command - for execution we need to use shell=True to handle URLs with special characters
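These hunks drop the `if debug or verbose: print(...)` blocks and leave only the existing `logger.debug(...)` calls, so the curl commands and payloads no longer reach stdout unconditionally. A consumer who still wants that output can enable DEBUG handling through standard logging, for example (a minimal sketch, not part of the package):

```python
import logging

# Illustrative setup: emit DEBUG records (including the API request and
# payload details logged by lume_api) to stderr with a simple format.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)
```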
computer/providers/lumier/provider.py
CHANGED
@@ -305,7 +305,7 @@ class LumierProvider(BaseVMProvider):
cmd = ["docker", "run", "-d", "--name", self.container_name]

cmd.extend(["-p", f"{self.vnc_port}:8006"])
-
+ logger.debug(f"Using specified noVNC_port: {self.vnc_port}")

# Set API URL using the API port
self._api_url = f"http://{self.host}:{self.api_port}"
@@ -324,7 +324,7 @@ class LumierProvider(BaseVMProvider):
"-v", f"{storage_dir}:/storage",
"-e", f"HOST_STORAGE_PATH={storage_dir}"
])
-
+ logger.debug(f"Using persistent storage at: {storage_dir}")

# Add shared folder volume mount if shared_path is specified
if self.shared_path:
@@ -337,12 +337,12 @@ class LumierProvider(BaseVMProvider):
"-v", f"{shared_dir}:/shared",
"-e", f"HOST_SHARED_PATH={shared_dir}"
])
-
+ logger.debug(f"Using shared folder at: {shared_dir}")

# Add environment variables
# Always use the container_name as the VM_NAME for consistency
# Use the VM image passed from the Computer class
-
+ logger.debug(f"Using VM image: {self.image}")

# If ghcr.io is in the image, use the full image name
if "ghcr.io" in self.image:
@@ -362,22 +362,22 @@ class LumierProvider(BaseVMProvider):

# First check if the image exists locally
try:
-
+ logger.debug(f"Checking if Docker image {lumier_image} exists locally...")
check_image_cmd = ["docker", "image", "inspect", lumier_image]
subprocess.run(check_image_cmd, capture_output=True, check=True)
-
+ logger.debug(f"Docker image {lumier_image} found locally.")
except subprocess.CalledProcessError:
# Image doesn't exist locally
-
-
-
-
+ logger.warning(f"\nWARNING: Docker image {lumier_image} not found locally.")
+ logger.warning("The system will attempt to pull it from Docker Hub, which may fail if you have network connectivity issues.")
+ logger.warning("If the Docker pull fails, you may need to manually pull the image first with:")
+ logger.warning(f" docker pull {lumier_image}\n")

# Add the image to the command
cmd.append(lumier_image)

# Print the Docker command for debugging
-
+ logger.debug(f"DOCKER COMMAND: {' '.join(cmd)}")

# Run the container with improved error handling
try:
@@ -395,8 +395,8 @@ class LumierProvider(BaseVMProvider):
raise

# Container started, now check VM status with polling
-
-
+ logger.debug("Container started, checking VM status...")
+ logger.debug("NOTE: This may take some time while the VM image is being pulled and initialized")

# Start a background thread to show container logs in real-time
import threading
@@ -404,8 +404,8 @@ class LumierProvider(BaseVMProvider):
def show_container_logs():
# Give the container a moment to start generating logs
time.sleep(1)
-
-
+ logger.debug(f"\n---- CONTAINER LOGS FOR '{name}' (LIVE) ----")
+ logger.debug("Showing logs as they are generated. Press Ctrl+C to stop viewing logs...\n")

try:
# Use docker logs with follow option
@@ -415,17 +415,17 @@ class LumierProvider(BaseVMProvider):

# Read and print logs line by line
for line in process.stdout:
-
+ logger.debug(line, end='')

# Break if process has exited
if process.poll() is not None:
break
except Exception as e:
-
+ logger.error(f"\nError showing container logs: {e}")
if self.verbose:
logger.error(f"Error in log streaming thread: {e}")
finally:
-
+ logger.debug("\n---- LOG STREAMING ENDED ----")
# Make sure process is terminated
if 'process' in locals() and process.poll() is None:
process.terminate()
@@ -452,11 +452,11 @@ class LumierProvider(BaseVMProvider):
else:
wait_time = min(30, 5 + (attempt * 2))

-
+ logger.debug(f"Waiting {wait_time}s before retry #{attempt+1}...")
await asyncio.sleep(wait_time)

# Try to get VM status
-
+ logger.debug(f"Checking VM status (attempt {attempt+1})...")
vm_status = await self.get_vm(name)

# Check for API errors
@@ -468,20 +468,20 @@ class LumierProvider(BaseVMProvider):
# since _lume_api_get already logged the technical details
if consecutive_errors == 1 or attempt % 5 == 0:
if 'Empty reply from server' in error_msg:
-
-
+ logger.info("API server is starting up - container is running, but API isn't fully initialized yet.")
+ logger.info("This is expected during the initial VM setup - will continue polling...")
else:
# Don't repeat the exact same error message each time
- logger.
+ logger.warning(f"API request error (attempt {attempt+1}): {error_msg}")
# Just log that we're still working on it
if attempt > 3:
-
+ logger.debug("Still waiting for the API server to become available...")

# If we're getting errors but container is running, that's normal during startup
if vm_status.get('status') == 'running':
if not vm_running:
-
-
+ logger.info("Container is running, waiting for the VM within it to become fully ready...")
+ logger.info("This might take a minute while the VM initializes...")
vm_running = True

# Increase counter and continue
@@ -497,35 +497,35 @@ class LumierProvider(BaseVMProvider):

# Check if we have an IP address, which means the VM is fully ready
if 'ip_address' in vm_status and vm_status['ip_address']:
-
+ logger.info(f"VM is now fully running with IP: {vm_status.get('ip_address')}")
if 'vnc_url' in vm_status and vm_status['vnc_url']:
-
+ logger.info(f"VNC URL: {vm_status.get('vnc_url')}")
return vm_status
else:
-
-
+ logger.debug("VM is running but still initializing network interfaces...")
+ logger.debug("Waiting for IP address to be assigned...")
else:
# VM exists but might still be starting up
status = vm_status.get('status', 'unknown')
-
+ logger.debug(f"VM found but status is: {status}. Continuing to poll...")

# Increase counter for next iteration's delay calculation
attempt += 1

# If we reach a very large number of attempts, give a reassuring message but continue
if attempt % 10 == 0:
-
+ logger.debug(f"Still waiting after {attempt} attempts. This might take several minutes for first-time setup.")
if not vm_running and attempt >= 20:
-
-
-
-
-
+ logger.warning("\nNOTE: First-time VM initialization can be slow as images are downloaded.")
+ logger.warning("If this continues for more than 10 minutes, you may want to check:")
+ logger.warning(" 1. Docker logs with: docker logs " + name)
+ logger.warning(" 2. If your network can access container registries")
+ logger.warning("Press Ctrl+C to abort if needed.\n")

# After 150 attempts (likely over 30-40 minutes), return current status
if attempt >= 150:
-
-
+ logger.debug(f"Reached 150 polling attempts. VM status is: {vm_status.get('status', 'unknown')}")
+ logger.debug("Returning current VM status, but please check Docker logs if there are issues.")
return vm_status

except Exception as e:
@@ -535,9 +535,9 @@ class LumierProvider(BaseVMProvider):

# If we've had too many consecutive errors, might be a deeper problem
if consecutive_errors >= 10:
-
-
-
+ logger.warning(f"\nWARNING: Encountered {consecutive_errors} consecutive errors while checking VM status.")
+ logger.warning("You may need to check the Docker container logs or restart the process.")
+ logger.warning(f"Error details: {str(e)}\n")

# Increase attempt counter for next iteration
attempt += 1
@@ -545,7 +545,7 @@ class LumierProvider(BaseVMProvider):
# After many consecutive errors, add a delay to avoid hammering the system
if attempt > 5:
error_delay = min(30, 10 + attempt)
-
+ logger.warning(f"Multiple connection errors, waiting {error_delay}s before next attempt...")
await asyncio.sleep(error_delay)

except subprocess.CalledProcessError as e:
@@ -568,7 +568,7 @@ class LumierProvider(BaseVMProvider):
api_ready = False
container_running = False

-
+ logger.debug(f"Waiting for container {container_name} to be ready (timeout: {timeout}s)...")

while time.time() - start_time < timeout:
# Check if container is running
@@ -579,7 +579,6 @@ class LumierProvider(BaseVMProvider):

if container_status and container_status.startswith("Up"):
container_running = True
- print(f"Container {container_name} is running")
logger.info(f"Container {container_name} is running with status: {container_status}")
else:
logger.warning(f"Container {container_name} not yet running, status: {container_status}")
@@ -603,7 +602,6 @@ class LumierProvider(BaseVMProvider):

if result.returncode == 0 and "ok" in result.stdout.lower():
api_ready = True
- print(f"API is ready at {api_url}")
logger.info(f"API is ready at {api_url}")
break
else:
@@ -621,7 +619,6 @@ class LumierProvider(BaseVMProvider):
if vm_result.returncode == 0 and vm_result.stdout.strip():
# VM API responded with something - consider the API ready
api_ready = True
- print(f"VM API is ready at {vm_api_url}")
logger.info(f"VM API is ready at {vm_api_url}")
break
else:
@@ -643,7 +640,6 @@ class LumierProvider(BaseVMProvider):
else:
curl_error = f"Unknown curl error code: {curl_code}"

- print(f"API not ready yet: {curl_error}")
logger.info(f"API not ready yet: {curl_error}")
except subprocess.SubprocessError as e:
logger.warning(f"Error checking API status: {e}")
@@ -652,22 +648,19 @@ class LumierProvider(BaseVMProvider):
# a bit longer before checking again, as the container may still be initializing
elapsed_seconds = time.time() - start_time
if int(elapsed_seconds) % 5 == 0: # Only print status every 5 seconds to reduce verbosity
-
+ logger.debug(f"Waiting for API to initialize... ({elapsed_seconds:.1f}s / {timeout}s)")

await asyncio.sleep(3) # Longer sleep between API checks

# Handle timeout - if the container is running but API is not ready, that's not
# necessarily an error - the API might just need more time to start up
if not container_running:
- print(f"Timed out waiting for container {container_name} to start")
logger.warning(f"Timed out waiting for container {container_name} to start")
return False

if not api_ready:
- print(f"Container {container_name} is running, but API is not fully ready yet.")
- print("Proceeding with operations. API will become available shortly.")
- print("NOTE: You may see some 'API request failed' messages while the API initializes.")
logger.warning(f"Container {container_name} is running, but API is not fully ready yet.")
+ logger.warning(f"NOTE: You may see some 'API request failed' messages while the API initializes.")

# Return True if container is running, even if API isn't ready yet
# This allows VM operations to proceed, with appropriate retries for API calls
@@ -777,8 +770,8 @@ class LumierProvider(BaseVMProvider):
# For follow mode with timeout, we'll run the command and handle the timeout
log_cmd.append(container_name)
logger.info(f"Following logs for container '{container_name}' with timeout {timeout}s")
-
-
+ logger.info(f"\n---- CONTAINER LOGS FOR '{container_name}' (LIVE) ----")
+ logger.info(f"Press Ctrl+C to stop following logs\n")

try:
# Run with timeout
@@ -790,7 +783,7 @@ class LumierProvider(BaseVMProvider):
process.wait(timeout=timeout)
except subprocess.TimeoutExpired:
process.terminate() # Stop after timeout
-
+ logger.info(f"\n---- LOG FOLLOWING STOPPED (timeout {timeout}s reached) ----")
else:
# Without timeout, wait for user interruption
process.wait()
@@ -798,14 +791,14 @@ class LumierProvider(BaseVMProvider):
return "Logs were displayed to console in follow mode"
except KeyboardInterrupt:
process.terminate()
-
+ logger.info("\n---- LOG FOLLOWING STOPPED (user interrupted) ----")
return "Logs were displayed to console in follow mode (interrupted)"
else:
# For follow mode without timeout, we'll print a helpful message
log_cmd.append(container_name)
logger.info(f"Following logs for container '{container_name}' indefinitely")
-
-
+ logger.info(f"\n---- CONTAINER LOGS FOR '{container_name}' (LIVE) ----")
+ logger.info(f"Press Ctrl+C to stop following logs\n")

try:
# Run the command and let it run until interrupted
@@ -814,7 +807,7 @@ class LumierProvider(BaseVMProvider):
return "Logs were displayed to console in follow mode"
except KeyboardInterrupt:
process.terminate()
-
+ logger.info("\n---- LOG FOLLOWING STOPPED (user interrupted) ----")
return "Logs were displayed to console in follow mode (interrupted)"
else:
# For non-follow mode, capture and return the logs as a string
@@ -827,11 +820,11 @@ class LumierProvider(BaseVMProvider):

# Only print header and logs if there's content
if logs.strip():
-
-
-
+ logger.info(f"\n---- CONTAINER LOGS FOR '{container_name}' (LAST {num_lines} LINES) ----\n")
+ logger.info(logs)
+ logger.info(f"\n---- END OF LOGS ----")
else:
-
+ logger.info(f"\nNo logs available for container '{container_name}'")

return logs
except subprocess.CalledProcessError as e:
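Most of the changes in this file route what used to be `print(...)` output through the module logger, including the background thread that streams `docker logs` while the VM boots. A standalone sketch of that streaming pattern (the logger name and `stream_container_logs` helper are hypothetical; note that `logging.Logger.debug` has no print-style `end=` keyword, so the sketch strips the trailing newline instead):

```python
import logging
import subprocess
import threading

logger = logging.getLogger("computer.vm.lumier")  # hypothetical logger name

def stream_container_logs(container_name: str) -> threading.Thread:
    """Follow `docker logs -f` in a daemon thread and forward each line to the logger."""
    def _pump() -> None:
        process = subprocess.Popen(
            ["docker", "logs", "-f", container_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
        )
        try:
            for line in process.stdout:
                logger.debug(line.rstrip())
        finally:
            # Stop following if the thread exits while docker logs is still running
            if process.poll() is None:
                process.terminate()

    thread = threading.Thread(target=_pump, daemon=True)
    thread.start()
    return thread
```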
computer/telemetry.py
CHANGED
@@ -9,10 +9,10 @@ TELEMETRY_AVAILABLE = False

try:
from core.telemetry import (
- record_event,
increment,
is_telemetry_enabled,
is_telemetry_globally_disabled,
+ record_event,
)

def increment_counter(counter_name: str, value: int = 1) -> None:
@@ -22,14 +22,14 @@ try:

def set_dimension(name: str, value: Any) -> None:
"""Set a dimension that will be attached to all events."""
- logger = logging.getLogger("
+ logger = logging.getLogger("computer.telemetry")
logger.debug(f"Setting dimension {name}={value}")

TELEMETRY_AVAILABLE = True
- logger = logging.getLogger("
+ logger = logging.getLogger("computer.telemetry")
logger.info("Successfully imported telemetry")
except ImportError as e:
- logger = logging.getLogger("
+ logger = logging.getLogger("computer.telemetry")
logger.warning(f"Could not import telemetry: {e}")
TELEMETRY_AVAILABLE = False

@@ -40,7 +40,7 @@ def _noop(*args: Any, **kwargs: Any) -> None:
pass


- logger = logging.getLogger("
+ logger = logging.getLogger("computer.telemetry")

# If telemetry isn't available, use no-op functions
if not TELEMETRY_AVAILABLE:
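The context lines around these hunks show telemetry.py's optional-dependency pattern: try to import the real implementation from `core.telemetry`, and fall back to no-op stand-ins when it is missing so callers never have to branch. A condensed sketch of that pattern (simplified to a single function; the real module wraps several):

```python
import logging
from typing import Any

logger = logging.getLogger("computer.telemetry")

try:
    # Real implementation, available only when the core package is installed.
    from core.telemetry import record_event
    TELEMETRY_AVAILABLE = True
    logger.info("Successfully imported telemetry")
except ImportError as e:
    logger.warning(f"Could not import telemetry: {e}")
    TELEMETRY_AVAILABLE = False

    def record_event(*args: Any, **kwargs: Any) -> None:
        """No-op fallback so calling code works without the telemetry backend."""
        pass
```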
{cua_computer-0.2.11.dist-info → cua_computer-0.2.12.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
- computer/__init__.py,sha256=
- computer/computer.py,sha256=
+ computer/__init__.py,sha256=44ZBq815dMihgAHmBKn1S_GFNbElCXyZInh3hle1k9Y,1237
+ computer/computer.py,sha256=bHo7pdJoz8p3YSERYvdY7aLYdqYdiXbPVQydirlhwkM,41390
computer/diorama_computer.py,sha256=jOP7_eXxxU6SMIoE25ni0YXPK0E7p5sZeLKmkYLh6G8,3871
- computer/helpers.py,sha256=
+ computer/helpers.py,sha256=iHkO2WhuCLc15g67kfMnpQWxfNRlz2YeJNEvYaL9jlM,1826
computer/interface/__init__.py,sha256=xQvYjq5PMn9ZJOmRR5mWtONTl_0HVd8ACvW6AQnzDdw,262
computer/interface/base.py,sha256=Uc3pp-8_9YJpawAwt1ixaVN3N0_MtY6nAOSvuKy0Mpc,7863
computer/interface/factory.py,sha256=Eas5u9sOZ8FegwX51dP9M37oZBjy2EiVcmhTPc98L3Y,1639
- computer/interface/linux.py,sha256=
- computer/interface/macos.py,sha256=
+ computer/interface/linux.py,sha256=xx9xB33wExmS03c7CLS_vqzZCjtXz1eC6M0NDkDtwFM,30473
+ computer/interface/macos.py,sha256=SLY-oDr_13l6vW_2U2igCbACtzAJEJHTdAMEOfy_q0g,30853
computer/interface/models.py,sha256=CYbX3PLlWqjFuDiLWMiBzPmmXB8_g9VNLfBFBC6RtvI,3317
- computer/interface/windows.py,sha256=
+ computer/interface/windows.py,sha256=i3QMPTEEIQxn-oeKukQVbDmciqezHAQrlT1DJvLWPFg,30447
computer/logger.py,sha256=UVvnmZGOWVF9TCsixEbeQnDZ3wBPAJ2anW3Zp-MoJ8Y,2896
computer/models.py,sha256=iFNM1QfZArD8uf66XJXb2EDIREsfrxqqA5_liLBMfrE,1188
computer/providers/__init__.py,sha256=hS9lLxmmHa1u82XJJ_xuqSKipClsYUEPx-8OK9ogtVg,194
@@ -18,19 +18,19 @@ computer/providers/cloud/provider.py,sha256=XEdCrnZzRwvvkPHIwfhfJl3xB6W7tZKdBI0d
computer/providers/factory.py,sha256=T0G9lhFUofCXzQGf6C-pdHlquFXMiuy_IbQaOgIOgRQ,5677
computer/providers/lume/__init__.py,sha256=E6hTbVQF5lLZD8JyG4rTwUnCBO4q9K8UkYNQ31R0h7c,193
computer/providers/lume/provider.py,sha256=grLZeXd4Y8iYsNq2gfNGcQq1bnTcNYNepEv-mxmROG4,20562
- computer/providers/lume_api.py,sha256=
+ computer/providers/lume_api.py,sha256=i9dXJGrUhfA49VSY4p6_O6_AzeLNlRppG7jbM3jIJmU,19581
computer/providers/lumier/__init__.py,sha256=qz8coMA2K5MVoqNC12SDXJe6lI7z2pn6RHssUOMY5Ug,212
- computer/providers/lumier/provider.py,sha256=
+ computer/providers/lumier/provider.py,sha256=BDgnTuik42H9OuCmnd-1TxM8p4vl_ahfrhNbi0FNCMM,46644
computer/providers/winsandbox/__init__.py,sha256=WsMVBBa_qFfqVHPQzg6j4PegQwLiIudkzUedpYkrfXU,244
computer/providers/winsandbox/provider.py,sha256=4D5C6VGmxyNdsDqojzO4O9Nn2GCOVmN6BT78euF-OSU,18374
computer/providers/winsandbox/setup_script.ps1,sha256=8aGwR7PEvqnYzCNyXTDKIwJ6pYrwyWYLRjmNT_jYIwQ,4623
- computer/telemetry.py,sha256=
+ computer/telemetry.py,sha256=jHM3LJAgO2ltN3wlQ6mqCPUcmlS8F955KI70no-T3xA,3730
computer/ui/__init__.py,sha256=pmo05ek9qiB_x7DPeE6Vf_8RsIOqTD0w1dBLMHfoOnY,45
computer/ui/__main__.py,sha256=Jwy2oC_mGZLN0fX7WLqpjaQkbXMeM3ISrUc8WSRUG0c,284
computer/ui/gradio/__init__.py,sha256=5_KimixM48-X74FCsLw7LbSt39MQfUMEL8-M9amK3Cw,117
computer/ui/gradio/app.py,sha256=5_AG2dQR9RtFrGQNonScAw64rlswclKW26tYlFBdXtM,70396
computer/utils.py,sha256=zY50NXB7r51GNLQ6l7lhG_qv0_ufpQ8n0-SDhCei8m4,2838
- cua_computer-0.2.
- cua_computer-0.2.
- cua_computer-0.2.
- cua_computer-0.2.
+ cua_computer-0.2.12.dist-info/METADATA,sha256=aK8odbfrvNEiiioGjvE8gFpy89vGFp9P7oNmLx88Lw0,5845
+ cua_computer-0.2.12.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+ cua_computer-0.2.12.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+ cua_computer-0.2.12.dist-info/RECORD,,
File without changes
|
File without changes
|