portacode 0.3.19.dev4__py3-none-any.whl → 1.4.11.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of portacode might be problematic; see the registry advisory for details.

Files changed (92)
  1. portacode/_version.py +16 -3
  2. portacode/cli.py +143 -17
  3. portacode/connection/client.py +149 -10
  4. portacode/connection/handlers/WEBSOCKET_PROTOCOL.md +824 -21
  5. portacode/connection/handlers/__init__.py +28 -1
  6. portacode/connection/handlers/base.py +78 -16
  7. portacode/connection/handlers/chunked_content.py +244 -0
  8. portacode/connection/handlers/diff_handlers.py +603 -0
  9. portacode/connection/handlers/file_handlers.py +902 -17
  10. portacode/connection/handlers/project_aware_file_handlers.py +226 -0
  11. portacode/connection/handlers/project_state/README.md +312 -0
  12. portacode/connection/handlers/project_state/__init__.py +92 -0
  13. portacode/connection/handlers/project_state/file_system_watcher.py +179 -0
  14. portacode/connection/handlers/project_state/git_manager.py +1502 -0
  15. portacode/connection/handlers/project_state/handlers.py +875 -0
  16. portacode/connection/handlers/project_state/manager.py +1331 -0
  17. portacode/connection/handlers/project_state/models.py +108 -0
  18. portacode/connection/handlers/project_state/utils.py +50 -0
  19. portacode/connection/handlers/project_state_handlers.py +45 -2185
  20. portacode/connection/handlers/proxmox_infra.py +361 -0
  21. portacode/connection/handlers/registry.py +15 -4
  22. portacode/connection/handlers/session.py +483 -32
  23. portacode/connection/handlers/system_handlers.py +147 -8
  24. portacode/connection/handlers/tab_factory.py +53 -46
  25. portacode/connection/handlers/terminal_handlers.py +21 -8
  26. portacode/connection/handlers/update_handler.py +61 -0
  27. portacode/connection/multiplex.py +60 -2
  28. portacode/connection/terminal.py +214 -24
  29. portacode/keypair.py +63 -1
  30. portacode/link_capture/__init__.py +38 -0
  31. portacode/link_capture/__pycache__/__init__.cpython-311.pyc +0 -0
  32. portacode/link_capture/bin/__pycache__/link_capture_wrapper.cpython-311.pyc +0 -0
  33. portacode/link_capture/bin/elinks +3 -0
  34. portacode/link_capture/bin/gio-open +3 -0
  35. portacode/link_capture/bin/gnome-open +3 -0
  36. portacode/link_capture/bin/gvfs-open +3 -0
  37. portacode/link_capture/bin/kde-open +3 -0
  38. portacode/link_capture/bin/kfmclient +3 -0
  39. portacode/link_capture/bin/link_capture_exec.sh +11 -0
  40. portacode/link_capture/bin/link_capture_wrapper.py +75 -0
  41. portacode/link_capture/bin/links +3 -0
  42. portacode/link_capture/bin/links2 +3 -0
  43. portacode/link_capture/bin/lynx +3 -0
  44. portacode/link_capture/bin/mate-open +3 -0
  45. portacode/link_capture/bin/netsurf +3 -0
  46. portacode/link_capture/bin/sensible-browser +3 -0
  47. portacode/link_capture/bin/w3m +3 -0
  48. portacode/link_capture/bin/x-www-browser +3 -0
  49. portacode/link_capture/bin/xdg-open +3 -0
  50. portacode/logging_categories.py +140 -0
  51. portacode/pairing.py +103 -0
  52. portacode/static/js/test-ntp-clock.html +63 -0
  53. portacode/static/js/utils/ntp-clock.js +232 -0
  54. portacode/utils/NTP_ARCHITECTURE.md +136 -0
  55. portacode/utils/__init__.py +1 -0
  56. portacode/utils/diff_apply.py +456 -0
  57. portacode/utils/diff_renderer.py +371 -0
  58. portacode/utils/ntp_clock.py +65 -0
  59. portacode-1.4.11.dev1.dist-info/METADATA +298 -0
  60. portacode-1.4.11.dev1.dist-info/RECORD +97 -0
  61. {portacode-0.3.19.dev4.dist-info → portacode-1.4.11.dev1.dist-info}/WHEEL +1 -1
  62. portacode-1.4.11.dev1.dist-info/top_level.txt +3 -0
  63. test_modules/README.md +296 -0
  64. test_modules/__init__.py +1 -0
  65. test_modules/test_device_online.py +44 -0
  66. test_modules/test_file_operations.py +743 -0
  67. test_modules/test_git_status_ui.py +370 -0
  68. test_modules/test_login_flow.py +50 -0
  69. test_modules/test_navigate_testing_folder.py +361 -0
  70. test_modules/test_play_store_screenshots.py +294 -0
  71. test_modules/test_terminal_buffer_performance.py +261 -0
  72. test_modules/test_terminal_interaction.py +80 -0
  73. test_modules/test_terminal_loading_race_condition.py +95 -0
  74. test_modules/test_terminal_start.py +56 -0
  75. testing_framework/.env.example +21 -0
  76. testing_framework/README.md +334 -0
  77. testing_framework/__init__.py +17 -0
  78. testing_framework/cli.py +326 -0
  79. testing_framework/core/__init__.py +1 -0
  80. testing_framework/core/base_test.py +336 -0
  81. testing_framework/core/cli_manager.py +177 -0
  82. testing_framework/core/hierarchical_runner.py +577 -0
  83. testing_framework/core/playwright_manager.py +520 -0
  84. testing_framework/core/runner.py +447 -0
  85. testing_framework/core/shared_cli_manager.py +234 -0
  86. testing_framework/core/test_discovery.py +112 -0
  87. testing_framework/requirements.txt +12 -0
  88. portacode-0.3.19.dev4.dist-info/METADATA +0 -241
  89. portacode-0.3.19.dev4.dist-info/RECORD +0 -30
  90. portacode-0.3.19.dev4.dist-info/top_level.txt +0 -1
  91. {portacode-0.3.19.dev4.dist-info → portacode-1.4.11.dev1.dist-info}/entry_points.txt +0 -0
  92. {portacode-0.3.19.dev4.dist-info → portacode-1.4.11.dev1.dist-info/licenses}/LICENSE +0 -0
@@ -0,0 +1,261 @@
"""Test terminal buffer performance and WebSocket message sizes with high-volume output."""

import os
import time
import json
import shutil
from pathlib import Path
from playwright.async_api import expect
from testing_framework.core.base_test import BaseTest, TestResult, TestCategory

# Global test folder path
TESTING_FOLDER_PATH = "/home/menas/testing_folder"


def _clear_testing_folder() -> None:
    """Delete every file and sub-directory inside TESTING_FOLDER_PATH, keeping the folder.

    Shared by setup() and teardown() so the cleanup logic lives in one place.
    """
    for item in os.listdir(TESTING_FOLDER_PATH):
        item_path = os.path.join(TESTING_FOLDER_PATH, item)
        if os.path.isfile(item_path):
            os.remove(item_path)
        elif os.path.isdir(item_path):
            shutil.rmtree(item_path)


class TerminalBufferPerformanceTest(BaseTest):
    """Test terminal buffer performance with high-volume output from Gemini CLI."""

    def __init__(self):
        super().__init__(
            name="terminal_buffer_performance_test",
            category=TestCategory.PERFORMANCE,
            description="Test terminal buffer behavior and WebSocket message sizes with high-volume output from Gemini CLI",
            tags=["terminal", "buffer", "performance", "websocket", "gemini", "memory"],
            depends_on=["device_online_test"],
            start_url="/dashboard/"
        )

    async def run(self) -> TestResult:
        """Open a terminal in the testing_folder project, run Gemini with a long prompt,
        and record per-step timings so buffer behavior can be inspected in websockets.json.

        Returns:
            TestResult with success flag, summary message, and collected timing stats.
        """
        page = self.playwright_manager.page
        assert_that = self.assert_that()
        stats = self.stats()

        # Step 1: Navigate to testing_folder project (copied from working test)
        device_card = page.locator(".device-card.online").filter(has_text="portacode streamer")
        await device_card.wait_for()

        # Click the Editor button in the device card
        stats.start_timer("editor_button_click")
        editor_button = device_card.get_by_text("Editor")
        await editor_button.wait_for()
        await editor_button.click()

        editor_click_time = stats.end_timer("editor_button_click")
        stats.record_stat("editor_button_click_time_ms", editor_click_time)

        # Navigate to testing_folder project
        stats.start_timer("project_navigation")

        # Wait for the project selector modal to appear, then for its project list to load.
        await page.wait_for_selector("#projectSelectorModal.show", timeout=10000)
        await page.wait_for_selector(".item-list .section-header", timeout=10000)

        # Projects are displayed as items with class "item project".
        project_items = page.locator('.item.project')
        project_count = await project_items.count()

        if project_count > 0:
            # Prefer the dedicated testing_folder project when it exists.
            testing_folder_item = page.locator('.item.project').filter(has_text="testing_folder")
            testing_folder_count = await testing_folder_item.count()

            if testing_folder_count > 0:
                # Found testing_folder project - this is ideal!
                await testing_folder_item.first.click()
                stats.record_stat("found_testing_folder", True)
            else:
                # Fallback 1: any project with "test" in the name.
                test_item = page.locator('.item.project').filter(has_text="test")
                test_count = await test_item.count()
                if test_count > 0:
                    await test_item.first.click()
                    stats.record_stat("found_testing_folder", False)
                    stats.record_stat("fallback_reason", "used_test_project")
                else:
                    # Fallback 2: first available project as a last resort.
                    await project_items.first.click()
                    stats.record_stat("found_testing_folder", False)
                    stats.record_stat("fallback_reason", "used_first_available")
        else:
            # No projects found
            assert_that.is_true(False, "No projects found in modal")

        navigation_time = stats.end_timer("project_navigation")
        stats.record_stat("project_navigation_time_ms", navigation_time)

        # Wait for page to load with file explorer
        stats.start_timer("page_load")
        await page.wait_for_timeout(2000)

        page_load_time = stats.end_timer("page_load")
        stats.record_stat("page_load_time_ms", page_load_time)

        # Step 2: Click the add terminal button (copied from working test)
        stats.start_timer("terminal_setup")
        add_terminal_btn = page.locator(".add-terminal-btn")
        await add_terminal_btn.wait_for(timeout=10000)
        await add_terminal_btn.click()

        # Wait for terminal to appear and focus on it properly
        terminal_textarea = page.locator("code-terminal")
        await terminal_textarea.wait_for()
        await terminal_textarea.focus()
        await page.wait_for_timeout(4000)  # Longer delay for focus stability

        terminal_setup_time = stats.end_timer("terminal_setup")
        stats.record_stat("terminal_setup_time_ms", terminal_setup_time)

        # Step 3: Run gemini with a prompt to generate massive output
        stats.start_timer("gemini_test")

        # Start gemini and give the CLI time to come up.
        await page.keyboard.type("gemini")
        await page.keyboard.press("Enter")
        await page.wait_for_timeout(20000)  # Wait 20 seconds

        # Send a prompt that will generate lots of output
        gemini_prompt = "Please write a very detailed, comprehensive technical explanation of how neural networks work, including mathematical formulas, code examples, detailed explanations of backpropagation, different architectures like CNNs and RNNs, training procedures, and real-world applications. Make it as detailed and long as possible - at least 5000 words with examples and code snippets."
        await page.keyboard.type(gemini_prompt)
        await page.keyboard.press("Enter")

        # Wait for Gemini to generate output
        await page.wait_for_timeout(20000)  # 20 seconds max wait

        gemini_time = stats.end_timer("gemini_test")
        stats.record_stat("gemini_test_time_ms", gemini_time)

        # Record final statistics
        total_time = editor_click_time + navigation_time + page_load_time + terminal_setup_time + gemini_time

        # Check for failures
        if assert_that.has_failures():
            return TestResult(self.name, False, assert_that.get_failure_message())

        # Success message with key metrics
        success_msg = f"""Terminal buffer performance test completed in {total_time:.1f}ms
Gemini output generated for {gemini_time:.1f}ms - check websockets.json for buffer behavior"""

        return TestResult(
            self.name,
            True,
            success_msg,
            artifacts=stats.get_stats()
        )

    async def _count_websocket_messages(self) -> int:
        """Count total WebSocket messages logged so far.

        Reads the framework's websockets.json capture if present; returns 0
        when the file is missing, unreadable, or not a list.
        """
        try:
            ws_log_path = Path("test_results") / "current_run" / "websockets.json"
            if ws_log_path.exists():
                with open(ws_log_path, 'r') as f:
                    messages = json.load(f)
                return len(messages) if isinstance(messages, list) else 0
            return 0
        except Exception:
            # Best-effort helper: a corrupt/absent log should not fail the test.
            return 0

    async def _analyze_websocket_messages(self) -> dict:
        """Analyze logged WebSocket messages for size patterns.

        Returns a dict of counts and size statistics (overall, terminal_data,
        terminal_list), or a dict with an "error" key when the log is missing
        or malformed.
        """
        try:
            ws_log_path = Path("test_results") / "current_run" / "websockets.json"
            if not ws_log_path.exists():
                return {"error": "websockets.json not found"}

            with open(ws_log_path, 'r') as f:
                messages = json.load(f)

            if not isinstance(messages, list):
                return {"error": "invalid websockets.json format"}

            analysis = {
                "total_messages": len(messages),
                "terminal_data_messages": 0,
                "terminal_list_messages": 0,
                "largest_message_size": 0,
                "largest_message_type": "",
                "message_sizes": [],
                "terminal_data_sizes": [],
                "terminal_list_sizes": []
            }

            for msg in messages:
                if isinstance(msg, dict) and "data" in msg:
                    # Size is measured on the UTF-8 encoding of the serialized message.
                    msg_str = json.dumps(msg)
                    msg_size = len(msg_str.encode('utf-8'))
                    analysis["message_sizes"].append(msg_size)

                    if msg_size > analysis["largest_message_size"]:
                        analysis["largest_message_size"] = msg_size
                        analysis["largest_message_type"] = msg.get("data", {}).get("event", "unknown")

                    # Bucket by message type
                    event = msg.get("data", {}).get("event", "")
                    if event == "terminal_data":
                        analysis["terminal_data_messages"] += 1
                        analysis["terminal_data_sizes"].append(msg_size)
                    elif event == "terminal_list":
                        analysis["terminal_list_messages"] += 1
                        analysis["terminal_list_sizes"].append(msg_size)

            # Calculate statistics
            if analysis["message_sizes"]:
                analysis["avg_message_size"] = sum(analysis["message_sizes"]) / len(analysis["message_sizes"])
                analysis["max_message_size"] = max(analysis["message_sizes"])
                analysis["min_message_size"] = min(analysis["message_sizes"])

            if analysis["terminal_data_sizes"]:
                analysis["avg_terminal_data_size"] = sum(analysis["terminal_data_sizes"]) / len(analysis["terminal_data_sizes"])
                analysis["max_terminal_data_size"] = max(analysis["terminal_data_sizes"])

            if analysis["terminal_list_sizes"]:
                analysis["avg_terminal_list_size"] = sum(analysis["terminal_list_sizes"]) / len(analysis["terminal_list_sizes"])
                analysis["max_terminal_list_size"] = max(analysis["terminal_list_sizes"])

            return analysis

        except Exception as e:
            return {"error": f"Failed to analyze WebSocket messages: {str(e)}"}

    async def setup(self):
        """Setup for terminal buffer performance test - ensure testing folder exists and is empty."""
        try:
            # Ensure the testing folder exists but is empty
            os.makedirs(TESTING_FOLDER_PATH, exist_ok=True)

            # Clean out any existing content so we start fresh
            _clear_testing_folder()

        except Exception as e:
            print(f"❌ Setup failed: {e}")
            # Chain the original cause for easier debugging.
            raise Exception(f"Failed to set up test project: {e}") from e

    async def teardown(self):
        """Teardown for terminal buffer performance test (best-effort cleanup)."""
        try:
            if os.path.exists(TESTING_FOLDER_PATH):
                # Clean up all content
                _clear_testing_folder()
        except Exception as e:
            print(f"⚠️ Cleanup warning: {e}")
            # Don't fail the test just because cleanup had issues
@@ -0,0 +1,80 @@
"""Test terminal interaction - opening terminal and running commands."""

from testing_framework.core.base_test import BaseTest, TestResult, TestCategory


class TerminalInteractionTest(BaseTest):
    """Test terminal interaction - opening terminal and running commands."""

    def __init__(self):
        super().__init__(
            name="terminal_interaction_test",
            category=TestCategory.INTEGRATION,
            description="Test terminal interaction - click terminal chip, run ls command, measure timing",
            tags=["terminal", "interaction", "command", "timing"],
            depends_on=["terminal_start_test"],
            start_url="/dashboard/"
        )

    async def run(self) -> TestResult:
        """Open the existing terminal via its chip, run `ls`, and time the round trip.

        Returns:
            TestResult with the measured command-execution time and timing stats.
        """
        page = self.playwright_manager.page
        stats = self.stats()

        # Wait for any modal to be closed before clicking terminal chip
        await page.wait_for_function(
            "() => !document.querySelector('#startModal.show')",
            timeout=10000
        )

        # Click terminal chip to open terminal
        device_card = page.locator(".device-card.online").filter(has_text="portacode streamer")
        terminal_chip = device_card.locator(".terminal-chip-channel")
        await terminal_chip.click()

        # Wait for terminal and prompt
        await page.wait_for_function(
            "() => document.querySelector('.xterm-rows')?.textContent.includes('menas@portacode-streamer:~$')"
        )

        # Ensure terminal is properly focused and ready for input
        terminal_textarea = page.locator(".xterm-helper-textarea")
        await terminal_textarea.focus()
        await page.wait_for_timeout(200)  # Longer delay for focus stability

        # Double-check focus and click terminal area if needed
        is_focused = await terminal_textarea.evaluate("el => document.activeElement === el")
        if not is_focused:
            await page.locator(".xterm-screen").click()
            await terminal_textarea.focus()
            await page.wait_for_timeout(100)

        # Send ls command and measure timing
        stats.start_timer("command_execution")
        await page.keyboard.type("ls\n")

        # Wait for output containing client_sessions.json
        await page.wait_for_function(
            "() => document.querySelector('.xterm-rows')?.textContent.includes('client_sessions.json')"
        )

        command_time = stats.end_timer("command_execution")

        return TestResult(
            self.name,
            True,
            f"Command executed in {command_time:.1f}ms",
            artifacts=stats.get_stats()
        )

    async def setup(self):
        """Setup for terminal interaction test."""
        pass

    async def teardown(self):
        """Close the terminal modal if still open; cleanup problems never fail the test."""
        try:
            page = self.playwright_manager.page
            await page.evaluate('document.querySelector("#termModal")?.querySelector(".btn-close")?.click()')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
            pass
@@ -0,0 +1,95 @@
from testing_framework.core.base_test import BaseTest, TestResult, TestCategory
import asyncio


class TerminalLoadingRaceConditionTest(BaseTest):
    """Reload the project page repeatedly and verify the terminal list never
    falls back to the empty-state message while a terminal actually exists."""

    def __init__(self):
        super().__init__(
            name="terminal_loading_race_condition_test",
            category=TestCategory.INTEGRATION,
            description="Test that terminals load immediately without showing empty state message",
            tags=["terminal", "websocket", "race-condition", "critical"],
            depends_on=["device_online_test"],
            start_url="/project/1d98e739-de00-4d65-a13b-c6c82173683f/"
        )

    async def run(self) -> TestResult:
        """Test that terminals load immediately without ever showing empty state.

        Creates a terminal, reloads the page three times (normal, networkidle
        "hard" reload, normal), and fails if the empty-state message is ever
        visible when a terminal should exist.
        """
        page = self.playwright_manager.page
        assert_that = self.assert_that()  # kept: registers the assertion collector

        # Selector for the empty-state message that signals the race condition.
        empty_message_selector = "text=Click the '+' to create a new terminal."

        try:
            # start_url should have navigated us to the project page already.
            # Wait a moment for components to initialize.
            await asyncio.sleep(1)

            # Create a terminal first so a subsequent reload SHOULD restore it.
            add_terminal_btn = page.locator(".add-terminal-btn")
            await add_terminal_btn.wait_for(timeout=10000)
            await add_terminal_btn.click()
            await asyncio.sleep(2)

            # Reload twice (normal, then a networkidle "hard" reload) and make
            # sure the empty-state message never appears after either reload.
            for wait_until, label in ((None, "first reload"), ("networkidle", "hard reload")):
                if wait_until is None:
                    await page.reload()
                else:
                    await page.reload(wait_until=wait_until)
                await asyncio.sleep(1)
                await asyncio.sleep(3)

                if await page.is_visible(empty_message_selector):
                    return TestResult(
                        self.name,
                        False,
                        f"RACE CONDITION DETECTED: Empty terminal message shown after {label}"
                    )

            # One more normal reload for final check
            await page.reload()
            await asyncio.sleep(1)
            await asyncio.sleep(3)

            # Check the final state - what actually appeared
            loading_visible = await page.is_visible("text=Loading Terminals...")
            empty_message_visible = await page.is_visible(empty_message_selector)
            terminal_area_visible = await page.is_visible("div#term-area")

            # The race condition manifests as showing empty message when terminals should exist
            if empty_message_visible:
                return TestResult(
                    self.name,
                    False,
                    "RACE CONDITION DETECTED: Empty terminal message shown when terminals should exist. This means terminal_list processing failed."
                )
            elif terminal_area_visible:
                return TestResult(
                    self.name,
                    True,
                    "SUCCESS: Terminal area loaded properly without showing empty state"
                )
            elif loading_visible:
                return TestResult(
                    self.name,
                    False,
                    "STUCK: Still showing loading state after 3+ seconds"
                )
            else:
                return TestResult(
                    self.name,
                    False,
                    f"UNEXPECTED STATE: No terminal component states found. loading={loading_visible}, empty={empty_message_visible}, terminal={terminal_area_visible}"
                )

        except Exception as e:
            return TestResult(self.name, False, f"Test failed with exception: {str(e)}")
@@ -0,0 +1,56 @@
"""Test starting a terminal in the device."""

from testing_framework.core.base_test import BaseTest, TestResult, TestCategory


class TerminalStartTest(BaseTest):
    """Test starting a new terminal in the device."""

    def __init__(self):
        super().__init__(
            name="terminal_start_test",
            category=TestCategory.INTEGRATION,
            description="Verify new terminal can be started and measure timing",
            tags=["terminal", "device", "timing"],
            depends_on=["device_online_test"],
            start_url="/dashboard/"
        )

    async def run(self) -> TestResult:
        """Start a new terminal from the dashboard and report how long it took."""
        page = self.playwright_manager.page
        stats = self.stats()

        # Locate the online portacode device card and its Terminal button.
        card = page.locator(".device-card.online").filter(has_text="portacode streamer")
        start_button = card.get_by_text("Terminal")

        # Time the full creation flow: button click, confirmation modal, chip.
        stats.start_timer("terminal_creation")
        await start_button.click()

        # Confirm via the "Start New Terminal" modal.
        await page.wait_for_selector("text=Start New Terminal")
        await page.get_by_text("Start Terminal").click()

        # The terminal chip appearing on the card marks a successful start.
        chip = card.locator(".terminal-chip-channel")
        await chip.wait_for()

        elapsed = stats.end_timer("terminal_creation")
        stats.record_stat("terminal_creation_time_ms", elapsed)

        return TestResult(
            self.name,
            True,
            f"Terminal started in {elapsed:.1f}ms",
            artifacts=stats.get_stats()
        )

    async def setup(self):
        """Setup for terminal start test."""
        pass

    async def teardown(self):
        """Teardown for terminal start test."""
        pass
@@ -0,0 +1,21 @@
1
+ # Testing Framework Environment Configuration
2
+ # Copy this file to .env and configure with your values
3
+
4
+ # Web Application URL
5
+ TEST_BASE_URL=http://192.168.1.188:8001/
6
+
7
+ # Login Credentials
8
+ TEST_USERNAME=your_username_here
9
+ TEST_PASSWORD=your_password_here
10
+
11
+ # Optional: Browser Settings
12
+ TEST_BROWSER=chromium # chromium, firefox, or webkit
13
+ TEST_HEADLESS=false # true for headless mode, false for visible browser
14
+
15
+ # Optional: Test Output Directories
16
+ TEST_RESULTS_DIR=test_results
17
+ TEST_RECORDINGS_DIR=test_recordings
18
+ TEST_LOGS_DIR=test_results
19
+
20
+ # Automation testing token (used by the testing framework to bypass captcha. Same token must be defined in ../main.env)
21
+ TEST_RUNNER_BYPASS_TOKEN=same-as-in-main-env