portacode 1.3.33-py3-none-any.whl → 1.3.35-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of portacode might be problematic.

portacode/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '1.3.33'
-__version_tuple__ = version_tuple = (1, 3, 33)
+__version__ = version = '1.3.35'
+__version_tuple__ = version_tuple = (1, 3, 35)
 
 __commit_id__ = commit_id = None
portacode/connection/handlers/WEBSOCKET_PROTOCOL.md CHANGED
@@ -43,6 +43,7 @@ This document describes the complete protocol for communicating with devices thr
 - [`system_info`](#system_info)
 - [File Actions](#file-actions)
 - [`file_read`](#file_read)
+- [`file_search`](#file_search)
 - [`file_write`](#file_write)
 - [`directory_list`](#directory_list)
 - [`file_info`](#file_info)
@@ -80,6 +81,7 @@ This document describes the complete protocol for communicating with devices thr
 - [`system_info`](#system_info-event)
 - [File Events](#file-events)
 - [`file_read_response`](#file_read_response)
+- [`file_search_response`](#file_search_response)
 - [`file_write_response`](#file_write_response)
 - [`directory_list_response`](#directory_list_response)
 - [`file_info_response`](#file_info_response)
@@ -317,12 +319,46 @@ Reads the content of a file. Handled by [`file_read`](./file_handlers.py).
 **Payload Fields:**
 
 * `path` (string, mandatory): The absolute path to the file to read.
+* `start_line` (integer, optional): 1-based line number to start reading from. Defaults to `1`.
+* `end_line` (integer, optional): 1-based line number to stop reading at (inclusive). When provided, limits the response to the range between `start_line` and `end_line`.
+* `max_lines` (integer, optional): Maximum number of lines to return (capped at 2000). Useful for pagination when `end_line` is not specified.
+* `encoding` (string, optional): Text encoding to use when reading the file. Defaults to `utf-8` with replacement for invalid bytes.
 
 **Responses:**
 
 * On success, the device will respond with a [`file_read_response`](#file_read_response) event.
 * On error, a generic [`error`](#error) event is sent.
 
+### `file_search`
+
+Searches for text matches within files beneath a given root directory. Handled by [`file_search`](./file_handlers.py).
+
+**Payload Fields:**
+
+* `root_path` (string, mandatory): The absolute path that acts as the search root (typically a project folder).
+* `query` (string, mandatory): The search query. Treated as plain text unless `regex=true`.
+* `match_case` (boolean, optional): When `true`, performs a case-sensitive search. Defaults to `false`.
+* `regex` (boolean, optional): When `true`, interprets `query` as a regular expression. Defaults to `false`.
+* `whole_word` (boolean, optional): When `true`, matches only whole words. Works with both plain-text and regex queries.
+* `include_patterns` (array[string], optional): Glob patterns that files must match to be included (e.g., `["src/**/*.py"]`).
+* `exclude_patterns` (array[string], optional): Glob patterns for files/directories to skip (e.g., `["**/tests/**"]`).
+* `include_hidden` (boolean, optional): When `true`, includes hidden files and folders. Defaults to `false`.
+* `max_results` (integer, optional): Maximum number of match entries to return (capped at 500). Defaults to `40`.
+* `max_matches_per_file` (integer, optional): Maximum number of matches to return per file (capped at 50). Defaults to `5`.
+* `max_file_size` (integer, optional): Maximum file size in bytes to scan (defaults to 1 MiB).
+* `max_line_length` (integer, optional): Maximum number of characters to return per matching line (defaults to `200`).
+
+**Default Behaviour:**
+
+* Binary files and large vendor/static directories (e.g., `node_modules`, `dist`, `static`) are skipped automatically unless custom `exclude_patterns` are provided.
+* Only common source/text extensions are scanned by default (override with `include_patterns` to widen the scope).
+* Searches stop after 10 seconds, respecting both per-file and global match limits to avoid oversized responses.
+
+**Responses:**
+
+* On success, the device will respond with a [`file_search_response`](#file_search_response) event containing the matches.
+* On error, a generic [`error`](#error) event is sent.
+
 ### `file_write`
 
 Writes content to a file. Handled by [`file_write`](./file_handlers.py).
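For orientation, here is roughly what payloads for the two commands documented above could look like. This is an illustrative sketch only: the payload field names come from the protocol text, but the outer envelope (the hypothetical `cmd` key below) is assumed rather than taken from this diff.

```python
# Illustrative sketch; the "cmd" envelope key is an assumption.

read_next_page = {
    "cmd": "file_read",                   # hypothetical envelope key
    "path": "/home/user/project/app.py",
    "start_line": 2001,                   # resume after a first 2000-line page
    "max_lines": 2000,                    # the server caps this at 2000 anyway
    "encoding": "utf-8",
}

find_todos = {
    "cmd": "file_search",                 # hypothetical envelope key
    "root_path": "/home/user/project",
    "query": r"TODO\(\w+\)",              # interpreted as a regex because regex=True
    "regex": True,
    "match_case": True,
    "include_patterns": ["src/**/*.py"],
    "max_results": 100,
    "max_matches_per_file": 10,
}
```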
@@ -345,6 +381,8 @@ Lists the contents of a directory. Handled by [`directory_list`](./file_handlers
 
 * `path` (string, optional): The path to the directory to list. Defaults to the current directory.
 * `show_hidden` (boolean, optional): Whether to include hidden files in the listing. Defaults to `false`.
+* `limit` (integer, optional): Maximum number of entries to return (defaults to all entries, i.e. no limit). Values above 1000 are clamped to 1000.
+* `offset` (integer, optional): Number of entries to skip before collecting results (defaults to `0`).
 
 **Responses:**
 
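A minimal paginated listing request, under the same assumption about the envelope key:

```python
# Sketch: request the third page of 100 entries from a large directory.
list_page_3 = {
    "cmd": "directory_list",  # hypothetical envelope key
    "path": "/home/user/project/node_modules",
    "show_hidden": False,
    "limit": 100,             # values above 1000 are clamped to 1000
    "offset": 200,            # skip the first two pages
}
```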
@@ -814,8 +852,39 @@ Returns the content of a file in response to a `file_read` action. Handled by [`
 **Event Fields:**
 
 * `path` (string, mandatory): The path of the file that was read.
-* `content` (string, mandatory): The content of the file.
-* `size` (integer, mandatory): The size of the file in bytes.
+* `content` (string, mandatory): The file content returned (may be a slice when pagination parameters are used).
+* `size` (integer, mandatory): The total size of the file in bytes.
+* `total_lines` (integer, optional): Total number of lines detected in the file.
+* `returned_lines` (integer, optional): Number of lines included in `content`.
+* `start_line` (integer, optional): The first line number included in the response (if any lines were returned).
+* `requested_start_line` (integer, optional): The requested starting line supplied in the command.
+* `end_line` (integer, optional): The last line number included in the response.
+* `has_more_before` (boolean, optional): Whether there is additional content before the returned range.
+* `has_more_after` (boolean, optional): Whether there is additional content after the returned range.
+* `encoding` (string, optional): Encoding that was used while reading the file.
+
+### <a name="file_search_response"></a>`file_search_response`
+
+Returns aggregated search results in response to a `file_search` action. Handled by [`file_search`](./file_handlers.py).
+
+**Event Fields:**
+
+* `root_path` (string, mandatory): The root directory that was searched.
+* `query` (string, mandatory): The query string that was used.
+* `match_case` (boolean, mandatory): Indicates if the search was case-sensitive.
+* `regex` (boolean, mandatory): Indicates if the query was interpreted as a regular expression.
+* `whole_word` (boolean, mandatory): Indicates if the search matched whole words only.
+* `include_patterns` (array[string], mandatory): Effective include glob patterns.
+* `exclude_patterns` (array[string], mandatory): Effective exclude glob patterns.
+* `matches` (array, mandatory): List of match objects, each containing `relative_path`, `path`, `line_number`, `line`, `match_spans` (a list of `[start, end]` pairs), `match_count`, and `line_truncated` (boolean).
+* `matches_returned` (integer, mandatory): Number of match entries returned (the length of `matches`).
+* `total_matches` (integer, mandatory): Total number of matches found while scanning.
+* `files_scanned` (integer, mandatory): Count of files inspected.
+* `truncated` (boolean, mandatory): Indicates if additional matches exist beyond those returned.
+* `truncated_count` (integer, optional): Number of matches that were omitted due to truncation limits.
+* `max_results` (integer, mandatory): Maximum number of matches requested.
+* `max_matches_per_file` (integer, mandatory): Maximum matches requested per file.
+* `errors` (array[string], optional): Non-fatal errors encountered during scanning (e.g., unreadable files).
 
 ### <a name="file_write_response"></a>`file_write_response`
 
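To make the shape of the new event concrete, here is a hand-written `file_search_response` with a single match. All values are invented for illustration, and the `exclude_patterns` list is abridged relative to the full defaults.

```python
example_file_search_response = {
    "event": "file_search_response",
    "root_path": "/home/user/project",
    "query": "deadline",
    "match_case": False,
    "regex": False,
    "whole_word": False,
    "include_patterns": [],
    "exclude_patterns": ["node_modules/**", "**/node_modules/**", "*.min.js"],  # abridged
    "matches": [
        {
            "path": "/home/user/project/src/search.py",
            "relative_path": "src/search.py",
            "line_number": 42,
            "line": "    deadline = time.monotonic() + 10.0",
            "match_spans": [[4, 12]],  # character offsets [start, end) within "line"
            "match_count": 1,
            "line_truncated": False,
        }
    ],
    "matches_returned": 1,
    "total_matches": 1,
    "files_scanned": 57,
    "truncated": False,
    "truncated_count": 0,
    "max_results": 40,
    "max_matches_per_file": 5,
    "errors": [],
}
```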
@@ -835,7 +904,11 @@ Returns the contents of a directory in response to a `directory_list` action. Ha
 
 * `path` (string, mandatory): The path of the directory that was listed.
 * `items` (array, mandatory): A list of objects, each representing a file or directory in the listed directory.
-* `count` (integer, mandatory): The number of items in the `items` list.
+* `count` (integer, mandatory): The number of items returned in this response (honours `limit`/`offset`).
+* `total_count` (integer, mandatory): Total number of entries in the directory before pagination.
+* `offset` (integer, optional): Offset that was applied.
+* `limit` (integer, optional): Limit that was applied (or `null` if none).
+* `has_more` (boolean, optional): Indicates whether additional items remain beyond the returned slice.
 
 ### <a name="file_info_response"></a>`file_info_response`
 
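Taken together, `count`, `offset`, and `has_more` support a simple drain loop on the client side. A sketch, assuming a synchronous `send_command` helper that sends a payload and returns the matching `*_response` event as a dict (no such helper appears in this diff):

```python
def list_all_entries(send_command, path: str) -> list:
    """Collect every entry of a directory by following has_more/offset."""
    entries, offset = [], 0
    while True:
        resp = send_command({
            "cmd": "directory_list",  # hypothetical envelope key
            "path": path,
            "limit": 500,
            "offset": offset,
        })
        entries.extend(resp["items"])
        if not resp.get("has_more"):
            return entries
        offset += resp["count"]  # advance by what was actually returned
```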
@@ -1277,4 +1350,4 @@ Sent by the server to clients to provide initial device list snapshot.
 
 **Event Fields:**
 
-* `devices` (array, mandatory): Array of device objects with status information
+* `devices` (array, mandatory): Array of device objects with status information
portacode/connection/handlers/__init__.py CHANGED
@@ -23,6 +23,7 @@ from .file_handlers import (
     FileCreateHandler,
     FolderCreateHandler,
     FileRenameHandler,
+    FileSearchHandler,
     ContentRequestHandler,
 )
 from .project_state_handlers import (
@@ -58,6 +59,7 @@ __all__ = [
    "FileCreateHandler",
    "FolderCreateHandler",
    "FileRenameHandler",
+    "FileSearchHandler",
    "ContentRequestHandler",
    # Project state handlers
    "ProjectStateFolderExpandHandler",
@@ -71,4 +73,4 @@ __all__ = [
     "ProjectStateGitUnstageHandler",
     "ProjectStateGitRevertHandler",
     "ProjectStateGitCommitHandler",
-]
+]
portacode/connection/handlers/file_handlers.py CHANGED
@@ -2,7 +2,13 @@
 
 import os
 import logging
-from typing import Any, Dict, List
+import fnmatch
+import re
+import json
+import shutil
+import subprocess
+import time
+from typing import Any, Dict, List, Optional, Sequence
 from pathlib import Path
 
 from .base import AsyncHandler, SyncHandler
@@ -22,27 +28,101 @@ class FileReadHandler(SyncHandler):
         return "file_read"
 
     def execute(self, message: Dict[str, Any]) -> Dict[str, Any]:
-        """Read file contents."""
+        """Read file contents with optional pagination."""
         file_path = message.get("path")
         if not file_path:
             raise ValueError("path parameter is required")
-
+
+        encoding = message.get("encoding", "utf-8")
+        start_line = self._coerce_positive_int(message.get("start_line"), default=1)
+        max_lines = self._coerce_positive_int(message.get("max_lines"), allow_none=True)
+        end_line = self._coerce_positive_int(message.get("end_line"), allow_none=True)
+
+        if start_line < 1:
+            start_line = 1
+
+        if end_line is not None and end_line >= start_line:
+            range_len = end_line - start_line + 1
+            if max_lines is None:
+                max_lines = range_len
+            else:
+                max_lines = min(max_lines, range_len)
+
+        if max_lines is not None:
+            max_lines = min(max_lines, 2000)
+
         try:
-            with open(file_path, 'r', encoding='utf-8') as f:
-                content = f.read()
-
-            return {
-                "event": "file_read_response",
-                "path": file_path,
-                "content": content,
-                "size": len(content),
-            }
+            file_size = os.path.getsize(file_path)
+        except FileNotFoundError:
+            raise ValueError(f"File not found: {file_path}")
+        except PermissionError:
+            raise RuntimeError(f"Permission denied: {file_path}")
+
+        total_lines = 0
+        collected_lines: List[str] = []
+        truncated_after = False
+
+        try:
+            with open(file_path, "r", encoding=encoding, errors="replace") as file_obj:
+                for idx, line in enumerate(file_obj, start=1):
+                    total_lines += 1
+                    if idx < start_line:
+                        continue
+
+                    if max_lines is not None and len(collected_lines) >= max_lines:
+                        truncated_after = True
+                        continue
+
+                    collected_lines.append(line)
         except FileNotFoundError:
             raise ValueError(f"File not found: {file_path}")
         except PermissionError:
             raise RuntimeError(f"Permission denied: {file_path}")
-        except UnicodeDecodeError:
-            raise RuntimeError(f"File is not text or uses unsupported encoding: {file_path}")
+        except OSError as exc:
+            raise RuntimeError(f"Error reading file: {exc}")
+
+        returned_start_line = start_line if collected_lines else None
+        returned_end_line = (
+            start_line + len(collected_lines) - 1 if collected_lines else None
+        )
+        has_more_before = bool(collected_lines) and start_line > 1
+        has_more_after = truncated_after or (
+            returned_end_line is not None and total_lines > returned_end_line
+        )
+
+        return {
+            "event": "file_read_response",
+            "path": file_path,
+            "content": "".join(collected_lines),
+            "size": file_size,
+            "total_lines": total_lines,
+            "returned_lines": len(collected_lines),
+            "start_line": returned_start_line,
+            "requested_start_line": start_line,
+            "end_line": returned_end_line,
+            "has_more_before": has_more_before,
+            "has_more_after": has_more_after,
+            "encoding": encoding,
+        }
+
+    @staticmethod
+    def _coerce_positive_int(
+        value: Any,
+        *,
+        default: Optional[int] = None,
+        allow_none: bool = False,
+    ) -> Optional[int]:
+        if value is None:
+            if allow_none:
+                return None
+            return default or 0
+        try:
+            coerced = int(value)
+        except (TypeError, ValueError):
+            return None if allow_none else (default or 0)
+        if coerced <= 0:
+            return None if allow_none else (default or 0)
+        return coerced
 
 
 class FileWriteHandler(SyncHandler):
@@ -90,6 +170,24 @@ class DirectoryListHandler(SyncHandler):
         """List directory contents."""
         path = message.get("path", ".")
         show_hidden = message.get("show_hidden", False)
+        limit_raw = message.get("limit")
+        offset_raw = message.get("offset", 0)
+
+        def _parse_positive_int(value, *, allow_none=False, minimum=0, maximum=None):
+            if value is None:
+                return None if allow_none else minimum
+            try:
+                parsed = int(value)
+            except (TypeError, ValueError):
+                return None if allow_none else minimum
+            if parsed < minimum:
+                parsed = minimum
+            if maximum is not None and parsed > maximum:
+                parsed = maximum
+            return parsed
+
+        offset = _parse_positive_int(offset_raw, minimum=0)
+        limit = _parse_positive_int(limit_raw, allow_none=True, minimum=1, maximum=1000)
 
         try:
             items = []
@@ -113,11 +211,31 @@ class DirectoryListHandler(SyncHandler):
                     # Skip items we can't stat
                     continue
 
+            total_count = len(items)
+
+            if offset:
+                if offset >= total_count:
+                    sliced_items = []
+                else:
+                    sliced_items = items[offset:]
+            else:
+                sliced_items = items
+
+            if limit is not None and limit >= 0:
+                sliced_items = sliced_items[:limit]
+
+            returned_count = len(sliced_items)
+            has_more = total_count > offset + returned_count if total_count else False
+
             return {
                 "event": "directory_list_response",
                 "path": path,
-                "items": items,
-                "count": len(items),
+                "items": sliced_items,
+                "count": returned_count,
+                "total_count": total_count,
+                "offset": offset,
+                "limit": limit,
+                "has_more": has_more,
             }
         except FileNotFoundError:
             raise ValueError(f"Directory not found: {path}")
@@ -372,6 +490,545 @@ class FileRenameHandler(SyncHandler):
             raise RuntimeError(f"Failed to rename: {e}")
 
 
+class FileSearchHandler(SyncHandler):
+    """Handler for searching text within files under a root directory."""
+
+    DEFAULT_EXCLUDE_DIRS: Sequence[str] = (
+        ".git",
+        ".hg",
+        ".svn",
+        "__pycache__",
+        "node_modules",
+        "vendor",
+        "dist",
+        "build",
+        "tmp",
+        "static",
+        "assets",
+        "coverage",
+    )
+
+    DEFAULT_EXCLUDE_FILE_GLOBS: Sequence[str] = (
+        "*.min.js",
+        "*.min.css",
+    )
+
+    BINARY_EXTENSIONS: Sequence[str] = (
+        ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".svg", ".ico",
+        ".pdf", ".zip", ".tar", ".gz", ".bz2", ".xz", ".7z",
+        ".ttf", ".woff", ".woff2", ".eot",
+        ".mp3", ".mp4", ".mov", ".avi", ".wav", ".flac",
+        ".exe", ".dll", ".so", ".dylib",
+        ".class", ".jar",
+    )
+
+    DEFAULT_INCLUDE_EXTENSIONS: Sequence[str] = (
+        ".py", ".pyi", ".pyx",
+        ".js", ".jsx", ".ts", ".tsx", ".mjs", ".cjs",
+        ".json", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".conf",
+        ".md", ".markdown", ".rst", ".txt",
+        ".html", ".htm", ".css", ".scss", ".less",
+        ".go", ".rs", ".java", ".kt", ".kts",
+        ".c", ".h", ".hpp", ".hh", ".cc", ".cpp", ".cxx",
+        ".cs", ".php", ".rb", ".swift", ".scala", ".sql",
+        ".sh", ".bash", ".zsh", ".fish",
+        ".env", ".dockerfile", ".gradle", ".mk", ".make", ".bat", ".ps1",
+    )
+
+    ALWAYS_INCLUDE_FILENAMES: Sequence[str] = (
+        "Makefile",
+        "Dockerfile",
+        "Jenkinsfile",
+        "Procfile",
+        "Gemfile",
+        "CMakeLists.txt",
+        "build.gradle",
+        "settings.gradle",
+        "package.json",
+        "pnpm-lock.yaml",
+        "yarn.lock",
+        "requirements.txt",
+        "pyproject.toml",
+    )
+
+    @property
+    def command_name(self) -> str:
+        return "file_search"
+
+    def _search_with_rg(
+        self,
+        *,
+        root_path: str,
+        query: str,
+        match_case: bool,
+        use_regex: bool,
+        whole_word: bool,
+        include_hidden: bool,
+        max_results: int,
+        max_per_file: int,
+        max_file_size: int,
+        include_patterns: List[str],
+        exclude_patterns: List[str],
+        max_line_length: int,
+        using_default_includes: bool,
+    ) -> Optional[Dict[str, Any]]:
+        """Perform fast search using ripgrep if available."""
+        if shutil.which("rg") is None:
+            return None
+
+        cmd = [
+            "rg",
+            "--json",
+            "--line-number",
+            "--color",
+            "never",
+            "--no-heading",
+            "--max-count",
+            str(max_per_file),
+            f"--max-filesize={max_file_size}B",
+        ]
+
+        if not match_case:
+            cmd.append("--ignore-case")
+        if not use_regex:
+            cmd.append("--fixed-strings")
+        if whole_word:
+            cmd.append("--word-regexp")
+        if include_hidden:
+            cmd.append("--hidden")
+
+        if using_default_includes:
+            for ext in self.DEFAULT_INCLUDE_EXTENSIONS:
+                cmd.extend(["-g", f"*{ext}"])
+            for name in self.ALWAYS_INCLUDE_FILENAMES:
+                cmd.extend(["-g", name])
+        for pattern in include_patterns:
+            cmd.extend(["-g", pattern])
+        for pattern in exclude_patterns:
+            cmd.extend(["-g", f"!{pattern}"])
+
+        cmd.append(query)
+        cmd.append(".")
+
+        matches: List[Dict[str, Any]] = []
+        truncated = False
+        truncated_count = 0
+        files_scanned = 0
+        errors: List[str] = []
+        stop_search = False
+        deadline = time.monotonic() + 10.0  # hard cap to avoid long-running scans
+
+        try:
+            proc = subprocess.Popen(
+                cmd,
+                cwd=root_path,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+            )
+        except Exception as exc:
+            logger.warning("Failed to execute ripgrep: %s", exc)
+            return None
+
+        try:
+            assert proc.stdout is not None
+            for line in proc.stdout:
+                line = line.strip()
+                if not line:
+                    continue
+
+                if time.monotonic() > deadline:
+                    truncated = True
+                    errors.append("Search aborted after reaching 10s execution limit.")
+                    stop_search = True
+                    break
+
+                try:
+                    payload = json.loads(line)
+                except json.JSONDecodeError:
+                    continue
+
+                event_type = payload.get("type")
+
+                if event_type == "begin":
+                    files_scanned += 1
+                    continue
+
+                if event_type == "match":
+                    data = payload.get("data", {})
+                    line_text = data.get("lines", {}).get("text", "")
+                    line_number = data.get("line_number")
+                    path_info = data.get("path", {}).get("text") or data.get("path", {}).get("bytes")
+                    if not path_info:
+                        continue
+                    absolute_path = os.path.join(root_path, path_info)
+                    relative_path = path_info
+
+                    submatches = data.get("submatches", [])
+                    if len(matches) >= max_results:
+                        truncated = True
+                        truncated_count += len(submatches)
+                        stop_search = True
+                        break
+
+                    available = max_results - len(matches)
+                    spans: List[List[int]] = []
+                    for submatch in submatches:
+                        if len(spans) >= available:
+                            truncated = True
+                            truncated_count += len(submatches) - len(spans)
+                            stop_search = True
+                            break
+                        start = submatch.get("start", {}).get("offset")
+                        end = submatch.get("end", {}).get("offset")
+                        if start is None or end is None:
+                            continue
+                        spans.append([start, end])
+
+                    if spans:
+                        clean_line = line_text.rstrip("\n")
+                        truncated_line = clean_line
+                        line_truncated = False
+                        if len(clean_line) > max_line_length:
+                            truncated_line = clean_line[:max_line_length] + "..."
+                            line_truncated = True
+
+                        matches.append(
+                            {
+                                "path": absolute_path,
+                                "relative_path": relative_path,
+                                "line_number": line_number,
+                                "line": truncated_line,
+                                "match_spans": spans,
+                                "match_count": len(spans),
+                                "line_truncated": line_truncated,
+                            }
+                        )
+
+                    if stop_search:
+                        break
+                elif event_type == "message":
+                    message = payload.get("data", {}).get("msg") or payload.get("data", {}).get("text")
+                    if message:
+                        errors.append(message)
+
+                if stop_search:
+                    break
+        finally:
+            if stop_search and proc.poll() is None:
+                try:
+                    proc.terminate()
+                    proc.wait(timeout=1.0)
+                except Exception:
+                    proc.kill()
+            else:
+                proc.wait()
+
+        stderr_output = ""
+        if proc.stderr:
+            try:
+                stderr_output = proc.stderr.read().strip()
+            except Exception:
+                stderr_output = ""
+        if stderr_output:
+            errors.append(stderr_output)
+
+        return {
+            "event": "file_search_response",
+            "root_path": root_path,
+            "query": query,
+            "match_case": match_case,
+            "regex": use_regex,
+            "whole_word": whole_word,
+            "include_patterns": include_patterns,
+            "exclude_patterns": exclude_patterns,
+            "matches": matches,
+            "matches_returned": len(matches),
+            "total_matches": len(matches) + truncated_count,
+            "files_scanned": files_scanned,
+            "truncated": truncated or truncated_count > 0,
+            "truncated_count": truncated_count,
+            "max_results": max_results,
+            "max_matches_per_file": max_per_file,
+            "errors": errors,
+        }
+
+    def execute(self, message: Dict[str, Any]) -> Dict[str, Any]:
+        root_path = message.get("root_path")
+        query = message.get("query")
+
+        if not root_path:
+            raise ValueError("root_path parameter is required")
+        if not query:
+            raise ValueError("query parameter is required")
+
+        if not os.path.isdir(root_path):
+            raise ValueError(f"Root path is not a directory: {root_path}")
+
+        match_case = bool(message.get("match_case", False))
+        use_regex = bool(message.get("regex", False))
+        whole_word = bool(message.get("whole_word", False))
+        include_hidden = bool(message.get("include_hidden", False))
+
+        max_results = self._clamp_int(message.get("max_results"), default=40, min_value=1, max_value=500)
+        max_per_file = self._clamp_int(
+            message.get("max_matches_per_file"),
+            default=5,
+            min_value=1,
+            max_value=50,
+        )
+        max_file_size = self._clamp_int(
+            message.get("max_file_size"),
+            default=1024 * 1024,
+            min_value=1024,
+            max_value=10 * 1024 * 1024,
+        )
+        max_line_length = self._clamp_int(
+            message.get("max_line_length"),
+            default=200,
+            min_value=32,
+            max_value=1024,
+        )
+
+        include_patterns = self._normalize_patterns(message.get("include_patterns"))
+        using_default_includes = not include_patterns
+        raw_exclude_patterns = self._normalize_patterns(message.get("exclude_patterns"))
+        using_default_excludes = not raw_exclude_patterns
+        if using_default_excludes:
+            exclude_patterns = []
+            for directory in self.DEFAULT_EXCLUDE_DIRS:
+                exclude_patterns.append(f"{directory}/**")
+                exclude_patterns.append(f"**/{directory}/**")
+            exclude_patterns.extend(self.DEFAULT_EXCLUDE_FILE_GLOBS)
+        else:
+            exclude_patterns = raw_exclude_patterns
+
+        flags = 0 if match_case else re.IGNORECASE
+        pattern = query if use_regex else re.escape(query)
+        if whole_word:
+            pattern = r"\b" + pattern + r"\b"
+
+        try:
+            compiled = re.compile(pattern, flags)
+        except re.error as exc:
+            raise ValueError(f"Invalid regular expression: {exc}") from exc
+
+        rg_result = self._search_with_rg(
+            root_path=root_path,
+            query=query,
+            match_case=match_case,
+            use_regex=use_regex,
+            whole_word=whole_word,
+            include_hidden=include_hidden,
+            max_results=max_results,
+            max_per_file=max_per_file,
+            max_file_size=max_file_size,
+            include_patterns=include_patterns,
+            exclude_patterns=exclude_patterns,
+            max_line_length=max_line_length,
+            using_default_includes=using_default_includes,
+        )
+        if rg_result is not None:
+            return rg_result
+
+        matches: List[Dict[str, Any]] = []
+        truncated = False
+        truncated_count = 0
+        files_scanned = 0
+        errors: List[str] = []
+        stop_search = False
+
+        binary_exts = {ext.lower() for ext in self.BINARY_EXTENSIONS}
+        allowed_exts = {ext.lower() for ext in self.DEFAULT_INCLUDE_EXTENSIONS}
+
+        deadline = time.monotonic() + 10.0
+
+        for dirpath, dirnames, filenames in os.walk(root_path):
+            if not include_hidden:
+                dirnames[:] = [d for d in dirnames if not d.startswith(".")]
+
+            for filename in filenames:
+                if time.monotonic() > deadline:
+                    truncated = True
+                    errors.append("Search aborted after reaching 10s execution limit.")
+                    stop_search = True
+                    break
+                if not include_hidden and filename.startswith("."):
+                    continue
+
+                abs_path = os.path.join(dirpath, filename)
+                rel_path = os.path.relpath(abs_path, root_path)
+
+                if using_default_excludes:
+                    path_parts = rel_path.replace("\\", "/").split("/")
+                    if any(part in self.DEFAULT_EXCLUDE_DIRS for part in path_parts):
+                        continue
+
+                if using_default_includes:
+                    ext = os.path.splitext(filename)[1].lower()
+                    if ext not in allowed_exts and filename not in self.ALWAYS_INCLUDE_FILENAMES:
+                        continue
+
+                if os.path.splitext(filename)[1].lower() in binary_exts:
+                    continue
+
+                if not self._should_include(rel_path, include_patterns, exclude_patterns):
+                    continue
+
+                try:
+                    size = os.path.getsize(abs_path)
+                except OSError:
+                    errors.append(f"Failed to stat file: {rel_path}")
+                    continue
+
+                if size > max_file_size:
+                    errors.append(f"Skipped (too large): {rel_path} ({size} bytes)")
+                    continue
+
+                files_scanned += 1
+                matches_for_file = 0
+
+                try:
+                    with open(abs_path, "r", encoding="utf-8", errors="replace") as file_obj:
+                        stop_current_file = False
+                        for line_number, line in enumerate(file_obj, start=1):
+                            if time.monotonic() > deadline:
+                                truncated = True
+                                errors.append("Search aborted after reaching 10s execution limit.")
+                                stop_search = True
+                                stop_current_file = True
+                                break
+                            iter_matches = list(compiled.finditer(line))
+                            if not iter_matches:
+                                continue
+
+                            # Enforce per-file cap
+                            remaining_per_file = max_per_file - matches_for_file
+                            if remaining_per_file <= 0:
+                                truncated = True
+                                truncated_count += len(iter_matches)
+                                stop_current_file = True
+                                break
+
+                            spans = [
+                                [match.start(), match.end()] for match in iter_matches[:remaining_per_file]
+                            ]
+                            dropped_from_file = len(iter_matches) - len(spans)
+                            if dropped_from_file > 0:
+                                truncated = True
+                                truncated_count += dropped_from_file
+
+                            # Enforce global cap
+                            remaining_global = max_results - len(matches)
+                            if remaining_global <= 0:
+                                truncated = True
+                                truncated_count += len(spans)
+                                stop_search = True
+                                break
+
+                            if len(spans) > remaining_global:
+                                truncated = True
+                                truncated_count += len(spans) - remaining_global
+                                spans = spans[:remaining_global]
+                                stop_search = True
+
+                            if spans:
+                                clean_line = line.rstrip("\n")
+                                truncated_line = clean_line
+                                line_truncated = False
+                                if len(clean_line) > max_line_length:
+                                    truncated_line = clean_line[:max_line_length] + "..."
+                                    line_truncated = True
+
+                                matches.append(
+                                    {
+                                        "path": abs_path,
+                                        "relative_path": rel_path,
+                                        "line_number": line_number,
+                                        "line": truncated_line,
+                                        "match_spans": spans,
+                                        "match_count": len(spans),
+                                        "line_truncated": line_truncated,
+                                    }
+                                )
+                                matches_for_file += len(spans)
+
+                            if stop_search or matches_for_file >= max_per_file:
+                                break
+                        if stop_current_file:
+                            break
+                except (OSError, UnicodeDecodeError):
+                    errors.append(f"Failed to read file: {rel_path}")
+                    continue
+
+                if stop_search:
+                    break
+            if stop_search:
+                break
+
+        total_matches = len(matches) + truncated_count
+
+        return {
+            "event": "file_search_response",
+            "root_path": root_path,
+            "query": query,
+            "match_case": match_case,
+            "regex": use_regex,
+            "whole_word": whole_word,
+            "include_patterns": include_patterns,
+            "exclude_patterns": exclude_patterns,
+            "matches": matches,
+            "matches_returned": len(matches),
+            "total_matches": total_matches,
+            "files_scanned": files_scanned,
+            "truncated": truncated,
+            "truncated_count": truncated_count,
+            "max_results": max_results,
+            "max_matches_per_file": max_per_file,
+            "errors": errors,
+        }
+
+    @staticmethod
+    def _normalize_patterns(patterns: Optional[Any]) -> List[str]:
+        if not patterns:
+            return []
+        if isinstance(patterns, str):
+            patterns = [patterns]
+        normalized: List[str] = []
+        for pattern in patterns:
+            if isinstance(pattern, str) and pattern.strip():
+                normalized.append(pattern.strip())
+        return normalized
+
+    @staticmethod
+    def _should_include(
+        relative_path: str,
+        include_patterns: List[str],
+        exclude_patterns: List[str],
+    ) -> bool:
+        if include_patterns:
+            if not any(fnmatch.fnmatch(relative_path, pat) for pat in include_patterns):
+                return False
+        if exclude_patterns:
+            if any(fnmatch.fnmatch(relative_path, pat) for pat in exclude_patterns):
+                return False
+        return True
+
+    @staticmethod
+    def _clamp_int(
+        value: Optional[Any],
+        *,
+        default: int,
+        min_value: int,
+        max_value: int,
+    ) -> int:
+        try:
+            coerced = int(value)
+        except (TypeError, ValueError):
+            coerced = default
+        return max(min_value, min(max_value, coerced))
+
+
 class ContentRequestHandler(AsyncHandler):
     """Handler for requesting content by hash for caching optimization."""
 
@@ -434,4 +1091,4 @@ def cache_content(content_hash: str, content: str) -> None:
 
 def get_cached_content(content_hash: str) -> str:
     """Get cached content by hash."""
-    return _content_cache.get(content_hash)
+    return _content_cache.get(content_hash)
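One behavioural note on the fallback search path above: `_should_include` applies the glob patterns with `fnmatch` against the path relative to `root_path`, and `fnmatch`'s `*` also crosses `/` (unlike ripgrep's gitignore-style `-g` globs), so the same pattern can select slightly different files depending on whether `rg` is installed. A quick standalone check of `fnmatch` semantics:

```python
import fnmatch

# fnmatch's "*" matches across "/" and "**" is just two stars, so:
print(fnmatch.fnmatch("src/pkg/mod.py", "src/*.py"))     # True  - "*" spans "/"
print(fnmatch.fnmatch("src/app.py", "src/**/*.py"))      # False - needs a "/" between "**" and "*"
print(fnmatch.fnmatch("src/pkg/mod.py", "src/**/*.py"))  # True
# ripgrep's -g 'src/**/*.py' would match src/app.py as well (gitignore semantics).
```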
portacode/connection/handlers/project_state/handlers.py CHANGED
@@ -764,14 +764,13 @@ class ProjectStateDiffContentHandler(AsyncHandler):
         if content is None or (content_type == "all" and not all([matching_tab.original_content, matching_tab.modified_content])):
             if content_type in ["original", "modified", "all"]:
                 # Re-generate the diff content if needed
-                await manager.create_diff_tab(
-                    source_client_session,
-                    file_path,
-                    from_ref,
-                    to_ref,
-                    from_hash,
-                    to_hash,
-                    activate=False  # Don't activate, just ensure content is loaded
+                await manager.open_diff_tab(
+                    source_client_session,
+                    file_path,
+                    from_ref,
+                    to_ref,
+                    from_hash,
+                    to_hash
                 )
 
                 # Try to get content again after regeneration (use same matching logic)
portacode/connection/handlers/tab_factory.py CHANGED
@@ -162,53 +162,7 @@ class TabFactory:
             await self._load_binary_content(file_path, tab_info, file_size)
 
         return TabInfo(**tab_info)
-
-    async def create_diff_tab(self, file_path: str, original_content: str,
-                              modified_content: str, tab_id: Optional[str] = None,
-                              diff_details: Optional[Dict[str, Any]] = None) -> TabInfo:
-        """Create a diff tab for comparing file versions.
-
-        Args:
-            file_path: Path to the file being compared
-            original_content: Original version of the file
-            modified_content: Modified version of the file
-            tab_id: Optional tab ID, will generate UUID if not provided
-            diff_details: Optional detailed diff information from diff-match-patch
-
-        Returns:
-            TabInfo object configured for diff viewing
-        """
-        if tab_id is None:
-            tab_id = str(uuid.uuid4())
-
-        file_path = Path(file_path)
-
-        metadata = {'diff_mode': True}
-        if diff_details:
-            metadata['diff_details'] = diff_details
-
-        # Cache diff content
-        original_hash = generate_content_hash(original_content)
-        modified_hash = generate_content_hash(modified_content)
-        cache_content(original_hash, original_content)
-        cache_content(modified_hash, modified_content)
-
-        return TabInfo(
-            tab_id=tab_id,
-            tab_type='diff',
-            title=f"{file_path.name} (diff)",
-            file_path=str(file_path),
-            content=None,  # Diff tabs don't use regular content
-            original_content=original_content,
-            modified_content=modified_content,
-            original_content_hash=original_hash,
-            modified_content_hash=modified_hash,
-            is_dirty=False,
-            mime_type=None,
-            encoding='utf-8',
-            metadata=metadata
-        )
-
+
     async def create_diff_tab_with_title(self, file_path: str, original_content: str,
                                          modified_content: str, title: str,
                                          tab_id: Optional[str] = None,
portacode/connection/terminal.py CHANGED
@@ -33,6 +33,7 @@ from .handlers import (
     DirectoryListHandler,
     FileInfoHandler,
     FileDeleteHandler,
+    FileSearchHandler,
     FileRenameHandler,
     ContentRequestHandler,
     ProjectStateFolderExpandHandler,
@@ -439,6 +440,7 @@ class TerminalManager:
         self._command_registry.register(ProjectAwareFileCreateHandler)  # Use project-aware version
         self._command_registry.register(ProjectAwareFolderCreateHandler)  # Use project-aware version
         self._command_registry.register(FileRenameHandler)
+        self._command_registry.register(FileSearchHandler)
         self._command_registry.register(ContentRequestHandler)
         # Project state handlers
         self._command_registry.register(ProjectStateFolderExpandHandler)
@@ -828,4 +830,4 @@ class TerminalManager:
             await self._request_client_sessions()
             logger.info("Client session request sent after reconnection")
         except Exception as exc:
-            logger.error("Failed to handle reconnection: %s", exc)
+            logger.error("Failed to handle reconnection: %s", exc)
portacode-1.3.35.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: portacode
-Version: 1.3.33
+Version: 1.3.35
 Summary: Portacode CLI client and SDK
 Home-page: https://github.com/portacode/portacode
 Author: Meena Erian
portacode-1.3.35.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
 portacode/README.md,sha256=4dKtpvR8LNgZPVz37GmkQCMWIr_u25Ao63iW56s7Ke4,775
 portacode/__init__.py,sha256=oB3sV1wXr-um-RXio73UG8E5Xx6cF2ZVJveqjNmC-vQ,1086
 portacode/__main__.py,sha256=jmHTGC1hzmo9iKJLv-SSYe9BSIbPPZ2IOpecI03PlTs,296
-portacode/_version.py,sha256=nJBSGzQLAZMH96LyzR0NHlWYQsAa05C0c2ZTZZIV0ss,706
+portacode/_version.py,sha256=1QrJuO_k2xQp0QmWYHVXK99GSTggGIkW59z4kHpoDpc,706
 portacode/cli.py,sha256=eDqcZMVFHKzqqWxedhhx8ylu5WMVCLqeJQkbPR7RcJE,16333
 portacode/data.py,sha256=5-s291bv8J354myaHm1Y7CQZTZyRzMU3TGe5U4hb-FA,1591
 portacode/keypair.py,sha256=PAcOYqlVLOoZTPYi6LvLjfsY6BkrWbLOhSZLb8r5sHs,3635
@@ -11,25 +11,25 @@ portacode/connection/README.md,sha256=f9rbuIEKa7cTm9C98rCiBbEtbiIXQU11esGSNhSMiJ
 portacode/connection/__init__.py,sha256=atqcVGkViIEd7pRa6cP2do07RJOM0UWpbnz5zXjGktU,250
 portacode/connection/client.py,sha256=Uqsy5xzN0j5AY0xkYs2_qd67N7SUopcnpkCbkmOnMgg,9102
 portacode/connection/multiplex.py,sha256=L-TxqJ_ZEbfNEfu1cwxgJ5vUdyRzZjsMy2Kx1diiZys,5237
-portacode/connection/terminal.py,sha256=iz4bi9pppxsiAiOtXEkffcHoyaWkvRG9BkojUyfZ784,42421
+portacode/connection/terminal.py,sha256=kUoB2bGe-ZmGnL5fDUuv8JRYr0Qt8rjYaERJW0ahZRQ,42504
 portacode/connection/handlers/README.md,sha256=HsLZG1QK1JNm67HsgL6WoDg9nxzKXxwkc5fJPFJdX5g,12169
-portacode/connection/handlers/WEBSOCKET_PROTOCOL.md,sha256=U-d58S-X2r5T6QAu-6NOzCIJg71FIj_vmOdUGCWFIhw,68211
-portacode/connection/handlers/__init__.py,sha256=4nv3Z4TGYjWcauKPWsbL_FbrTXApI94V7j6oiU1Vv-o,2144
+portacode/connection/handlers/WEBSOCKET_PROTOCOL.md,sha256=xnVrY5uR00z88Q1mBJp-RHVGBoUDS0CWn0PD_D49LDk,74136
+portacode/connection/handlers/__init__.py,sha256=U8OLLKNPZHca03Arhzbqfs2xzDObQD_Ze3zr5j_G_a4,2193
 portacode/connection/handlers/base.py,sha256=oENFb-Fcfzwk99Qx8gJQriEMiwSxwygwjOiuCH36hM4,10231
 portacode/connection/handlers/chunked_content.py,sha256=h6hXRmxSeOgnIxoU8CkmvEf2Odv-ajPrpHIe_W3GKcA,9251
-portacode/connection/handlers/file_handlers.py,sha256=kBj-o3HkqZTKsju2ZxRgBB3Ke42dn4sDQQKGO7APYf8,15451
+portacode/connection/handlers/file_handlers.py,sha256=nAJH8nXnX07xxD28ngLpgIUzcTuRwZBNpEGEKdRqohw,39507
 portacode/connection/handlers/project_aware_file_handlers.py,sha256=n0M2WmBNWPwzigdIkyZiAsePUQGXVqYSsDyOxm-Nsok,9253
 portacode/connection/handlers/project_state_handlers.py,sha256=v6ZefGW9i7n1aZLq2jOGumJIjYb6aHlPI4m1jkYewm8,1686
 portacode/connection/handlers/registry.py,sha256=qXGE60sYEWg6ZtVQzFcZ5YI2XWR6lMgw4hAL9x5qR1I,6181
 portacode/connection/handlers/session.py,sha256=XWiD4dofzZB9AH7EDqbWeJ-1CrSNPCUTR2nE2UEZh7Y,35568
 portacode/connection/handlers/system_handlers.py,sha256=65V5ctT0dIBc-oWG91e62MbdvU0z6x6JCTQuIqCWmZ0,5242
-portacode/connection/handlers/tab_factory.py,sha256=VBZnwtxgeNJCsfBzUjkFWAAGBdijvai4MS2dXnhFY8U,18000
+portacode/connection/handlers/tab_factory.py,sha256=yn93h6GASjD1VpvW1oqpax3EpoT0r7r97zFXxML1wdA,16173
 portacode/connection/handlers/terminal_handlers.py,sha256=HRwHW1GiqG1NtHVEqXHKaYkFfQEzCDDH6YIlHcb4XD8,11866
 portacode/connection/handlers/project_state/README.md,sha256=trdd4ig6ungmwH5SpbSLfyxbL-QgPlGNU-_XrMEiXtw,10114
 portacode/connection/handlers/project_state/__init__.py,sha256=5ucIqk6Iclqg6bKkL8r_wVs5Tlt6B9J7yQH6yQUt7gc,2541
 portacode/connection/handlers/project_state/file_system_watcher.py,sha256=2zingW9BoNKRijghHC2eHHdRoyDRdLmIl1yH1y-iuF8,10831
 portacode/connection/handlers/project_state/git_manager.py,sha256=GO0AEXzHEaKOBGZP043_V2KgGz8zqmSahWJ5KHgC_Cs,88845
-portacode/connection/handlers/project_state/handlers.py,sha256=nhs-3yiENdewAzVZnSdn2Ir-e6TQ9Nz_Bxk3iiFPd9c,37985
+portacode/connection/handlers/project_state/handlers.py,sha256=ay9D2KCPnPx-Luhx3_VAVq_HVFgLDDBwJvgaiKeseak,37889
 portacode/connection/handlers/project_state/manager.py,sha256=XX3wMgGdPbRgBBs_R1dXtQ4D9j-itETrJR_6IfBeDU0,61296
 portacode/connection/handlers/project_state/models.py,sha256=EZTKvxHKs8QlQUbzI0u2IqfzfRRXZixUIDBwTGCJATI,4313
 portacode/connection/handlers/project_state/utils.py,sha256=LsbQr9TH9Bz30FqikmtTxco4PlB_n0kUIuPKQ6Fb_mo,1665
@@ -38,7 +38,7 @@ portacode/static/js/utils/ntp-clock.js,sha256=KMeHGT-IlUSlxVRZZ899z25dQCJh6EJbgX
 portacode/utils/NTP_ARCHITECTURE.md,sha256=WkESTbz5SNAgdmDKk3DrHMhtYOPji_Kt3_a9arWdRig,3894
 portacode/utils/__init__.py,sha256=NgBlWTuNJESfIYJzP_3adI1yJQJR0XJLRpSdVNaBAN0,33
 portacode/utils/ntp_clock.py,sha256=6QJOVZr9VQuxIyJt9KNG4dR-nZ3bKNyipMxjqDWP89Y,5152
-portacode-1.3.33.dist-info/licenses/LICENSE,sha256=2FGbCnUDgRYuQTkB1O1dUUpu5CVAjK1j4_p6ack9Z54,1066
+portacode-1.3.35.dist-info/licenses/LICENSE,sha256=2FGbCnUDgRYuQTkB1O1dUUpu5CVAjK1j4_p6ack9Z54,1066
 test_modules/README.md,sha256=Do_agkm9WhSzueXjRAkV_xEj6Emy5zB3N3VKY5Roce8,9274
 test_modules/__init__.py,sha256=1LcbHodIHsB0g-g4NGjSn6AMuCoGbymvXPYLOb6Z7F0,53
 test_modules/test_device_online.py,sha256=yiSyVaMwKAugqIX_ZIxmLXiOlmA_8IRXiUp12YmpB98,1653
@@ -50,7 +50,7 @@ test_modules/test_terminal_buffer_performance.py,sha256=YQeDDZVnsQD3ug6udKUZH3NR
 test_modules/test_terminal_interaction.py,sha256=AxLb63oKhNLjKrny4hBj4hhFhrmHZ5UGStYDA0KzA0w,3163
 test_modules/test_terminal_loading_race_condition.py,sha256=PsGF8QzWeNNv6G7Fda6kETcBUcXyg_vRYeD-hDHAhCo,4158
 test_modules/test_terminal_start.py,sha256=y3IqG54UfMk-pAQ_fn5LuoM3kki6xRm11oB5AzfC-iE,1978
-testing_framework/.env.example,sha256=lReCwHAx7vxPxRT7TebEKiZ5HpEOQnpgraXJl2k-0xU,541
+testing_framework/.env.example,sha256=zGchLcB-p22YUUCU0JIyHLduLpDuFy8c5xPacctHvfY,708
 testing_framework/README.md,sha256=7o04mS2siNDuHA1UBh3Uu6XCbGomKjgb8gfl8YbClhE,9662
 testing_framework/__init__.py,sha256=safHXo_xBMwAwfiF_5rx0xGcPGfpBSOgkMZx04uj4No,575
 testing_framework/cli.py,sha256=ZHO37QO2IqZpC9VovrAYur2Vfc2AYeDqzi9Nb4lIA-w,13434
@@ -59,12 +59,12 @@ testing_framework/core/__init__.py,sha256=8AJQgqSCa9WgwkQNH_wTsA3JmJ4d4FRCweI-io
 testing_framework/core/base_test.py,sha256=0kKQDNCdAJyTQfJiMBzx9_2MMRrmaVfQF0cawhvian4,13149
 testing_framework/core/cli_manager.py,sha256=LDH_tWn-CmO08U_rmBIPpN_O6HLaQKRjdnfKGrtqs8Y,6991
 testing_framework/core/hierarchical_runner.py,sha256=tCeksh2cXbRspurSiE-mQM1M1BOPeY8mKFbjvaBTVHw,26401
-testing_framework/core/playwright_manager.py,sha256=8xl-19b8NQjKNdiRyDjyeXlYyKPZouSSmmlXjDpuI50,19559
+testing_framework/core/playwright_manager.py,sha256=kWKmlxzftDY0ZWS891zlHu_Ctdw7ufUIdx20Tsr7BL8,20626
 testing_framework/core/runner.py,sha256=j2QwNJmAxVBmJvcbVS7DgPJUKPNzqfLmt_4NNdaKmZU,19297
 testing_framework/core/shared_cli_manager.py,sha256=BESSNtyQb7BOlaOvZmm04T8Uezjms4KCBs2MzTxvzYQ,8790
 testing_framework/core/test_discovery.py,sha256=2FZ9fJ8Dp5dloA-fkgXoJ_gCMC_nYPBnA3Hs2xlagzM,4928
-portacode-1.3.33.dist-info/METADATA,sha256=pSZ7HgbTSK_h7QT8gblUhywYF5EWMed_z_17PL_i2Ss,6989
-portacode-1.3.33.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-portacode-1.3.33.dist-info/entry_points.txt,sha256=lLUUL-BM6_wwe44Xv0__5NQ1BnAz6jWjSMFvZdWW3zU,48
-portacode-1.3.33.dist-info/top_level.txt,sha256=TGhTYUxfW8SyVZc_zGgzjzc24gGT7nSw8Qf73liVRKM,41
-portacode-1.3.33.dist-info/RECORD,,
+portacode-1.3.35.dist-info/METADATA,sha256=rXmptmCEAAPKyXY3T2lO4yVKuWZEgz9_n9P_KbclyjI,6989
+portacode-1.3.35.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+portacode-1.3.35.dist-info/entry_points.txt,sha256=lLUUL-BM6_wwe44Xv0__5NQ1BnAz6jWjSMFvZdWW3zU,48
+portacode-1.3.35.dist-info/top_level.txt,sha256=TGhTYUxfW8SyVZc_zGgzjzc24gGT7nSw8Qf73liVRKM,41
+portacode-1.3.35.dist-info/RECORD,,
testing_framework/.env.example CHANGED
@@ -15,4 +15,7 @@ TEST_HEADLESS=false # true for headless mode, false for visible browser
 # Optional: Test Output Directories
 TEST_RESULTS_DIR=test_results
 TEST_RECORDINGS_DIR=test_recordings
-TEST_LOGS_DIR=test_results
+TEST_LOGS_DIR=test_results
+
+# Automation testing token (used by the testing framework to bypass captcha. Same token must be defined in ../main.env)
+TEST_RUNNER_BYPASS_TOKEN=same-as-in-main-env
testing_framework/core/playwright_manager.py CHANGED
@@ -8,6 +8,7 @@ import logging
 import json
 import time
 from datetime import datetime
+from urllib.parse import urlparse
 
 try:
     from playwright.async_api import async_playwright, Browser, BrowserContext, Page
@@ -71,6 +72,7 @@ class PlaywrightManager:
         env_headless = os.getenv('TEST_HEADLESS', 'false').lower() in ('true', '1', 'yes')
         env_video_width = int(os.getenv('TEST_VIDEO_WIDTH', '1920'))
         env_video_height = int(os.getenv('TEST_VIDEO_HEIGHT', '1080'))
+        automation_token = os.getenv('TEST_RUNNER_BYPASS_TOKEN')
 
         # Use provided values or fall back to environment
         self.base_url = url or env_url
@@ -125,14 +127,32 @@ class PlaywrightManager:
 
         # Create context with recording enabled and proper viewport
         video_size = {"width": env_video_width, "height": env_video_height}
-        self.context = await self.browser.new_context(
-            record_video_dir=str(self.test_recordings_dir),
-            record_video_size=video_size,
-            record_har_path=str(self.har_path),
-            record_har_omit_content=False,
-            viewport=video_size
-        )
-
+        context_kwargs = {
+            "record_video_dir": str(self.test_recordings_dir),
+            "record_video_size": video_size,
+            "record_har_path": str(self.har_path),
+            "record_har_omit_content": False,
+            "viewport": video_size
+        }
+        self.context = await self.browser.new_context(**context_kwargs)
+        if automation_token:
+            parsed_base = urlparse(self.base_url)
+            target_host = parsed_base.hostname
+            target_scheme = parsed_base.scheme or "http"
+            header_name = "X-Portacode-Automation"
+
+            async def automation_header_route(route, request):
+                headers = dict(request.headers)
+                parsed_request = urlparse(request.url)
+                if parsed_request.hostname == target_host and parsed_request.scheme == target_scheme:
+                    headers[header_name] = automation_token
+                else:
+                    headers.pop(header_name, None)
+                await route.continue_(headers=headers)
+
+            await self.context.route("**/*", automation_header_route)
+            self.logger.info("Automation bypass header restricted to same-origin requests")
+
+
         self.logger.info(f"Video recording configured: {env_video_width}x{env_video_height}")
 
         # Start tracing
@@ -178,7 +198,7 @@
         """Perform login using provided credentials."""
         try:
             # Navigate to login page first
-            login_url = f"{self.base_url}/accounts/login/"
+            login_url = f"{self.base_url}accounts/login/"
             await self.page.goto(login_url)
             await self.log_action("navigate_to_login", {"url": login_url})
             await self.take_screenshot("login_page")