ferp-0.7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. ferp/__init__.py +3 -0
  2. ferp/__main__.py +4 -0
  3. ferp/__version__.py +1 -0
  4. ferp/app.py +9 -0
  5. ferp/cli.py +160 -0
  6. ferp/core/__init__.py +0 -0
  7. ferp/core/app.py +1312 -0
  8. ferp/core/bundle_installer.py +245 -0
  9. ferp/core/command_provider.py +77 -0
  10. ferp/core/dependency_manager.py +59 -0
  11. ferp/core/fs_controller.py +70 -0
  12. ferp/core/fs_watcher.py +144 -0
  13. ferp/core/messages.py +49 -0
  14. ferp/core/path_actions.py +124 -0
  15. ferp/core/paths.py +3 -0
  16. ferp/core/protocols.py +8 -0
  17. ferp/core/script_controller.py +515 -0
  18. ferp/core/script_protocol.py +35 -0
  19. ferp/core/script_runner.py +421 -0
  20. ferp/core/settings.py +16 -0
  21. ferp/core/settings_store.py +69 -0
  22. ferp/core/state.py +156 -0
  23. ferp/core/task_store.py +164 -0
  24. ferp/core/transcript_logger.py +95 -0
  25. ferp/domain/__init__.py +0 -0
  26. ferp/domain/scripts.py +29 -0
  27. ferp/fscp/host/__init__.py +11 -0
  28. ferp/fscp/host/host.py +439 -0
  29. ferp/fscp/host/managed_process.py +113 -0
  30. ferp/fscp/host/process_registry.py +124 -0
  31. ferp/fscp/protocol/__init__.py +13 -0
  32. ferp/fscp/protocol/errors.py +2 -0
  33. ferp/fscp/protocol/messages.py +55 -0
  34. ferp/fscp/protocol/schemas/__init__.py +0 -0
  35. ferp/fscp/protocol/schemas/fscp/1.0/cancel.json +16 -0
  36. ferp/fscp/protocol/schemas/fscp/1.0/definitions.json +29 -0
  37. ferp/fscp/protocol/schemas/fscp/1.0/discriminator.json +14 -0
  38. ferp/fscp/protocol/schemas/fscp/1.0/envelope.json +13 -0
  39. ferp/fscp/protocol/schemas/fscp/1.0/exit.json +20 -0
  40. ferp/fscp/protocol/schemas/fscp/1.0/init.json +36 -0
  41. ferp/fscp/protocol/schemas/fscp/1.0/input_response.json +21 -0
  42. ferp/fscp/protocol/schemas/fscp/1.0/log.json +21 -0
  43. ferp/fscp/protocol/schemas/fscp/1.0/message.json +23 -0
  44. ferp/fscp/protocol/schemas/fscp/1.0/progress.json +23 -0
  45. ferp/fscp/protocol/schemas/fscp/1.0/request_input.json +47 -0
  46. ferp/fscp/protocol/schemas/fscp/1.0/result.json +16 -0
  47. ferp/fscp/protocol/schemas/fscp/__init__.py +0 -0
  48. ferp/fscp/protocol/state.py +16 -0
  49. ferp/fscp/protocol/validator.py +123 -0
  50. ferp/fscp/scripts/__init__.py +0 -0
  51. ferp/fscp/scripts/runtime/__init__.py +4 -0
  52. ferp/fscp/scripts/runtime/__main__.py +40 -0
  53. ferp/fscp/scripts/runtime/errors.py +14 -0
  54. ferp/fscp/scripts/runtime/io.py +64 -0
  55. ferp/fscp/scripts/runtime/script.py +149 -0
  56. ferp/fscp/scripts/runtime/state.py +17 -0
  57. ferp/fscp/scripts/runtime/worker.py +13 -0
  58. ferp/fscp/scripts/sdk.py +548 -0
  59. ferp/fscp/transcript/__init__.py +3 -0
  60. ferp/fscp/transcript/events.py +14 -0
  61. ferp/resources/__init__.py +0 -0
  62. ferp/services/__init__.py +3 -0
  63. ferp/services/file_listing.py +120 -0
  64. ferp/services/monday_sync.py +155 -0
  65. ferp/services/releases.py +214 -0
  66. ferp/services/scripts.py +90 -0
  67. ferp/services/update_check.py +130 -0
  68. ferp/styles/index.tcss +638 -0
  69. ferp/themes/themes.py +238 -0
  70. ferp/widgets/__init__.py +17 -0
  71. ferp/widgets/dialogs.py +167 -0
  72. ferp/widgets/file_tree.py +991 -0
  73. ferp/widgets/forms.py +146 -0
  74. ferp/widgets/output_panel.py +244 -0
  75. ferp/widgets/panels.py +13 -0
  76. ferp/widgets/process_list.py +158 -0
  77. ferp/widgets/readme_modal.py +59 -0
  78. ferp/widgets/scripts.py +192 -0
  79. ferp/widgets/task_capture.py +74 -0
  80. ferp/widgets/task_list.py +493 -0
  81. ferp/widgets/top_bar.py +110 -0
  82. ferp-0.7.1.dist-info/METADATA +128 -0
  83. ferp-0.7.1.dist-info/RECORD +87 -0
  84. ferp-0.7.1.dist-info/WHEEL +5 -0
  85. ferp-0.7.1.dist-info/entry_points.txt +2 -0
  86. ferp-0.7.1.dist-info/licenses/LICENSE +21 -0
  87. ferp-0.7.1.dist-info/top_level.txt +1 -0
ferp/services/monday_sync.py
@@ -0,0 +1,155 @@
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Any
+from urllib.request import Request, urlopen
+
+MONDAY_REQUIRED_COLUMNS = (
+    "Publisher",
+    "Territory",
+    "Control Type",
+    "Effective Date",
+    "Expiration Date",
+    "Status",
+)
+MONDAY_SUBITEM_COLUMNS = (
+    "Effective Date",
+    "Territory",
+    "Status",
+)
+
+
+def sync_monday_board(
+    api_token: str, board_id: int, cache_path: Path
+) -> dict[str, object]:
+    query = """
+    query ($boardId: [ID!], $cursor: String) {
+      boards(ids: $boardId) {
+        name
+        groups { id title }
+        items_page(limit: 500, cursor: $cursor) {
+          cursor
+          items {
+            id
+            name
+            group { id }
+            column_values { text column { title } }
+            subitems {
+              name
+              column_values { text column { title } }
+            }
+          }
+        }
+      }
+    }
+    """
+    headers = {
+        "Authorization": api_token,
+        "Content-Type": "application/json",
+    }
+
+    def fetch_page(cursor: str | None) -> dict[str, Any]:
+        payload = json.dumps(
+            {
+                "query": query,
+                "variables": {"boardId": [board_id], "cursor": cursor},
+            }
+        ).encode("utf-8")
+        request = Request("https://api.monday.com/v2", data=payload, headers=headers)
+        with urlopen(request, timeout=30) as response:
+            body = json.loads(response.read().decode("utf-8"))
+        if "errors" in body:
+            messages = "; ".join(
+                error.get("message", "Unknown error") for error in body["errors"]
+            )
+            raise RuntimeError(f"Monday API error: {messages}")
+        return body.get("data", {})
+
+    data = fetch_page(None)
+    boards = data.get("boards") or []
+    if not boards:
+        raise RuntimeError("Monday board not found.")
+
+    board = boards[0]
+    group_map = {
+        group.get("id"): group.get("title")
+        for group in board.get("groups", [])
+        if group.get("id")
+    }
+
+    result: dict[str, list[dict[str, object]]] = {}
+    publisher_count = 0
+    skipped = 0
+
+    def build_col_map(column_values: list[dict[str, Any]]) -> dict[str, str]:
+        col_map: dict[str, str] = {}
+        for col in column_values:
+            column = col.get("column") or {}
+            title = column.get("title")
+            if not title:
+                continue
+            col_map[title] = col.get("text") or ""
+        return col_map
+
+    while True:
+        items_page = board.get("items_page") or {}
+        items = items_page.get("items") or []
+        for item in items:
+            column_values = item.get("column_values") or []
+            col_map = build_col_map(column_values)
+
+            if "Publisher" not in col_map:
+                col_map["Publisher"] = item.get("name") or ""
+            publisher = col_map.get("Publisher", "").strip()
+            if not publisher:
+                skipped += 1
+                continue
+
+            territory_mode = col_map.get("Territory", "").strip()
+            subitems = item.get("subitems") or []
+            subitem_rows: list[dict[str, str]] = []
+            if territory_mode in {"Multiple", "Split"}:
+                for subitem in subitems:
+                    subitem_values = subitem.get("column_values") or []
+                    sub_map = build_col_map(subitem_values)
+                    row_data = {
+                        name.lower(): sub_map.get(name, "")
+                        for name in MONDAY_SUBITEM_COLUMNS
+                    }
+                    row_data["territory_code"] = subitem.get("name") or ""
+                    subitem_rows.append(row_data)
+
+            group_info = item.get("group") or {}
+            group_name = group_map.get(group_info.get("id"), "Ungrouped")
+            group_key = group_name.lower()
+            group_bucket = result.setdefault(group_key, [])
+            row: dict[str, object] = {
+                name.lower(): col_map.get(name, "") for name in MONDAY_REQUIRED_COLUMNS
+            }
+            if territory_mode == "Multiple" and subitem_rows:
+                row["multi_territory"] = subitem_rows
+            elif territory_mode == "Split" and subitem_rows:
+                row["split_territory"] = subitem_rows
+            group_bucket.append(row)
+            publisher_count += 1
+
+        cursor = items_page.get("cursor")
+        if not cursor:
+            break
+        data = fetch_page(cursor)
+        boards = data.get("boards") or []
+        if not boards:
+            break
+        board = boards[0]
+
+    cache_path.parent.mkdir(parents=True, exist_ok=True)
+    cache_path.write_text(json.dumps(result, indent=2) + "\n", encoding="utf-8")
+
+    return {
+        "cache_path": str(cache_path),
+        "board_name": board.get("name", ""),
+        "group_count": len(result),
+        "publisher_count": publisher_count,
+        "skipped": skipped,
+    }
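
sync_monday_board is the module's single entry point: it pages through the board with Monday's GraphQL items_page cursor, flattens each item's column values into lowercase-keyed rows bucketed by board group, and writes the whole structure to a JSON cache. A minimal sketch of a call site follows; the token, board id, and cache path are illustrative placeholders, not values from the package:

    from pathlib import Path

    from ferp.services.monday_sync import sync_monday_board

    summary = sync_monday_board(
        api_token="MONDAY_API_TOKEN",  # hypothetical token
        board_id=1234567890,  # hypothetical board id
        cache_path=Path.home() / ".ferp" / "monday_cache.json",  # hypothetical path
    )
    print(summary["board_name"], summary["publisher_count"], summary["skipped"])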
ferp/services/releases.py
@@ -0,0 +1,214 @@
+from __future__ import annotations
+
+import json
+import shutil
+import tempfile
+import zipfile
+from pathlib import Path
+
+import requests
+
+
+def update_scripts_from_release(
+    repo_url: str, scripts_dir: Path, *, dry_run: bool = False
+) -> str:
+    payload = _fetch_latest_release(repo_url)
+
+    zip_url = payload.get("zipball_url")
+    if not zip_url:
+        raise RuntimeError("Latest release is missing a zipball URL.")
+
+    tag_name = str(payload.get("tag_name") or "").strip()
+
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        tmp_path = Path(tmp_dir)
+        archive_path = tmp_path / "scripts.zip"
+        try:
+            response = requests.get(zip_url, headers={"User-Agent": "ferp"}, timeout=60)
+            response.raise_for_status()
+            archive_path.write_bytes(response.content)
+        except requests.RequestException as exc:
+            raise RuntimeError("Failed to download release archive.") from exc
+
+        extract_dir = tmp_path / "extract"
+        try:
+            with zipfile.ZipFile(archive_path) as archive:
+                archive.extractall(extract_dir)
+        except zipfile.BadZipFile as exc:
+            raise RuntimeError("Release archive is not a valid zip file.") from exc
+
+        source_dir = _find_release_payload_dir(extract_dir)
+        if not dry_run:
+            _replace_scripts_payload(source_dir, scripts_dir)
+
+    return tag_name
+
+
+def fetch_namespace_index(repo_url: str) -> tuple[str, dict]:
+    payload = _fetch_latest_release(repo_url)
+    tag_name = str(payload.get("tag_name") or "").strip()
+    assets = _release_assets(payload)
+
+    index_url = assets.get("namespaces.json")
+    if not index_url:
+        raise RuntimeError("Latest release is missing namespaces.json.")
+
+    try:
+        response = requests.get(index_url, timeout=30)
+        response.raise_for_status()
+        index_payload = response.json()
+    except requests.RequestException as exc:
+        raise RuntimeError("Failed to download namespaces.json.") from exc
+    except json.JSONDecodeError as exc:
+        raise RuntimeError("namespaces.json is not valid JSON.") from exc
+
+    return tag_name, index_payload
+
+
+def update_scripts_from_namespace_release(
+    repo_url: str,
+    scripts_dir: Path,
+    *,
+    namespace: str,
+    dry_run: bool = False,
+) -> str:
+    tag_name, index_payload = fetch_namespace_index(repo_url)
+    namespaces = index_payload.get("namespaces", [])
+    if not isinstance(namespaces, list):
+        raise RuntimeError("namespaces.json is missing a namespaces list.")
+
+    def _find_asset_id(ns_id: str) -> str:
+        for entry in namespaces:
+            if not isinstance(entry, dict):
+                continue
+            if str(entry.get("id", "")).strip() == ns_id:
+                asset = str(entry.get("asset", "")).strip()
+                if asset:
+                    return asset
+        return ""
+
+    core_asset = _find_asset_id("core")
+    if not core_asset:
+        raise RuntimeError("namespaces.json does not include a core asset.")
+    namespace_asset = _find_asset_id(namespace)
+    if not namespace_asset:
+        raise RuntimeError(f"namespaces.json does not include '{namespace}'.")
+
+    payload = _fetch_latest_release(repo_url)
+    assets = _release_assets(payload)
+    core_url = assets.get(core_asset)
+    namespace_url = assets.get(namespace_asset)
+    if not core_url or not namespace_url:
+        raise RuntimeError("Release assets missing for selected namespace.")
+
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        tmp_path = Path(tmp_dir)
+        core_zip = tmp_path / core_asset
+        ns_zip = tmp_path / namespace_asset
+        _download_asset(core_url, core_zip)
+        _download_asset(namespace_url, ns_zip)
+
+        payload_dir = tmp_path / "payload"
+        payload_dir.mkdir(parents=True, exist_ok=True)
+
+        for archive_path in (core_zip, ns_zip):
+            try:
+                with zipfile.ZipFile(archive_path) as archive:
+                    archive.extractall(payload_dir)
+            except zipfile.BadZipFile as exc:
+                raise RuntimeError("Release asset is not a valid zip file.") from exc
+
+        if not dry_run:
+            _replace_scripts_payload(payload_dir, scripts_dir)
+
+    return tag_name
+
+
+def _fetch_latest_release(repo_url: str) -> dict:
+    owner, repo = _parse_github_repo(repo_url)
+    api_url = f"https://api.github.com/repos/{owner}/{repo}/releases/latest"
+    headers = {
+        "Accept": "application/vnd.github+json",
+        "User-Agent": "ferp",
+    }
+
+    try:
+        response = requests.get(api_url, headers=headers, timeout=30)
+        response.raise_for_status()
+        payload = response.json()
+    except requests.RequestException as exc:
+        raise RuntimeError("Failed to fetch latest release metadata.") from exc
+    if not isinstance(payload, dict):
+        raise RuntimeError("Release metadata response is not valid JSON.")
+    return payload
+
+
+def _release_assets(payload: dict) -> dict[str, str]:
+    assets = payload.get("assets", [])
+    if not isinstance(assets, list):
+        return {}
+    results: dict[str, str] = {}
+    for asset in assets:
+        if not isinstance(asset, dict):
+            continue
+        name = str(asset.get("name") or "").strip()
+        url = str(asset.get("browser_download_url") or "").strip()
+        if name and url:
+            results[name] = url
+    return results
+
+
+def _download_asset(url: str, target: Path) -> None:
+    try:
+        response = requests.get(url, timeout=60)
+        response.raise_for_status()
+        target.write_bytes(response.content)
+    except requests.RequestException as exc:
+        raise RuntimeError(f"Failed to download asset from {url}.") from exc
+
+
+def _find_release_payload_dir(extract_dir: Path) -> Path:
+    root_dirs = [path for path in extract_dir.iterdir() if path.is_dir()]
+    if not root_dirs:
+        raise RuntimeError("Release archive did not contain any directories.")
+
+    if len(root_dirs) == 1:
+        root = root_dirs[0]
+        nested_scripts = root / "scripts"
+        if _payload_has_scripts(nested_scripts):
+            return nested_scripts
+        if _payload_has_scripts(root):
+            return root
+
+    for config_path in extract_dir.rglob("config.json"):
+        candidate = config_path.parent
+        if _payload_has_scripts(candidate):
+            return candidate
+        nested = candidate / "scripts"
+        if _payload_has_scripts(nested):
+            return nested
+
+    raise RuntimeError("Release archive did not include scripts payload.")
+
+
+def _payload_has_scripts(candidate: Path) -> bool:
+    if not candidate.exists() or not candidate.is_dir():
+        return False
+    return any(path.name == "script.py" for path in candidate.rglob("script.py"))
+
+
+def _parse_github_repo(repo_url: str) -> tuple[str, str]:
+    url = repo_url.strip().removesuffix(".git")
+    if "github.com/" not in url:
+        raise ValueError("Only GitHub URLs are supported for release updates.")
+    owner_repo = url.split("github.com/", 1)[1].strip("/")
+    parts = owner_repo.split("/")
+    if len(parts) != 2 or not all(parts):
+        raise ValueError("Invalid GitHub repository URL.")
+    return parts[0], parts[1]
+
+
+def _replace_scripts_payload(source_dir: Path, scripts_dir: Path) -> None:
+    if scripts_dir.exists():
+        shutil.rmtree(scripts_dir)
+    shutil.copytree(source_dir, scripts_dir, dirs_exist_ok=True)
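
Both update paths start from the latest GitHub release: update_scripts_from_release downloads the release zipball and locates a scripts payload inside it, while update_scripts_from_namespace_release reads a namespaces.json asset to map namespace ids to per-namespace zip assets, then installs the core asset plus the requested one. A sketch of how either entry point might be driven; the repository URL, scripts directory, and namespace name are illustrative, not taken from the package:

    from pathlib import Path

    from ferp.services.releases import (
        update_scripts_from_namespace_release,
        update_scripts_from_release,
    )

    repo = "https://github.com/example-org/ferp-scripts"  # hypothetical repo
    scripts_dir = Path.home() / ".ferp" / "scripts"  # hypothetical location

    # dry_run=True downloads and validates the archive without replacing scripts_dir.
    tag = update_scripts_from_release(repo, scripts_dir, dry_run=True)

    # Namespace install: fetches the "core" asset plus the named namespace asset.
    tag = update_scripts_from_namespace_release(repo, scripts_dir, namespace="audio")
    print(f"Scripts updated to release {tag}")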
ferp/services/scripts.py
@@ -0,0 +1,90 @@
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Literal
+
+from ferp.domain.scripts import Script
+
+
+@dataclass(frozen=True)
+class ScriptExecutionContext:
+    """Normalized FSCP execution details for a script."""
+
+    script: Script
+    script_path: Path
+    target_path: Path
+    target_kind: Literal["file", "directory"]
+
+
+def build_execution_context(
+    *,
+    app_root: Path,
+    current_path: Path,
+    selected_path: Path | None,
+    script: Script,
+) -> ScriptExecutionContext:
+    """Resolve script metadata into an execution context for the FSCP runner."""
+
+    full_path = (app_root / script.script).resolve()
+
+    if not full_path.exists():
+        raise FileNotFoundError(full_path)
+
+    if full_path.suffix != ".py":
+        raise ValueError(
+            f"FSCP scripts must be Python files. Unsupported script: {full_path}"
+        )
+
+    if script.target == "current_directory":
+        target_path = current_path
+    elif script.target in {"highlighted_file", "highlighted_directory"}:
+        if selected_path is None:
+            raise ValueError("Select a file or directory before running this script.")
+        target_path = selected_path
+    else:
+        raise ValueError(f"Unsupported script target: {script.target}")
+
+    if not target_path.exists():
+        raise FileNotFoundError(target_path)
+
+    target_kind: Literal["file", "directory"] = (
+        "directory" if target_path.is_dir() else "file"
+    )
+    if script.target == "highlighted_file" and target_kind != "file":
+        raise ValueError(
+            f"'{script.name}' expects a file. Highlight a file and try again."
+        )
+    if script.target == "highlighted_directory" and target_kind != "directory":
+        raise ValueError(
+            f"'{script.name}' expects a directory. Highlight a folder and try again."
+        )
+    if script.target == "highlighted_file":
+        allowed_extensions = _normalize_extensions(script.file_extensions)
+        if allowed_extensions:
+            name = target_path.name.lower()
+            if not any(name.endswith(ext) for ext in allowed_extensions):
+                extensions_label = ", ".join(sorted(allowed_extensions))
+                raise ValueError(
+                    f"'{script.name}' expects {extensions_label} file(s). "
+                    "Highlight a matching file and try again."
+                )
+
+    return ScriptExecutionContext(
+        script=script,
+        script_path=full_path,
+        target_path=target_path,
+        target_kind=target_kind,
+    )
+
+
+def _normalize_extensions(extensions: list[str] | None) -> list[str]:
+    if not extensions:
+        return []
+    normalized: list[str] = []
+    for ext in extensions:
+        cleaned = ext.strip().lower()
+        if not cleaned:
+            continue
+        if not cleaned.startswith("."):
+            cleaned = f".{cleaned}"
+        normalized.append(cleaned)
+    return normalized
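
build_execution_context is pure validation: it resolves the script file under the app root, requires a .py suffix, picks the target from either the current directory or the highlighted selection, and enforces the declared target kind and extension filter before handing back a frozen context. A sketch of a call, assuming Script is constructible with the fields referenced above (name, script, target, file_extensions); the concrete values are illustrative:

    from pathlib import Path

    from ferp.domain.scripts import Script
    from ferp.services.scripts import build_execution_context

    # Hypothetical script definition; the real Script constructor may differ.
    script = Script(
        name="Normalize WAV",
        script="scripts/audio/normalize/script.py",
        target="highlighted_file",
        file_extensions=["wav"],
    )

    ctx = build_execution_context(
        app_root=Path.home() / ".ferp",  # hypothetical app root
        current_path=Path.cwd(),
        selected_path=Path("mix.wav"),
        script=script,
    )
    print(ctx.script_path, ctx.target_kind)  # resolved script path and "file"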
ferp/services/update_check.py
@@ -0,0 +1,130 @@
+from __future__ import annotations
+
+import json
+import urllib.request
+from dataclasses import dataclass
+from datetime import datetime, timezone
+from pathlib import Path
+
+
+@dataclass(frozen=True)
+class UpdateCheckResult:
+    ok: bool
+    current: str
+    latest: str | None
+    is_update: bool
+    error: str | None
+    checked_at: datetime | None
+
+
+def check_for_update(
+    package: str,
+    current: str,
+    cache_path: Path,
+    *,
+    ttl_seconds: int,
+    force: bool = False,
+) -> UpdateCheckResult:
+    now = datetime.now(timezone.utc)
+    if not force:
+        cached = _read_cache(cache_path, ttl_seconds)
+        if cached is not None:
+            latest, checked_at = cached
+            return UpdateCheckResult(
+                ok=True,
+                current=current,
+                latest=latest,
+                is_update=is_newer(latest, current),
+                error=None,
+                checked_at=checked_at,
+            )
+
+    try:
+        latest = fetch_latest_version(package)
+    except Exception as exc:
+        cached = _read_cache(cache_path, None)
+        latest = cached[0] if cached else None
+        checked_at = cached[1] if cached else None
+        return UpdateCheckResult(
+            ok=cached is not None,
+            current=current,
+            latest=latest,
+            is_update=is_newer(latest, current) if latest else False,
+            error=str(exc),
+            checked_at=checked_at,
+        )
+
+    _write_cache(cache_path, latest, now)
+    return UpdateCheckResult(
+        ok=True,
+        current=current,
+        latest=latest,
+        is_update=is_newer(latest, current),
+        error=None,
+        checked_at=now,
+    )
+
+
+def fetch_latest_version(package: str) -> str:
+    url = f"https://pypi.org/pypi/{package}/json"
+    with urllib.request.urlopen(url, timeout=5) as response:
+        payload = json.load(response)
+    info = payload.get("info", {})
+    version = info.get("version")
+    if not isinstance(version, str) or not version:
+        raise RuntimeError("PyPI response missing latest version.")
+    return version
+
+
+def is_newer(latest: str, current: str) -> bool:
+    def normalize(value: str) -> tuple[int, ...]:
+        parts: list[int] = []
+        for token in value.split("."):
+            digits = []
+            for ch in token:
+                if ch.isdigit():
+                    digits.append(ch)
+                else:
+                    break
+            number = int("".join(digits) or "0")
+            parts.append(number)
+        while parts and parts[-1] == 0:
+            parts.pop()
+        return tuple(parts)
+
+    latest_parts = normalize(latest)
+    current_parts = normalize(current)
+    max_len = max(len(latest_parts), len(current_parts))
+    latest_parts += (0,) * (max_len - len(latest_parts))
+    current_parts += (0,) * (max_len - len(current_parts))
+    return latest_parts > current_parts
+
+
+def _read_cache(
+    cache_path: Path, ttl_seconds: int | None
+) -> tuple[str, datetime] | None:
+    try:
+        raw = json.loads(cache_path.read_text())
+    except (OSError, json.JSONDecodeError):
+        return None
+    latest = raw.get("latest")
+    checked_at = raw.get("checked_at")
+    if not isinstance(latest, str) or not latest:
+        return None
+    if not isinstance(checked_at, (int, float)):
+        return None
+    checked_at_dt = datetime.fromtimestamp(checked_at, tz=timezone.utc)
+    if ttl_seconds is not None:
+        age = (datetime.now(timezone.utc) - checked_at_dt).total_seconds()
+        if age > ttl_seconds:
+            return None
+    return latest, checked_at_dt
+
+
+def _write_cache(cache_path: Path, latest: str, checked_at: datetime) -> None:
+    cache_path.parent.mkdir(parents=True, exist_ok=True)
+    payload = {
+        "latest": latest,
+        "checked_at": checked_at.timestamp(),
+    }
+    cache_path.write_text(json.dumps(payload))
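
check_for_update reads the JSON cache first and only queries PyPI when the cache is missing, older than ttl_seconds, or force=True; on a network failure it falls back to any cached value (ignoring the TTL) and reports the error alongside it. Note that is_newer compares only the numeric prefix of each dot-separated token and strips trailing zeros, so "0.8" beats "0.7.1" and "1.0.0rc1" normalizes to (1,). A sketch of a call site; the cache path and TTL are illustrative:

    from pathlib import Path

    from ferp.services.update_check import check_for_update

    result = check_for_update(
        "ferp",
        "0.7.1",
        Path.home() / ".ferp" / "update_check.json",  # hypothetical cache path
        ttl_seconds=86400,  # query PyPI at most once per day
    )
    if result.is_update:
        print(f"ferp {result.latest} is available (installed {result.current})")
    elif result.error:
        print(f"Update check failed: {result.error}")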