llmcode-cli 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212) hide show
  1. llm_code/__init__.py +2 -0
  2. llm_code/analysis/__init__.py +6 -0
  3. llm_code/analysis/cache.py +33 -0
  4. llm_code/analysis/engine.py +256 -0
  5. llm_code/analysis/go_rules.py +114 -0
  6. llm_code/analysis/js_rules.py +84 -0
  7. llm_code/analysis/python_rules.py +311 -0
  8. llm_code/analysis/rules.py +140 -0
  9. llm_code/analysis/rust_rules.py +108 -0
  10. llm_code/analysis/universal_rules.py +111 -0
  11. llm_code/api/__init__.py +0 -0
  12. llm_code/api/client.py +90 -0
  13. llm_code/api/errors.py +73 -0
  14. llm_code/api/openai_compat.py +390 -0
  15. llm_code/api/provider.py +35 -0
  16. llm_code/api/sse.py +52 -0
  17. llm_code/api/types.py +140 -0
  18. llm_code/cli/__init__.py +0 -0
  19. llm_code/cli/commands.py +70 -0
  20. llm_code/cli/image.py +122 -0
  21. llm_code/cli/render.py +214 -0
  22. llm_code/cli/status_line.py +79 -0
  23. llm_code/cli/streaming.py +92 -0
  24. llm_code/cli/tui_main.py +220 -0
  25. llm_code/computer_use/__init__.py +11 -0
  26. llm_code/computer_use/app_detect.py +49 -0
  27. llm_code/computer_use/app_tier.py +57 -0
  28. llm_code/computer_use/coordinator.py +99 -0
  29. llm_code/computer_use/input_control.py +71 -0
  30. llm_code/computer_use/screenshot.py +93 -0
  31. llm_code/cron/__init__.py +13 -0
  32. llm_code/cron/parser.py +145 -0
  33. llm_code/cron/scheduler.py +135 -0
  34. llm_code/cron/storage.py +126 -0
  35. llm_code/enterprise/__init__.py +1 -0
  36. llm_code/enterprise/audit.py +59 -0
  37. llm_code/enterprise/auth.py +26 -0
  38. llm_code/enterprise/oidc.py +95 -0
  39. llm_code/enterprise/rbac.py +65 -0
  40. llm_code/harness/__init__.py +5 -0
  41. llm_code/harness/config.py +33 -0
  42. llm_code/harness/engine.py +129 -0
  43. llm_code/harness/guides.py +41 -0
  44. llm_code/harness/sensors.py +68 -0
  45. llm_code/harness/templates.py +84 -0
  46. llm_code/hida/__init__.py +1 -0
  47. llm_code/hida/classifier.py +187 -0
  48. llm_code/hida/engine.py +49 -0
  49. llm_code/hida/profiles.py +95 -0
  50. llm_code/hida/types.py +28 -0
  51. llm_code/ide/__init__.py +1 -0
  52. llm_code/ide/bridge.py +80 -0
  53. llm_code/ide/detector.py +76 -0
  54. llm_code/ide/server.py +169 -0
  55. llm_code/logging.py +29 -0
  56. llm_code/lsp/__init__.py +0 -0
  57. llm_code/lsp/client.py +298 -0
  58. llm_code/lsp/detector.py +42 -0
  59. llm_code/lsp/manager.py +56 -0
  60. llm_code/lsp/tools.py +288 -0
  61. llm_code/marketplace/__init__.py +0 -0
  62. llm_code/marketplace/builtin_registry.py +102 -0
  63. llm_code/marketplace/installer.py +162 -0
  64. llm_code/marketplace/plugin.py +78 -0
  65. llm_code/marketplace/registry.py +360 -0
  66. llm_code/mcp/__init__.py +0 -0
  67. llm_code/mcp/bridge.py +87 -0
  68. llm_code/mcp/client.py +117 -0
  69. llm_code/mcp/health.py +120 -0
  70. llm_code/mcp/manager.py +214 -0
  71. llm_code/mcp/oauth.py +219 -0
  72. llm_code/mcp/transport.py +254 -0
  73. llm_code/mcp/types.py +53 -0
  74. llm_code/remote/__init__.py +0 -0
  75. llm_code/remote/client.py +136 -0
  76. llm_code/remote/protocol.py +22 -0
  77. llm_code/remote/server.py +275 -0
  78. llm_code/remote/ssh_proxy.py +56 -0
  79. llm_code/runtime/__init__.py +0 -0
  80. llm_code/runtime/auto_commit.py +56 -0
  81. llm_code/runtime/auto_diagnose.py +62 -0
  82. llm_code/runtime/checkpoint.py +70 -0
  83. llm_code/runtime/checkpoint_recovery.py +142 -0
  84. llm_code/runtime/compaction.py +35 -0
  85. llm_code/runtime/compressor.py +415 -0
  86. llm_code/runtime/config.py +533 -0
  87. llm_code/runtime/context.py +49 -0
  88. llm_code/runtime/conversation.py +921 -0
  89. llm_code/runtime/cost_tracker.py +126 -0
  90. llm_code/runtime/dream.py +127 -0
  91. llm_code/runtime/file_protection.py +150 -0
  92. llm_code/runtime/hardware.py +85 -0
  93. llm_code/runtime/hooks.py +223 -0
  94. llm_code/runtime/indexer.py +230 -0
  95. llm_code/runtime/knowledge_compiler.py +232 -0
  96. llm_code/runtime/memory.py +132 -0
  97. llm_code/runtime/memory_layers.py +467 -0
  98. llm_code/runtime/memory_lint.py +252 -0
  99. llm_code/runtime/model_aliases.py +37 -0
  100. llm_code/runtime/ollama.py +93 -0
  101. llm_code/runtime/overlay.py +124 -0
  102. llm_code/runtime/permissions.py +200 -0
  103. llm_code/runtime/plan.py +45 -0
  104. llm_code/runtime/prompt.py +238 -0
  105. llm_code/runtime/repo_map.py +174 -0
  106. llm_code/runtime/sandbox.py +116 -0
  107. llm_code/runtime/session.py +268 -0
  108. llm_code/runtime/skill_resolver.py +61 -0
  109. llm_code/runtime/skills.py +133 -0
  110. llm_code/runtime/speculative.py +75 -0
  111. llm_code/runtime/streaming_executor.py +216 -0
  112. llm_code/runtime/telemetry.py +196 -0
  113. llm_code/runtime/token_budget.py +26 -0
  114. llm_code/runtime/vcr.py +142 -0
  115. llm_code/runtime/vision.py +102 -0
  116. llm_code/swarm/__init__.py +1 -0
  117. llm_code/swarm/backend_subprocess.py +108 -0
  118. llm_code/swarm/backend_tmux.py +103 -0
  119. llm_code/swarm/backend_worktree.py +306 -0
  120. llm_code/swarm/checkpoint.py +74 -0
  121. llm_code/swarm/coordinator.py +236 -0
  122. llm_code/swarm/mailbox.py +88 -0
  123. llm_code/swarm/manager.py +202 -0
  124. llm_code/swarm/memory_sync.py +80 -0
  125. llm_code/swarm/recovery.py +21 -0
  126. llm_code/swarm/team.py +67 -0
  127. llm_code/swarm/types.py +31 -0
  128. llm_code/task/__init__.py +16 -0
  129. llm_code/task/diagnostics.py +93 -0
  130. llm_code/task/manager.py +162 -0
  131. llm_code/task/types.py +112 -0
  132. llm_code/task/verifier.py +104 -0
  133. llm_code/tools/__init__.py +0 -0
  134. llm_code/tools/agent.py +145 -0
  135. llm_code/tools/agent_roles.py +82 -0
  136. llm_code/tools/base.py +94 -0
  137. llm_code/tools/bash.py +565 -0
  138. llm_code/tools/computer_use_tools.py +278 -0
  139. llm_code/tools/coordinator_tool.py +75 -0
  140. llm_code/tools/cron_create.py +90 -0
  141. llm_code/tools/cron_delete.py +49 -0
  142. llm_code/tools/cron_list.py +51 -0
  143. llm_code/tools/deferred.py +92 -0
  144. llm_code/tools/dump.py +116 -0
  145. llm_code/tools/edit_file.py +282 -0
  146. llm_code/tools/git_tools.py +531 -0
  147. llm_code/tools/glob_search.py +112 -0
  148. llm_code/tools/grep_search.py +144 -0
  149. llm_code/tools/ide_diagnostics.py +59 -0
  150. llm_code/tools/ide_open.py +58 -0
  151. llm_code/tools/ide_selection.py +52 -0
  152. llm_code/tools/memory_tools.py +138 -0
  153. llm_code/tools/multi_edit.py +143 -0
  154. llm_code/tools/notebook_edit.py +107 -0
  155. llm_code/tools/notebook_read.py +81 -0
  156. llm_code/tools/parsing.py +63 -0
  157. llm_code/tools/read_file.py +154 -0
  158. llm_code/tools/registry.py +58 -0
  159. llm_code/tools/search_backends/__init__.py +56 -0
  160. llm_code/tools/search_backends/brave.py +56 -0
  161. llm_code/tools/search_backends/duckduckgo.py +129 -0
  162. llm_code/tools/search_backends/searxng.py +71 -0
  163. llm_code/tools/search_backends/tavily.py +73 -0
  164. llm_code/tools/swarm_create.py +109 -0
  165. llm_code/tools/swarm_delete.py +95 -0
  166. llm_code/tools/swarm_list.py +44 -0
  167. llm_code/tools/swarm_message.py +109 -0
  168. llm_code/tools/task_close.py +79 -0
  169. llm_code/tools/task_plan.py +79 -0
  170. llm_code/tools/task_verify.py +90 -0
  171. llm_code/tools/tool_search.py +65 -0
  172. llm_code/tools/web_common.py +258 -0
  173. llm_code/tools/web_fetch.py +223 -0
  174. llm_code/tools/web_search.py +280 -0
  175. llm_code/tools/write_file.py +118 -0
  176. llm_code/tui/__init__.py +1 -0
  177. llm_code/tui/app.py +2432 -0
  178. llm_code/tui/chat_view.py +82 -0
  179. llm_code/tui/chat_widgets.py +309 -0
  180. llm_code/tui/header_bar.py +46 -0
  181. llm_code/tui/input_bar.py +349 -0
  182. llm_code/tui/keybindings.py +142 -0
  183. llm_code/tui/marketplace.py +210 -0
  184. llm_code/tui/status_bar.py +72 -0
  185. llm_code/tui/theme.py +96 -0
  186. llm_code/utils/__init__.py +0 -0
  187. llm_code/utils/diff.py +111 -0
  188. llm_code/utils/errors.py +70 -0
  189. llm_code/utils/hyperlink.py +73 -0
  190. llm_code/utils/notebook.py +179 -0
  191. llm_code/utils/search.py +69 -0
  192. llm_code/utils/text_normalize.py +28 -0
  193. llm_code/utils/version_check.py +62 -0
  194. llm_code/vim/__init__.py +4 -0
  195. llm_code/vim/engine.py +51 -0
  196. llm_code/vim/motions.py +172 -0
  197. llm_code/vim/operators.py +183 -0
  198. llm_code/vim/text_objects.py +139 -0
  199. llm_code/vim/transitions.py +279 -0
  200. llm_code/vim/types.py +68 -0
  201. llm_code/voice/__init__.py +1 -0
  202. llm_code/voice/languages.py +43 -0
  203. llm_code/voice/recorder.py +136 -0
  204. llm_code/voice/stt.py +36 -0
  205. llm_code/voice/stt_anthropic.py +66 -0
  206. llm_code/voice/stt_google.py +32 -0
  207. llm_code/voice/stt_whisper.py +52 -0
  208. llmcode_cli-1.0.0.dist-info/METADATA +524 -0
  209. llmcode_cli-1.0.0.dist-info/RECORD +212 -0
  210. llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
  211. llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
  212. llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,126 @@
1
+ """Token cost tracking with model pricing — user-customizable via config."""
2
+ from __future__ import annotations
3
+ from dataclasses import dataclass
4
+
5
# Built-in fallback pricing per 1M tokens as (input_usd, output_usd).
# Used only when the user's config supplies no matching "pricing" override;
# looked up by exact model name first, then by substring (see
# CostTracker._get_pricing). Unknown models fall through to (0.0, 0.0).
# NOTE(review): provider prices drift over time — verify against the
# vendors' published price pages before relying on these figures.
BUILTIN_PRICING: dict[str, tuple[float, float]] = {
    # OpenAI
    "gpt-4o": (2.50, 10.00),
    "gpt-4o-mini": (0.15, 0.60),
    "gpt-4-turbo": (10.00, 30.00),
    "gpt-3.5-turbo": (0.50, 1.50),
    "o3": (2.00, 8.00),
    "o4-mini": (0.50, 2.00),
    # Anthropic
    "claude-opus-4-6": (15.00, 75.00),
    "claude-sonnet-4-6": (3.00, 15.00),
    "claude-haiku-4-5": (0.80, 4.00),
    # Google
    "gemini-2.5-pro": (1.25, 10.00),
    "gemini-2.5-flash": (0.15, 0.60),
    # DeepSeek
    "deepseek-chat": (0.27, 1.10),
    "deepseek-reasoner": (0.55, 2.19),
    # xAI
    "grok-3": (3.00, 15.00),
    "grok-3-mini": (0.30, 0.50),
}
28
+
29
+
30
class BudgetExceededError(Exception):
    """Signals that accumulated spend has gone past the configured budget cap."""

    def __init__(self, spent: float, budget: float) -> None:
        # Build the message first, then keep both figures on the instance so
        # handlers can report them without re-parsing the string.
        super().__init__(f"Budget limit (${budget:.2f}) exceeded (spent ${spent:.4f})")
        self.spent = spent
        self.budget = budget
37
+
38
+
39
@dataclass
class CostTracker:
    """Accumulates per-session token usage and estimates spend in USD.

    Pricing resolution order (see ``_get_pricing``): user ``custom_pricing``
    (exact model match → substring match → ``"default"`` entry), then
    ``BUILTIN_PRICING`` (exact → substring), else the model is treated as free.
    """

    model: str = ""  # model identifier used for pricing lookups
    total_input_tokens: int = 0
    total_output_tokens: int = 0
    total_cost_usd: float = 0.0
    custom_pricing: dict | None = None  # from config.json "pricing": {model: [in, out]}
    max_budget_usd: float | None = None  # hard spend cap in USD; None = unlimited

    def add_usage(
        self,
        input_tokens: int,
        output_tokens: int,
        cache_read_tokens: int = 0,
        cache_creation_tokens: int = 0,
    ) -> float:
        """Record token usage and return the cost of this request in USD.

        Cache reads are billed at 10% of the input rate and cache creation
        at 125% of it (Anthropic-style prompt-caching multipliers).
        """
        self.total_input_tokens += input_tokens
        self.total_output_tokens += output_tokens
        in_price, out_price = self._get_pricing()
        request_cost = (
            input_tokens * in_price
            + output_tokens * out_price
            + cache_read_tokens * in_price * 0.10
            + cache_creation_tokens * in_price * 1.25
        ) / 1_000_000  # prices are quoted per 1M tokens
        self.total_cost_usd += request_cost
        return request_cost

    def is_budget_exceeded(self) -> bool:
        """Return True if a budget is set and has been exceeded."""
        if self.max_budget_usd is None:
            return False
        return self.total_cost_usd > self.max_budget_usd

    def remaining_budget(self) -> float | None:
        """Return remaining budget in USD, or None if no budget is set."""
        if self.max_budget_usd is None:
            return None
        return max(0.0, self.max_budget_usd - self.total_cost_usd)

    def check_budget(self) -> None:
        """Raise BudgetExceededError if the budget has been exceeded."""
        if self.is_budget_exceeded():
            raise BudgetExceededError(
                spent=self.total_cost_usd,
                budget=self.max_budget_usd,  # type: ignore[arg-type]
            )

    @staticmethod
    def _coerce_pair(p: object) -> tuple[float, float]:
        """Normalize a config pricing entry to (input, output) per 1M tokens.

        BUG FIX: tuples were previously rejected (``isinstance(p, list)``),
        silently pricing tuple-configured models as free; short/malformed
        entries would raise IndexError. Both now degrade to (0.0, 0.0).
        """
        if isinstance(p, (list, tuple)) and len(p) >= 2:
            return (float(p[0]), float(p[1]))
        return (0.0, 0.0)

    def _get_pricing(self) -> tuple[float, float]:
        """Resolve the (input, output) price per 1M tokens for ``self.model``."""
        # 1. User custom pricing: exact match, then substring, then "default".
        if self.custom_pricing:
            if self.model in self.custom_pricing:
                return self._coerce_pair(self.custom_pricing[self.model])
            for key, p in self.custom_pricing.items():
                if key != "default" and key in self.model:
                    return self._coerce_pair(p)
            if "default" in self.custom_pricing:
                return self._coerce_pair(self.custom_pricing["default"])

        # 2. Built-in pricing (exact match).
        if self.model in BUILTIN_PRICING:
            return BUILTIN_PRICING[self.model]

        # 3. Built-in pricing (substring match against the lowercased model id).
        model_lower = self.model.lower()
        for key, pricing in BUILTIN_PRICING.items():
            if key in model_lower:
                return pricing

        # 4. Unknown model — treat as free rather than guessing a price.
        return (0.0, 0.0)

    def format_cost(self) -> str:
        """Return a one-line human-readable token/cost summary."""
        lines = [f"Tokens — in: {self.total_input_tokens:,} out: {self.total_output_tokens:,}"]
        in_price, out_price = self._get_pricing()
        if self.total_cost_usd > 0.0001:
            lines.append(f" Cost: ${self.total_cost_usd:.4f}")
            lines.append(f" Rate: ${in_price}/1M in · ${out_price}/1M out")
        elif in_price == 0 and out_price == 0:
            lines.append(" Cost: $0 (free / local model)")
        else:
            # Non-zero but tiny spend: show more precision instead of rounding to 0.
            lines.append(f" Cost: ${self.total_cost_usd:.6f}")
        return " ".join(lines)
@@ -0,0 +1,127 @@
1
+ """DreamTask — background memory consolidation via LLM summarization."""
2
+ from __future__ import annotations
3
+
4
+ import logging
5
+ from datetime import datetime, timezone
6
+ from typing import TYPE_CHECKING
7
+
8
+ from filelock import FileLock
9
+
10
+ from llm_code.api.types import Message, MessageRequest, TextBlock
11
+
12
+ if TYPE_CHECKING:
13
+ from llm_code.api.provider import LLMProvider
14
+ from llm_code.runtime.config import RuntimeConfig
15
+ from llm_code.runtime.memory import MemoryStore
16
+ from llm_code.runtime.session import Session
17
+
18
logger = logging.getLogger(__name__)

# System prompt for the consolidation LLM call; the session transcript is
# supplied as the user message (see DreamTask.consolidate). NOTE: this string
# is runtime behavior — rewording it changes the model's output format.
_CONSOLIDATION_SYSTEM_PROMPT = """\
You are a memory consolidation agent. Given a conversation transcript from a \
coding session, produce a structured Markdown summary with these sections:

## Summary
1-3 sentence overview of what was accomplished.

## Modified Files
Bulleted list of files that were created, edited, or deleted.

## Decisions
Key architectural or design decisions made during the session.

## Patterns
Reusable patterns, idioms, or techniques worth remembering for future sessions.

## Open Items
Any unfinished work, known issues, or next steps mentioned.

Be concise. Focus on facts. Do not invent information not present in the transcript.
"""
41
+
42
+
43
class DreamTask:
    """Consolidates a session's conversation into a structured summary via LLM."""

    async def consolidate(
        self,
        session: "Session",
        memory_store: "MemoryStore",
        provider: "LLMProvider",
        config: "RuntimeConfig",
    ) -> str:
        """Run LLM-powered consolidation on the session.

        Returns the generated summary string, or an empty string when the
        feature is disabled, the session is too short, the provider call
        fails, or the model produced no text.
        """
        settings = config.dream

        # Skip entirely when the feature is switched off.
        if not settings.enabled:
            return ""

        # Skip short sessions — not enough signal to justify a model call.
        user_turns = sum(1 for m in session.messages if m.role == "user")
        if user_turns < settings.min_turns:
            return ""

        # Flatten every text block into a single Markdown transcript.
        transcript = "\n\n".join(
            f"**{'User' if msg.role == 'user' else 'Assistant'}:** {block.text}"
            for msg in session.messages
            for block in msg.content
            if hasattr(block, "text")
        )

        request = MessageRequest(
            model=getattr(config, "model", ""),
            messages=(
                Message(
                    role="user",
                    content=(TextBlock(text=f"Consolidate this session:\n\n{transcript}"),),
                ),
            ),
            system=_CONSOLIDATION_SYSTEM_PROMPT,
            tools=(),
            max_tokens=2048,
            temperature=0.3,
        )

        # Consolidation is best-effort: a failed call must never break the app.
        try:
            response = await provider.send_message(request)
        except Exception as exc:
            logger.warning("DreamTask consolidation failed: %s", exc)
            return ""

        # Collect any text the model returned; bail out if there is none.
        summary = "\n".join(
            block.text for block in response.content if hasattr(block, "text")
        )
        if not summary.strip():
            return ""

        # Serialize writers of the daily consolidated file via a file lock.
        today = datetime.now(timezone.utc).strftime("%Y-%m-%d")
        with FileLock(str(memory_store.consolidated_dir / f"{today}.md.lock"), timeout=5):
            memory_store.save_consolidated(summary, date_str=today)

        # Record when consolidation last ran.
        memory_store.store(
            "_dream_last_run",
            datetime.now(timezone.utc).isoformat(),
        )

        logger.info(
            "DreamTask consolidated session to %s/%s.md",
            memory_store.consolidated_dir,
            today,
        )
        return summary
@@ -0,0 +1,150 @@
1
+ """FileProtector — guards sensitive files from accidental reads and writes."""
2
+ from __future__ import annotations
3
+
4
+ import fnmatch
5
+ import os
6
+ from dataclasses import dataclass
7
+ from pathlib import Path
8
+ from typing import Literal
9
+
10
+
11
# Glob patterns for dangerous / credential files.
# Patterns are matched against the basename of the (symlink-resolved) path;
# directory-level protection is handled separately via _BLOCK_PATH_PREFIXES.
SENSITIVE_PATTERNS: tuple[str, ...] = (
    ".env",
    ".env.*",
    "*.key",
    "*.pem",
    "*.p12",
    "credentials.*",
    "*secret*",
    "id_rsa",
    "id_rsa.*",
    "id_ed25519",
    "id_ed25519.*",
    "token.json",
    "*.keystore",
    ".netrc",
    ".pgpass",
)

# Subset of patterns whose writes are always blocked (never allow write);
# other SENSITIVE_PATTERNS entries only trigger a confirmation warning.
_BLOCK_PATTERNS: tuple[str, ...] = (
    ".env",
    ".env.*",
    "*.key",
    "*.pem",
    "*.p12",
    "id_rsa",
    "id_rsa.*",
    "id_ed25519",
    "id_ed25519.*",
    "*.keystore",
    ".netrc",
    ".pgpass",
)

# Expanded ($HOME-resolved) directory prefixes that are always blocked.
# NOTE: expansion happens once, at import time.
_BLOCK_PATH_PREFIXES: tuple[str, ...] = (
    os.path.expanduser("~/.ssh/"),
    os.path.expanduser("~/.aws/"),
    os.path.expanduser("~/.config/gcloud/"),
)
53
+
54
+
55
@dataclass(frozen=True)
class FileProtectionResult:
    """Result of a file-protection check.

    ``allowed`` is False only for hard blocks; warned accesses stay allowed
    but carry a non-empty ``reason`` the caller should surface to the user.
    """

    allowed: bool  # may the operation proceed at all?
    reason: str  # human-readable explanation; empty when severity == "allow"
    severity: Literal["block", "warn", "allow"]  # escalation level for the caller
62
+
63
+
64
+ def _matches_any(name: str, patterns: tuple[str, ...]) -> bool:
65
+ """Return True if *name* matches at least one glob pattern."""
66
+ for pattern in patterns:
67
+ if fnmatch.fnmatch(name, pattern):
68
+ return True
69
+ return False
70
+
71
+
72
def _is_under_blocked_prefix(path: str) -> bool:
    """Return True if the resolved path sits under an always-blocked directory."""
    try:
        resolved = str(Path(path).resolve())
    except Exception:
        # Resolution can fail on malformed paths; fall back to the raw string.
        resolved = path
    return any(resolved.startswith(prefix) for prefix in _BLOCK_PATH_PREFIXES)
82
+
83
+
84
def is_sensitive(path: str) -> bool:
    """Return True if *path* matches any sensitive pattern.

    The path is symlink-resolved before checking so indirection cannot
    bypass the protection.
    """
    resolved = Path(path).resolve()
    if _matches_any(resolved.name, SENSITIVE_PATTERNS):
        return True
    return _is_under_blocked_prefix(str(resolved))
96
+
97
+
98
def check_write(path: str) -> FileProtectionResult:
    """Decide whether writing to *path* is allowed, warned about, or blocked.

    The path is symlink-resolved before checking so indirection cannot
    bypass the protection.

    Rules
    -----
    - ``.env`` files and SSH/AWS/GCloud keys → **block** (never write secrets)
    - Other sensitive patterns (``credentials.*``, ``*secret*``, ``token.json``, …)
      → **warn** (needs user confirmation)
    - Everything else → **allow**
    """
    resolved = Path(path).resolve()

    # Hard block: credential/key material must never be written.
    if _matches_any(resolved.name, _BLOCK_PATTERNS) or _is_under_blocked_prefix(str(resolved)):
        return FileProtectionResult(
            allowed=False,
            reason=f"Writing to '{path}' is blocked: file matches a sensitive credential pattern.",
            severity="block",
        )

    # Soft warn: other sensitive-looking files need user confirmation.
    if is_sensitive(path):
        return FileProtectionResult(
            allowed=True,
            reason=(
                f"Writing to '{path}' requires confirmation: "
                "the file matches a sensitive data pattern."
            ),
            severity="warn",
        )

    return FileProtectionResult(allowed=True, reason="", severity="allow")
133
+
134
+
135
def check_read(path: str) -> FileProtectionResult:
    """Decide whether reading *path* is fine or deserves a warning.

    Reads of sensitive files are allowed but flagged, since their content
    would flow into the LLM context.
    """
    if not is_sensitive(path):
        return FileProtectionResult(allowed=True, reason="", severity="allow")
    return FileProtectionResult(
        allowed=True,
        reason=(
            f"Reading '{path}' may expose sensitive data to the LLM context. "
            "Proceed with caution."
        ),
        severity="warn",
    )
@@ -0,0 +1,85 @@
1
+ """VRAM and system memory detection across platforms."""
2
+ from __future__ import annotations
3
+
4
+ import subprocess
5
+ import sys
6
+
7
+
8
def detect_vram_gb() -> float | None:
    """Detect available VRAM/memory in GB.

    Detection chain (first success wins):
    1. NVIDIA GPU via nvidia-smi
    2. Apple Silicon unified memory via sysctl (× 0.75)
    3. Linux /proc/meminfo (× 0.5)
    4. None if all fail
    """
    # Probes are ordered most-specific first; the first non-None answer wins.
    for probe in (_detect_nvidia, _detect_apple_silicon, _detect_linux_meminfo):
        detected = probe()
        if detected is not None:
            return detected
    return None
30
+
31
+
32
def _detect_nvidia() -> float | None:
    """Return total VRAM of the first NVIDIA GPU in GB via nvidia-smi.

    Returns None when nvidia-smi is missing, exits non-zero, times out,
    or emits unparseable output.
    """
    try:
        proc = subprocess.run(
            ["nvidia-smi", "--query-gpu=memory.total", "--format=csv,noheader,nounits"],
            capture_output=True,
            text=True,
            timeout=2,
        )
        if proc.returncode != 0:
            return None
        # With ",nounits" the value is plain MiB; strip a unit suffix defensively.
        first_line = proc.stdout.strip().split("\n")[0].strip()
        mib = float(first_line.replace(" MiB", ""))
        return mib / 1024.0
    # BUG FIX: subprocess.TimeoutExpired derives from SubprocessError, not
    # OSError, so the original except clause let a hung nvidia-smi crash the
    # whole detection chain instead of falling through to the next probe.
    except (FileNotFoundError, OSError, ValueError, IndexError, subprocess.TimeoutExpired):
        return None
48
+
49
+
50
+ def _detect_apple_silicon() -> float | None:
51
+ """Detect Apple Silicon unified memory via sysctl."""
52
+ if sys.platform != "darwin":
53
+ return None
54
+ try:
55
+ proc = subprocess.run(
56
+ ["sysctl", "-n", "hw.memsize"],
57
+ capture_output=True,
58
+ text=True,
59
+ timeout=2,
60
+ )
61
+ if proc.returncode != 0:
62
+ return None
63
+ mem_bytes = int(proc.stdout.strip())
64
+ gb = mem_bytes / (1024**3)
65
+ return gb * 0.75
66
+ except (FileNotFoundError, OSError, ValueError):
67
+ return None
68
+
69
+
70
+ def _detect_linux_meminfo() -> float | None:
71
+ """Detect total RAM from /proc/meminfo on Linux."""
72
+ if sys.platform != "linux":
73
+ return None
74
+ try:
75
+ with open("/proc/meminfo") as f:
76
+ content = f.read()
77
+ for line in content.split("\n"):
78
+ if line.startswith("MemTotal:"):
79
+ parts = line.split()
80
+ kb = int(parts[1])
81
+ gb = kb / (1024**2)
82
+ return gb * 0.5
83
+ return None
84
+ except (FileNotFoundError, OSError, ValueError):
85
+ return None