patchpal-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
patchpal/context.py ADDED
@@ -0,0 +1,431 @@
+ """Context window management and token estimation."""
+ 
+ import os
+ from datetime import datetime
+ from typing import Any, Callable, Dict, List, Tuple
+ 
+ try:
+     import tiktoken
+ 
+     TIKTOKEN_AVAILABLE = True
+ except ImportError:
+     TIKTOKEN_AVAILABLE = False
+ 
+ 
+ class TokenEstimator:
+     """Estimate tokens in messages for context management."""
+ 
+     def __init__(self, model_id: str):
+         self.model_id = model_id
+         self._encoder = self._get_encoder()
+ 
+     def _get_encoder(self):
+         """Get appropriate tokenizer based on model."""
+         if not TIKTOKEN_AVAILABLE:
+             return None
+ 
+         try:
+             # Map model families to encoders
+             model_lower = self.model_id.lower()
+ 
+             if "gpt-4" in model_lower or "gpt-3.5" in model_lower:
+                 return tiktoken.encoding_for_model("gpt-4")
+             elif "claude" in model_lower or "anthropic" in model_lower:
+                 # Anthropic uses similar tokenization to GPT-4
+                 return tiktoken.encoding_for_model("gpt-4")
+             else:
+                 # Default fallback
+                 return tiktoken.get_encoding("cl100k_base")
+         except Exception:
+             return None
+ 
+     def estimate_tokens(self, text: str) -> int:
+         """Estimate tokens in text.
+ 
+         Args:
+             text: Text to estimate tokens for
+ 
+         Returns:
+             Estimated token count
+         """
+         if not text:
+             return 0
+ 
+         if self._encoder:
+             try:
+                 return len(self._encoder.encode(str(text)))
+             except Exception:
+                 pass
+ 
+         # Fallback: ~4 chars per token average
+         return len(str(text)) // 4
+ 
+     def estimate_message_tokens(self, message: Dict[str, Any]) -> int:
+         """Estimate tokens in a single message.
+ 
+         Args:
+             message: Message dict with role, content, tool_calls, etc.
+ 
+         Returns:
+             Estimated token count
+         """
+         tokens = 0
+ 
+         # Role and content
+         if "role" in message:
+             tokens += 4  # Role overhead
+ 
+         if "content" in message and message["content"]:
+             tokens += self.estimate_tokens(str(message["content"]))
+ 
+         # Tool calls
+         if message.get("tool_calls"):
+             for tool_call in message["tool_calls"]:
+                 tokens += 10  # Tool call overhead
+                 if hasattr(tool_call, "function"):
+                     tokens += self.estimate_tokens(tool_call.function.name)
+                     tokens += self.estimate_tokens(tool_call.function.arguments)
+ 
+         # Tool call ID
+         if message.get("tool_call_id"):
+             tokens += 5
+ 
+         # Name field
+         if message.get("name"):
+             tokens += self.estimate_tokens(message["name"])
+ 
+         return tokens
+ 
+     def estimate_messages_tokens(self, messages: List[Dict[str, Any]]) -> int:
+         """Estimate tokens in a list of messages.
+ 
+         Args:
+             messages: List of message dicts
+ 
+         Returns:
+             Total estimated token count
+         """
+         return sum(self.estimate_message_tokens(msg) for msg in messages)
+ 
+ 
+ class ContextManager:
+     """Manage context window with auto-compaction and pruning."""
+ 
+     # OpenCode-inspired thresholds - configurable via environment variables
+     PRUNE_PROTECT = int(
+         os.getenv("PATCHPAL_PRUNE_PROTECT", "40000")
+     )  # Keep last 40k tokens of tool outputs
+     PRUNE_MINIMUM = int(
+         os.getenv("PATCHPAL_PRUNE_MINIMUM", "20000")
+     )  # Minimum tokens to prune to make it worthwhile
+     COMPACT_THRESHOLD = float(
+         os.getenv("PATCHPAL_COMPACT_THRESHOLD", "0.85")
+     )  # Compact at 85% capacity
+ 
+     # Model context limits (tokens)
+     # From OpenCode's models.dev data - see https://models.dev/api.json
+     MODEL_LIMITS = {
+         # Anthropic Claude models
+         "claude-opus-4": 200_000,
+         "claude-sonnet-4": 200_000,
+         "claude-haiku-4": 200_000,
+         "claude-3-5-sonnet": 200_000,
+         "claude-3-5-haiku": 200_000,
+         "claude-3-7-sonnet": 200_000,
+         "claude-sonnet": 200_000,
+         "claude-opus": 200_000,
+         "claude-haiku": 200_000,
+         # OpenAI GPT models
+         "gpt-5": 400_000,
+         "gpt-5.1": 128_000,
+         "gpt-5.2": 400_000,
+         "gpt-5-mini": 400_000,
+         "gpt-5-nano": 400_000,
+         "gpt-4o": 128_000,
+         "gpt-4-turbo": 128_000,
+         "gpt-4.1": 128_000,
+         "gpt-4": 8_000,
+         "gpt-3.5-turbo": 16_385,
+         "o3": 128_000,
+         "o3-mini": 128_000,
+         "o4-mini": 128_000,
+         # Google Gemini models
+         "gemini-3-pro": 1_000_000,
+         "gemini-3-flash": 1_048_576,
+         "gemini-2.5-pro": 1_048_576,
+         "gemini-2.5-flash": 1_048_576,
+         "gemini-2.0-flash": 1_000_000,
+         "gemini-1.5-pro": 1_000_000,
+         "gemini-1.5-flash": 1_000_000,
+         "gemini-pro": 32_000,
+         # xAI Grok models
+         "grok-4": 256_000,
+         "grok-4-fast": 2_000_000,
+         "grok-3": 131_072,
+         "grok-3-fast": 131_072,
+         "grok-3-mini": 131_072,
+         "grok-2": 131_072,
+         "grok-code-fast": 256_000,
+         # DeepSeek models
+         "deepseek-v3": 128_000,
+         "deepseek-v3.1": 128_000,
+         "deepseek-r1": 128_000,
+         "deepseek-chat": 128_000,
+         "deepseek-coder": 128_000,
+         "deepseek-reasoner": 128_000,
+         # Qwen models
+         "qwen-turbo": 1_000_000,
+         "qwen-plus": 1_000_000,
+         "qwen-max": 32_768,
+         "qwen-flash": 1_000_000,
+         "qwen3": 131_072,
+         "qwen3-coder": 262_144,
+         "qwen2.5": 131_072,
+         "qwq": 131_072,
+         "qvq": 131_072,
+         # Meta Llama models
+         "llama-4": 131_072,
+         "llama-3.3": 128_000,
+         "llama-3.2": 128_000,
+         "llama-3.1": 128_000,
+         "llama-3": 8_192,
+         "llama-guard": 8_192,
+         # Mistral models
+         "mistral-large": 128_000,
+         "mistral-small": 128_000,
+         "codestral": 128_000,
+         "ministral": 262_144,
+         "devstral": 262_144,
+         # Cohere models
+         "command-r": 128_000,
+         "command-r-plus": 128_000,
+         "command-r7b": 128_000,
+         "command-a": 256_000,
+         # OpenAI open-source models
+         "gpt-oss": 128_000,
+         # MiniMax models
+         "minimax": 128_000,
+         # Kimi models
+         "kimi": 262_144,
+     }
+ 
+     # Compaction prompt
+     COMPACTION_PROMPT = """You are summarizing a coding session to continue it seamlessly.
+ 
+ Create a detailed summary of our conversation above. This summary will be the ONLY context
+ available when we continue, so include:
+ 
+ 1. **What was accomplished**: Completed tasks and changes made
+ 2. **Current state**: Files modified, their current status
+ 3. **In progress**: What we're working on now
+ 4. **Next steps**: Clear actions to take next
+ 5. **Key decisions**: Important technical choices and why
+ 6. **User preferences**: Any constraints or preferences mentioned
+ 
+ Be comprehensive but concise. The goal is to continue work seamlessly without losing context."""
+ 
+     def __init__(self, model_id: str, system_prompt: str):
+         """Initialize context manager.
+ 
+         Args:
+             model_id: LiteLLM model identifier
+             system_prompt: System prompt text
+         """
+         self.model_id = model_id
+         self.system_prompt = system_prompt
+         self.estimator = TokenEstimator(model_id)
+         self.context_limit = self._get_context_limit()
+         self.output_reserve = 4_096  # Reserve tokens for model output
+ 
+     def _get_context_limit(self) -> int:
+         """Get context limit for model.
+ 
+         Can be overridden with PATCHPAL_CONTEXT_LIMIT env var for testing.
+ 
+         Returns:
+             Context window size in tokens
+         """
+         # Allow override for testing
+         override = os.getenv("PATCHPAL_CONTEXT_LIMIT")
+         if override:
+             try:
+                 return int(override)
+             except ValueError:
+                 pass  # Fall through to normal detection
+ 
+         model_lower = self.model_id.lower()
+ 
+         # Try exact matches first (longest first to match more specific models)
+         # Sort keys by length descending to match "gpt-5.1" before "gpt-5"
+         for key in sorted(self.MODEL_LIMITS.keys(), key=len, reverse=True):
+             if key in model_lower:
+                 return self.MODEL_LIMITS[key]
+ 
+         # Check for model families (fallback for versions not explicitly listed)
+         if "claude" in model_lower:
+             return 200_000  # Modern Claude models
+         elif "gpt-5" in model_lower:
+             return 400_000  # GPT-5 family
+         elif "gpt-4" in model_lower:
+             return 128_000  # GPT-4 family
+         elif "gpt-3.5" in model_lower or "gpt-3" in model_lower:
+             return 16_385
+         elif "gemini-3" in model_lower or "gemini-2" in model_lower or "gemini-1.5" in model_lower:
+             return 1_000_000  # Modern Gemini models
+         elif "gemini" in model_lower:
+             return 32_000  # Older Gemini models
+         elif "grok" in model_lower:
+             return 131_072  # Grok models
+         elif "deepseek" in model_lower:
+             return 128_000  # DeepSeek models
+         elif "qwen" in model_lower or "qwq" in model_lower or "qvq" in model_lower:
+             return 131_072  # Qwen models
+         elif "llama" in model_lower:
+             return 128_000  # Llama models
+         elif "mistral" in model_lower or "codestral" in model_lower or "ministral" in model_lower:
+             return 128_000  # Mistral models
+         elif "command" in model_lower:
+             return 128_000  # Cohere Command models
+         elif "kimi" in model_lower:
+             return 262_144  # Kimi models
+         elif "minimax" in model_lower:
+             return 128_000  # MiniMax models
+ 
+         # Default conservative limit for unknown models
+         return 128_000
+ 
+     def needs_compaction(self, messages: List[Dict[str, Any]]) -> bool:
+         """Check if context window needs compaction.
+ 
+         Args:
+             messages: Current message history
+ 
+         Returns:
+             True if compaction is needed
+         """
+         # Estimate total tokens
+         system_tokens = self.estimator.estimate_tokens(self.system_prompt)
+         message_tokens = self.estimator.estimate_messages_tokens(messages)
+         total_tokens = system_tokens + message_tokens + self.output_reserve
+ 
+         # Check threshold
+         usage_ratio = total_tokens / self.context_limit
+         return usage_ratio >= self.COMPACT_THRESHOLD
+ 
+     def get_usage_stats(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
+         """Get current context usage statistics.
+ 
+         Args:
+             messages: Current message history
+ 
+         Returns:
+             Dict with usage statistics
+         """
+         system_tokens = self.estimator.estimate_tokens(self.system_prompt)
+         message_tokens = self.estimator.estimate_messages_tokens(messages)
+         total_tokens = system_tokens + message_tokens + self.output_reserve
+ 
+         return {
+             "system_tokens": system_tokens,
+             "message_tokens": message_tokens,
+             "output_reserve": self.output_reserve,
+             "total_tokens": total_tokens,
+             "context_limit": self.context_limit,
+             "usage_ratio": total_tokens / self.context_limit,
+             "usage_percent": int((total_tokens / self.context_limit) * 100),
+         }
+ 
+     def prune_tool_outputs(
+         self, messages: List[Dict[str, Any]]
+     ) -> Tuple[List[Dict[str, Any]], int]:
+         """Prune old tool outputs to reclaim token space.
+ 
+         Walks backward through messages and prunes tool outputs beyond
+         the PRUNE_PROTECT threshold (keeps last 40k tokens of tool outputs).
+ 
+         Args:
+             messages: Current message history
+ 
+         Returns:
+             Tuple of (pruned_messages, tokens_saved)
+         """
+         # Calculate tokens to protect (recent tool outputs)
+         recent_tokens = 0
+         prune_candidates = []
+ 
+         # Walk backward through messages
+         for i in range(len(messages) - 1, -1, -1):
+             msg = messages[i]
+ 
+             # Only consider tool result messages
+             if msg.get("role") != "tool":
+                 continue
+ 
+             # Estimate tokens in tool output
+             tokens = self.estimator.estimate_message_tokens(msg)
+ 
+             if recent_tokens < self.PRUNE_PROTECT:
+                 # Still within protected range
+                 recent_tokens += tokens
+             else:
+                 # Candidate for pruning
+                 prune_candidates.append((i, tokens, msg))
+ 
+         # Check if we can save enough tokens
+         prunable_tokens = sum(t for _, t, _ in prune_candidates)
+         if prunable_tokens < self.PRUNE_MINIMUM:
+             # Not worth pruning
+             return messages, 0
+ 
+         # Prune by replacing content with marker
+         pruned_messages = []
+         tokens_saved = 0
+ 
+         for i, msg in enumerate(messages):
+             if any(idx == i for idx, _, _ in prune_candidates):
+                 # Replace with pruned marker
+                 pruned_msg = msg.copy()
+                 original_content = pruned_msg.get("content", "")
+                 original_len = len(str(original_content))
+                 pruned_msg["content"] = f"[Tool output pruned - was {original_len:,} chars]"
+                 pruned_messages.append(pruned_msg)
+                 tokens_saved += self.estimator.estimate_tokens(str(original_content))
+             else:
+                 pruned_messages.append(msg)
+ 
+         return pruned_messages, tokens_saved
+ 
+     def create_compaction(
+         self, messages: List[Dict[str, Any]], completion_func: Callable
+     ) -> Tuple[Dict[str, Any], str]:
+         """Create a compaction summary using the LLM.
+ 
+         Args:
+             messages: Current message history
+             completion_func: Function to call LLM (from agent)
+ 
+         Returns:
+             Tuple of (summary_message, summary_text)
+ 
+         Raises:
+             Exception: If LLM call fails
+         """
+         # Build compaction request
+         compact_messages = messages + [{"role": "user", "content": self.COMPACTION_PROMPT}]
+ 
+         # Call LLM to generate summary
+         response = completion_func(compact_messages)
+         summary_text = response.choices[0].message.content
+ 
+         # Create summary message
+         summary_message = {
+             "role": "assistant",
+             "content": f"[COMPACTION SUMMARY]\n\n{summary_text}",
+             "metadata": {
+                 "is_compaction": True,
+                 "original_message_count": len(messages),
+                 "timestamp": datetime.now().isoformat(),
+             },
+         }
+ 
+         return summary_message, summary_text
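Usage note (not part of the wheel's contents): a minimal sketch of how TokenEstimator and ContextManager are meant to be driven, assuming the package is installed. fake_completion and the sample messages are illustrative placeholders; the fake response only mimics the OpenAI-style shape (response.choices[0].message.content) that create_compaction reads.

from types import SimpleNamespace

from patchpal.context import ContextManager


def fake_completion(messages):
    # Hypothetical stand-in for the agent's real LLM call.
    text = "Summary of the session so far."
    return SimpleNamespace(choices=[SimpleNamespace(message=SimpleNamespace(content=text))])


manager = ContextManager(model_id="claude-sonnet-4", system_prompt="You are a coding assistant.")

messages = [
    {"role": "user", "content": "Refactor utils.py and run the tests."},
    {"role": "tool", "tool_call_id": "call_1", "content": "pytest output " * 1000},
]

stats = manager.get_usage_stats(messages)
print(f"Context usage: {stats['usage_percent']}% of {stats['context_limit']} tokens")

if manager.needs_compaction(messages):
    # Reclaim space from old tool outputs first; fall back to LLM compaction
    # only if the history is still over the 85% threshold afterwards.
    messages, saved = manager.prune_tool_outputs(messages)
    if manager.needs_compaction(messages):
        summary_message, _ = manager.create_compaction(messages, fake_completion)
        messages = [summary_message]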
@@ -0,0 +1,225 @@
+ """Permission management for PatchPal tool execution."""
+ 
+ import json
+ import os
+ from functools import wraps
+ from pathlib import Path
+ from typing import Optional
+ 
+ 
+ class PermissionManager:
+     """Manages user permissions for tool execution."""
+ 
+     def __init__(self, repo_dir: Path):
+         """Initialize permission manager.
+ 
+         Args:
+             repo_dir: Path to the repository-specific patchpal directory
+         """
+         self.repo_dir = repo_dir
+         self.permissions_file = repo_dir / "permissions.json"
+         self.session_grants = {}  # In-memory grants for this session
+         self.persistent_grants = self._load_persistent_grants()
+ 
+         # Check if permissions are globally disabled
+         # Using streaming mode in CLI allows permissions to work properly
+         self.enabled = os.getenv("PATCHPAL_REQUIRE_PERMISSION", "true").lower() == "true"
+ 
+     def _load_persistent_grants(self) -> dict:
+         """Load persistent permission grants from file."""
+         if self.permissions_file.exists():
+             try:
+                 with open(self.permissions_file, "r") as f:
+                     return json.load(f)
+             except (json.JSONDecodeError, IOError):
+                 return {}
+         return {}
+ 
+     def _save_persistent_grants(self):
+         """Save persistent permission grants to file."""
+         try:
+             with open(self.permissions_file, "w") as f:
+                 json.dump(self.persistent_grants, f, indent=2)
+         except IOError as e:
+             print(f"Warning: Could not save permissions: {e}")
+ 
+     def _check_existing_grant(self, tool_name: str, pattern: Optional[str] = None) -> bool:
+         """Check if permission was previously granted.
+ 
+         Args:
+             tool_name: Name of the tool (e.g., 'run_shell', 'apply_patch')
+             pattern: Optional pattern for matching (e.g., 'pytest' for pytest commands)
+ 
+         Returns:
+             True if permission was previously granted
+         """
+         # Check session grants first
+         if tool_name in self.session_grants:
+             if self.session_grants[tool_name] is True:  # Granted for all
+                 return True
+             if pattern and isinstance(self.session_grants[tool_name], list):
+                 if pattern in self.session_grants[tool_name]:
+                     return True
+ 
+         # Check persistent grants
+         if tool_name in self.persistent_grants:
+             if self.persistent_grants[tool_name] is True:  # Granted for all
+                 return True
+             if pattern and isinstance(self.persistent_grants[tool_name], list):
+                 if pattern in self.persistent_grants[tool_name]:
+                     return True
+ 
+         return False
+ 
+     def _grant_permission(
+         self, tool_name: str, persistent: bool = False, pattern: Optional[str] = None
+     ):
+         """Grant permission for a tool.
+ 
+         Args:
+             tool_name: Name of the tool
+             persistent: If True, save to disk for future sessions
+             pattern: Optional pattern to grant (e.g., 'pytest' for pytest commands)
+         """
+         if persistent:
+             if pattern:
+                 if tool_name not in self.persistent_grants:
+                     self.persistent_grants[tool_name] = []
+                 if isinstance(self.persistent_grants[tool_name], list):
+                     if pattern not in self.persistent_grants[tool_name]:
+                         self.persistent_grants[tool_name].append(pattern)
+                 else:
+                     # Already granted for all, no need to add pattern
+                     pass
+             else:
+                 self.persistent_grants[tool_name] = True
+             self._save_persistent_grants()
+         else:
+             if pattern:
+                 if tool_name not in self.session_grants:
+                     self.session_grants[tool_name] = []
+                 if isinstance(self.session_grants[tool_name], list):
+                     if pattern not in self.session_grants[tool_name]:
+                         self.session_grants[tool_name].append(pattern)
+             else:
+                 self.session_grants[tool_name] = True
+ 
+     def request_permission(
+         self, tool_name: str, description: str, pattern: Optional[str] = None
+     ) -> bool:
+         """Request permission from user to execute a tool.
+ 
+         Args:
+             tool_name: Name of the tool (e.g., 'run_shell', 'apply_patch')
+             description: Human-readable description of what will be executed
+             pattern: Optional pattern for matching (e.g., 'pytest' for pytest commands)
+ 
+         Returns:
+             True if permission granted, False otherwise
+         """
+         # If permissions are disabled globally, always grant
+         if not self.enabled:
+             return True
+ 
+         # Check if already granted
+         if self._check_existing_grant(tool_name, pattern):
+             return True
+ 
+         # Display the request - use stderr to avoid Rich console capture
+         import sys
+ 
+         sys.stderr.write("\n" + "=" * 80 + "\n")
+         sys.stderr.write(f"\033[1;33m{tool_name.replace('_', ' ').title()}\033[0m\n")
+         sys.stderr.write("-" * 80 + "\n")
+         sys.stderr.write(description + "\n")
+         sys.stderr.write("-" * 80 + "\n")
+ 
+         # Get user input
+         sys.stderr.write("\nDo you want to proceed?\n")
+         sys.stderr.write(" 1. Yes\n")
+         if pattern:
+             sys.stderr.write(f" 2. Yes, and don't ask again this session for '{pattern}'\n")
+         else:
+             sys.stderr.write(f" 2. Yes, and don't ask again this session for {tool_name}\n")
+         sys.stderr.write(" 3. No, and tell me what to do differently\n")
+         sys.stderr.flush()
+ 
+         while True:
+             try:
+                 # Use input() with prompt parameter to avoid terminal issues
+                 # The prompt parameter ensures the prompt stays visible during editing
+                 choice = input("\n\033[1;36mChoice [1-3]:\033[0m ").strip()
+ 
+                 if choice == "1":
+                     return True
+                 elif choice == "2":
+                     # Grant session-only permission (like Claude Code)
+                     self._grant_permission(tool_name, persistent=False, pattern=pattern)
+                     return True
+                 elif choice == "3":
+                     sys.stderr.write("\n\033[1;31mOperation cancelled.\033[0m\n")
+                     sys.stderr.flush()
+                     return False
+                 else:
+                     sys.stderr.write("Invalid choice. Please enter 1, 2, or 3.\n")
+                     sys.stderr.flush()
+             except (EOFError, KeyboardInterrupt):
+                 sys.stderr.write("\n\033[1;31mOperation cancelled.\033[0m\n")
+                 sys.stderr.flush()
+                 return False
+ 
+ 
+ def require_permission(tool_name: str, get_description, get_pattern=None):
+     """Decorator to require user permission before executing a tool.
+ 
+     Args:
+         tool_name: Name of the tool
+         get_description: Function that takes tool args and returns a description string
+         get_pattern: Optional function that takes tool args and returns a pattern string
+ 
+     Example:
+         @require_permission('run_shell',
+                             get_description=lambda cmd: f" {cmd}",
+                             get_pattern=lambda cmd: cmd.split()[0] if cmd else None)
+         def run_shell(command: str):
+             ...
+     """
+ 
+     def decorator(func):
+         @wraps(func)
+         def wrapper(*args, **kwargs):
+             # Get the permission manager from environment/global state
+             # Import here to avoid circular dependency
+             from pathlib import Path
+ 
+             try:
+                 # Get patchpal directory (same logic as in tools.py and cli.py)
+                 repo_root = Path(".").resolve()
+                 home = Path.home()
+                 patchpal_root = home / ".patchpal"
+                 repo_name = repo_root.name
+                 repo_dir = patchpal_root / repo_name
+                 repo_dir.mkdir(parents=True, exist_ok=True)
+ 
+                 manager = PermissionManager(repo_dir)
+ 
+                 # Get description and pattern
+                 # First arg is usually 'self', but for @tool decorated functions it's the actual arg
+                 tool_args = args
+                 description = get_description(*tool_args, **kwargs)
+                 pattern = get_pattern(*tool_args, **kwargs) if get_pattern else None
+ 
+                 # Request permission
+                 if not manager.request_permission(tool_name, description, pattern):
+                     return "Operation cancelled by user."
+ 
+             except Exception as e:
+                 # If permission check fails, print warning but continue
+                 print(f"Warning: Permission check failed: {e}")
+ 
+             # Execute the tool
+             return func(*args, **kwargs)
+ 
+         return wrapper
+ 
+     return decorator
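Usage note (not part of the wheel's contents): a short sketch of the permission flow. The import path is a guess, since the diff omits this second file's name header ("patchpal.permissions" is assumed from the module docstring), and run_shell below is a hypothetical stand-in for the package's real shell tool.

from pathlib import Path

# Assumed module path; the diff above does not show this file's name.
from patchpal.permissions import PermissionManager, require_permission


@require_permission(
    "run_shell",
    get_description=lambda command: f" {command}",
    get_pattern=lambda command: command.split()[0] if command else None,
)
def run_shell(command: str) -> str:
    # Stand-in tool body for illustration only.
    return f"would run: {command}"


# The first call prompts on stderr; choosing option 2 remembers the grant for
# the command prefix ("pytest") for the rest of the session.
print(run_shell("pytest -q"))

# Direct use against the same per-repo directory layout the decorator builds:
repo_dir = Path.home() / ".patchpal" / Path.cwd().name
repo_dir.mkdir(parents=True, exist_ok=True)
manager = PermissionManager(repo_dir)
if manager.request_permission("apply_patch", " patch src/app.py", pattern=None):
    print("patch approved")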