patchpal 0.6.0-py3-none-any.whl → 0.7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
patchpal/__init__.py CHANGED
@@ -1,6 +1,6 @@
 """PatchPal - An open-source Claude Code clone implemented purely in Python."""
 
-__version__ = "0.6.0"
+__version__ = "0.7.1"
 
 from patchpal.agent import create_agent
 from patchpal.tools import (
patchpal/agent.py CHANGED
@@ -867,6 +867,10 @@ class PatchPalAgent:
         self.cumulative_input_tokens = 0
         self.cumulative_output_tokens = 0
 
+        # Track cache-related tokens (for Anthropic/Bedrock models with prompt caching)
+        self.cumulative_cache_creation_tokens = 0
+        self.cumulative_cache_read_tokens = 0
+
         # LiteLLM settings for models that need parameter dropping
         self.litellm_kwargs = {}
         if self.model_id.startswith("bedrock/"):
@@ -878,6 +882,26 @@
             # Custom OpenAI-compatible servers (vLLM, etc.) often don't support all parameters
             self.litellm_kwargs["drop_params"] = True
 
+    def _prune_tool_outputs_inline(self, max_chars: int, truncation_message: str) -> int:
+        """Unified pruning function for tool outputs.
+
+        Args:
+            max_chars: Maximum characters to keep per tool output
+            truncation_message: Message to append after truncation
+
+        Returns:
+            Number of characters pruned
+        """
+        pruned_chars = 0
+        for msg in self.messages:
+            if msg.get("role") == "tool" and msg.get("content"):
+                content_size = len(str(msg["content"]))
+                if content_size > max_chars:
+                    original_size = content_size
+                    msg["content"] = str(msg["content"])[:max_chars] + truncation_message
+                    pruned_chars += original_size - len(msg["content"])
+        return pruned_chars
+
     def _perform_auto_compaction(self):
         """Perform automatic context window compaction.
 
@@ -886,10 +910,32 @@
         """
         # Don't compact if we have very few messages - compaction summary
         # could be longer than the messages being removed
-        if len(self.messages) < 5:
+        # Instead, use aggressive pruning since high capacity with few messages
+        # indicates large tool outputs rather than conversation depth
+        if len(self.messages) < 10:
             print(
-                f"\033[2m Skipping compaction - only {len(self.messages)} messages (need at least 5 for effective compaction)\033[0m"
+                f"\033[2m Only {len(self.messages)} messages - using aggressive pruning instead of summarization\033[0m"
             )
+
+            # Aggressively truncate all large tool outputs (5K chars)
+            pruned_chars = self._prune_tool_outputs_inline(
+                max_chars=5_000,
+                truncation_message="\n\n[... content truncated during compaction. Use read_lines or grep_code for targeted access ...]",
+            )
+
+            stats_after = self.context_manager.get_usage_stats(self.messages)
+            if pruned_chars > 0:
+                print(
+                    f"\033[1;32m✓ Context reduced to {stats_after['usage_percent']}% through aggressive pruning (removed ~{pruned_chars:,} chars)\033[0m\n"
+                )
+            else:
+                print(
+                    f"\033[1;33m⚠️ No large tool outputs to prune. Context at {stats_after['usage_percent']}%.\033[0m"
+                )
+                print("\033[1;33m Consider using '/clear' to start fresh.\033[0m\n")
+
+            # Update tracker to prevent immediate re-compaction
+            self._last_compaction_message_count = len(self.messages)
             return
 
         # Prevent compaction loops - don't compact again if we just did
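A minimal standalone sketch of the pruning pass both branches rely on, runnable outside the agent (the plain function and fake message list are illustrative, not PatchPal's API; the real method mutates `self.messages` in place):

```python
def prune_tool_outputs(messages, max_chars, truncation_message):
    """In-place truncation of oversized tool outputs; returns chars removed."""
    pruned_chars = 0
    for msg in messages:
        if msg.get("role") == "tool" and msg.get("content"):
            content = str(msg["content"])
            if len(content) > max_chars:
                msg["content"] = content[:max_chars] + truncation_message
                pruned_chars += len(content) - len(msg["content"])
    return pruned_chars

messages = [
    {"role": "user", "content": "read the build log"},
    {"role": "tool", "content": "x" * 20_000},  # oversized tool output
]
print(prune_tool_outputs(messages, 5_000, "\n\n[... truncated ...]"))
# ~15_000 chars pruned; the tool message now ends with the truncation marker
```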
@@ -936,6 +982,43 @@
             return
 
         # Phase 2: Full compaction needed
+        # EMERGENCY: If context is at or over capacity (≥100%), do aggressive pruning first
+        # Otherwise the summarization request itself will exceed context limits
+        stats_after_prune = self.context_manager.get_usage_stats(self.messages)
+        if stats_after_prune["usage_ratio"] >= 1.0:
+            print(
+                f"\033[1;31m ⚠️ Context at or over capacity ({stats_after_prune['usage_percent']}%)!\033[0m"
+            )
+            print(
+                "\033[2m Emergency: Aggressively pruning recent large tool outputs...\033[0m",
+                flush=True,
+            )
+
+            # Truncate large tool outputs (10K chars - less aggressive than 5K for few-messages case)
+            emergency_pruned = self._prune_tool_outputs_inline(
+                max_chars=10_000,
+                truncation_message="\n\n[... content truncated due to context window limits ...]",
+            )
+
+            if emergency_pruned > 0:
+                print(
+                    f"\033[2m Emergency pruned ~{emergency_pruned:,} chars from large tool outputs\033[0m",
+                    flush=True,
+                )
+            stats_after_emergency = self.context_manager.get_usage_stats(self.messages)
+            print(
+                f"\033[2m Context now at {stats_after_emergency['usage_percent']}% capacity\033[0m",
+                flush=True,
+            )
+
+            # If still over 150%, give up and recommend /clear
+            if stats_after_emergency["usage_ratio"] > 1.5:
+                print(
+                    f"\033[1;31m✗ Context still too large for compaction ({stats_after_emergency['usage_percent']}%)\033[0m"
+                )
+                print("\033[1;33m Please use '/clear' to start a fresh session.\033[0m\n")
+                return
+
         print("\033[2m Generating conversation summary...\033[0m", flush=True)
 
         try:
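Taken with the previous hunk, the compactor now follows a small decision ladder. A hedged sketch of that control flow (the thresholds are the ones in the diff; the helper function itself is illustrative, not PatchPal's API):

```python
def compaction_plan(message_count: int, usage_ratio: float) -> list[str]:
    """Ordered steps the compactor takes, per the hunks above (illustrative)."""
    if message_count < 10:
        # Few messages but a full context means large tool outputs,
        # not deep history - summarizing would not help.
        return ["prune tool outputs to 5_000 chars", "skip summarization"]
    steps = []
    if usage_ratio >= 1.0:
        # The summarization request itself would overflow the window,
        # so prune to 10_000 chars first and re-measure.
        steps.append("emergency-prune tool outputs to 10_000 chars")
        steps.append("if still over 150% capacity: abort and suggest /clear")
    steps.append("generate conversation summary (full compaction)")
    return steps

print(compaction_plan(4, 1.2))   # aggressive pruning only
print(compaction_plan(40, 1.1))  # emergency prune, then summarize
```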
@@ -958,6 +1041,19 @@
                 self.cumulative_input_tokens += response.usage.prompt_tokens
             if hasattr(response.usage, "completion_tokens"):
                 self.cumulative_output_tokens += response.usage.completion_tokens
+            # Track cache statistics (Anthropic/Bedrock prompt caching)
+            if (
+                hasattr(response.usage, "cache_creation_input_tokens")
+                and response.usage.cache_creation_input_tokens
+            ):
+                self.cumulative_cache_creation_tokens += (
+                    response.usage.cache_creation_input_tokens
+                )
+            if (
+                hasattr(response.usage, "cache_read_input_tokens")
+                and response.usage.cache_read_input_tokens
+            ):
+                self.cumulative_cache_read_tokens += response.usage.cache_read_input_tokens
 
             return response
 
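The `hasattr`-and-truthy guard appears in both call sites because LiteLLM only attaches the cache fields for providers that report them. A small sketch of that defensive read (the `SimpleNamespace` stands in for `response.usage`; the field names are the ones used above):

```python
from types import SimpleNamespace

# Stand-in for response.usage from a LiteLLM completion call.
usage = SimpleNamespace(
    prompt_tokens=1_200,
    completion_tokens=300,
    cache_creation_input_tokens=900,  # present for Anthropic/Bedrock caching
    cache_read_input_tokens=0,        # falsy, so it is skipped, as above
)

# getattr with a default collapses the hasattr-and-truthy check into one line.
cache_creation = getattr(usage, "cache_creation_input_tokens", 0) or 0
cache_read = getattr(usage, "cache_read_input_tokens", 0) or 0
print(cache_creation, cache_read)  # 900 0
```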
@@ -1131,6 +1227,19 @@
                 self.cumulative_input_tokens += response.usage.prompt_tokens
             if hasattr(response.usage, "completion_tokens"):
                 self.cumulative_output_tokens += response.usage.completion_tokens
+            # Track cache statistics (Anthropic/Bedrock prompt caching)
+            if (
+                hasattr(response.usage, "cache_creation_input_tokens")
+                and response.usage.cache_creation_input_tokens
+            ):
+                self.cumulative_cache_creation_tokens += (
+                    response.usage.cache_creation_input_tokens
+                )
+            if (
+                hasattr(response.usage, "cache_read_input_tokens")
+                and response.usage.cache_read_input_tokens
+            ):
+                self.cumulative_cache_read_tokens += response.usage.cache_read_input_tokens
 
         except Exception as e:
             return f"Error calling model: {e}"
@@ -1337,12 +1446,40 @@
             print(f"\033[1;31m✗ {tool_name}: {e}\033[0m")
 
         # Add tool result to messages
+        # Check if result is extremely large and might blow context
+        result_str = str(tool_result)
+        result_size = len(result_str)
+
+        # Warn if result is > 100K chars (~33K tokens)
+        if result_size > 100_000:
+            print(
+                f"\033[1;33m⚠️ Large tool output: {result_size:,} chars (~{result_size // 3:,} tokens)\033[0m"
+            )
+
+        # If result would push us WAY over capacity, truncate it
+        current_stats = self.context_manager.get_usage_stats(self.messages)
+        # Estimate tokens in this result
+        result_tokens = self.context_manager.estimator.estimate_tokens(result_str)
+        projected_ratio = (
+            current_stats["total_tokens"] + result_tokens
+        ) / current_stats["context_limit"]
+
+        if projected_ratio > 1.5:  # Would exceed 150% capacity
+            print(
+                "\033[1;31m⚠️ Tool output would exceed context capacity! Truncating...\033[0m"
+            )
+            # Keep first 50K chars
+            result_str = (
+                result_str[:50_000]
+                + "\n\n[... Output truncated to prevent context window overflow. Use read_lines or grep_code for targeted access ...]"
+            )
+
         self.messages.append(
             {
                 "role": "tool",
                 "tool_call_id": tool_call.id,
                 "name": tool_name,
-                "content": str(tool_result),
+                "content": result_str,
             }
         )
 
patchpal/cli.py CHANGED
@@ -401,6 +401,55 @@ Supported models: Any LiteLLM-supported model
             total_tokens = agent.cumulative_input_tokens + agent.cumulative_output_tokens
             print(f" Total tokens: {total_tokens:,}")
 
+            # Show cache statistics if available (Anthropic/Bedrock prompt caching)
+            has_cache_stats = (
+                agent.cumulative_cache_creation_tokens > 0
+                or agent.cumulative_cache_read_tokens > 0
+            )
+            if has_cache_stats:
+                print("\n \033[1;36mPrompt Caching Statistics\033[0m")
+                print(f" Cache write tokens: {agent.cumulative_cache_creation_tokens:,}")
+                print(f" Cache read tokens: {agent.cumulative_cache_read_tokens:,}")
+
+                # Calculate cache hit rate
+                if agent.cumulative_input_tokens > 0:
+                    cache_hit_rate = (
+                        agent.cumulative_cache_read_tokens / agent.cumulative_input_tokens
+                    ) * 100
+                    print(f" Cache hit rate: {cache_hit_rate:.1f}%")
+
+                # Show cost-adjusted input tokens (cache reads cost less)
+                # Note: This is an approximation - actual pricing varies by model
+                # For Anthropic: cache writes = 1.25x, cache reads = 0.1x, regular = 1x
+                if "anthropic" in model_id.lower() or "claude" in model_id.lower():
+                    # Break down: cumulative_input = non_cached + cache_read + cache_write
+                    non_cached_tokens = (
+                        agent.cumulative_input_tokens
+                        - agent.cumulative_cache_read_tokens
+                        - agent.cumulative_cache_creation_tokens
+                    )
+                    # Approximate cost-equivalent tokens (cache reads cost 10%, cache writes cost 125%)
+                    cost_adjusted = (
+                        non_cached_tokens
+                        + (agent.cumulative_cache_read_tokens * 0.1)
+                        + (agent.cumulative_cache_creation_tokens * 1.25)
+                    )
+                    savings_pct = (
+                        (
+                            (agent.cumulative_input_tokens - cost_adjusted)
+                            / agent.cumulative_input_tokens
+                            * 100
+                        )
+                        if agent.cumulative_input_tokens > 0
+                        else 0
+                    )
+                    print(
+                        f" Cost-adjusted input tokens: {cost_adjusted:,.0f} (~{savings_pct:.0f}% savings)"
+                    )
+                    print(
+                        " \033[2m(Cache reads cost 10% of base price, writes cost 125% of base price)\033[0m"
+                    )
+
             print("=" * 70 + "\n")
             continue
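With the 0.1x/1.25x multipliers above, the savings figure is easy to sanity-check by hand. A worked example with round numbers (illustrative values, not real session output):

```python
input_tokens = 100_000   # cumulative input tokens for the session
cache_read = 80_000      # tokens served from the prompt cache (0.1x price)
cache_write = 10_000     # tokens written to the cache (1.25x price)

non_cached = input_tokens - cache_read - cache_write        # 10_000
cost_adjusted = non_cached + cache_read * 0.1 + cache_write * 1.25
print(cost_adjusted)                                        # 30500.0

savings_pct = (input_tokens - cost_adjusted) / input_tokens * 100
print(f"~{savings_pct:.0f}% savings")                       # ~70% savings
```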
patchpal/tools.py CHANGED
@@ -80,7 +80,10 @@ CRITICAL_FILES = {
 }
 
 # Configuration
-MAX_FILE_SIZE = int(os.getenv("PATCHPAL_MAX_FILE_SIZE", 10 * 1024 * 1024))  # 10MB default
+# Reduced from 10MB to 500KB to prevent context window explosions
+# A 3.46MB file = ~1.15M tokens which exceeds most model context limits (128K-200K)
+# 500KB ≈ 166K tokens which is safe for most models
+MAX_FILE_SIZE = int(os.getenv("PATCHPAL_MAX_FILE_SIZE", 500 * 1024))  # 500KB default
 READ_ONLY_MODE = os.getenv("PATCHPAL_READ_ONLY", "false").lower() == "true"
 ALLOW_SENSITIVE = os.getenv("PATCHPAL_ALLOW_SENSITIVE", "false").lower() == "true"
 ENABLE_AUDIT_LOG = os.getenv("PATCHPAL_AUDIT_LOG", "true").lower() == "true"
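The new default works out as follows under the same ~3 chars-per-token heuristic the comments use (a sketch; `PATCHPAL_MAX_FILE_SIZE` is the documented override knob):

```python
import os

# 500KB default, overridable via the documented environment variable.
MAX_FILE_SIZE = int(os.getenv("PATCHPAL_MAX_FILE_SIZE", 500 * 1024))
print(MAX_FILE_SIZE)            # 512000 bytes
print(MAX_FILE_SIZE // 3)       # ~170_666 tokens, in line with the ~166K cited
print((10 * 1024 * 1024) // 3)  # old 10MB default: ~3.5M tokens, far beyond 128K-200K windows
```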
{patchpal-0.6.0.dist-info → patchpal-0.7.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: patchpal
-Version: 0.6.0
+Version: 0.7.1
 Summary: A lean Claude Code clone in pure Python
 Author: PatchPal Contributors
 License-Expression: Apache-2.0
@@ -129,7 +129,7 @@ export HOSTED_VLLM_API_KEY=token-abc123 # optional depending on your v
 patchpal
 
 # Use a specific model via command-line argument
-patchpal --model openai/gpt-4o    # or openai/gpt-5, anthropic/claude-opus-4-5 etc.
+patchpal --model openai/gpt-5.2   # or openai/gpt-5-mini, anthropic/claude-opus-4-5 etc.
 
 # Use vLLM (local)
 # Note: vLLM server must be started with --tool-call-parser and --enable-auto-tool-choice
@@ -143,7 +143,7 @@ export OLLAMA_CONTEXT_LENGTH=32768
 patchpal --model ollama_chat/qwen3:32b
 
 # Or set the model via environment variable
-export PATCHPAL_MODEL=openai/gpt-5
+export PATCHPAL_MODEL=openai/gpt-5.2
 patchpal
 ```
 
@@ -155,6 +155,8 @@ The agent has the following tools:
 
 ### File Operations
 - **read_file**: Read contents of files in the repository
+  - Limited to 500KB by default (configurable with `PATCHPAL_MAX_FILE_SIZE`)
+  - For larger files, use `read_lines` or `grep_code` for targeted access
 - **read_lines**: Read specific line ranges from a file without loading the entire file
   - Example: `read_lines("app.py", 100, 150)` - read lines 100-150
   - More efficient than read_file when you only need a few lines
@@ -281,7 +283,7 @@ cd patchpal
 # Copy examples to your personal skills directory
 cp -r examples/skills/commit ~/.patchpal/skills/
 cp -r examples/skills/review ~/.patchpal/skills/
-cp -r examples/skills/add-tests ~/.patchpal/skills/
+cp -r examples/skills/skill-creator ~/.patchpal/skills/
 ```
 
 **View examples online:**
@@ -368,18 +370,28 @@ Custom tools are Python functions with specific requirements:
 ```python
 # ~/.patchpal/tools/my_tools.py
 
-def add(x: int, y: int) -> str:
-    """Add two numbers together.
+def calculator(x: int, y: int, operation: str = "add") -> str:
+    """Perform basic arithmetic operations.
 
     Args:
         x: First number
         y: Second number
+        operation: Operation to perform (add, subtract, multiply, divide)
 
     Returns:
-        The sum as a string
+        Result as a string
     """
-    result = x + y
-    return f"{x} + {y} = {result}"
+    if operation == "add":
+        return f"{x} + {y} = {x + y}"
+    elif operation == "subtract":
+        return f"{x} - {y} = {x - y}"
+    elif operation == "multiply":
+        return f"{x} * {y} = {x * y}"
+    elif operation == "divide":
+        if y == 0:
+            return "Error: Cannot divide by zero"
+        return f"{x} / {y} = {x / y}"
+    return "Unknown operation"
 
 
 def convert_currency(amount: float, from_currency: str, to_currency: str) -> str:
@@ -407,11 +419,15 @@ Once loaded, the agent calls your custom tools automatically:
 
 ```bash
 You: What's 15 + 27?
-Agent: [Calls the add tool]
+Agent: [Calls calculator(15, 27, "add")]
        15 + 27 = 42
 
+You: What's 100 divided by 4?
+Agent: [Calls calculator(100, 4, "divide")]
+       100 / 4 = 25
+
 You: Convert 100 USD to EUR
-Agent: [Calls convert_currency tool]
+Agent: [Calls convert_currency(100, "USD", "EUR")]
        100 USD = 85.00 EUR
 ```
 
@@ -514,14 +530,14 @@ PatchPal supports any LiteLLM-compatible model. You can configure the model in t
 
 ### 1. Command-line Argument
 ```bash
-patchpal --model openai/gpt-5
+patchpal --model openai/gpt-5.2
 patchpal --model anthropic/claude-sonnet-4-5
 patchpal --model hosted_vllm/openai/gpt-oss-20b # local model - no API charges
 ```
 
 ### 2. Environment Variable
 ```bash
-export PATCHPAL_MODEL=openai/gpt-5
+export PATCHPAL_MODEL=openai/gpt-5.2
 patchpal
 ```
 
@@ -533,7 +549,7 @@ If no model is specified, PatchPal uses `anthropic/claude-sonnet-4-5` (Claude So
 PatchPal works with any model supported by LiteLLM, including:
 
 - **Anthropic** (Recommended): `anthropic/claude-sonnet-4-5`, `anthropic/claude-opus-4-5`, `anthropic/claude-3-7-sonnet-latest`
-- **OpenAI**: `openai/gpt-5`, `openai/gpt-4o`
+- **OpenAI**: `openai/gpt-5.2`, `openai/gpt-5-mini`
 - **AWS Bedrock**: `bedrock/anthropic.claude-sonnet-4-5-v1:0`
 - **vLLM (Local)** (Recommended for local): See vLLM section below for setup
 - **Ollama (Local)**: See Ollama section below for setup
@@ -1033,7 +1049,7 @@ PatchPal can be configured through `PATCHPAL_*` environment variables to customi
 ### Model Selection
 
 ```bash
-export PATCHPAL_MODEL=openai/gpt-4o    # Override default model
+export PATCHPAL_MODEL=openai/gpt-5.2   # Override default model
 # Priority: CLI arg > PATCHPAL_MODEL env var > default (anthropic/claude-sonnet-4-5)
 ```
 
@@ -1045,11 +1061,12 @@ export PATCHPAL_REQUIRE_PERMISSION=true # Prompt before executing commands/
 # ⚠️ WARNING: Setting to false disables prompts - only use in trusted environments
 
 # File Safety
-export PATCHPAL_MAX_FILE_SIZE=10485760  # Maximum file size in bytes for read/write (default: 10MB)
+export PATCHPAL_MAX_FILE_SIZE=512000    # Maximum file size in bytes for read/write (default: 500KB)
+                                        # Reduced from 10MB to prevent context window explosions
 export PATCHPAL_READ_ONLY=true          # Prevent ALL file modifications (default: false)
-                                        # Useful for: code review, exploration, security audits
+                                        # Useful for: code review, exploration, security audits
 export PATCHPAL_ALLOW_SENSITIVE=true    # Allow access to .env, credentials (default: false - blocked)
-                                        # Only enable with test/dummy credentials
+                                        # Only enable with test/dummy credentials
 
 # Command Safety
 export PATCHPAL_ALLOW_SUDO=true         # Allow sudo/privilege escalation (default: false - blocked)
@@ -1191,7 +1208,7 @@ PatchPal includes comprehensive security protections enabled by default:
 **Critical Security:**
 - **Permission prompts**: Agent asks for permission before executing commands or modifying files (like Claude Code)
 - **Sensitive file protection**: Blocks access to `.env`, credentials, API keys
-- **File size limits**: Prevents OOM with configurable size limits (10MB default)
+- **File size limits**: Prevents OOM and context explosions with configurable size limits (500KB default)
 - **Binary file detection**: Blocks reading non-text files
 - **Critical file warnings**: Warns when modifying infrastructure files (package.json, Dockerfile, etc.)
 - **Read-only mode**: Optional mode that prevents all modifications
@@ -1453,7 +1470,7 @@ When using cloud LLM providers (Anthropic, OpenAI, etc.), token usage directly i
 - Use less expensive models for routine tasks:
   ```bash
   patchpal --model anthropic/claude-3-7-sonnet-latest  # Cheaper than claude-sonnet-4-5
-  patchpal --model openai/gpt-4o-mini                  # Cheaper than gpt-4o
+  patchpal --model openai/gpt-5-mini                   # Cheaper than gpt-5.2
   ```
 - Reserve premium models for complex reasoning tasks
 
patchpal-0.7.1.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+patchpal/__init__.py,sha256=4rzkvbbI76tGtYc-FRxKp_3NVJ9XF1eClqPickmGQDE,606
+patchpal/agent.py,sha256=oNw7QzIBmFE4XAN_3n1vcsx-wyHZBKcnwX_D9K62KHQ,66789
+patchpal/cli.py,sha256=wzZtAHuTDXFsGWrUuvDh0vf7IKOmAk2nP_drUb-3vQE,27056
+patchpal/context.py,sha256=hdTUvyAXXUP47JY1Q3YJDU7noGAcHuBGlNuU272Fjp4,14831
+patchpal/permissions.py,sha256=pVlzit2KFmCpfcbHrHhjPA0LPka04wOtaQdZCf3CCa0,10781
+patchpal/skills.py,sha256=ESLPHkDI8DH4mnAbN8mIcbZ6Bis4vCcqS_NjlYPNCOs,3926
+patchpal/system_prompt.md,sha256=LQzcILr41s65hk7JjaX_WzjUHBHCazVSrx_F_ErqTmA,10850
+patchpal/tool_schema.py,sha256=dGEGYV160G9c7EnSMtnbQ_mYuoR1n6PHHE8T20BriYE,8357
+patchpal/tools.py,sha256=eZ5eh8DKYyqO95Vdu-tn1_6-W6OsBbY4JL5APGyp-tc,94018
+patchpal-0.7.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+patchpal-0.7.1.dist-info/METADATA,sha256=LUA1x8Rp2HQ_YZkOa64UD4LjvjPvV3ne52m7wigzxCE,58247
+patchpal-0.7.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+patchpal-0.7.1.dist-info/entry_points.txt,sha256=XcuQikKu5i8Sd8AfHLuKxSE2RWByInTcQgWpP61sr48,47
+patchpal-0.7.1.dist-info/top_level.txt,sha256=YWgv2F-_PIHCu-sF3AF8N1ut5_FbOT-VV6HB70pGSQ8,9
+patchpal-0.7.1.dist-info/RECORD,,
patchpal-0.6.0.dist-info/RECORD DELETED
@@ -1,15 +0,0 @@
-patchpal/__init__.py,sha256=S3dYO3L8dSQG2Eaosbu4Pbdq5eTxXLmmvxSzh-TIPiI,606
-patchpal/agent.py,sha256=ayMkZUoohUsf5Tz4esBjOPZUvBT5n-ijOzoOp3c9LAA,59719
-patchpal/cli.py,sha256=6Imrd4hGupIrTi9jnnfwvraNZ_Pq0VJxfo6aSjLRoCY,24131
-patchpal/context.py,sha256=hdTUvyAXXUP47JY1Q3YJDU7noGAcHuBGlNuU272Fjp4,14831
-patchpal/permissions.py,sha256=pVlzit2KFmCpfcbHrHhjPA0LPka04wOtaQdZCf3CCa0,10781
-patchpal/skills.py,sha256=ESLPHkDI8DH4mnAbN8mIcbZ6Bis4vCcqS_NjlYPNCOs,3926
-patchpal/system_prompt.md,sha256=LQzcILr41s65hk7JjaX_WzjUHBHCazVSrx_F_ErqTmA,10850
-patchpal/tool_schema.py,sha256=dGEGYV160G9c7EnSMtnbQ_mYuoR1n6PHHE8T20BriYE,8357
-patchpal/tools.py,sha256=YAUX2-8BBqjZEadIWlUdO-KV2-WHGazgKdMHkYRAExI,93819
-patchpal-0.6.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-patchpal-0.6.0.dist-info/METADATA,sha256=hjleiaXTNaavuW0OygY1XPdbuflYxMQb0hAWw9pGWPw,57384
-patchpal-0.6.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-patchpal-0.6.0.dist-info/entry_points.txt,sha256=XcuQikKu5i8Sd8AfHLuKxSE2RWByInTcQgWpP61sr48,47
-patchpal-0.6.0.dist-info/top_level.txt,sha256=YWgv2F-_PIHCu-sF3AF8N1ut5_FbOT-VV6HB70pGSQ8,9
-patchpal-0.6.0.dist-info/RECORD,,