kopipasta: 0.26.0-py3-none-any.whl → 0.28.0-py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of kopipasta might be problematic.
- kopipasta/import_parser.py +0 -2
- kopipasta/main.py +226 -427
- kopipasta-0.28.0.dist-info/METADATA +109 -0
- kopipasta-0.28.0.dist-info/RECORD +9 -0
- kopipasta-0.26.0.dist-info/METADATA +0 -171
- kopipasta-0.26.0.dist-info/RECORD +0 -9
- {kopipasta-0.26.0.dist-info → kopipasta-0.28.0.dist-info}/LICENSE +0 -0
- {kopipasta-0.26.0.dist-info → kopipasta-0.28.0.dist-info}/WHEEL +0 -0
- {kopipasta-0.26.0.dist-info → kopipasta-0.28.0.dist-info}/entry_points.txt +0 -0
- {kopipasta-0.26.0.dist-info → kopipasta-0.28.0.dist-info}/top_level.txt +0 -0
kopipasta/import_parser.py
CHANGED
kopipasta/main.py
CHANGED
@@ -1,10 +1,8 @@
 #!/usr/bin/env python3
 import csv
 import io
-import json
 import os
 import argparse
-import sys
 import re
 import subprocess
 import tempfile
@@ -18,102 +16,112 @@ import pygments.util
 
 import requests
 
-
-import traceback
-from google import genai
-from google.genai.types import GenerateContentConfig
-from prompt_toolkit import prompt # Added for multiline input
+import kopipasta.import_parser as import_parser
 
 FileTuple = Tuple[str, bool, Optional[List[str]], str]
 
-
-
-
-
-
-
-
-class SimplePatchArgs(BaseModel):
-    """A list of proposed code changes."""
-    patches: List[SimplePatchItem] = Field(..., description="A list of patches to apply.")
-
-def apply_simple_patch(patch_item: SimplePatchItem) -> bool:
+def _propose_and_add_dependencies(
+    file_just_added: str,
+    project_root_abs: str,
+    files_to_include: List[FileTuple],
+    current_char_count: int
+) -> Tuple[List[FileTuple], int]:
     """
-
-
-    Validates that the file exists and the original_text is unique.
+    Analyzes a file for local dependencies and interactively asks the user to add them.
     """
-
-
-
-
-
-    original_text = patch_item.original_text
-    new_text = patch_item.new_text
-
-    # --- Validation ---
-    if not os.path.exists(file_path):
-        print(f"❌ Error: File not found: {file_path}")
-        print("-" * 20)
-        return False
+    language = get_language_for_file(file_just_added)
+    if language not in ['python', 'typescript', 'javascript', 'tsx', 'jsx']:
+        return [], 0 # Only analyze languages we can parse
+
+    print(f"Analyzing {get_relative_path(file_just_added)} for local dependencies...")
 
     try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        file_content = read_file_contents(file_just_added)
+        if not file_content:
+            return [], 0
+
+        resolved_deps_abs: Set[str] = set()
+        if language == 'python':
+            resolved_deps_abs = import_parser.parse_python_imports(file_content, file_just_added, project_root_abs)
+        elif language in ['typescript', 'javascript', 'tsx', 'jsx']:
+            resolved_deps_abs = import_parser.parse_typescript_imports(file_content, file_just_added, project_root_abs)
+
+        # Filter out dependencies that are already in the context
+        included_paths = {os.path.abspath(f[0]) for f in files_to_include}
+        suggested_deps = sorted([
+            dep for dep in resolved_deps_abs
+            if os.path.abspath(dep) not in included_paths and os.path.abspath(dep) != os.path.abspath(file_just_added)
+        ])
+
+        if not suggested_deps:
+            print("No new local dependencies found.")
+            return [], 0
+
+        print(f"\nFound {len(suggested_deps)} new local {'dependency' if len(suggested_deps) == 1 else 'dependencies'}:")
+        for i, dep_path in enumerate(suggested_deps):
+            print(f" ({i+1}) {get_relative_path(dep_path)}")
+
+        while True:
+            choice = input("\nAdd dependencies? (a)ll, (n)one, or enter numbers (e.g. 1, 3-4): ").lower()
+
+            deps_to_add_paths = None
+            if choice == 'a':
+                deps_to_add_paths = suggested_deps
+                break
+            if choice == 'n':
+                deps_to_add_paths = []
+                print(f"Skipped {len(suggested_deps)} dependencies.")
+                break
+
+            # Try to parse the input as numbers directly.
+            try:
+                selected_indices = set()
+                parts = choice.replace(' ', '').split(',')
+                if all(p.strip() for p in parts): # Ensure no empty parts like in "1,"
+                    for part in parts:
+                        if '-' in part:
+                            start_str, end_str = part.split('-', 1)
+                            start = int(start_str)
+                            end = int(end_str)
+                            if start > end:
+                                start, end = end, start
+                            selected_indices.update(range(start - 1, end))
+                        else:
+                            selected_indices.add(int(part) - 1)
+
+                    # Validate that all selected numbers are within the valid range
+                    if all(0 <= i < len(suggested_deps) for i in selected_indices):
+                        deps_to_add_paths = [
+                            suggested_deps[i] for i in sorted(list(selected_indices))
+                        ]
+                        break # Success! Exit the loop.
+                    else:
+                        print(f"Error: Invalid number selection. Please choose numbers between 1 and {len(suggested_deps)}.")
+                else:
+                    raise ValueError("Empty part detected in input.")
+
+
+            except ValueError:
+                # This will catch any input that isn't 'a', 'n', or a valid number/range.
+                print("Invalid choice. Please enter 'a', 'n', or a list/range of numbers (e.g., '1,3' or '2-4').")
+
+        if not deps_to_add_paths:
+            return [], 0 # No dependencies were selected
+
+        newly_added_files: List[FileTuple] = []
+        char_count_delta = 0
+        for dep_path in deps_to_add_paths:
+            # Assume non-large for now for simplicity, can be enhanced later
+            file_size = os.path.getsize(dep_path)
+            newly_added_files.append((dep_path, False, None, get_language_for_file(dep_path)))
+            char_count_delta += file_size
+            print(f"Added dependency: {get_relative_path(dep_path)} ({get_human_readable_size(file_size)})")
+
+        return newly_added_files, char_count_delta
 
-        # --- Application ---
-        # Replace the single unique occurrence
-        new_content = content.replace(original_text, new_text, 1)
-
-        # Heuristic to check if a newline might be needed at the end
-        original_ends_with_newline = content.endswith(('\n', '\r'))
-        new_ends_with_newline = new_content.endswith(('\n', '\r'))
-
-        if original_ends_with_newline and not new_ends_with_newline and new_content:
-            # Try to determine the original newline type
-            if content.endswith('\r\n'):
-                new_content += '\r\n'
-            else: # Assume '\n' otherwise
-                new_content += '\n'
-        elif not original_ends_with_newline and new_ends_with_newline:
-            # If original didn't end with newline, remove the one added by replacement
-            # This is less common but possible if new_text ends with \n and original_text didn't
-            new_content = new_content.rstrip('\r\n')
-
-
-        # Write the modified content back
-        with open(file_path, 'w', encoding='utf-8', newline='') as f:
-            f.write(new_content)
-
-        print(f"✅ Patch applied successfully to {file_path}.")
-        print("-" * 20)
-        return True
-
-    except IOError as e:
-        print(f"❌ Error reading or writing file {file_path}: {e}")
-        print("-" * 20)
-        return False
     except Exception as e:
-        print(f"
-
-        print("-" * 20)
-        return False
+        print(f"Warning: Could not analyze dependencies for {get_relative_path(file_just_added)}: {e}")
+        return [], 0
 
 def get_colored_code(file_path, code):
     try:
@@ -540,7 +548,7 @@ def print_char_count(count):
     token_estimate = count // 4
     print(f"\rCurrent prompt size: {count} characters (~ {token_estimate} tokens)", flush=True)
 
-def select_files_in_directory(directory: str, ignore_patterns: List[str], current_char_count: int = 0) -> Tuple[List[FileTuple], int]:
+def select_files_in_directory(directory: str, ignore_patterns: List[str], project_root_abs: str, current_char_count: int = 0) -> Tuple[List[FileTuple], int]:
     files = [f for f in os.listdir(directory)
              if os.path.isfile(os.path.join(directory, f)) and not is_ignored(os.path.join(directory, f), ignore_patterns) and not is_binary(os.path.join(directory, f))]
 
@@ -561,7 +569,9 @@ def select_files_in_directory(directory: str, ignore_patterns: List[str], curren
        print_char_count(current_char_count)
        choice = input("(y)es add all / (n)o ignore all / (s)elect individually / (q)uit? ").lower()
        selected_files: List[FileTuple] = []
+       char_count_delta = 0
        if choice == 'y':
+           files_to_add_after_loop = []
            for file in files:
                file_path = os.path.join(directory, file)
                if is_large_file(file_path):
@@ -571,14 +581,23 @@ def select_files_in_directory(directory: str, ignore_patterns: List[str], curren
                            break
                        print("Invalid choice. Please enter 'f' or 's'.")
                    if snippet_choice == 's':
-                       selected_files.append((
-
+                       selected_files.append((file_path, True, None, get_language_for_file(file_path)))
+                       char_count_delta += len(get_file_snippet(file_path))
                    else:
-                       selected_files.append((
-
+                       selected_files.append((file_path, False, None, get_language_for_file(file_path)))
+                       char_count_delta += os.path.getsize(file_path)
                else:
-                   selected_files.append((
-
+                   selected_files.append((file_path, False, None, get_language_for_file(file_path)))
+                   char_count_delta += os.path.getsize(file_path)
+                   files_to_add_after_loop.append(file_path)
+
+           # Analyze dependencies after the loop
+           current_char_count += char_count_delta
+           for file_path in files_to_add_after_loop:
+               new_deps, deps_char_count = _propose_and_add_dependencies(file_path, project_root_abs, selected_files, current_char_count)
+               selected_files.extend(new_deps)
+               current_char_count += deps_char_count
+
            print(f"Added all files from {directory}")
            return selected_files, current_char_count
        elif choice == 'n':
@@ -596,6 +615,7 @@ def select_files_in_directory(directory: str, ignore_patterns: List[str], curren
                print_char_count(current_char_count)
                file_choice = input(f"{file} ({file_size_readable}, ~{file_char_estimate} chars, ~{file_token_estimate} tokens) (y/n/p/q)? ").lower()
                if file_choice == 'y':
+                   file_to_add = None
                    if is_large_file(file_path):
                        while True:
                            snippet_choice = input(f"{file} is large. Use (f)ull content or (s)nippet? ").lower()
@@ -603,14 +623,21 @@ def select_files_in_directory(directory: str, ignore_patterns: List[str], curren
                                break
                            print("Invalid choice. Please enter 'f' or 's'.")
                        if snippet_choice == 's':
-
+                           file_to_add = (file_path, True, None, get_language_for_file(file_path))
                            current_char_count += len(get_file_snippet(file_path))
                        else:
-
+                           file_to_add = (file_path, False, None, get_language_for_file(file_path))
                            current_char_count += file_char_estimate
                    else:
-
+                       file_to_add = (file_path, False, None, get_language_for_file(file_path))
                        current_char_count += file_char_estimate
+
+                   if file_to_add:
+                       selected_files.append(file_to_add)
+                       # Analyze dependencies immediately after adding
+                       new_deps, deps_char_count = _propose_and_add_dependencies(file_path, project_root_abs, selected_files, current_char_count)
+                       selected_files.extend(new_deps)
+                       current_char_count += deps_char_count
                    break
                elif file_choice == 'n':
                    break
@@ -633,7 +660,7 @@ def select_files_in_directory(directory: str, ignore_patterns: List[str], curren
            else:
                print("Invalid choice. Please try again.")
 
-def process_directory(directory: str, ignore_patterns: List[str], current_char_count: int = 0) -> Tuple[List[FileTuple], Set[str], int]:
+def process_directory(directory: str, ignore_patterns: List[str], project_root_abs: str, current_char_count: int = 0) -> Tuple[List[FileTuple], Set[str], int]:
    files_to_include: List[FileTuple] = []
    processed_dirs: Set[str] = set()
 
@@ -647,10 +674,10 @@ def process_directory(directory: str, ignore_patterns: List[str], current_char_c
        print(f"\nExploring directory: {root}")
        choice = input("(y)es explore / (n)o skip / (q)uit? ").lower()
        if choice == 'y':
-
-
-
-
+           # Pass project_root_abs down
+           selected_files, current_char_count = select_files_in_directory(root, ignore_patterns, project_root_abs, current_char_count)
+           # The paths in selected_files are already absolute now
+           files_to_include.extend(selected_files)
            processed_dirs.add(root)
        elif choice == 'n':
            dirs[:] = [] # Skip all subdirectories
@@ -763,47 +790,40 @@ def generate_prompt_template(files_to_include: List[FileTuple], ignore_patterns:
    prompt += "\n\n"
    prompt += "## Instructions for Achieving the Task\n\n"
    analysis_text = (
-       "###
-       "
-       "
-       "
-       "
-       "
-       "**
-       "
-       "
-       "**
-       "-
-       "
-       "-
-       "
-       "
-       "2. **Plan
-       "
-       "
-       "
-       "
-       "
-       "
-       "
-       "
-       "
-       "
-       "
-       "
-       " -
-       "
-       "
-       "
-       "
-       "
-       "### You Have Permission To\n"
-       "- Request any file shown in tree but not provided\n"
-       "- Ask me to run code and share outputs\n"
-       "- Test external dependencies: APIs, databases, services, integration points\n"
-       "- Request specific diagnostic information\n"
-       "- Suggest pausing when blocked\n"
-       "- Ask me to verify assumptions about external systems\n"
+       "### Partnership Principles\n\n"
+       "We work as collaborative partners. You provide technical expertise and critical thinking. "
+       "I have exclusive access to my codebase, real environment, external services, and actual users. "
+       "Never assume project file contents - always ask to see them.\n\n"
+       "**Critical Thinking**: Challenge poor approaches, identify risks, suggest better alternatives. Don't be a yes-man.\n\n"
+       "**Anti-Hallucination**: Never write placeholder code for files in ## Project Structure. Use [STOP - NEED FILE: filename] and wait.\n\n"
+       "**Hard Stops**: End with [AWAITING USER RESPONSE] when you need input. Don't continue with assumptions.\n\n"
+       "### Development Workflow\n\n"
+       "We work in two modes:\n"
+       "- **Iterative Mode**: Build incrementally, show only changes\n"
+       "- **Consolidation Mode**: When I request, provide clean final version\n\n"
+       "1. **Understand & Analyze**:\n"
+       "   - Rephrase task, identify issues, list needed files\n"
+       "   - Challenge problematic aspects\n"
+       "   - End: 'I need: [files]. Is this correct?' [AWAITING USER RESPONSE]\n\n"
+       "2. **Plan**:\n"
+       "   - Present 2-3 approaches with pros/cons\n"
+       "   - Recommend best approach\n"
+       "   - End: 'Which approach?' [AWAITING USER RESPONSE]\n\n"
+       "3. **Implement Iteratively**:\n"
+       "   - Small, testable increments\n"
+       "   - Track failed attempts: `Attempt 1: [FAILED] X→Y (learned: Z)`\n"
+       "   - After 3 failures, request diagnostics\n\n"
+       "4. **Code Presentation**:\n"
+       "   - Always: `// FILE: path/to/file.ext`\n"
+       "   - Iterative: Show only changes with context\n"
+       "   - Consolidation: Smart choice - minimal changes = show patches, extensive = full file\n\n"
+       "5. **Test & Validate**:\n"
+       "   - 'Test with: [command]. Share any errors.' [AWAITING USER RESPONSE]\n"
+       "   - Include debug outputs\n"
+       "   - May return to implementation based on results\n\n"
+       "### Permissions & Restrictions\n\n"
+       "**You MAY**: Request project files, ask me to test code/services, challenge my approach, refuse without info\n\n"
+       "**You MUST NOT**: Assume project file contents, continue past [AWAITING USER RESPONSE], be agreeable when you see problems\n"
    )
    prompt += analysis_text
    return prompt, cursor_position
@@ -834,222 +854,16 @@ def open_editor_for_input(template: str, cursor_position: int) -> str:
    finally:
        os.unlink(temp_file_path)
 
-def start_chat_session(initial_prompt: str):
-   """Starts an interactive chat session with the Gemini API using google-genai."""
-   if not genai:
-       # Error message already printed during import if it failed
-       sys.exit(1)
-
-   # The google-genai library automatically uses GOOGLE_API_KEY env var if set
-   # We still check if it's set to provide a clearer error message upfront
-   if not os.environ.get('GOOGLE_API_KEY'):
-       print("Error: GOOGLE_API_KEY environment variable not set.")
-       print("Please set the GOOGLE_API_KEY environment variable with your API key.")
-       sys.exit(1)
-
-   try:
-       # Create the client - it will use the env var automatically
-       client = genai.Client()
-       print("Google GenAI Client created (using GOOGLE_API_KEY).")
-       # You could add a check here like listing models to verify the key early
-       # print("Available models:", [m.name for m in client.models.list()])
-   except Exception as e:
-       print(f"Error creating Google GenAI client: {e}")
-       print("Please ensure your GOOGLE_API_KEY is valid and has permissions.")
-       sys.exit(1)
-
-   model_name = 'gemini-2.5-pro-exp-03-25'
-   config = GenerateContentConfig(temperature=0.0)
-   print(f"Using model: {model_name}")
-
-   try:
-       # Create a chat session using the client
-       chat = client.chats.create(model=model_name, config=config)
-       # Note: History is managed by the chat object itself
-
-       print("\n--- Starting Interactive Chat with Gemini ---")
-       print("Type /q to quit, /help or /? for help, /review to make clear summary, /patch to request a diff patch.")
-
-       # Send the initial prompt using send_message_stream
-       print("\n🤖 Gemini:")
-       full_response_text = ""
-       # Use send_message_stream for streaming responses
-       response_stream = chat.send_message_stream(initial_prompt, config=config)
-       for chunk in response_stream:
-           print(chunk.text, end="", flush=True)
-           full_response_text += chunk.text
-       print("\n" + "-"*20)
-
-       while True:
-           is_patch_request = False
-           try:
-               # Print the header on a separate line
-               print("👤 You (Submit with Esc+Enter):")
-               # Get input using prompt_toolkit with a minimal indicator
-               user_input = prompt(">> ", multiline=True)
-           # prompt_toolkit raises EOFError on Ctrl+D, so this handler remains correct.
-           except EOFError:
-               print("\nExiting...")
-               break
-           except KeyboardInterrupt: # Handle Ctrl+C
-               print("\nExiting...")
-               break
-
-           if user_input.lower() == '/q':
-               break
-           elif user_input.endswith('/patch'):
-               is_patch_request = True
-               # Extract message before /patch
-               user_message = user_input[:-len('/patch')].strip()
-               print(f"\n🛠️ Requesting patches... (Context: '{user_message}' if provided)")
-           elif user_input.lower() == '/review':
-               user_message = user_input = "Review and reflect on the solution. Summarize and write a minimal, complete set of changes needed for the solution. Do not use + and - style diff. Instead use comments to point where to place the code. Make it easy to copy and paste the solution."
-           elif not user_input:
-               continue # Ignore empty input
-           else:
-               user_message = user_input # Regular message
-
-
-           # --- Handle Patch Request ---
-           if is_patch_request:
-               print("🤖 Gemini: Thinking... (generating code changes)")
-               # Include user message part if it exists
-               patch_context = f"Based on our conversation and specifically: \"{user_message}\"\n\n" if user_message else "Based on our conversation,\n\n"
-
-               patch_request_prompt = (
-                   patch_context +
-                   "Generate the necessary code changes to fulfill the request. Provide the changes as a JSON list, where each item "
-                   "is an object with the following keys:\n"
-                   "- 'reasoning': Explain why this specific change is needed.\n"
-                   "- 'file_path': The relative path to the file to modify.\n"
-                   "- 'original_text': The exact, unique block of text to replace.\n"
-                   "- 'new_text': The text to replace original_text with. Do not include any temporary comments like '// CHANGE BEGINS' or '/* PATCH START */'.\n"
-                   "Ensure 'original_text' is unique within the specified 'file_path'. "
-                   "Respond ONLY with the JSON object conforming to this structure: { \"patches\": [ { patch_item_1 }, { patch_item_2 }, ... ] }"
-               )
-
-               try:
-                   # Request the response using the new schema
-                   response = chat.send_message(
-                       patch_request_prompt,
-                       config=GenerateContentConfig(
-                           response_schema=SimplePatchArgs.model_json_schema(),
-                           response_mime_type='application/json',
-                           temperature=0.0
-                       )
-                   )
-
-                   print("🤖 Gemini: Received potential patches.")
-                   try:
-                       # Validate and parse args using the Pydantic model
-                       # Explicitly validate the dictionary returned by response.parsed
-                       if isinstance(response.parsed, dict):
-                           patch_args = SimplePatchArgs.model_validate(response.parsed)
-                       else:
-                           # Handle unexpected type if response.parsed isn't a dict
-                           print(f"❌ Error: Expected a dictionary for patches, but got type {type(response.parsed)}")
-                           print(f" Content: {response.parsed}")
-                           continue # Skip further processing for this response
-
-                       if not patch_args or not patch_args.patches:
-                           print("🤖 Gemini: No patches were proposed in the response.")
-                           print("-" * 20)
-                           continue
-
-                       print("\nProposed Patches:")
-                       print("=" * 30)
-                       for i, patch_item in enumerate(patch_args.patches):
-                           print(f"Patch {i+1}/{len(patch_args.patches)}:")
-                           print(f" File: {patch_item.file_path}")
-                           print(f" Reasoning: {patch_item.reasoning}")
-                           # Optionally show snippets of original/new text for review
-                           print(f" Original (snippet): '{patch_item.original_text[:80].strip()}...'")
-                           print(f" New (snippet): '{patch_item.new_text[:80].strip()}...'")
-                           print("-" * 20)
-
-                       confirm = input(f"Apply these {len(patch_args.patches)} patches? (y/N): ").lower()
-                       if confirm == 'y':
-                           applied_count = 0
-                           failed_count = 0
-                           for patch_item in patch_args.patches:
-                               # Call the new apply function for each patch
-                               success = apply_simple_patch(patch_item)
-                               if success:
-                                   applied_count += 1
-                               else:
-                                   failed_count += 1
-
-                           print("\nPatch Application Summary:")
-                           if applied_count > 0:
-                               print(f"✅ Successfully applied {applied_count} patches.")
-                           if failed_count > 0:
-                               print(f"❌ Failed to apply {failed_count} patches.")
-                           if applied_count == 0 and failed_count == 0: # Should not happen if list wasn't empty
-                               print("⚪ No patches were applied.")
-                           print("=" * 30)
-                       else:
-                           print("🤖 Gemini: Patches not applied by user.")
-                           print("-" * 20)
-
-                   except Exception as e: # Catch Pydantic validation errors or other issues
-                       print(f"❌ Error processing patch response: {e}")
-                       # Attempt to show the raw response text if parsing failed
-                       raw_text = ""
-                       try:
-                           if response.parts:
-                               raw_text = "".join(part.text for part in response.parts if hasattr(part, 'text'))
-                           elif hasattr(response, 'text'):
-                               raw_text = response.text
-                       except Exception:
-                           pass # Ignore errors getting raw text
-                       if raw_text:
-                           print(f" Received response text:\n{raw_text}")
-                       else:
-                           print(f" Received response content: {response}") # Fallback representation
-
-               except Exception as e:
-                   print(f"\n❌ An error occurred while requesting patches from Gemini: {e}")
-                   print(" Please check your connection, API key, and model permissions/capabilities.")
-                   print("-" * 20)
-
-               continue # Go to next loop iteration after handling /patch
-           elif user_input.strip() in ['/help', '/?']:
-               print("🤖 Gemini: Available commands:")
-               print(" /q - Quit the chat session.")
-               print(" /patch - Request a diff patch (not fully implemented yet).")
-               print(" /review - Pre-fill input with a review/summary prompt template.")
-               print(" /help or /? - Show this help message.")
-               print("-" * 20)
-               continue
-           elif not user_input.strip(): # Ignore empty input
-               continue
-
-           print("\n🤖 Gemini:")
-           full_response_text = ""
-           try:
-               # Use send_message_stream for subsequent messages
-               response_stream = chat.send_message_stream(user_input, config=config)
-               for chunk in response_stream:
-                   print(chunk.text, end="", flush=True)
-                   full_response_text += chunk.text
-               print("\n" + "-"*20)
-           except Exception as e:
-               print(f"\nAn unexpected error occurred: {e}")
-               print("Try again or type 'exit'.")
-
-   except Exception as e:
-       # Catch other potential errors
-       print(f"\nAn error occurred setting up the chat session: {e}")
-
 def main():
    parser = argparse.ArgumentParser(description="Generate a prompt with project structure, file contents, and web content.")
    parser.add_argument('inputs', nargs='+', help='Files, directories, or URLs to include in the prompt')
    parser.add_argument('-t', '--task', help='Task description for the AI prompt')
-   parser.add_argument('-I', '--interactive', action='store_true', help='Start an interactive chat session after generating the prompt.')
    args = parser.parse_args()
 
    ignore_patterns = read_gitignore()
    env_vars = read_env_file()
+   project_root_abs = os.path.abspath(os.getcwd())
+
 
    files_to_include: List[FileTuple] = []
    processed_dirs = set()
@@ -1092,53 +906,59 @@ def main():
            print(f"Added {'snippet of ' if is_snippet else ''}web content from: {input_path}")
            print_char_count(current_char_count)
        elif os.path.isfile(input_path):
-
-           if not is_ignored(
-               file_size = os.path.getsize(
+           abs_input_path = os.path.abspath(input_path)
+           if not is_ignored(abs_input_path, ignore_patterns) and not is_binary(abs_input_path):
+               file_size = os.path.getsize(abs_input_path)
                file_size_readable = get_human_readable_size(file_size)
                file_char_estimate = file_size
-               language = get_language_for_file(
+               language = get_language_for_file(abs_input_path)
+               file_to_add = None
 
-               if is_large_file(
-                   print(f"\nFile {
+               if is_large_file(abs_input_path):
+                   print(f"\nFile {get_relative_path(abs_input_path)} ({file_size_readable}, ~{file_char_estimate} chars) is large.")
                    print("Preview (first ~50 lines or 4KB):")
-                   print(get_colored_file_snippet(
+                   print(get_colored_file_snippet(abs_input_path))
                    print("-" * 40)
                    while True:
                        print_char_count(current_char_count)
-                       choice = input(f"How to include large file {
+                       choice = input(f"How to include large file {get_relative_path(abs_input_path)}? (f)ull / (s)nippet / (p)atches / (n)o skip: ").lower()
                        if choice == 'f':
-
+                           file_to_add = (abs_input_path, False, None, language)
                            current_char_count += file_char_estimate
-                           print(f"Added full file: {
+                           print(f"Added full file: {get_relative_path(abs_input_path)}")
                            break
                        elif choice == 's':
-                           snippet_content = get_file_snippet(
-
+                           snippet_content = get_file_snippet(abs_input_path)
+                           file_to_add = (abs_input_path, True, None, language)
                            current_char_count += len(snippet_content)
-                           print(f"Added snippet of file: {
+                           print(f"Added snippet of file: {get_relative_path(abs_input_path)}")
                            break
                        elif choice == 'p':
-                           chunks, char_count = select_file_patches(
+                           chunks, char_count = select_file_patches(abs_input_path)
                            if chunks:
-
+                               file_to_add = (abs_input_path, False, chunks, language)
                                current_char_count += char_count
-                               print(f"Added selected patches from file: {
+                               print(f"Added selected patches from file: {get_relative_path(abs_input_path)}")
                            else:
-                               print(f"No patches selected for {
+                               print(f"No patches selected for {get_relative_path(abs_input_path)}. Skipping file.")
                            break
                        elif choice == 'n':
-                           print(f"Skipped large file: {
+                           print(f"Skipped large file: {get_relative_path(abs_input_path)}")
                            break
                        else:
                            print("Invalid choice. Please enter 'f', 's', 'p', or 'n'.")
                else:
-
-                   files_to_include.append((input_path, False, None, language))
+                   file_to_add = (abs_input_path, False, None, language)
                    current_char_count += file_char_estimate
-                   print(f"Added file: {
-
-
+                   print(f"Added file: {get_relative_path(abs_input_path)} ({file_size_readable})")
+
+               if file_to_add:
+                   files_to_include.append(file_to_add)
+                   # --- NEW: Call dependency analysis ---
+                   new_deps, deps_char_count = _propose_and_add_dependencies(abs_input_path, project_root_abs, files_to_include, current_char_count)
+                   files_to_include.extend(new_deps)
+                   current_char_count += deps_char_count
+
                print_char_count(current_char_count)
 
            else:
@@ -1147,10 +967,11 @@ def main():
            elif is_binary(input_path):
                print(f"Ignoring binary file: {input_path}")
            else:
-               print(f"Ignoring file: {input_path}")
+               print(f"Ignoring file: {input_path}")
        elif os.path.isdir(input_path):
            print(f"\nProcessing directory specified directly: {input_path}")
-
+           # Pass project_root_abs to process_directory
+           dir_files, dir_processed, current_char_count = process_directory(input_path, ignore_patterns, project_root_abs, current_char_count)
            files_to_include.extend(dir_files)
            processed_dirs.update(dir_processed)
        else:
@@ -1162,64 +983,42 @@
 
    print("\nFile and web content selection complete.")
    print_char_count(current_char_count) # Print final count before prompt generation
-   print(f"Summary: Added {len(files_to_include)} files and {len(web_contents)} web sources.")
 
    added_files_count = len(files_to_include)
-   added_dirs_count = len(processed_dirs)
+   added_dirs_count = len(processed_dirs)
    added_web_count = len(web_contents)
    print(f"Summary: Added {added_files_count} files/patches from {added_dirs_count} directories and {added_web_count} web sources.")
 
    prompt_template, cursor_position = generate_prompt_template(files_to_include, ignore_patterns, web_contents, env_vars)
 
-
-
-
-
-   if
-
-       editor_initial_content = prompt_template[:cursor_position] + args.task + prompt_template[cursor_position:]
-       print("Pre-populating editor with task provided via --task argument.")
+   if args.task:
+       task_description = args.task
+       task_marker = "## Task Instructions\n\n"
+       insertion_point = prompt_template.find(task_marker)
+       if insertion_point != -1:
+           final_prompt = prompt_template[:insertion_point + len(task_marker)] + task_description + "\n\n" + prompt_template[insertion_point + len(task_marker):]
        else:
-
-
-       print("Opening editor for you to add the task instructions.")
-
-       # Always open the editor in interactive mode
-       initial_chat_prompt = open_editor_for_input(editor_initial_content, cursor_position)
-       print("Editor closed. Starting interactive chat session...")
-       start_chat_session(initial_chat_prompt) # Start the chat with the edited prompt
+           final_prompt = prompt_template[:cursor_position] + task_description + prompt_template[cursor_position:]
+       print("\nUsing task description from -t argument.")
    else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-       print("
-       print(
-       print("-" * 80)
-
-       # Copy the prompt to clipboard
-       try:
-           pyperclip.copy(final_prompt)
-           separator = "\n" + "=" * 40 + "\n☕🍝 Kopipasta Complete! 🍝☕\n" + "=" * 40 + "\n"
-           print(separator)
-           final_char_count = len(final_prompt)
-           final_token_estimate = final_char_count // 4
-           print(f"Prompt has been copied to clipboard. Final size: {final_char_count} characters (~ {final_token_estimate} tokens)")
-       except pyperclip.PyperclipException as e:
-           print(f"\nWarning: Failed to copy to clipboard: {e}")
-           print("You can manually copy the prompt above.")
+       print("\nOpening editor for task instructions...")
+       final_prompt = open_editor_for_input(prompt_template, cursor_position)
+
+   print("\n\nGenerated prompt:")
+   print("-" * 80)
+   print(final_prompt)
+   print("-" * 80)
+
+   try:
+       pyperclip.copy(final_prompt)
+       separator = "\n" + "=" * 40 + "\n☕🍝 Kopipasta Complete! 🍝☕\n" + "=" * 40 + "\n"
+       print(separator)
+       final_char_count = len(final_prompt)
+       final_token_estimate = final_char_count // 4
+       print(f"Prompt has been copied to clipboard. Final size: {final_char_count} characters (~ {final_token_estimate} tokens)")
+   except pyperclip.PyperclipException as e:
+       print(f"\nWarning: Failed to copy to clipboard: {e}")
+       print("You can manually copy the prompt above.")
 
 if __name__ == "__main__":
    main()
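The selection prompt added above accepts `a`, `n`, or a comma/range list such as `1, 3-4`. Below is a minimal standalone sketch of that parsing rule, re-implemented from the added lines purely for illustration — the `parse_selection` name is ours, not part of the package:

```python
from typing import List

def parse_selection(choice: str, n_items: int) -> List[int]:
    """Turn input like '1, 3-4' into sorted zero-based indices.

    Illustrative re-implementation of the parsing added in 0.28.0; not kopipasta API.
    """
    parts = choice.replace(' ', '').split(',')
    if not all(parts):  # reject empty parts such as "1,"
        raise ValueError("Empty part detected in input.")
    selected = set()
    for part in parts:
        if '-' in part:
            start_str, end_str = part.split('-', 1)
            start, end = int(start_str), int(end_str)
            if start > end:  # '4-3' is treated as '3-4', as in the diff above
                start, end = end, start
            selected.update(range(start - 1, end))
        else:
            selected.add(int(part) - 1)
    if not all(0 <= i < n_items for i in selected):
        raise ValueError("Selection out of range.")
    return sorted(selected)

assert parse_selection("1, 3-4", 4) == [0, 2, 3]
```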
kopipasta-0.28.0.dist-info/METADATA
ADDED

@@ -0,0 +1,109 @@
+Metadata-Version: 2.1
+Name: kopipasta
+Version: 0.28.0
+Summary: A CLI tool to generate prompts with project structure and file contents
+Home-page: https://github.com/mkorpela/kopipasta
+Author: Mikko Korpela
+Author-email: mikko.korpela@gmail.com
+License: MIT
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: pyperclip==1.9.0
+Requires-Dist: requests==2.32.3
+Requires-Dist: Pygments==2.18.0
+
+# kopipasta
+
+[](https://pypi.python.org/pypi/kopipasta)
+[](http://pepy.tech/project/kopipasta)
+
+A CLI tool for taking **full, transparent control** of your LLM context. No black boxes.
+
+<img src="kopipasta.jpg" alt="kopipasta" width="300">
+
+- An LLM told me that "kopi" means Coffee in some languages... and a Diffusion model then made this delicious soup.
+
+## The Philosophy: You Control the Context
+
+Many AI coding assistants use Retrieval-Augmented Generation (RAG) to automatically find what *they think* is relevant context. This is a black box. When the LLM gives a bad answer, you can't debug it because you don't know what context it was actually given.
+
+**`kopipasta` is the opposite.** I built it for myself on the principle of **explicit context control**. You are in the driver's seat. You decide *exactly* what files, functions, and snippets go into the prompt. This transparency is the key to getting reliable, debuggable results from an LLM.
+
+It's a "smart copy" command for your project, not a magic wand.
+
+## How It Works
+
+The workflow is dead simple:
+
+1. **Gather:** Run `kopipasta` and point it at the files, directories, and URLs that matter for your task.
+2. **Select:** The tool interactively helps you choose what to include. For large files, you can send just a snippet or even hand-pick individual functions.
+3. **Define:** Your default editor (`$EDITOR`) opens for you to write your instructions to the LLM.
+4. **Paste:** The final, comprehensive prompt is now on your clipboard, ready to be pasted into ChatGPT, Gemini, Claude, or your LLM of choice.
+
+## Installation
+
+```bash
+# Using pipx (recommended for CLI tools)
+pipx install kopipasta
+
+# Or using standard pip
+pip install kopipasta
+```
+
+## Usage
+
+```bash
+kopipasta [options] [files_or_directories_or_urls...]
+```
+
+**Arguments:**
+
+* `[files_or_directories_or_urls...]`: One or more paths to files, directories, or web URLs to use as the starting point for your context.
+
+**Options:**
+
+* `-t TASK`, `--task TASK`: Provide the task description directly on the command line, skipping the editor.
+
+## Key Features
+
+* **Total Context Control:** Interactively select files, directories, snippets, or even individual functions. You see everything that goes into the prompt.
+* **Transparent & Explicit:** No hidden RAG. You know exactly what's in the prompt because you built it. This makes debugging LLM failures possible.
+* **Web-Aware:** Pulls in content directly from URLs—perfect for API documentation.
+* **Safety First:**
+    * Automatically respects your `.gitignore` rules.
+    * Detects if you're about to include secrets from a `.env` file and asks what to do.
+* **Context-Aware:** Keeps a running total of the prompt size (in characters and estimated tokens) so you don't overload the LLM's context window.
+* **Developer-Friendly:**
+    * Uses your familiar `$EDITOR` for writing task descriptions.
+    * Copies the final prompt directly to your clipboard.
+    * Provides syntax highlighting during chunk selection.
+
+## A Real-World Example
+
+I had a bug where my `setup.py` didn't include all the dependencies from `requirements.txt`.
+
+1. I ran `kopipasta -t "Update setup.py to read dependencies dynamically from requirements.txt" setup.py requirements.txt`.
+2. The tool confirmed the inclusion of both files and copied the complete prompt to my clipboard.
+3. I pasted the prompt into my LLM chat window.
+4. I copied the LLM's suggested code back into my local `setup.py`.
+5. I tested the changes and committed.
+
+No manual file reading, no clumsy copy-pasting, just a clean, context-rich prompt that I had full control over.
+
+## Configuration
+
+Set your preferred command-line editor via the `EDITOR` environment variable.
+```bash
+export EDITOR=nvim # or vim, nano, code --wait, etc.
+```
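The `Requires-Dist` lines above are what installers resolve; note that `google-genai` and `prompt-toolkit` are gone, matching the removal of the interactive chat mode from `main.py`. A quick way to confirm the declared dependencies of an installed copy (a sketch, assuming the 0.28.0 wheel is what's installed):

```python
from importlib.metadata import requires, version

# Reads the installed dist-info METADATA shown above.
print(version('kopipasta'))   # expected: '0.28.0'
print(requires('kopipasta'))  # expected: ['pyperclip==1.9.0', 'requests==2.32.3', 'Pygments==2.18.0']
```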
kopipasta-0.28.0.dist-info/RECORD
ADDED

@@ -0,0 +1,9 @@
+kopipasta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+kopipasta/import_parser.py,sha256=yLzkMlQm2avKjfqcpMY0PxbA_2ihV9gSYJplreWIPEQ,12424
+kopipasta/main.py,sha256=MUTi4vj_OWTwb2Y0PqQvm--oaX3FKHSrqAUAIDvcPwU,43910
+kopipasta-0.28.0.dist-info/LICENSE,sha256=xw4C9TAU7LFu4r_MwSbky90uzkzNtRwAo3c51IWR8lk,1091
+kopipasta-0.28.0.dist-info/METADATA,sha256=XxmONaSfjOxhNSh4X31mdahvKxKwfwtQz0IxIA1lpFc,4838
+kopipasta-0.28.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+kopipasta-0.28.0.dist-info/entry_points.txt,sha256=but54qDNz1-F8fVvGstq_QID5tHjczP7bO7rWLFkc6Y,50
+kopipasta-0.28.0.dist-info/top_level.txt,sha256=iXohixMuCdw8UjGDUp0ouICLYBDrx207sgZIJ9lxn0o,10
+kopipasta-0.28.0.dist-info/RECORD,,
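Each RECORD row follows the wheel format `path,sha256=<digest>,<size>`, where the digest is the unpadded urlsafe-base64 SHA-256 of the file. A small sketch for verifying an entry — note that the `kopipasta/__init__.py` row above is the well-known digest of an empty file, matching its recorded size of 0:

```python
import base64
import hashlib

def record_digest(path: str) -> str:
    """Compute the 'sha256=...' value used in wheel RECORD files (unpadded urlsafe base64)."""
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).digest()
    return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

# For an empty file this yields 'sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU',
# which matches the kopipasta/__init__.py entry above.
```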
kopipasta-0.26.0.dist-info/METADATA
DELETED

@@ -1,171 +0,0 @@
-Metadata-Version: 2.1
-Name: kopipasta
-Version: 0.26.0
-Summary: A CLI tool to generate prompts with project structure and file contents
-Home-page: https://github.com/mkorpela/kopipasta
-Author: Mikko Korpela
-Author-email: mikko.korpela@gmail.com
-License: MIT
-Classifier: Development Status :: 3 - Alpha
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Requires-Python: >=3.8
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: pyperclip==1.9.0
-Requires-Dist: requests==2.32.3
-Requires-Dist: Pygments==2.18.0
-Requires-Dist: google-genai==1.8.0
-Requires-Dist: prompt-toolkit==3.0.50
-
-# kopipasta
-
-[](https://pypi.python.org/pypi/kopipasta)
-[](http://pepy.tech/project/kopipasta)
-
-Streamline your interaction with LLMs for coding tasks. `kopipasta` helps you provide comprehensive context (project structure, file contents, web content) and facilitates an interactive, patch-based workflow. Go beyond TAB TAB TAB and take control of your LLM context.
-
-<img src="kopipasta.jpg" alt="kopipasta" width="300">
-
-- An LLM told me that kopi means Coffee in some languages.. and a Diffusion model then made this delicious soup.
-
-## Installation
-
-You can install kopipasta using pipx (recommended) or pip:
-
-```bash
-# Using pipx (recommended)
-pipx install kopipasta
-
-# Or using pip
-pip install kopipasta
-```
-
-## Usage
-
-```bash
-kopipasta [options] [files_or_directories_or_urls...]
-```
-
-**Arguments:**
-
-* `[files_or_directories_or_urls...]`: Paths to files, directories, or web URLs to include as context.
-
-**Options:**
-
-* `-t TASK`, `--task TASK`: Provide the task description directly via the command line. If omitted (and not using `-I`), an editor will open for you to write the task.
-* `-I`, `--interactive`: Start an interactive chat session with Google's Gemini model after preparing the context. Requires `GOOGLE_API_KEY` environment variable.
-
-**Examples:**
-
-1. **Generate prompt and copy to clipboard (classic mode):**
-```bash
-# Interactively select files from src/, include config.json, fetch web content,
-# then open editor for task input. Copy final prompt to clipboard.
-kopipasta src/ config.json https://example.com/api-docs
-
-# Provide task directly, include specific files, copy final prompt.
-kopipasta -t "Refactor setup.py to read deps from requirements.txt" setup.py requirements.txt
-```
-
-2. **Start an interactive chat session:**
-```bash
-# Interactively select files, provide task directly, then start chat.
-kopipasta -I -t "Implement the apply_simple_patch function" kopipasta/main.py
-
-# Interactively select files, open editor for initial task, then start chat.
-kopipasta -I kopipasta/ tests/
-```
-
-## Workflow
-
-`kopipasta` is designed to support the following workflow when working with LLMs (like Gemini, ChatGPT, Claude, etc.) for coding tasks:
-
-1. **Gather Context:** Run `kopipasta` with the relevant files, directories, and URLs. Interactively select exactly what content (full files, snippets, or specific code chunks/patches) should be included.
-2. **Define Task:** Provide your coding task instructions, either via the `-t` flag or through your default editor.
-3. **Interact (if using `-I`):**
-    * `kopipasta` prepares the context and your task as an initial prompt.
-    * An interactive chat session starts (currently using Google Gemini via `google-genai`).
-    * Discuss the task, clarify requirements, and ask the LLM to generate code.
-    * The initial prompt includes instructions guiding the LLM to provide incremental changes and clear explanations.
-4. **Request Patches (`-I` mode):**
-    * During the chat, use the `/patch` command to ask the LLM to provide the proposed changes in a structured format.
-    * `kopipasta` will prompt you to review the proposed patches (file, reasoning, code change).
-5. **Apply Patches (`-I` mode):**
-    * If you approve, `kopipasta` will attempt to automatically apply the patches to your local files. It validates that the original code exists and is unique before applying.
-6. **Test & Iterate:** Test the changes locally. If further changes are needed, continue the chat, request new patches, or make manual edits.
-7. **Commit:** Once satisfied, commit the changes.
-
-For non-interactive mode, `kopipasta` generates the complete prompt (context + task) and copies it to your clipboard (Step 1 & 2). You can then paste this into your preferred LLM interface and proceed manually from Step 3 onwards.
-
-## Features
-
-* **Comprehensive Context Generation:** Creates structured prompts including:
-    * Project directory tree overview.
-    * Selected file contents.
-    * Content fetched from web URLs.
-    * Your specific task instructions.
-* **Interactive File Selection:**
-    * Guides you through selecting files and directories.
-    * Option to include full file content, a snippet (first lines/bytes), or **select specific code chunks/patches** for large or complex files.
-    * Syntax highlighting during chunk selection for supported languages.
-    * Ignores files based on common `.gitignore` patterns and detects binary files.
-    * Displays estimated character/token counts during selection.
-* **Web Content Fetching:** Includes content directly from URLs. Handles JSON/CSV content types.
-* **Editor Integration:** Opens your preferred editor (`$EDITOR`) to input task instructions (if not using `-t`).
-* **Environment Variable Handling:** Detects potential secrets from a `.env` file in included content and prompts you to mask, skip, or keep them.
-* **Clipboard Integration:** Automatically copies the generated prompt to the clipboard (non-interactive mode).
-* **Interactive Chat Mode (`-I`, `--interactive`):**
-    * Starts a chat session directly after context generation.
-    * Uses the `google-genai` library to interact with Google's Gemini models.
-    * Requires the `GOOGLE_API_KEY` environment variable to be set.
-    * Includes built-in instructions for the LLM to encourage clear, iterative responses.
-* **Patch Management (`-I` mode):**
-    * `/patch` command to request structured code changes from the LLM.
-    * Prompts user to review proposed patches (reasoning, file, original/new code snippets).
-    * **Automatic patch application** to local files upon confirmation.
-
-## Configuration
-
-* **Editor:** Set the `EDITOR` environment variable to your preferred command-line editor (e.g., `vim`, `nvim`, `nano`, `emacs`, `code --wait`).
-* **API Key (for `-I` mode):** Set the `GOOGLE_API_KEY` environment variable with your Google AI Studio API key to use the interactive chat feature.
-
-## Real life example (Non-Interactive)
-
-Context: I had a bug where `setup.py` didn't include all dependencies listed in `requirements.txt`.
-
-1. `kopipasta -t "Update setup.py to read dependencies dynamically from requirements.txt" setup.py requirements.txt`
-2. Paste the generated prompt (copied to clipboard) into my preferred LLM chat interface.
-3. Review the LLM's proposed code.
-4. Copy the code and update `setup.py` manually.
-5. Test the changes.
-
-## Real life example (Interactive)
-
-Context: I want to refactor a function in `main.py`.
-
-1. `export GOOGLE_API_KEY="YOUR_API_KEY_HERE"` (ensure key is set)
-2. `kopipasta -I -t "Refactor the handle_content function in main.py to be more modular" module/main.py`
-3. The tool gathers context, shows the file size, and confirms inclusion.
-4. An interactive chat session starts with the context and task sent to Gemini.
-5. Chat with the LLM:
-    * *User:* "Proceed"
-    * *LLM:* "Okay, I understand. My plan is to..."
-    * *User:* "Looks good."
-    * *LLM:* "Here's the first part of the refactoring..." (shows code)
-6. Use the `/patch` command:
-    * *User:* `/patch`
-    * `kopipasta` asks the LLM for structured patches.
-    * `kopipasta` displays proposed patches: "Apply 1 patch to module/main.py? (y/N):"
-7. Apply the patch:
-    * *User:* `y`
-    * `kopipasta` applies the change to `module/main.py`.
-8. Test locally. If it works, commit. If not, continue chatting, request more patches, or debug.
-
kopipasta-0.26.0.dist-info/RECORD
DELETED

@@ -1,9 +0,0 @@
-kopipasta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kopipasta/import_parser.py,sha256=Y1YzoEXW34caqCcy-yUXTzw44YbY1SjLJbZubjagDSs,12454
-kopipasta/main.py,sha256=-hDEhSR1wFuAKDOypXc7OfR4fOdBWb37Ux5mIhzsZuM,54527
-kopipasta-0.26.0.dist-info/LICENSE,sha256=xw4C9TAU7LFu4r_MwSbky90uzkzNtRwAo3c51IWR8lk,1091
-kopipasta-0.26.0.dist-info/METADATA,sha256=3WGVQqrL3aeNzzlOrziYBPbu-kRWPDKza-NKVi7GTFI,8610
-kopipasta-0.26.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-kopipasta-0.26.0.dist-info/entry_points.txt,sha256=but54qDNz1-F8fVvGstq_QID5tHjczP7bO7rWLFkc6Y,50
-kopipasta-0.26.0.dist-info/top_level.txt,sha256=iXohixMuCdw8UjGDUp0ouICLYBDrx207sgZIJ9lxn0o,10
-kopipasta-0.26.0.dist-info/RECORD,,
{kopipasta-0.26.0.dist-info → kopipasta-0.28.0.dist-info}/LICENSE
File without changes
{kopipasta-0.26.0.dist-info → kopipasta-0.28.0.dist-info}/WHEEL
File without changes
{kopipasta-0.26.0.dist-info → kopipasta-0.28.0.dist-info}/entry_points.txt
File without changes
{kopipasta-0.26.0.dist-info → kopipasta-0.28.0.dist-info}/top_level.txt
File without changes